static void fcmul_add_c(float *sum, const float *t, const float *c,
                        ptrdiff_t len)
{
    int n;

    for (n = 0; n < len; n++) {
        const float cre = c[2 * n    ];
        const float cim = c[2 * n + 1];
        const float tre = t[2 * n    ];
        const float tim = t[2 * n + 1];

        sum[2 * n    ] += tre * cre - tim * cim;
        sum[2 * n + 1] += tre * cim + tim * cre;
    }

    /* one more real-only coefficient follows the len complex bins */
    sum[2 * n] += t[2 * n] * c[2 * n];
}
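/* Annotation (not part of af_afir.c): fcmul_add_c() accumulates the
 * pointwise complex product of two interleaved re/im arrays, plus one
 * trailing real-only term at index 2*len for the extra coefficient packed
 * after the complex bins. A minimal standalone sketch of the same
 * arithmetic with a tiny test vector (hypothetical harness): */
#include <stdio.h>
#include <stddef.h>

static void fcmul_add_ref(float *sum, const float *t, const float *c,
                          ptrdiff_t len)
{
    for (ptrdiff_t n = 0; n < len; n++) {
        const float tre = t[2 * n], tim = t[2 * n + 1];
        const float cre = c[2 * n], cim = c[2 * n + 1];

        /* (tre + i*tim) * (cre + i*cim), accumulated into sum */
        sum[2 * n]     += tre * cre - tim * cim;
        sum[2 * n + 1] += tre * cim + tim * cre;
    }
    sum[2 * len] += t[2 * len] * c[2 * len]; /* trailing real-only bin */
}

int main(void)
{
    float t[6]   = { 1.f, 2.f, 3.f, -1.f, 0.5f, 0.f };
    float c[6]   = { 0.f, 1.f, 2.f,  2.f, 4.0f, 0.f };
    float sum[6] = { 0 };

    fcmul_add_ref(sum, t, c, 2);
    for (int i = 0; i < 6; i++)
        printf("%g ", sum[i]);          /* prints: -2 1 8 4 2 0 */
    printf("\n");
    return 0;
}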
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
{
    /* plain time-domain convolution; the taps live in the .re part of ir */
    for (int n = 0; n < len; n++)
        for (int m = 0; m <= n; m++)
            out[n] += ir[m].re * in[n - m];
}
static void fir_fadd(AudioFIRContext *s, float *dst, const float *src, int nb_samples)
{
    if ((nb_samples & 15) == 0 && nb_samples >= 16) {
        /* length is a multiple of 16: use the float DSP fast path */
        s->fdsp->vector_fmac_scalar(dst, src, 1.f, nb_samples);
    } else {
        for (int n = 0; n < nb_samples; n++)
            dst[n] += src[n];
    }
}
/* fir_quantum(): process one quantum of up to min_part_size samples for one
 * channel; elided code is marked with ... */
    const float *in = (const float *)s->in->extended_data[ch] + offset;
    float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset;
    const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
    /* ... */
    if (s->min_part_size >= 8) {
        /* ... vectorized copy of the input into the segment buffer ... */
    } else {
        for (n = 0; n < nb_samples; n++)
            /* ... */;
    }
    /* ... */
    memmove(src, src + s->min_part_size,
            (seg->input_size - s->min_part_size) * sizeof(*src));
    /* ... */
    for (n = 0; n < nb_samples; n++) {
        /* ... */
    }
    /* ... */
    memset(sum, 0, sizeof(*sum) * seg->fft_length);
    /* ... per-partition complex multiply-accumulate into sum ... */
    memcpy(dst, buf, seg->part_size * sizeof(*dst));
    /* ... */
    memmove(src, src + s->min_part_size,
            (seg->input_size - s->min_part_size) * sizeof(*src));
    /* ... */
    if (s->min_part_size >= 8) {
        s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
        /* ... */
    } else {
        for (n = 0; n < nb_samples; n++)
            ptr[n] *= s->wet_gain;
    }
/* fir_channels(): slice-threading worker, splits the channels across jobs */
static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AVFrame *out = arg;
    const int start = (out->channels * jobnr) / nb_jobs;
    const int end = (out->channels * (jobnr+1)) / nb_jobs;

    for (int ch = start; ch < end; ch++) {
        fir_channel(ctx, out, ch);
    }

    return 0;
}
/* drawtext(): render a string with the 8x8 CGA ROM font (excerpt) */
    for (i = 0; txt[i]; i++) {
        /* ... */
        for (char_y = 0; char_y < font_height; char_y++) {
            /* ... */
            if (font[txt[i] * font_height + char_y] & mask)
                /* ... set the pixel ... */
/* draw_line(): Bresenham-style line drawing (excerpt) */
    int dx = FFABS(x1-x0);
    int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
    int err = (dx>dy ? dx : -dy) / 2, e2;
    /* ... */
        if (x0 == x1 && y0 == y1)
            /* ... */
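/* Annotation (not part of af_afir.c): the dx/dy/err setup above is the
 * classic Bresenham error-accumulation line walk. A generic, self-contained
 * sketch of the full stepping loop; plot() is a hypothetical pixel writer,
 * not an FFmpeg API: */
static void bresenham_line(int x0, int y0, int x1, int y1,
                           void (*plot)(int x, int y))
{
    int dx = x1 > x0 ? x1 - x0 : x0 - x1, sx = x0 < x1 ? 1 : -1;
    int dy = y1 > y0 ? y1 - y0 : y0 - y1, sy = y0 < y1 ? 1 : -1;
    int err = (dx > dy ? dx : -dy) / 2, e2;

    for (;;) {
        plot(x0, y0);                           /* draw the current point */
        if (x0 == x1 && y0 == y1)
            break;                              /* reached the end point  */
        e2 = err;
        if (e2 > -dx) { err -= dy; x0 += sx; }  /* step in x */
        if (e2 <  dy) { err += dx; y0 += sy; }  /* step in y */
    }
}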
/* draw_response(): plot magnitude, phase and group delay of the selected IR
 * (excerpt) */
    float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
    float min_delay = FLT_MAX, max_delay = FLT_MIN;
    int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
    /* ... */
    memset(out->data[0], 0, s->h * out->linesize[0]);
    /* ... */
    if (!mag || !phase || !delay)
        /* ... bail out ... */;
    /* ... */
    for (i = 0; i < s->w; i++) {
        const float *src = (const float *)s->ir[s->selir]->extended_data[channel];
        double w = i * M_PI / (s->w - 1);
        double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;

        for (x = 0; x < s->nb_taps; x++) {
            real += cos(-x * w) * src[x];
            imag += sin(-x * w) * src[x];
            real_num += cos(-x * w) * src[x] * x;
            imag_num += sin(-x * w) * src[x] * x;
        }

        mag[i] = hypot(real, imag);
        phase[i] = atan2(imag, real);
        div = real * real + imag * imag;
        delay[i] = (real_num * real + imag_num * imag) / div;
        /* ... */
        min_delay = fminf(min_delay, delay[i]);
        max_delay = fmaxf(max_delay, delay[i]);
    }
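/* Annotation (not part of af_afir.c): the sweep above evaluates the IR's
 * DTFT H(w) = sum_n h[n]*e^(-j*w*n) on s->w points of [0, pi]; mag[] and
 * phase[] are |H(w)| and arg H(w), and delay[] is the standard group-delay
 * identity tau(w) = Re{ (sum_n n*h[n]*e^(-j*w*n)) * conj(H(w)) } / |H(w)|^2.
 * A self-contained sketch of the same computation for one frequency
 * (hypothetical helper, link with -lm): */
#include <math.h>

static void fir_response_at(const float *h, int nb_taps, double w,
                            double *mag, double *phase, double *group_delay)
{
    double re = 0., im = 0., re_num = 0., im_num = 0.;

    for (int n = 0; n < nb_taps; n++) {
        re     += cos(-n * w) * h[n];          /* Re H(w)            */
        im     += sin(-n * w) * h[n];          /* Im H(w)            */
        re_num += cos(-n * w) * h[n] * n;      /* Re DTFT{n*h[n]}(w) */
        im_num += sin(-n * w) * h[n] * n;      /* Im DTFT{n*h[n]}(w) */
    }

    *mag         = hypot(re, im);
    *phase       = atan2(im, re);
    *group_delay = (re_num * re + im_num * im) / (re * re + im * im);
}

/* e.g. the symmetric (linear-phase) 3-tap FIR {0.25, 0.5, 0.25} gives a
 * group delay of exactly 1 sample at any frequency where |H(w)| != 0. */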
    /* second pass: map the curves to pixel rows and draw them */
    for (i = 0; i < s->w; i++) {
        int ymag = mag[i] / max * (s->h - 1);
        int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
        int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);
        /* ... */
        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
        ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
        /* ... */
        prev_yphase = yphase;
        /* ... */
        prev_ydelay = ydelay;
        /* ... draw_line() from the previous column to this one ... */
        prev_yphase = yphase;
        prev_ydelay = ydelay;
    }

    if (s->w > 400 && s->h > 100) {
        /* ... text labels for the magnitude and delay ranges ... */
        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
        /* ... */
        snprintf(text, sizeof(text), "%.2f", max_delay);
        /* ... */
        snprintf(text, sizeof(text), "%.2f", min_delay);
        /* ... */
    }
/* init_segment(): allocate buffers and FFT contexts for one group of
 * equally sized partitions (excerpt) */
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
                        int offset, int nb_partitions, int part_size)
{
    /* ... */
    for (int ch = 0; ch < ctx->inputs[0]->channels && part_size >= 8; ch++) {
        /* ... */
    }
    /* ... */
    for (int ch = 0; ch < s->nb_channels; ch++) {
        /* ... */
    }
    /* ... */
    for (int ch = 0; ch < s->nb_channels; ch++) {
        /* ... */
    }
/* convert_coeffs(): build the segment list and convert the selected IR into
 * frequency-domain coefficients (excerpt) */
static int convert_coeffs(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    int ret, i, ch, n, cur_nb_taps;
    /* ... */
    int part_size, max_part_size;
    /* ... */
    if (s->minp > s->maxp) {
        /* ... */
    }
    /* ... */
    max_part_size = 1 << av_log2(s->maxp);

    s->min_part_size = part_size;

    for (i = 0; left > 0; i++) {
        int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
        int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);

        s->nb_segments = i + 1;
        /* ... init_segment() for this group of partitions ... */
        offset += nb_partitions * part_size;
        left -= nb_partitions * part_size;
        /* ... */
        part_size = FFMIN(part_size, max_part_size);
    }
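/* Annotation (not part of af_afir.c): the loop above lays out a non-uniform
 * partitioning: a couple of short min_part_size partitions first (to keep
 * latency low), then progressively larger ones up to max_part_size. A
 * standalone sketch of that schedule, assuming part_size doubles on every
 * iteration (the doubling statement itself is elided in the excerpt above);
 * print_partition_schedule() is a hypothetical helper: */
#include <stdio.h>
#include <limits.h>

static void print_partition_schedule(int nb_taps, int minp, int maxp)
{
    int part_size = minp, left = nb_taps, offset = 0;

    for (int i = 0; left > 0; i++) {
        int step = part_size == maxp ? INT_MAX : 1 + (i == 0);
        int needed = (left + part_size - 1) / part_size;
        int nb_partitions = step < needed ? step : needed;

        printf("segment %d: %d partition(s) of %d taps at offset %d\n",
               i, nb_partitions, part_size, offset);

        offset += nb_partitions * part_size;
        left   -= nb_partitions * part_size;

        part_size *= 2;                               /* assumed doubling */
        part_size  = part_size < maxp ? part_size : maxp;
    }
}

/* print_partition_schedule(4096, 8, 1024) lists 2x8, 1x16, 1x32, ... up to
 * 1024-tap partitions for the remainder of the IR. */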
    /* ... */
    if (!s->ir[s->selir]) {
        /* ... no IR queued yet ... */
    }
    /* ... */
    cur_nb_taps = s->ir[s->selir]->nb_samples;
    /* ... */

    /* IR normalization: each mode accumulates `power` differently before
     * deriving the gain (elided lines omitted) */
    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

        for (i = 0; i < cur_nb_taps; i++)
            /* ... */;
    }
    s->gain = ctx->inputs[1 + s->selir]->channels / power;
    /* ... */
    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

        for (i = 0; i < cur_nb_taps; i++)
            /* ... */;
    }
    s->gain = ctx->inputs[1 + s->selir]->channels / power;
    /* ... */
    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

        for (i = 0; i < cur_nb_taps; i++)
            power += time[i] * time[i];
    }
    s->gain = sqrtf(ch / power);
    /* ... */
    s->gain = FFMIN(s->gain * s->ir_gain, 1.f);
    /* apply the normalization gain to the IR taps */
    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

        s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(cur_nb_taps, 4));
    }
    /* ... */

    /* per channel: split the IR into partitions and transform each one */
    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
        /* ... */
            const float scale = 1.f / seg->part_size;
            /* ... */
            const int remaining = s->nb_taps - toffset;
            /* ... short partitions keep their taps in the time domain ... */
            for (n = 0; n < size; n++)
                coeff[coffset + n].re = time[toffset + n];
            /* ... forward RDFT of the zero-padded partition, then unpack the
             * half-spectrum into FFTComplex coefficients ... */
            coeff[coffset].im = 0;
            /* ... */
            coeff[coffset + n].im = block[2 * n + 1] * scale;
            /* ... */
/* check_ir(): reject IRs longer than max_ir_len seconds (excerpt) */
    int nb_taps, max_nb_taps;
    /* ... */
    max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
    if (nb_taps > max_nb_taps) {
        /* ... error out ... */
    }
/* activate(): wait until the selected IR is fully buffered, then consume
 * input in multiples of min_part_size (excerpt) */
    int ret, status, available, wanted;
    /* ... */
    if (!s->eof_coeffs[s->selir]) {
        /* ... keep requesting frames on the IR input until EOF ... */
            s->eof_coeffs[s->selir] = 1;
        /* ... */
        if (!s->eof_coeffs[s->selir]) {
            /* ... */
        }
    }

    if (!s->have_coeffs && s->eof_coeffs[s->selir]) {
        /* ... convert the buffered IR to coefficients ... */
    }
    /* ... */
    wanted = FFMAX(s->min_part_size,
                   (available / s->min_part_size) * s->min_part_size);
    /* ... */
    if (s->response && s->have_coeffs) {
        /* ... push a frame with the filter response on the video output ... */
        s->video->pts = new_pts;
        /* ... */
    }
    for (int i = 1; i < ctx->nb_inputs; i++) {
        /* ... */
    s->one2many = ctx->inputs[1 + s->selir]->channels == 1;
    /* ... */
    s->nb_coef_channels = ctx->inputs[1 + s->selir]->channels;
/* uninit(): free the segments, the stored IRs and the input pad names
 * (excerpt) */
    for (int i = 0; i < s->nb_segments; i++) {
        /* ... */
    }
    /* ... */
    for (int i = 0; i < s->nb_irs; i++) {
        /* ... */
    }
    /* ... */
    for (unsigned i = 1; i < ctx->nb_inputs; i++)
        /* ... */;
    /* init(): create one audio input pad per impulse response */
    for (int n = 0; n < s->nb_irs; n++) {
        /* ... */
877 .
name =
"filter_response",
/* process_command(): handle runtime switching of the active IR (excerpt) */
    int prev_ir = s->selir;
    /* ... */
    s->selir = FFMIN(s->nb_irs - 1, s->selir);

    if (prev_ir != s->selir) {
        /* ... reload coefficients for the newly selected IR ... */
#define AF  AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define VF  AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)
939 {
"channel",
"set IR channel to display frequency response",
OFFSET(ir_channel),
AV_OPT_TYPE_INT, {.i64=0}, 0, 1024,
VF },
    .description   = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
    .priv_class    = &afir_class,