FFmpeg 4.0
af_anequalizer.c
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/ffmath.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/parseutils.h"
27 #include "avfilter.h"
28 #include "internal.h"
29 #include "audio.h"
30 
31 #define FILTER_ORDER 4
32 
33 enum FilterType {
34  BUTTERWORTH,
35  CHEBYSHEV1,
36  CHEBYSHEV2,
37  NB_TYPES
38 };
39 
40 typedef struct FoSection {
41  double a0, a1, a2, a3, a4;
42  double b0, b1, b2, b3, b4;
43 
44  double num[4];
45  double denum[4];
46 } FoSection;
47 
48 typedef struct EqualizatorFilter {
49  int ignore;
50  int channel;
51  int type;
52 
53  double freq;
54  double gain;
55  double width;
56 
57  FoSection section[2];
58 } EqualizatorFilter;
59 
60 typedef struct AudioNEqualizerContext {
61  const AVClass *class;
62  char *args;
63  char *colors;
64  int draw_curves;
65  int w, h;
66 
67  double mag;
68  int fscale;
69  EqualizatorFilter *filters;
70  int nb_filters;
71  int nb_allocated;
72 
73  AVFrame *video;
74 } AudioNEqualizerContext;
75 #define OFFSET(x) offsetof(AudioNEqualizerContext, x)
76 #define A AV_OPT_FLAG_AUDIO_PARAM
77 #define V AV_OPT_FLAG_VIDEO_PARAM
78 #define F AV_OPT_FLAG_FILTERING_PARAM
79 
80 static const AVOption anequalizer_options[] = {
81  { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
82  { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
83  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
84  { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
85  { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
86  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
87  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
88  { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
89  { NULL }
90 };
91 
92 AVFILTER_DEFINE_CLASS(anequalizer);
93 
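/* Render the magnitude response of each channel's active filter sections into
 * the RGBA curve frame, one colored curve per channel. */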
94 static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
95 {
96  AudioNEqualizerContext *s = ctx->priv;
97  char *colors, *color, *saveptr = NULL;
98  int ch, i, n;
99 
100  colors = av_strdup(s->colors);
101  if (!colors)
102  return;
103 
104  memset(out->data[0], 0, s->h * out->linesize[0]);
105 
106  for (ch = 0; ch < inlink->channels; ch++) {
107  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
108  int prev_v = -1;
109  double f;
110 
111  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
112  if (color)
113  av_parse_color(fg, color, -1, ctx);
114 
115  for (f = 0; f < s->w; f++) {
116  double zr, zi, zr2, zi2;
117  double Hr, Hi;
118  double Hmag = 1;
119  double w;
120  int v, y, x;
121 
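 /* Map column f to a normalized angular frequency up to pi (Nyquist):
  * logarithmically spaced when fscale is set, linearly otherwise. */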
122  w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
123  zr = cos(w);
124  zr2 = zr * zr;
125  zi = -sin(w);
126  zi2 = zi * zi;
127 
128  for (n = 0; n < s->nb_filters; n++) {
129  if (s->filters[n].channel != ch ||
130  s->filters[n].ignore)
131  continue;
132 
133  for (i = 0; i < FILTER_ORDER / 2; i++) {
134  FoSection *S = &s->filters[n].section[i];
135 
136  /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
137  ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
138 
139  Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
140  Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
141  Hmag *= hypot(Hr, Hi);
142  Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
143  Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
144  Hmag /= hypot(Hr, Hi);
145  }
146  }
147 
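 /* Convert |H| to dB and map it to a row: 0 dB sits at mid-height,
  * +mgain dB at the top of the frame, -mgain dB at the bottom. */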
148  v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
149  x = lrint(f);
150  if (prev_v == -1)
151  prev_v = v;
152  if (v <= prev_v) {
153  for (y = v; y <= prev_v; y++)
154  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
155  } else {
156  for (y = prev_v; y <= v; y++)
157  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
158  }
159 
160  prev_v = v;
161  }
162  }
163 
164  av_free(colors);
165 }
166 
167 static int config_video(AVFilterLink *outlink)
168 {
169  AVFilterContext *ctx = outlink->src;
170  AudioNEqualizerContext *s = ctx->priv;
171  AVFilterLink *inlink = ctx->inputs[0];
172  AVFrame *out;
173 
174  outlink->w = s->w;
175  outlink->h = s->h;
176 
177  av_frame_free(&s->video);
178  s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
179  if (!out)
180  return AVERROR(ENOMEM);
181  outlink->sample_aspect_ratio = (AVRational){1,1};
182 
183  draw_curves(ctx, inlink, out);
184 
185  return 0;
186 }
187 
188 static av_cold int init(AVFilterContext *ctx)
189 {
190  AudioNEqualizerContext *s = ctx->priv;
191  AVFilterPad pad, vpad;
192 
193  pad = (AVFilterPad){
194  .name = av_strdup("out0"),
195  .type = AVMEDIA_TYPE_AUDIO,
196  };
197 
198  if (!pad.name)
199  return AVERROR(ENOMEM);
200 
201  if (s->draw_curves) {
202  vpad = (AVFilterPad){
203  .name = av_strdup("out1"),
204  .type = AVMEDIA_TYPE_VIDEO,
205  .config_props = config_video,
206  };
207  if (!vpad.name)
208  return AVERROR(ENOMEM);
209  }
210 
211  ff_insert_outpad(ctx, 0, &pad);
212 
213  if (s->draw_curves)
214  ff_insert_outpad(ctx, 1, &vpad);
215 
216  return 0;
217 }
218 
219 static int query_formats(AVFilterContext *ctx)
220 {
221  AVFilterLink *inlink = ctx->inputs[0];
222  AVFilterLink *outlink = ctx->outputs[0];
223  AudioNEqualizerContext *s = ctx->priv;
224  AVFilterFormats *formats;
225  AVFilterChannelLayouts *layouts;
226  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
227  static const enum AVSampleFormat sample_fmts[] = {
228  AV_SAMPLE_FMT_DBLP,
229  AV_SAMPLE_FMT_NONE
230  };
231  int ret;
232 
233  if (s->draw_curves) {
234  AVFilterLink *videolink = ctx->outputs[1];
235  formats = ff_make_format_list(pix_fmts);
236  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
237  return ret;
238  }
239 
240  formats = ff_make_format_list(sample_fmts);
241  if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
242  (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
243  return ret;
244 
245  layouts = ff_all_channel_counts();
246  if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
247  (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
248  return ret;
249 
250  formats = ff_all_samplerates();
251  if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
252  (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
253  return ret;
254 
255  return 0;
256 }
257 
258 static av_cold void uninit(AVFilterContext *ctx)
259 {
260  AudioNEqualizerContext *s = ctx->priv;
261 
262  av_freep(&ctx->output_pads[0].name);
263  if (s->draw_curves)
264  av_freep(&ctx->output_pads[1].name);
265  av_frame_free(&s->video);
266  av_freep(&s->filters);
267  s->nb_filters = 0;
268  s->nb_allocated = 0;
269 }
270 
271 static void butterworth_fo_section(FoSection *S, double beta,
272  double si, double g, double g0,
273  double D, double c0)
274 {
275  if (c0 == 1 || c0 == -1) {
276  S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
277  S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
278  S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
279  S->b3 = 0;
280  S->b4 = 0;
281 
282  S->a0 = 1;
283  S->a1 = 2*c0*(beta*beta - 1)/D;
284  S->a2 = (beta*beta - 2*beta*si + 1)/D;
285  S->a3 = 0;
286  S->a4 = 0;
287  } else {
288  S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
289  S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
290  S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
291  S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
292  S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;
293 
294  S->a0 = 1;
295  S->a1 = -4*c0*(1 + si*beta)/D;
296  S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
297  S->a3 = -4*c0*(1 - si*beta)/D;
298  S->a4 = (beta*beta - 2*si*beta + 1)/D;
299  }
300 }
301 
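/* Design an order-N Butterworth band boost/cut filter centered at w0 with
 * bandwidth wb; G is the peak gain, Gb the gain at which the bandwidth is
 * measured and G0 the reference gain (all in dB). The result is stored as
 * N/2 fourth-order sections in f->section[]. The Chebyshev variants below
 * follow the same scheme. */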
302 static void butterworth_bp_filter(EqualizatorFilter *f,
303  int N, double w0, double wb,
304  double G, double Gb, double G0)
305 {
306  double g, c0, g0, beta;
307  double epsilon;
308  int r = N % 2;
309  int L = (N - r) / 2;
310  int i;
311 
312  if (G == 0 && G0 == 0) {
313  f->section[0].a0 = 1;
314  f->section[0].b0 = 1;
315  f->section[1].a0 = 1;
316  f->section[1].b0 = 1;
317  return;
318  }
319 
320  G = ff_exp10(G/20);
321  Gb = ff_exp10(Gb/20);
322  G0 = ff_exp10(G0/20);
323 
324  epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
325  g = pow(G, 1.0 / N);
326  g0 = pow(G0, 1.0 / N);
327  beta = pow(epsilon, -1.0 / N) * tan(wb/2);
328  c0 = cos(w0);
329 
330  for (i = 1; i <= L; i++) {
331  double ui = (2.0 * i - 1) / N;
332  double si = sin(M_PI * ui / 2.0);
333  double Di = beta * beta + 2 * si * beta + 1;
334 
335  butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
336  }
337 }
338 
339 static void chebyshev1_fo_section(FoSection *S, double a,
340  double c, double tetta_b,
341  double g0, double si, double b,
342  double D, double c0)
343 {
344  if (c0 == 1 || c0 == -1) {
345  S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
346  S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
347  S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
348  S->b3 = 0;
349  S->b4 = 0;
350 
351  S->a0 = 1;
352  S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
353  S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
354  S->a3 = 0;
355  S->a4 = 0;
356  } else {
357  S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
358  S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
359  S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
360  S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
361  S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;
362 
363  S->a0 = 1;
364  S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
365  S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
366  S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
367  S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
368  }
369 }
370 
371 static void chebyshev1_bp_filter(EqualizatorFilter *f,
372  int N, double w0, double wb,
373  double G, double Gb, double G0)
374 {
375  double a, b, c0, g0, alfa, beta, tetta_b;
376  double epsilon;
377  int r = N % 2;
378  int L = (N - r) / 2;
379  int i;
380 
381  if (G == 0 && G0 == 0) {
382  f->section[0].a0 = 1;
383  f->section[0].b0 = 1;
384  f->section[1].a0 = 1;
385  f->section[1].b0 = 1;
386  return;
387  }
388 
389  G = ff_exp10(G/20);
390  Gb = ff_exp10(Gb/20);
391  G0 = ff_exp10(G0/20);
392 
393  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
394  g0 = pow(G0,1.0/N);
395  alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
396  beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
397  a = 0.5 * (alfa - 1.0/alfa);
398  b = 0.5 * (beta - g0*g0*(1/beta));
399  tetta_b = tan(wb/2);
400  c0 = cos(w0);
401 
402  for (i = 1; i <= L; i++) {
403  double ui = (2.0*i-1.0)/N;
404  double ci = cos(M_PI*ui/2.0);
405  double si = sin(M_PI*ui/2.0);
406  double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
407 
408  chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
409  }
410 }
411 
412 static void chebyshev2_fo_section(FoSection *S, double a,
413  double c, double tetta_b,
414  double g, double si, double b,
415  double D, double c0)
416 {
417  if (c0 == 1 || c0 == -1) {
418  S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
419  S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
420  S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
421  S->b3 = 0;
422  S->b4 = 0;
423 
424  S->a0 = 1;
425  S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
426  S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
427  S->a3 = 0;
428  S->a4 = 0;
429  } else {
430  S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
431  S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
432  S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
433  S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
434  S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
435 
436  S->a0 = 1;
437  S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
438  S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
439  S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
440  S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
441  }
442 }
443 
444 static void chebyshev2_bp_filter(EqualizatorFilter *f,
445  int N, double w0, double wb,
446  double G, double Gb, double G0)
447 {
448  double a, b, c0, tetta_b;
449  double epsilon, g, eu, ew;
450  int r = N % 2;
451  int L = (N - r) / 2;
452  int i;
453 
454  if (G == 0 && G0 == 0) {
455  f->section[0].a0 = 1;
456  f->section[0].b0 = 1;
457  f->section[1].a0 = 1;
458  f->section[1].b0 = 1;
459  return;
460  }
461 
462  G = ff_exp10(G/20);
463  Gb = ff_exp10(Gb/20);
464  G0 = ff_exp10(G0/20);
465 
466  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
467  g = pow(G, 1.0 / N);
468  eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
469  ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
470  a = (eu - 1.0/eu)/2.0;
471  b = (ew - g*g/ew)/2.0;
472  tetta_b = tan(wb/2);
473  c0 = cos(w0);
474 
475  for (i = 1; i <= L; i++) {
476  double ui = (2.0 * i - 1.0)/N;
477  double ci = cos(M_PI * ui / 2.0);
478  double si = sin(M_PI * ui / 2.0);
479  double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
480 
481  chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
482  }
483 }
484 
485 static double butterworth_compute_bw_gain_db(double gain)
486 {
487  double bw_gain = 0;
488 
489  if (gain <= -6)
490  bw_gain = gain + 3;
491  else if(gain > -6 && gain < 6)
492  bw_gain = gain * 0.5;
493  else if(gain >= 6)
494  bw_gain = gain - 3;
495 
496  return bw_gain;
497 }
498 
499 static double chebyshev1_compute_bw_gain_db(double gain)
500 {
501  double bw_gain = 0;
502 
503  if (gain <= -6)
504  bw_gain = gain + 1;
505  else if(gain > -6 && gain < 6)
506  bw_gain = gain * 0.9;
507  else if(gain >= 6)
508  bw_gain = gain - 1;
509 
510  return bw_gain;
511 }
512 
513 static double chebyshev2_compute_bw_gain_db(double gain)
514 {
515  double bw_gain = 0;
516 
517  if (gain <= -6)
518  bw_gain = -3;
519  else if(gain > -6 && gain < 6)
520  bw_gain = gain * 0.3;
521  else if(gain >= 6)
522  bw_gain = 3;
523 
524  return bw_gain;
525 }
526 
527 static inline double hz_2_rad(double x, double fs)
528 {
529  return 2 * M_PI * x / fs;
530 }
531 
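/* (Re)design the two fourth-order sections for one EQ band; the bandwidth
 * gain is derived from the peak gain with a per-type heuristic above. */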
532 static void equalizer(EqualizatorFilter *f, double sample_rate)
533 {
534  double w0 = hz_2_rad(f->freq, sample_rate);
535  double wb = hz_2_rad(f->width, sample_rate);
536  double bw_gain;
537 
538  switch (f->type) {
539  case BUTTERWORTH:
540  bw_gain = butterworth_compute_bw_gain_db(f->gain);
541  butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
542  break;
543  case CHEBYSHEV1:
544  bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
545  chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
546  break;
547  case CHEBYSHEV2:
548  bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
549  chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
550  break;
551  }
552 
553 }
554 
555 static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
556 {
557  equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
558  if (s->nb_filters >= s->nb_allocated) {
559  EqualizatorFilter *filters;
560 
561  filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
562  if (!filters)
563  return AVERROR(ENOMEM);
564  memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
565  av_free(s->filters);
566  s->filters = filters;
567  s->nb_allocated *= 2;
568  }
569  s->nb_filters++;
570 
571  return 0;
572 }
573 
574 static int config_input(AVFilterLink *inlink)
575 {
576  AVFilterContext *ctx = inlink->dst;
577  AudioNEqualizerContext *s = ctx->priv;
578  char *args = av_strdup(s->args);
579  char *saveptr = NULL;
580  int ret = 0;
581 
582  if (!args)
583  return AVERROR(ENOMEM);
584 
585  s->nb_allocated = 32 * inlink->channels;
586  s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
587  if (!s->filters) {
588  s->nb_allocated = 0;
589  av_free(args);
590  return AVERROR(ENOMEM);
591  }
592 
593  while (1) {
594  char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);
595 
596  if (!arg)
597  break;
598 
599  s->filters[s->nb_filters].type = 0;
600  if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
601  &s->filters[s->nb_filters].freq,
602  &s->filters[s->nb_filters].width,
603  &s->filters[s->nb_filters].gain,
604  &s->filters[s->nb_filters].type) != 5 &&
605  sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
606  &s->filters[s->nb_filters].freq,
607  &s->filters[s->nb_filters].width,
608  &s->filters[s->nb_filters].gain) != 4 ) {
609  av_free(args);
610  return AVERROR(EINVAL);
611  }
612 
613  if (s->filters[s->nb_filters].freq < 0 ||
614  s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
615  s->filters[s->nb_filters].ignore = 1;
616 
617  if (s->filters[s->nb_filters].channel < 0 ||
618  s->filters[s->nb_filters].channel >= inlink->channels)
619  s->filters[s->nb_filters].ignore = 1;
620 
621  s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
622  ret = add_filter(s, inlink);
623  if (ret < 0)
624  break;
625  }
626 
627  av_free(args);
628 
629  return ret;
630 }
631 
632 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
633  char *res, int res_len, int flags)
634 {
635  AudioNEqualizerContext *s = ctx->priv;
636  AVFilterLink *inlink = ctx->inputs[0];
637  int ret = AVERROR(ENOSYS);
638 
639  if (!strcmp(cmd, "change")) {
640  double freq, width, gain;
641  int filter;
642 
643  if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
644  return AVERROR(EINVAL);
645 
646  if (filter < 0 || filter >= s->nb_filters)
647  return AVERROR(EINVAL);
648 
649  if (freq < 0 || freq > inlink->sample_rate / 2.0)
650  return AVERROR(EINVAL);
651 
652  s->filters[filter].freq = freq;
653  s->filters[filter].width = width;
654  s->filters[filter].gain = gain;
655  equalizer(&s->filters[filter], inlink->sample_rate);
656  if (s->draw_curves)
657  draw_curves(ctx, inlink, s->video);
658 
659  ret = 0;
660  }
661 
662  return ret;
663 }
664 
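/* One fourth-order IIR section in direct form I: num[] holds the last four
 * inputs, denum[] the last four outputs of the difference equation. */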
665 static inline double section_process(FoSection *S, double in)
666 {
667  double out;
668 
669  out = S->b0 * in;
670  out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
671  out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
672  out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
673  out+= S->b4 * S->num[3] - S->denum[3] * S->a4;
674 
675  S->num[3] = S->num[2];
676  S->num[2] = S->num[1];
677  S->num[1] = S->num[0];
678  S->num[0] = in;
679 
680  S->denum[3] = S->denum[2];
681  S->denum[2] = S->denum[1];
682  S->denum[1] = S->denum[0];
683  S->denum[0] = out;
684 
685  return out;
686 }
687 
688 static double process_sample(FoSection *s1, double in)
689 {
690  double p0 = in, p1;
691  int i;
692 
693  for (i = 0; i < FILTER_ORDER / 2; i++) {
694  p1 = section_process(&s1[i], p0);
695  p0 = p1;
696  }
697 
698  return p1;
699 }
700 
701 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
702 {
703  AVFilterContext *ctx = inlink->dst;
704  AudioNEqualizerContext *s = ctx->priv;
705  AVFilterLink *outlink = ctx->outputs[0];
706  double *bptr;
707  int i, n;
708 
709  for (i = 0; i < s->nb_filters; i++) {
710  EqualizatorFilter *f = &s->filters[i];
711 
712  if (f->gain == 0. || f->ignore)
713  continue;
714 
715  bptr = (double *)buf->extended_data[f->channel];
716  for (n = 0; n < buf->nb_samples; n++) {
717  double sample = bptr[n];
718 
719  sample = process_sample(f->section, sample);
720  bptr[n] = sample;
721  }
722  }
723 
724  if (s->draw_curves) {
725  const int64_t pts = buf->pts +
726  av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
727  outlink->time_base);
728  int ret;
729 
730  s->video->pts = pts;
731  ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
732  if (ret < 0)
733  return ret;
734  }
735 
736  return ff_filter_frame(outlink, buf);
737 }
738 
739 static const AVFilterPad inputs[] = {
740  {
741  .name = "default",
742  .type = AVMEDIA_TYPE_AUDIO,
743  .config_props = config_input,
744  .filter_frame = filter_frame,
745  .needs_writable = 1,
746  },
747  { NULL }
748 };
749 
750 AVFilter ff_af_anequalizer = {
751  .name = "anequalizer",
752  .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
753  .priv_size = sizeof(AudioNEqualizerContext),
754  .priv_class = &anequalizer_class,
755  .init = init,
756  .uninit = uninit,
757  .query_formats = query_formats,
758  .inputs = inputs,
759  .outputs = NULL,
760  .process_command = process_command,
761  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
762 };
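A brief usage sketch (not part of the source above): config_input() parses the "params" option as '|'-separated entries of the form "c<channel> f=<frequency> w=<width> g=<gain> t=<type>" (t is optional; 0 = Butterworth, 1 = Chebyshev type 1, 2 = Chebyshev type 2), and process_command() accepts a "change" command of the form "<filter index>|f=<frequency>|w=<width>|g=<gain>". A typical invocation on a stereo input might look like:

    ffmpeg -i input.wav -af "anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1" output.wav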