vf_convolve.c (FFmpeg 4.0)
1 /*
2  * Copyright (c) 2017 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/imgutils.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/pixdesc.h"
26 #include "libavcodec/avfft.h"
27 
28 #include "avfilter.h"
29 #include "formats.h"
30 #include "framesync.h"
31 #include "internal.h"
32 #include "video.h"
33 
34 #define MAX_THREADS 16
35 
36 typedef struct ConvolveContext {
37  const AVClass *class;
38  FFFrameSync fs;
39 
40  FFTContext *fft[4][MAX_THREADS];
41  FFTContext *ifft[4][MAX_THREADS];
42 
43  int fft_bits[4];
44  int fft_len[4];
45  int planewidth[4];
46  int planeheight[4];
47 
48  FFTComplex *fft_hdata[4];
49  FFTComplex *fft_vdata[4];
50  FFTComplex *fft_hdata_impulse[4];
51  FFTComplex *fft_vdata_impulse[4];
52 
53  int depth;
54  int planes;
55  int impulse;
56  float noise;
57  int nb_planes;
58  int got_impulse[4];
59 
60  int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
61 } ConvolveContext;
62 
63 #define OFFSET(x) offsetof(ConvolveContext, x)
64 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
65 
66 static const AVOption convolve_options[] = {
67  { "planes", "set planes to convolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
68  { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
69  { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
70  { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
71  { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
72  { NULL },
73 };
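/* Usage sketch (hypothetical file names): the second input supplies the
 * convolution kernel as an image or video, e.g.
 *
 *   ffmpeg -i input.mp4 -i kernel.png \
 *          -filter_complex "[0:v][1:v]convolve=planes=7:impulse=first" output.mp4
 *
 * "planes" is a bitmask selecting which planes to process, and
 * "impulse=first" reads the kernel only from the first frame of the second
 * input, while "impulse=all" re-reads it on every frame. */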
74 
75 static int query_formats(AVFilterContext *ctx)
76 {
77  static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
78  AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
79  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
80  AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
81  AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
82  AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
83  AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
84  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
85  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
86  AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
87  AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
88  AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
89  AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
90  AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
91  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
92  AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
93  AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
94  AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
95  AV_PIX_FMT_NONE
96  };
97 
98  AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt);
99  if (!fmts_list)
100  return AVERROR(ENOMEM);
101  return ff_set_common_formats(ctx, fmts_list);
102 }
103 
104 static int config_input_main(AVFilterLink *inlink)
105 {
106  ConvolveContext *s = inlink->dst->priv;
107  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
108  int fft_bits, i;
109 
110  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
111  s->planewidth[0] = s->planewidth[3] = inlink->w;
112  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
113  s->planeheight[0] = s->planeheight[3] = inlink->h;
114 
115  s->nb_planes = desc->nb_components;
116  s->depth = desc->comp[0].depth;
117 
118  for (i = 0; i < s->nb_planes; i++) {
119  int w = s->planewidth[i];
120  int h = s->planeheight[i];
121  int n = FFMAX(w, h);
122 
123  for (fft_bits = 1; 1 << fft_bits < n; fft_bits++);
124 
125  s->fft_bits[i] = fft_bits;
126  s->fft_len[i] = 1 << s->fft_bits[i];
127 
128  if (!(s->fft_hdata[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
129  return AVERROR(ENOMEM);
130 
131  if (!(s->fft_vdata[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
132  return AVERROR(ENOMEM);
133 
134  if (!(s->fft_hdata_impulse[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
135  return AVERROR(ENOMEM);
136 
137  if (!(s->fft_vdata_impulse[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
138  return AVERROR(ENOMEM);
139  }
140 
141  return 0;
142 }
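/* Worked example (hypothetical 1920x1080 input, luma plane): n = max(1920, 1080)
 * = 1920, so the loop above stops at fft_bits = 11 because 1 << 11 = 2048 is the
 * smallest power of two >= 1920; each plane then gets a 2048x2048 FFTComplex
 * buffer for the padded transform. */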
143 
144 static int config_input_impulse(AVFilterLink *inlink)
145 {
146  AVFilterContext *ctx = inlink->dst;
147 
148  if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
149  ctx->inputs[0]->h != ctx->inputs[1]->h) {
150  av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
151  return AVERROR(EINVAL);
152  }
153  if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
154  av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
155  return AVERROR(EINVAL);
156  }
157 
158  return 0;
159 }
160 
161 typedef struct ThreadData {
162  FFTComplex *hdata, *vdata;
163  int plane, n;
164 } ThreadData;
165 
166 static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
167 {
168  ConvolveContext *s = ctx->priv;
169  ThreadData *td = arg;
170  FFTComplex *hdata = td->hdata;
171  const int plane = td->plane;
172  const int n = td->n;
173  int start = (n * jobnr) / nb_jobs;
174  int end = (n * (jobnr+1)) / nb_jobs;
175  int y;
176 
177  for (y = start; y < end; y++) {
178  av_fft_permute(s->fft[plane][jobnr], hdata + y * n);
179  av_fft_calc(s->fft[plane][jobnr], hdata + y * n);
180  }
181 
182  return 0;
183 }
184 
185 static void get_input(ConvolveContext *s, FFTComplex *fft_hdata,
186  AVFrame *in, int w, int h, int n, int plane, float scale)
187 {
188  const int iw = (n - w) / 2, ih = (n - h) / 2;
189  int y, x;
190 
191  if (s->depth == 8) {
192  for (y = 0; y < h; y++) {
193  const uint8_t *src = in->data[plane] + in->linesize[plane] * y;
194 
195  for (x = 0; x < w; x++) {
196  fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
197  fft_hdata[(y + ih) * n + iw + x].im = 0;
198  }
199 
200  for (x = 0; x < iw; x++) {
201  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
202  fft_hdata[(y + ih) * n + x].im = 0;
203  }
204 
205  for (x = n - iw; x < n; x++) {
206  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
207  fft_hdata[(y + ih) * n + x].im = 0;
208  }
209  }
210 
211  for (y = 0; y < ih; y++) {
212  for (x = 0; x < n; x++) {
213  fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
214  fft_hdata[y * n + x].im = 0;
215  }
216  }
217 
218  for (y = n - ih; y < n; y++) {
219  for (x = 0; x < n; x++) {
220  fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
221  fft_hdata[y * n + x].im = 0;
222  }
223  }
224  } else {
225  for (y = 0; y < h; y++) {
226  const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);
227 
228  for (x = 0; x < w; x++) {
229  fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
230  fft_hdata[(y + ih) * n + iw + x].im = 0;
231  }
232 
233  for (x = 0; x < iw; x++) {
234  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
235  fft_hdata[(y + ih) * n + x].im = 0;
236  }
237 
238  for (x = n - iw; x < n; x++) {
239  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
240  fft_hdata[(y + ih) * n + x].im = 0;
241  }
242  }
243 
244  for (y = 0; y < ih; y++) {
245  for (x = 0; x < n; x++) {
246  fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
247  fft_hdata[y * n + x].im = 0;
248  }
249  }
250 
251  for (y = n - ih; y < n; y++) {
252  for (x = 0; x < n; x++) {
253  fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
254  fft_hdata[y * n + x].im = 0;
255  }
256  }
257  }
258 }
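/* get_input() centers the w x h plane inside the n x n FFT buffer (offsets
 * iw = (n - w) / 2 and ih = (n - h) / 2) and fills the margins by replicating
 * the nearest edge sample, with all imaginary parts set to zero; the replicated
 * border helps reduce wrap-around artifacts of the circular convolution. */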
259 
260 static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
261 {
262  ConvolveContext *s = ctx->priv;
263  ThreadData *td = arg;
264  FFTComplex *hdata = td->hdata;
265  FFTComplex *vdata = td->vdata;
266  const int plane = td->plane;
267  const int n = td->n;
268  int start = (n * jobnr) / nb_jobs;
269  int end = (n * (jobnr+1)) / nb_jobs;
270  int y, x;
271 
272  for (y = start; y < end; y++) {
273  for (x = 0; x < n; x++) {
274  vdata[y * n + x].re = hdata[x * n + y].re;
275  vdata[y * n + x].im = hdata[x * n + y].im;
276  }
277 
278  av_fft_permute(s->fft[plane][jobnr], vdata + y * n);
279  av_fft_calc(s->fft[plane][jobnr], vdata + y * n);
280  }
281 
282  return 0;
283 }
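/* The 2-D transform is computed separably: fft_horizontal() runs a 1-D FFT on
 * each row, then fft_vertical() transposes the buffer and runs another set of
 * row FFTs, which is equivalent to transforming the columns. Each slice job
 * handles the contiguous band of rows [start, end). */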
284 
285 static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
286 {
287  ConvolveContext *s = ctx->priv;
288  ThreadData *td = arg;
289  FFTComplex *hdata = td->hdata;
290  FFTComplex *vdata = td->vdata;
291  const int plane = td->plane;
292  const int n = td->n;
293  int start = (n * jobnr) / nb_jobs;
294  int end = (n * (jobnr+1)) / nb_jobs;
295  int y, x;
296 
297  for (y = start; y < end; y++) {
298  av_fft_permute(s->ifft[plane][jobnr], vdata + y * n);
299  av_fft_calc(s->ifft[plane][jobnr], vdata + y * n);
300 
301  for (x = 0; x < n; x++) {
302  hdata[x * n + y].re = vdata[y * n + x].re;
303  hdata[x * n + y].im = vdata[y * n + x].im;
304  }
305  }
306 
307  return 0;
308 }
309 
310 static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
311 {
312  ConvolveContext *s = ctx->priv;
313  ThreadData *td = arg;
314  FFTComplex *hdata = td->hdata;
315  const int plane = td->plane;
316  const int n = td->n;
317  int start = (n * jobnr) / nb_jobs;
318  int end = (n * (jobnr+1)) / nb_jobs;
319  int y;
320 
321  for (y = start; y < end; y++) {
322  av_fft_permute(s->ifft[plane][jobnr], hdata + y * n);
323  av_fft_calc(s->ifft[plane][jobnr], hdata + y * n);
324  }
325 
326  return 0;
327 }
328 
329 static void get_output(ConvolveContext *s, FFTComplex *input, AVFrame *out,
330  int w, int h, int n, int plane, float scale)
331 {
332  const int max = (1 << s->depth) - 1;
333  const int hh = h / 2;
334  const int hw = w / 2;
335  int y, x;
336 
337  if (s->depth == 8) {
338  for (y = 0; y < hh; y++) {
339  uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
340  for (x = 0; x < hw; x++)
341  dst[x] = av_clip_uint8(input[y * n + x].re * scale);
342  }
343  for (y = 0; y < hh; y++) {
344  uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
345  for (x = 0; x < hw; x++)
346  dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
347  }
348  for (y = 0; y < hh; y++) {
349  uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
350  for (x = 0; x < hw; x++)
351  dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
352  }
353  for (y = 0; y < hh; y++) {
354  uint8_t *dst = out->data[plane] + y * out->linesize[plane];
355  for (x = 0; x < hw; x++)
356  dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
357  }
358  } else {
359  for (y = 0; y < hh; y++) {
360  uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
361  for (x = 0; x < hw; x++)
362  dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
363  }
364  for (y = 0; y < hh; y++) {
365  uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
366  for (x = 0; x < hw; x++)
367  dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
368  }
369  for (y = 0; y < hh; y++) {
370  uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
371  for (x = 0; x < hw; x++)
372  dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
373  }
374  for (y = 0; y < hh; y++) {
375  uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
376  for (x = 0; x < hw; x++)
377  dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
378  }
379  }
380 }
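/* get_output() swaps the four quadrants of the transform buffer while copying
 * back (an fftshift-style reordering), which re-aligns the convolution result
 * with the original image; the caller passes scale = 1.f / (n * n) because
 * av_fft_calc() performs unnormalized forward and inverse transforms. */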
381 
382 static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
383 {
384  ConvolveContext *s = ctx->priv;
385  ThreadData *td = arg;
386  FFTComplex *input = td->hdata;
387  FFTComplex *filter = td->vdata;
388  const float noise = s->noise;
389  const int n = td->n;
390  int start = (n * jobnr) / nb_jobs;
391  int end = (n * (jobnr+1)) / nb_jobs;
392  int y, x;
393 
394  for (y = start; y < end; y++) {
395  int yn = y * n;
396 
397  for (x = 0; x < n; x++) {
398  FFTSample re, im, ire, iim;
399 
400  re = input[yn + x].re;
401  im = input[yn + x].im;
402  ire = filter[yn + x].re + noise;
403  iim = filter[yn + x].im;
404 
405  input[yn + x].re = ire * re - iim * im;
406  input[yn + x].im = iim * re + ire * im;
407  }
408  }
409 
410  return 0;
411 }
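/* Pointwise spectral multiplication implements convolution in the frequency
 * domain: with input a + bi and filter c + di (plus the small "noise" bias on
 * the filter's real part), the product is (ac - bd) + (ad + bc)i, which is what
 * the loop above computes for every FFT bin. */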
412 
413 static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
414 {
415  ConvolveContext *s = ctx->priv;
416  ThreadData *td = arg;
417  FFTComplex *input = td->hdata;
418  FFTComplex *filter = td->vdata;
419  const float noise = s->noise;
420  const int n = td->n;
421  int start = (n * jobnr) / nb_jobs;
422  int end = (n * (jobnr+1)) / nb_jobs;
423  int y, x;
424 
425  for (y = start; y < end; y++) {
426  int yn = y * n;
427 
428  for (x = 0; x < n; x++) {
429  FFTSample re, im, ire, iim, div;
430 
431  re = input[yn + x].re;
432  im = input[yn + x].im;
433  ire = filter[yn + x].re;
434  iim = filter[yn + x].im;
435  div = ire * ire + iim * iim + noise;
436 
437  input[yn + x].re = (ire * re + iim * im) / div;
438  input[yn + x].im = (ire * im - iim * re) / div;
439  }
440  }
441 
442  return 0;
443 }
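/* Pointwise spectral division implements deconvolution: the loop computes
 * input * conj(filter) / (|filter|^2 + noise), i.e. for input a + bi and
 * filter c + di the result is ((ac + bd) + (bc - ad)i) / (c^2 + d^2 + noise).
 * The "noise" term regularizes bins where the filter response is close to
 * zero, in the spirit of a Wiener/pseudo-inverse filter. */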
444 
445 static int do_convolve(FFFrameSync *fs)
446 {
447  AVFilterContext *ctx = fs->parent;
448  AVFilterLink *outlink = ctx->outputs[0];
449  ConvolveContext *s = ctx->priv;
450  AVFrame *mainpic = NULL, *impulsepic = NULL;
451  int ret, y, x, plane;
452 
453  ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
454  if (ret < 0)
455  return ret;
456  if (!impulsepic)
457  return ff_filter_frame(outlink, mainpic);
458 
459  for (plane = 0; plane < s->nb_planes; plane++) {
460  FFTComplex *filter = s->fft_vdata_impulse[plane];
461  FFTComplex *input = s->fft_vdata[plane];
462  const int n = s->fft_len[plane];
463  const int w = s->planewidth[plane];
464  const int h = s->planeheight[plane];
465  float total = 0;
466  ThreadData td;
467 
468  if (!(s->planes & (1 << plane))) {
469  continue;
470  }
471 
472  td.plane = plane, td.n = n;
473  get_input(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f);
474 
475  td.hdata = s->fft_hdata[plane];
476  td.vdata = s->fft_vdata[plane];
477 
478  ctx->internal->execute(ctx, fft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
479  ctx->internal->execute(ctx, fft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
480 
481  if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
482  if (s->depth == 8) {
483  for (y = 0; y < h; y++) {
484  const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ;
485  for (x = 0; x < w; x++) {
486  total += src[x];
487  }
488  }
489  } else {
490  for (y = 0; y < h; y++) {
491  const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ;
492  for (x = 0; x < w; x++) {
493  total += src[x];
494  }
495  }
496  }
497  total = FFMAX(1, total);
498 
499  get_input(s, s->fft_hdata_impulse[plane], impulsepic, w, h, n, plane, 1.f / total);
500 
501  td.hdata = s->fft_hdata_impulse[plane];
502  td.vdata = s->fft_vdata_impulse[plane];
503 
504  ctx->internal->execute(ctx, fft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
505  ctx->internal->execute(ctx, fft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
506 
507  s->got_impulse[plane] = 1;
508  }
509 
510  td.hdata = input;
511  td.vdata = filter;
512 
513  ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
514 
515  td.hdata = s->fft_hdata[plane];
516  td.vdata = s->fft_vdata[plane];
517 
518  ctx->internal->execute(ctx, ifft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
519  ctx->internal->execute(ctx, ifft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
520 
521  get_output(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f / (n * n));
522  }
523 
524  return ff_filter_frame(outlink, mainpic);
525 }
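/* Per selected plane, do_convolve() runs: a forward 2-D FFT of the main frame;
 * on the first impulse frame (or every frame with impulse=all) the kernel is
 * summed, scaled by 1/total so its overall gain is 1, and transformed as well;
 * the two spectra are then combined by s->filter (complex_multiply for
 * convolve, complex_divide for deconvolve); finally an inverse 2-D FFT and
 * get_output() write the result back with 1/(n*n) normalization. */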
526 
527 static int config_output(AVFilterLink *outlink)
528 {
529  AVFilterContext *ctx = outlink->src;
530  ConvolveContext *s = ctx->priv;
531  AVFilterLink *mainlink = ctx->inputs[0];
532  int ret, i, j;
533 
534  s->fs.on_event = do_convolve;
535  ret = ff_framesync_init_dualinput(&s->fs, ctx);
536  if (ret < 0)
537  return ret;
538  outlink->w = mainlink->w;
539  outlink->h = mainlink->h;
540  outlink->time_base = mainlink->time_base;
541  outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
542  outlink->frame_rate = mainlink->frame_rate;
543 
544  if ((ret = ff_framesync_configure(&s->fs)) < 0)
545  return ret;
546 
547  for (i = 0; i < s->nb_planes; i++) {
548  for (j = 0; j < MAX_THREADS; j++) {
549  s->fft[i][j] = av_fft_init(s->fft_bits[i], 0);
550  s->ifft[i][j] = av_fft_init(s->fft_bits[i], 1);
551  if (!s->fft[i][j] || !s->ifft[i][j])
552  return AVERROR(ENOMEM);
553  }
554  }
555 
556  return 0;
557 }
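/* A separate FFTContext is created per plane and per worker slot (up to
 * MAX_THREADS), presumably so concurrent slice jobs never share a transform
 * context; jobnr indexes into that per-thread array in the worker functions
 * above. */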
558 
559 static int activate(AVFilterContext *ctx)
560 {
561  ConvolveContext *s = ctx->priv;
562  return ff_framesync_activate(&s->fs);
563 }
564 
565 static av_cold int init(AVFilterContext *ctx)
566 {
567  ConvolveContext *s = ctx->priv;
568 
569  if (!strcmp(ctx->filter->name, "convolve")) {
570  s->filter = complex_multiply;
571  } else if (!strcmp(ctx->filter->name, "deconvolve")) {
572  s->filter = complex_divide;
573  } else {
574  return AVERROR_BUG;
575  }
576 
577  return 0;
578 }
579 
580 static av_cold void uninit(AVFilterContext *ctx)
581 {
582  ConvolveContext *s = ctx->priv;
583  int i, j;
584 
585  for (i = 0; i < 4; i++) {
586  av_freep(&s->fft_hdata[i]);
587  av_freep(&s->fft_vdata[i]);
588  av_freep(&s->fft_hdata_impulse[i]);
589  av_freep(&s->fft_vdata_impulse[i]);
590 
591  for (j = 0; j < MAX_THREADS; j++) {
592  av_fft_end(s->fft[i][j]);
593  av_fft_end(s->ifft[i][j]);
594  }
595  }
596 
597  ff_framesync_uninit(&s->fs);
598 }
599 
600 static const AVFilterPad convolve_inputs[] = {
601  {
602  .name = "main",
603  .type = AVMEDIA_TYPE_VIDEO,
604  .config_props = config_input_main,
605  },{
606  .name = "impulse",
607  .type = AVMEDIA_TYPE_VIDEO,
608  .config_props = config_input_impulse,
609  },
610  { NULL }
611 };
612 
613 static const AVFilterPad convolve_outputs[] = {
614  {
615  .name = "default",
616  .type = AVMEDIA_TYPE_VIDEO,
617  .config_props = config_output,
618  },
619  { NULL }
620 };
621 
622 #if CONFIG_CONVOLVE_FILTER
623 
624 FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);
625 
626 AVFilter ff_vf_convolve = {
627  .name = "convolve",
628  .description = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
629  .preinit = convolve_framesync_preinit,
630  .init = init,
631  .uninit = uninit,
632  .query_formats = query_formats,
633  .activate = activate,
634  .priv_size = sizeof(ConvolveContext),
635  .priv_class = &convolve_class,
636  .inputs = convolve_inputs,
637  .outputs = convolve_outputs,
638  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
639 };
640 
641 #endif /* CONFIG_CONVOLVE_FILTER */
642 
643 #if CONFIG_DECONVOLVE_FILTER
644 
645 static const AVOption deconvolve_options[] = {
646  { "planes", "set planes to deconvolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
647  { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
648  { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
649  { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
650  { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
651  { NULL },
652 };
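/* Usage sketch (hypothetical file names): attempt to undo a known blur whose
 * kernel is supplied as the second input, e.g.
 *
 *   ffmpeg -i blurred.mp4 -i kernel.png \
 *          -filter_complex "[0:v][1:v]deconvolve=planes=7:noise=0.0000001" restored.mp4
 *
 * Larger "noise" values regularize the spectral division more strongly and
 * suppress amplification where the kernel response is near zero. */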
653 
654 FRAMESYNC_DEFINE_CLASS(deconvolve, ConvolveContext, fs);
655 
656 AVFilter ff_vf_deconvolve = {
657  .name = "deconvolve",
658  .description = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
659  .preinit = deconvolve_framesync_preinit,
660  .init = init,
661  .uninit = uninit,
662  .query_formats = query_formats,
663  .activate = activate,
664  .priv_size = sizeof(ConvolveContext),
665  .priv_class = &deconvolve_class,
666  .inputs = convolve_inputs,
667  .outputs = convolve_outputs,
668  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
669 };
670 
671 #endif /* CONFIG_DECONVOLVE_FILTER */