avf_showwaves.c (FFmpeg 4.0)
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to video multimedia filter
 */

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"

enum ShowWavesMode {
    MODE_POINT,
    MODE_LINE,
    MODE_P2P,
    MODE_CENTERED_LINE,
    MODE_NB,
};

enum ShowWavesScale {
    SCALE_LIN,
    SCALE_LOG,
    SCALE_SQRT,
    SCALE_CBRT,
    SCALE_NB,
};

enum ShowWavesDrawMode {
    DRAW_SCALE,
    DRAW_FULL,
    DRAW_NB,
};

struct frame_node {
    AVFrame *frame;
    struct frame_node *next;
};

typedef struct ShowWavesContext {
    const AVClass *class;
    int w, h;
    AVRational rate;
    char *colors;
    int buf_idx;
    int16_t *buf_idy;    /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;
    int n;
    int pixstep;
    int sample_count_mod;
    int mode;                   ///< ShowWavesMode
    int scale;                  ///< ShowWavesScale
    int draw_mode;              ///< ShowWavesDrawMode
    int split_channels;
    uint8_t *fg;

    int (*get_h)(int16_t sample, int height);
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t *prev_y, const uint8_t color[4], int h);

    /* single picture */
    int single_pic;
    struct frame_node *audio_frames;
    struct frame_node *last_frame;
    int64_t total_samples;
    int64_t *sum; /* abs sum of the samples per channel */
} ShowWavesContext;

#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
        { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
        { "full",  "draw every pixel for sample directly",     0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL},  .flags=FLAGS, .unit="draw"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwaves);

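/*
 * Usage sketch (illustrative, not part of the original source): these options
 * are normally set through libavfilter's filtergraph syntax, e.g. with the
 * ffmpeg CLI's -filter_complex option or avfilter_graph_parse_ptr(). Every
 * option and constant name below comes from showwaves_options above; the
 * array itself is hypothetical.
 */
static av_unused const char *const showwaves_example_graphs[] = {
    "showwaves=s=600x240:mode=point",                        /* the defaults, spelled out */
    "showwaves=s=1280x240:mode=line:rate=30",                /* one vertical line per column, 30 fps */
    "showwaves=mode=p2p:split_channels=1:colors=white|cyan", /* join samples, one lane per channel */
    "showwaves=mode=cline:scale=log:draw=full",              /* centered lines, log amplitude, no blending */
};
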
static av_cold void uninit(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    av_frame_free(&showwaves->outpicref);
    av_freep(&showwaves->buf_idy);
    av_freep(&showwaves->fg);

    if (showwaves->single_pic) {
        struct frame_node *node = showwaves->audio_frames;
        while (node) {
            struct frame_node *tmp = node;

            node = node->next;
            av_frame_free(&tmp->frame);
            av_freep(&tmp);
        }
        av_freep(&showwaves->sum);
        showwaves->last_frame = NULL;
    }
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

static int get_lin_h(int16_t sample, int height)
{
    return height/2 - av_rescale(sample, height/2, INT16_MAX);
}

static int get_lin_h2(int16_t sample, int height)
{
    return av_rescale(FFABS(sample), height, INT16_MAX);
}

static int get_log_h(int16_t sample, int height)
{
    return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
}

static int get_log_h2(int16_t sample, int height)
{
    return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
}

static int get_sqrt_h(int16_t sample, int height)
{
    return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
}

static int get_sqrt_h2(int16_t sample, int height)
{
    return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
}

static int get_cbrt_h(int16_t sample, int height)
{
    return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
}

static int get_cbrt_h2(int16_t sample, int height)
{
    return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
}

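/*
 * Worked example (not from the original source): for the default 240-pixel
 * high frame, height/2 is 120, so get_lin_h() maps
 *   sample = 0         ->  y = 120   (vertical centre)
 *   sample = INT16_MAX ->  y = 0     (top row)
 *   sample = INT16_MIN ->  y = 240   (one row past the bottom; the draw
 *                                     callbacks below clip or skip it)
 * The *_h2 variants instead return a bar length in [0, height] and feed the
 * "cline" (centered line) mode.
 */
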
static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
                                         int16_t *prev_y,
                                         const uint8_t color[4], int h)
{
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
    }
}

static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
                                        int16_t *prev_y,
                                        const uint8_t color[4], int h)
{
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] = color[0];
        buf[h * linesize + 1] = color[1];
        buf[h * linesize + 2] = color[2];
        buf[h * linesize + 3] = color[3];
    }
}

static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
                                        int16_t *prev_y,
                                        const uint8_t color[4], int h)
{
    int k;
    int start = height/2;
    int end   = av_clip(h, 0, height-1);
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] += color[0];
        buf[k * linesize + 1] += color[1];
        buf[k * linesize + 2] += color[2];
        buf[k * linesize + 3] += color[3];
    }
}

static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
                                       int16_t *prev_y,
                                       const uint8_t color[4], int h)
{
    int k;
    int start = height/2;
    int end   = av_clip(h, 0, height-1);
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] = color[0];
        buf[k * linesize + 1] = color[1];
        buf[k * linesize + 2] = color[2];
        buf[k * linesize + 3] = color[3];
    }
}

static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
                                       int16_t *prev_y,
                                       const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] += color[0];
                buf[k * linesize + 1] += color[1];
                buf[k * linesize + 2] += color[2];
                buf[k * linesize + 3] += color[3];
            }
        }
    }
    *prev_y = h;
}

static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
                                      int16_t *prev_y,
                                      const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] = color[0];
        buf[h * linesize + 1] = color[1];
        buf[h * linesize + 2] = color[2];
        buf[h * linesize + 3] = color[3];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] = color[0];
                buf[k * linesize + 1] = color[1];
                buf[k * linesize + 2] = color[2];
                buf[k * linesize + 3] = color[3];
            }
        }
    }
    *prev_y = h;
}

static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
                                         int16_t *prev_y,
                                         const uint8_t color[4], int h)
{
    int k;
    const int start = (height - h) / 2;
    const int end   = start + h;
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] += color[0];
        buf[k * linesize + 1] += color[1];
        buf[k * linesize + 2] += color[2];
        buf[k * linesize + 3] += color[3];
    }
}

static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
                                        int16_t *prev_y,
                                        const uint8_t color[4], int h)
{
    int k;
    const int start = (height - h) / 2;
    const int end   = start + h;
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] = color[0];
        buf[k * linesize + 1] = color[1];
        buf[k * linesize + 2] = color[2];
        buf[k * linesize + 3] = color[3];
    }
}

static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    if (h >= 0 && h < height)
        buf[h * linesize] += color[0];
}

static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
                                  int16_t *prev_y,
                                  const uint8_t color[4], int h)
{
    int k;
    int start = height/2;
    int end   = av_clip(h, 0, height-1);
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++)
        buf[k * linesize] += color[0];
}

static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
                                 int16_t *prev_y,
                                 const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize] += color[0];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++)
                buf[k * linesize] += color[0];
        }
    }
    *prev_y = h;
}

static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    int k;
    const int start = (height - h) / 2;
    const int end   = start + h;
    for (k = start; k < end; k++)
        buf[k * linesize] += color[0];
}

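/*
 * Note on the two draw families (illustrative summary, not original text):
 * the *_scale variants add a colour that config_output() below has already
 * divided by n (and by the channel count unless split_channels is set), so
 * the n samples that land in one output column accumulate to at most full
 * intensity; the *_full variants store the unscaled colour and simply
 * overwrite whatever was drawn before. E.g. with n = 4 and a grayscale value
 * of 255, DRAW_SCALE writes 63 per sample (4 * 63 = 252) while DRAW_FULL
 * writes 255 on every hit.
 */
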
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    if (showwaves->single_pic)
        showwaves->n = 1;

    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
        case MODE_LINE:          showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale  : draw_sample_line_rgba_full;  break;
        case MODE_P2P:           showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale   : draw_sample_p2p_rgba_full;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    switch (showwaves->scale) {
    case SCALE_LIN:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_lin_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_LOG:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_log_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_SQRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_sqrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_CBRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_cbrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    if (showwaves->draw_mode == DRAW_SCALE) {
        /* multiplication factor, pre-computed to avoid in-loop divisions */
        x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    } else {
        x = 255;
    }
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);

    return 0;
}

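/*
 * Worked example (not from the original source): with the default options
 * (s=600x240, rate=25) and 44100 Hz input,
 *   n          = FFMAX(1, 44100 / (600 * 25) + 0.5) = (int)3.44 = 3 samples per column
 *   frame_rate = 44100 / (3 * 600)                  = 24.5 frames per second
 * so the output rate only approximates the requested 25 fps; it matches
 * exactly only when sample_rate / (w * rate) is an integer.
 */
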
inline static int push_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = outlink->src->priv;
    int nb_channels = inlink->channels;
    int ret, i;

    ret = ff_filter_frame(outlink, showwaves->outpicref);
    showwaves->outpicref = NULL;
    showwaves->buf_idx = 0;
    for (i = 0; i < nb_channels; i++)
        showwaves->buf_idy[i] = 0;
    return ret;
}

static int push_single_pic(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
    AVFrame *out = showwaves->outpicref;
    struct frame_node *node;
    const int nb_channels = inlink->channels;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
    const int linesize = out->linesize[0];
    const int pixstep = showwaves->pixstep;
    int col = 0;
    int64_t *sum = showwaves->sum;

    if (max_samples == 0) {
        av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);

    memset(sum, 0, nb_channels * sizeof(*sum));

    for (node = showwaves->audio_frames; node; node = node->next) {
        int i;
        const AVFrame *frame = node->frame;
        const int16_t *p = (const int16_t *)frame->data[0];

        for (i = 0; i < frame->nb_samples; i++) {
            int ch;

            for (ch = 0; ch < nb_channels; ch++)
                sum[ch] += abs(p[ch + i*nb_channels]) << 1;
            if (n++ == max_samples) {
                for (ch = 0; ch < nb_channels; ch++) {
                    int16_t sample = sum[ch] / max_samples;
                    uint8_t *buf = out->data[0] + col * pixstep;
                    int h;

                    if (showwaves->split_channels)
                        buf += ch*ch_height*linesize;
                    av_assert0(col < outlink->w);
                    h = showwaves->get_h(sample, ch_height);
                    showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
                    sum[ch] = 0;
                }
                col++;
                n = 0;
            }
        }
    }

    return push_frame(outlink);
}

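/*
 * Worked example (not from the original source): for 10 seconds of 44.1 kHz
 * input rendered into the default 600-pixel-wide picture,
 *   total_samples = 441000, max_samples = 441000 / 600 = 735,
 * so each output column shows (roughly) the mean absolute amplitude of 735
 * consecutive samples per channel, doubled by the "<< 1" in the accumulation
 * loop above before being mapped through get_h().
 */
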

static int request_frame(AVFilterLink *outlink)
{
    ShowWavesContext *showwaves = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && showwaves->outpicref) {
        if (showwaves->single_pic)
            push_single_pic(outlink);
        else
            push_frame(outlink);
    }

    return ret;
}

static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
                           const AVFilterLink *inlink, AVFilterLink *outlink,
                           const AVFrame *in)
{
    if (!showwaves->outpicref) {
        int j;
        AVFrame *out = showwaves->outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        out->width  = outlink->w;
        out->height = outlink->h;
        out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
                                          av_make_q(1, inlink->sample_rate),
                                          outlink->time_base);
        for (j = 0; j < outlink->h; j++)
            memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
    }
    return 0;
}

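/*
 * Worked example (not from the original source): if a new output frame is
 * started at the 900th interleaved int16_t of a stereo input frame, the
 * sample offset is 900 / 2 = 450, so for 44.1 kHz input the output pts is
 * in->pts plus 450/44100 seconds rescaled into outlink->time_base.
 */
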
static av_cold int init(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    if (!strcmp(ctx->filter->name, "showwavespic")) {
        showwaves->single_pic = 1;
        showwaves->mode = MODE_CENTERED_LINE;
    }

    return 0;
}

#if CONFIG_SHOWWAVES_FILTER

static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];
            int h;

            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            h = showwaves->get_h(*p++, ch_height);
            showwaves->draw_sample(buf, ch_height, linesize,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
        }

        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}

static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};

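/*
 * Programmatic usage sketch (hypothetical, not part of the original source):
 * shows one way the filter registered above could be instantiated through the
 * public libavfilter API instead of a textual filtergraph. The instance name
 * "waves" and the option string are arbitrary examples, and error handling
 * beyond the return value is omitted.
 */
static av_unused int showwaves_create_example(AVFilterGraph *graph,
                                              AVFilterContext **wavectx)
{
    /* avfilter_get_by_name() looks up the "showwaves" entry defined above;
     * avfilter_graph_create_filter() allocates an instance, applies the
     * option string via the AVClass/AVOption table and adds it to graph. */
    return avfilter_graph_create_filter(wavectx, avfilter_get_by_name("showwaves"),
                                        "waves", "s=640x240:mode=line:rate=30",
                                        NULL, graph);
}
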
#endif // CONFIG_SHOWWAVES_FILTER

#if CONFIG_SHOWWAVESPIC_FILTER

#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwavespic);

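/*
 * Usage sketch (illustrative, not part of the original source): showwavespic
 * renders the whole input into a single frame, so it is typically paired with
 * a single-image output, e.g. the filtergraph string
 *   "showwavespic=s=1024x240:split_channels=1:colors=white|violet"
 * together with -frames:v 1 on the ffmpeg command line.
 */
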
static int showwavespic_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowWavesContext *showwaves = ctx->priv;

    if (showwaves->single_pic) {
        showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
        if (!showwaves->sum)
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next  = NULL;
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}

static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};

#endif // CONFIG_SHOWWAVESPIC_FILTER