FFmpeg 4.0
af_join.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio join filter
 *
 * Join multiple audio inputs as different channels in
 * a single output
 */
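
/*
 * Usage illustration (not part of the original source): joining two mono
 * inputs into one stereo stream could be expressed with the filtergraph
 * string
 *     join=inputs=2:channel_layout=stereo:map=0.0-FL|1.0-FR
 * where "0.0" is channel 0 of input 0 and "1.0" is channel 0 of input 1.
 */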

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"

typedef struct ChannelMap {
    int input;                ///< input stream index
    int      in_channel_idx;  ///< index of in_channel in the input stream data
    uint64_t in_channel;      ///< layout describing the input channel
    uint64_t out_channel;     ///< layout describing the output channel
} ChannelMap;
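
/*
 * Illustration (not in the original source): a map entry such as "0.0-FL"
 * (channel 0 of input 0 to the front-left output) results in parse_maps()
 * below filling one ChannelMap roughly as
 *     { .input = 0, .in_channel_idx = 0, .in_channel = 0,
 *       .out_channel = AV_CH_FRONT_LEFT }.
 */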

typedef struct JoinContext {
    const AVClass *class;

    int inputs;
    char *map;
    char    *channel_layout_str;
    uint64_t channel_layout;

    int         nb_channels;
    ChannelMap *channels;

    /**
     * Temporary storage for input frames, until we get one on each input.
     */
    AVFrame **input_frames;

    /**
     * Temporary storage for buffer references, for assembling the output frame.
     */
    AVBufferRef **buffers;
} JoinContext;

#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption join_options[] = {
    { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
    { "channel_layout", "Channel layout of the "
                        "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
    { "map", "A comma-separated list of channels maps in the format "
             "'input_stream.input_channel-output_channel.",
        OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
    { NULL }
};
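
/*
 * Example option string (illustration only, not part of the original file):
 *     inputs=2:channel_layout=stereo:map=0.0-FL|1.0-FR
 * Note that, as implemented in parse_maps() below, individual map entries are
 * separated by '|', and each entry uses '-' between its input and output part.
 */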

AVFILTER_DEFINE_CLASS(join);

static int parse_maps(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    char separator = '|';
    char *cur      = s->map;

    while (cur && *cur) {
        char *sep, *next, *p;
        uint64_t in_channel = 0, out_channel = 0;
        int input_idx, out_ch_idx, in_ch_idx;

        next = strchr(cur, separator);
        if (next)
            *next++ = 0;

        /* split the map into input and output parts */
        if (!(sep = strchr(cur, '-'))) {
            av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
                   "map '%s'\n", cur);
            return AVERROR(EINVAL);
        }
        *sep++ = 0;

#define PARSE_CHANNEL(str, var, inout)                                          \
        if (!(var = av_get_channel_layout(str))) {                              \
            av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str); \
            return AVERROR(EINVAL);                                             \
        }                                                                       \
        if (av_get_channel_layout_nb_channels(var) != 1) {                      \
            av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one "    \
                   inout " channel.\n");                                        \
            return AVERROR(EINVAL);                                             \
        }

        /* parse output channel */
        PARSE_CHANNEL(sep, out_channel, "output");
        if (!(out_channel & s->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
                   "requested channel layout.\n", sep);
            return AVERROR(EINVAL);
        }

        out_ch_idx = av_get_channel_layout_channel_index(s->channel_layout,
                                                         out_channel);
        if (s->channels[out_ch_idx].input >= 0) {
            av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
                   "'%s'.\n", sep);
            return AVERROR(EINVAL);
        }

        /* parse input channel */
        input_idx = strtol(cur, &cur, 0);
        if (input_idx < 0 || input_idx >= s->inputs) {
            av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
                   input_idx);
            return AVERROR(EINVAL);
        }

        if (*cur)
            cur++;

        in_ch_idx = strtol(cur, &p, 0);
        if (p == cur) {
            /* channel specifier is not a number,
             * try to parse as channel name */
            PARSE_CHANNEL(cur, in_channel, "input");
        }

        s->channels[out_ch_idx].input = input_idx;
        if (in_channel)
            s->channels[out_ch_idx].in_channel     = in_channel;
        else
            s->channels[out_ch_idx].in_channel_idx = in_ch_idx;

        cur = next;
    }
    return 0;
}
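
/*
 * Worked example (illustration only): for the entry "1.FC-FC", parse_maps()
 * splits on '-' into the input part "1.FC" and the output part "FC", reads
 * the input stream index 1, and, since "FC" is not a number, stores the named
 * input channel rather than a channel index.
 */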

static av_cold int join_init(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int ret, i;

    if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
               s->channel_layout_str);
        return AVERROR(EINVAL);
    }

    s->nb_channels  = av_get_channel_layout_nb_channels(s->channel_layout);
    s->channels     = av_mallocz_array(s->nb_channels, sizeof(*s->channels));
    s->buffers      = av_mallocz_array(s->nb_channels, sizeof(*s->buffers));
    s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames));
    if (!s->channels || !s->buffers || !s->input_frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_channels; i++) {
        s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
        s->channels[i].input       = -1;
    }

    if ((ret = parse_maps(ctx)) < 0)
        return ret;

    for (i = 0; i < s->inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_strdup(name);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}

static av_cold void join_uninit(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        av_freep(&ctx->input_pads[i].name);
        av_frame_free(&s->input_frames[i]);
    }

    av_freep(&s->channels);
    av_freep(&s->buffers);
    av_freep(&s->input_frames);
}

static int join_query_formats(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    AVFilterChannelLayouts *layouts = NULL;
    int i, ret;

    if ((ret = ff_add_channel_layout(&layouts, s->channel_layout)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        layouts = ff_all_channel_layouts();
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
        (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
        return ret;

    return 0;
}

static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch,
                               uint64_t *inputs)
{
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *link = ctx->inputs[i];

        if (ch->out_channel & link->channel_layout &&
            !(ch->out_channel & inputs[i])) {
            ch->input      = i;
            ch->in_channel = ch->out_channel;
            inputs[i]     |= ch->out_channel;
            return;
        }
    }
}

static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch,
                          uint64_t *inputs)
{
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *link = ctx->inputs[i];

        if ((inputs[i] & link->channel_layout) != link->channel_layout) {
            uint64_t unused = link->channel_layout & ~inputs[i];

            ch->input      = i;
            ch->in_channel = av_channel_layout_extract_channel(unused, 0);
            inputs[i]     |= ch->in_channel;
            return;
        }
    }
}
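
/*
 * Illustration (not part of the original source): with two stereo inputs,
 * channel_layout=4.0 and no explicit map, guess_map_matching() assigns the FL
 * and FR outputs from input 0 (the first input whose layout contains them and
 * has not used them yet), and guess_map_any() then fills the remaining FC and
 * BC outputs from the still unused FL and FR channels of input 1.
 */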

static int join_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    JoinContext       *s = ctx->priv;
    uint64_t *inputs;   // nth element tracks which channels are used from nth input
    int i, ret = 0;

    /* initialize inputs to user-specified mappings */
    if (!(inputs = av_mallocz_array(ctx->nb_inputs, sizeof(*inputs))))
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFilterLink *inlink;

        if (ch->input < 0)
            continue;

        inlink = ctx->inputs[ch->input];

        if (!ch->in_channel)
            ch->in_channel = av_channel_layout_extract_channel(inlink->channel_layout,
                                                               ch->in_channel_idx);

        if (!(ch->in_channel & inlink->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
                   "input stream #%d.\n", av_get_channel_name(ch->in_channel),
                   ch->input);
            ret = AVERROR(EINVAL);
            goto fail;
        }

        inputs[ch->input] |= ch->in_channel;
    }

    /* guess channel maps when not explicitly defined */
    /* first try unused matching channels */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_matching(ctx, ch, inputs);
    }

    /* if the above failed, try to find _any_ unused input channel */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_any(ctx, ch, inputs);

        if (ch->input < 0) {
            av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
                   "output channel '%s'.\n",
                   av_get_channel_name(ch->out_channel));
            goto fail;
        }

        ch->in_channel_idx = av_get_channel_layout_channel_index(ctx->inputs[ch->input]->channel_layout,
                                                                 ch->in_channel);
    }

    /* print mappings */
    av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
               av_get_channel_name(ch->in_channel),
               av_get_channel_name(ch->out_channel));
    }
    av_log(ctx, AV_LOG_VERBOSE, "\n");

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!inputs[i])
            av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
                   "stream %d.\n", i);
    }

fail:
    av_freep(&inputs);
    return ret;
}

static int try_push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    JoinContext       *s  = ctx->priv;
    AVFrame *frame;
    int linesize   = INT_MAX;
    int nb_samples = INT_MAX;
    int nb_buffers = 0;
    int i, j, ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!s->input_frames[i])
            return 0;
        nb_samples = FFMIN(nb_samples, s->input_frames[i]->nb_samples);
    }
    if (!nb_samples)
        return 0;

    /* setup the output frame */
    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);
    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
        frame->extended_data = av_mallocz_array(s->nb_channels,
                                                sizeof(*frame->extended_data));
        if (!frame->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* copy the data pointers */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFrame *cur   = s->input_frames[ch->input];
        AVBufferRef *buf;

        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
        linesize = FFMIN(linesize, cur->linesize[0]);

        /* add the buffer where this plane is stored to the list if it's
         * not already there */
        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
        if (!buf) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        for (j = 0; j < nb_buffers; j++)
            if (s->buffers[j]->buffer == buf->buffer)
                break;
        if (j == i)
            s->buffers[nb_buffers++] = buf;
    }

    /* create references to the buffers we copied to output */
    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
        frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
                                               sizeof(*frame->extended_buf));
        if (!frame->extended_buf) {
            frame->nb_extended_buf = 0;
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
        frame->buf[i] = av_buffer_ref(s->buffers[i]);
        if (!frame->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
                                               FF_ARRAY_ELEMS(frame->buf)]);
        if (!frame->extended_buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    frame->nb_samples     = nb_samples;
    frame->channel_layout = outlink->channel_layout;
    frame->channels       = outlink->channels;
    frame->sample_rate    = outlink->sample_rate;
    frame->format         = outlink->format;
    frame->pts            = s->input_frames[0]->pts;
    frame->linesize[0]    = linesize;
    if (frame->data != frame->extended_data) {
        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
    }

    ret = ff_filter_frame(outlink, frame);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_frame_free(&s->input_frames[i]);

    return ret;

fail:
    av_frame_free(&frame);
    return ret;
}
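
/*
 * try_push_frame() above assembles the output frame without copying any
 * samples: each output plane pointer aliases a plane of one of the input
 * frames, and new references to the underlying buffers are stored in
 * frame->buf / frame->extended_buf so the data remains valid after the input
 * frames are freed.
 */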

static int activate(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int i, ret, status;
    int nb_samples = 0;
    int64_t pts;

    if (!s->input_frames[0]) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_frames[0]);
        if (ret < 0) {
            return ret;
        } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else {
            if (ff_outlink_frame_wanted(ctx->outputs[0]) && !s->input_frames[0]) {
                ff_inlink_request_frame(ctx->inputs[0]);
                return 0;
            }
        }
        if (!s->input_frames[0]) {
            return 0;
        }
    }

    nb_samples = s->input_frames[0]->nb_samples;

    for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
        if (s->input_frames[i])
            continue;

        if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->input_frames[i]);
            if (ret < 0) {
                return ret;
            } else if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
                ff_outlink_set_status(ctx->outputs[0], status, pts);
                return 0;
            }
        } else {
            if (ff_outlink_frame_wanted(ctx->outputs[0])) {
                ff_inlink_request_frame(ctx->inputs[i]);
                return 0;
            }
        }
    }

    return try_push_frame(ctx);
}

static const AVFilterPad avfilter_af_join_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = join_config_output,
    },
    { NULL }
};

AVFilter ff_af_join = {
    .name           = "join",
    .description    = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
                                           "multi-channel output."),
    .priv_size      = sizeof(JoinContext),
    .priv_class     = &join_class,
    .init           = join_init,
    .uninit         = join_uninit,
    .activate       = activate,
    .query_formats  = join_query_formats,
    .inputs         = NULL,
    .outputs        = avfilter_af_join_outputs,
    .flags          = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
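
As a usage illustration (not part of af_join.c), the sketch below shows how this filter might be instantiated through the public libavfilter API. The helper name create_join_filter and the option string are only examples; the caller is assumed to have allocated the AVFilterGraph and to wire up the buffer sources and sink elsewhere.

#include <libavfilter/avfilter.h>

/* Create a "join" filter instance that merges two mono inputs into stereo.
 * Linking the pads and further error handling are left to the caller. */
static int create_join_filter(AVFilterGraph *graph, AVFilterContext **join_ctx)
{
    const AVFilter *join = avfilter_get_by_name("join");

    if (!join)
        return AVERROR_FILTER_NOT_FOUND;

    /* "map=0.0-FL|1.0-FR": channel 0 of input 0 becomes the front-left and
     * channel 0 of input 1 becomes the front-right output channel. */
    return avfilter_graph_create_filter(join_ctx, join, "join",
                                        "inputs=2:channel_layout=stereo:"
                                        "map=0.0-FL|1.0-FR",
                                        NULL, graph);
}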