/*
 * Copyright (c) 2012 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * concat audio-video filter
 */

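/*
 * Example filtergraph (illustrative): concatenate two segments, each made of
 * one video and one audio stream, inputs listed per segment with video first:
 *
 *     [0:v] [0:a] [1:v] [1:a] concat=n=2:v=1:a=1 [v] [a]
 */
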
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "video.h"
#include "audio.h"

#define TYPE_ALL 2

typedef struct ConcatContext {
    const AVClass *class;
    unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
    unsigned nb_segments;
    unsigned cur_idx; /**< index of the first input of current segment */
    int64_t delta_ts; /**< timestamp to add to produce output timestamps */
    unsigned nb_in_active; /**< number of active inputs in current segment */
    unsigned unsafe;
    struct concat_in {
        int64_t pts;
        int64_t nb_frames;
        unsigned eof;
    } *in;
} ConcatContext;

#define OFFSET(x) offsetof(ConcatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM

static const AVOption concat_options[] = {
    { "n", "specify the number of segments", OFFSET(nb_segments),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, V|A|F},
    { "v", "specify the number of video streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
      AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F },
    { "a", "specify the number of audio streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F},
    { "unsafe", "enable unsafe mode",
      OFFSET(unsafe),
      AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, V|A|F},
    { NULL }
};

AVFILTER_DEFINE_CLASS(concat);

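/*
 * Pad layout: input pads are created segment by segment, video streams first,
 * then audio streams (see init() below), so the input feeding output out_no
 * during segment seg is ctx->inputs[seg * ctx->nb_outputs + out_no].
 */
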
static int query_formats(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned type, nb_str, idx0 = 0, idx, str, seg;
    AVFilterFormats *formats, *rates = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    int ret;

    for (type = 0; type < TYPE_ALL; type++) {
        nb_str = cat->nb_streams[type];
        for (str = 0; str < nb_str; str++) {
            idx = idx0;

            /* Set the output formats */
            formats = ff_all_formats(type);
            if ((ret = ff_formats_ref(formats, &ctx->outputs[idx]->incfg.formats)) < 0)
                return ret;

            if (type == AVMEDIA_TYPE_AUDIO) {
                rates = ff_all_samplerates();
                if ((ret = ff_formats_ref(rates, &ctx->outputs[idx]->incfg.samplerates)) < 0)
                    return ret;
                layouts = ff_all_channel_layouts();
                if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->incfg.channel_layouts)) < 0)
                    return ret;
            }

            /* Set the same formats for each corresponding input */
            for (seg = 0; seg < cat->nb_segments; seg++) {
                if ((ret = ff_formats_ref(formats, &ctx->inputs[idx]->outcfg.formats)) < 0)
                    return ret;
                if (type == AVMEDIA_TYPE_AUDIO) {
                    if ((ret = ff_formats_ref(rates, &ctx->inputs[idx]->outcfg.samplerates)) < 0 ||
                        (ret = ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->outcfg.channel_layouts)) < 0)
                        return ret;
                }
                idx += ctx->nb_outputs;
            }

            idx0++;
        }
    }
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat   = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no, seg;
    AVFilterLink *inlink = ctx->inputs[in_no];

    /* enhancement: find a common time base */
    outlink->time_base           = AV_TIME_BASE_Q;
    outlink->w                   = inlink->w;
    outlink->h                   = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->format              = inlink->format;
    outlink->frame_rate          = inlink->frame_rate;

    for (seg = 1; seg < cat->nb_segments; seg++) {
        inlink = ctx->inputs[in_no + seg * ctx->nb_outputs];
        if (outlink->frame_rate.num != inlink->frame_rate.num ||
            outlink->frame_rate.den != inlink->frame_rate.den) {
            av_log(ctx, AV_LOG_VERBOSE,
                   "Video inputs have different frame rates, output will be VFR\n");
            outlink->frame_rate = av_make_q(1, 0);
            break;
        }
    }

    for (seg = 1; seg < cat->nb_segments; seg++) {
        inlink = ctx->inputs[in_no + seg * ctx->nb_outputs];
        if (!outlink->sample_aspect_ratio.num)
            outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
        /* possible enhancement: unsafe mode, do not check */
        if (outlink->w                       != inlink->w                       ||
            outlink->h                       != inlink->h                       ||
            outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num &&
                                                inlink->sample_aspect_ratio.num ||
            outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
            av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
                   "(size %dx%d, SAR %d:%d) do not match the corresponding "
                   "output link %s parameters (%dx%d, SAR %d:%d)\n",
                   ctx->input_pads[in_no].name, inlink->w, inlink->h,
                   inlink->sample_aspect_ratio.num,
                   inlink->sample_aspect_ratio.den,
                   ctx->input_pads[out_no].name, outlink->w, outlink->h,
                   outlink->sample_aspect_ratio.num,
                   outlink->sample_aspect_ratio.den);
            if (!cat->unsafe)
                return AVERROR(EINVAL);
        }
    }

    return 0;
}

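/**
 * Rescale a frame to the output time base, record the input's expected end
 * timestamp (from the sample count for audio, extrapolated from the mean
 * frame duration otherwise), then offset it by delta_ts and forward it.
 */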
static int push_frame(AVFilterContext *ctx, unsigned in_no, AVFrame *buf)
{
    ConcatContext *cat = ctx->priv;
    unsigned out_no = in_no % ctx->nb_outputs;
    AVFilterLink * inlink = ctx-> inputs[ in_no];
    AVFilterLink *outlink = ctx->outputs[out_no];
    struct concat_in *in = &cat->in[in_no];

    buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
    in->pts = buf->pts;
    in->nb_frames++;
    /* add duration to input PTS */
    if (inlink->sample_rate)
        /* use number of audio samples */
        in->pts += av_rescale_q(buf->nb_samples,
                                av_make_q(1, inlink->sample_rate),
                                outlink->time_base);
    else if (in->nb_frames >= 2)
        /* use mean duration */
        in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);

    buf->pts += cat->delta_ts;
    return ff_filter_frame(outlink, buf);
}

static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_video_buffer(outlink, w, h);
}

static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_audio_buffer(outlink, nb_samples);
}

static void close_input(AVFilterContext *ctx, unsigned in_no)
{
    ConcatContext *cat = ctx->priv;

    cat->in[in_no].eof = 1;
    cat->nb_in_active--;
    av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
           ctx->input_pads[in_no].name, cat->nb_in_active);
}

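/**
 * Compute the end timestamp of the segment that just finished: the largest
 * end pts among its inputs. It is added to delta_ts so that the next
 * segment's timestamps continue where this one stopped.
 */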
static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
{
    ConcatContext *cat = ctx->priv;
    unsigned i = cat->cur_idx;
    unsigned imax = i + ctx->nb_outputs;
    int64_t pts;

    pts = cat->in[i++].pts;
    for (; i < imax; i++)
        pts = FFMAX(pts, cat->in[i].pts);
    cat->delta_ts += pts;
    *seg_delta = pts;
}

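/**
 * Pad an audio stream that ended before the rest of its segment with silence,
 * up to the segment end timestamp seg_delta.
 */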
static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
                        int64_t seg_delta)
{
    ConcatContext *cat = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[out_no];
    int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
    int64_t nb_samples, sent = 0;
    int frame_nb_samples, ret;
    AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
    AVFrame *buf;

    if (!rate_tb.den)
        return AVERROR_BUG;
    if (cat->in[in_no].pts < INT64_MIN + seg_delta)
        return AVERROR_INVALIDDATA;
    if (seg_delta < cat->in[in_no].pts)
        return AVERROR_INVALIDDATA;
    nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
                              outlink->time_base, rate_tb);
    frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
    while (nb_samples) {
        frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
        buf = ff_get_audio_buffer(outlink, frame_nb_samples);
        if (!buf)
            return AVERROR(ENOMEM);
        av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
                               outlink->ch_layout.nb_channels, outlink->format);
        buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
        ret = ff_filter_frame(outlink, buf);
        if (ret < 0)
            return ret;
        sent       += frame_nb_samples;
        nb_samples -= frame_nb_samples;
    }
    return 0;
}

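/**
 * Switch to the next segment: update delta_ts, advance cur_idx, and pad the
 * finished segment's audio streams with silence. Triggered when every input
 * of the current segment has reached EOF, or by the "next" command.
 */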
static int flush_segment(AVFilterContext *ctx)
{
    int ret;
    ConcatContext *cat = ctx->priv;
    unsigned str, str_max;
    int64_t seg_delta;

    find_next_delta_ts(ctx, &seg_delta);
    cat->cur_idx += ctx->nb_outputs;
    cat->nb_in_active = ctx->nb_outputs;
    av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
           cat->delta_ts);

    if (cat->cur_idx < ctx->nb_inputs) {
        /* pad audio streams with silence */
        str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
        str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
        for (; str < str_max; str++) {
            ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
                               seg_delta);
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}

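/**
 * Create the dynamic pads: n * (v + a) inputs named "inN:vM" / "inN:aM"
 * (one set per segment) and v + a outputs named "out:vM" / "out:aM".
 */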
static av_cold int init(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned seg, type, str;
    int ret;

    /* create input pads */
    for (seg = 0; seg < cat->nb_segments; seg++) {
        for (type = 0; type < TYPE_ALL; type++) {
            for (str = 0; str < cat->nb_streams[type]; str++) {
                AVFilterPad pad = {
                    .type             = type,
                };
                if (type == AVMEDIA_TYPE_VIDEO)
                    pad.get_buffer.video = get_video_buffer;
                else
                    pad.get_buffer.audio = get_audio_buffer;
                pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str);
                if ((ret = ff_append_inpad_free_name(ctx, &pad)) < 0)
                    return ret;
            }
        }
    }
    /* create output pads */
    for (type = 0; type < TYPE_ALL; type++) {
        for (str = 0; str < cat->nb_streams[type]; str++) {
            AVFilterPad pad = {
                .type          = type,
                .config_props  = config_output,
            };
            pad.name = av_asprintf("out:%c%d", "va"[type], str);
            if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
                return ret;
        }
    }

    cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
    if (!cat->in)
        return AVERROR(ENOMEM);
    cat->nb_in_active = ctx->nb_outputs;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;

    av_freep(&cat->in);
}

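/**
 * Drive the filter: propagate EOF on an output back to the corresponding
 * inputs, forward one available frame from the current segment, acknowledge
 * input EOF (switching segment once all of its inputs are done), and relay
 * frame requests from the outputs to the active inputs.
 */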
static int activate(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    AVFrame *frame;
    unsigned i, j;
    int ret, status;
    int64_t pts;

    /* Forward status back */
    for (i = 0; i < ctx->nb_outputs; i++) {
        status = ff_outlink_get_status(ctx->outputs[i]);
        if (!status)
            continue;
        for (j = i; j < ctx->nb_inputs; j += ctx->nb_outputs) {
            if (!cat->in[j].eof) {
                cat->in[j].eof = 1;
                ff_inlink_set_status(ctx->inputs[j], status);
                return 0;
            }
        }
    }

    /* Forward available frames */
    if (cat->cur_idx < ctx->nb_inputs) {
        for (i = 0; i < ctx->nb_outputs; i++) {
            ret = ff_inlink_consume_frame(ctx->inputs[cat->cur_idx + i], &frame);
            if (ret < 0)
                return ret;
            if (ret) {
                ff_filter_set_ready(ctx, 10);
                return push_frame(ctx, cat->cur_idx + i, frame);
            }
        }
    }

    /* Forward status change */
    if (cat->cur_idx < ctx->nb_inputs) {
        for (i = 0; i < ctx->nb_outputs; i++) {
            AVFilterLink *inlink = ctx->inputs[cat->cur_idx + i];

            ret = ff_inlink_acknowledge_status(inlink, &status, &pts);
            /* TODO use pts */
            if (ret > 0) {
                close_input(ctx, cat->cur_idx + i);
                if (cat->cur_idx + ctx->nb_outputs >= ctx->nb_inputs) {
                    int64_t eof_pts = cat->delta_ts;

                    eof_pts += av_rescale_q(pts, inlink->time_base, ctx->outputs[i]->time_base);
                    ff_outlink_set_status(ctx->outputs[i], status, eof_pts);
                }
                if (!cat->nb_in_active) {
                    ret = flush_segment(ctx);
                    if (ret < 0)
                        return ret;
                }
                ff_filter_set_ready(ctx, 10);
                return 0;
            }
        }
    }

    ret = FFERROR_NOT_READY;
    for (i = 0; i < ctx->nb_outputs; i++) {
        if (ff_outlink_frame_wanted(ctx->outputs[i])) {
            if (cat->in[cat->cur_idx + i].eof) {
                for (j = 0; j < ctx->nb_outputs; j++)
                    if (!cat->in[cat->cur_idx + j].eof)
                        ff_inlink_request_frame(ctx->inputs[cat->cur_idx + j]);
                return 0;
            } else {
                ff_inlink_request_frame(ctx->inputs[cat->cur_idx + i]);
                ret = 0;
            }
        }
    }

    return ret;
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "next")) {
        av_log(ctx, AV_LOG_VERBOSE, "Command received: next\n");
        return flush_segment(ctx);
    }

    return ret;
}

const AVFilter ff_avf_concat = {
    .name          = "concat",
    .description   = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .priv_size     = sizeof(ConcatContext),
    .inputs        = NULL,
    .outputs       = NULL,
    .priv_class    = &concat_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
    FILTER_QUERY_FUNC(query_formats),
    .process_command = process_command,
};