1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "config_components.h"
20 
21 #include <stdint.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/channel_layout.h"
25 #include "libavutil/common.h"
26 #include "libavutil/log.h"
27 #include "libavutil/mathematics.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/parseutils.h"
30 #include "libavutil/samplefmt.h"
31 
32 #include "audio.h"
33 #include "avfilter.h"
34 #include "filters.h"
35 #include "internal.h"
36 
typedef struct SegmentContext {
    const AVClass *class;

    char *timestamps_str;   ///< "timestamps" option: '|'-separated split times
    char *points_str;       ///< "frames"/"samples" option: '|'-separated counts
    int use_timestamps;     ///< 1 when splitting on timestamps, 0 when splitting on counts

    int current_point;      ///< index of the segment/output currently being fed
    int nb_points;          ///< number of split points, incl. the INT64_MAX sentinel
    int64_t last_pts;       ///< pts of the most recently forwarded frame

    int64_t *points;        ///< split points (tb units or frame/sample counts)
} SegmentContext;
50 
/* Count the '|'-separated elements of item_str into *nb_items.
 * A non-NULL string always contains at least one element; a NULL
 * string leaves *nb_items untouched. */
static void count_points(char *item_str, int *nb_items)
{
    if (!item_str)
        return;

    int count = 1;
    for (const char *cursor = item_str; *cursor; cursor++)
        count += (*cursor == '|');

    *nb_items = count;
}
64 
/**
 * Split item_str on '|' and parse each element into points[].
 *
 * An element prefixed with '+' is relative to the previous (cumulative)
 * split point. In timestamp mode elements are parsed with av_parse_time();
 * otherwise they are plain int64 frame/sample counts.
 *
 * @param ctx       filter context (for priv data and logging)
 * @param item_str  writable '|'-separated list (mutated by av_strtok())
 * @param nb_points number of elements expected in item_str
 * @param points    output array holding at least nb_points entries
 * @return 0 on success, a negative AVERROR code on malformed input
 */
static int parse_points(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
{
    SegmentContext *s = ctx->priv;
    char *arg, *p = item_str;
    char *saveptr = NULL;
    int64_t ref, cur = 0;
    int ret = 0;

    for (int i = 0; i < nb_points; i++) {
        if (!(arg = av_strtok(p, "|", &saveptr)))
            return AVERROR(EINVAL);

        p = NULL;
        ref = 0;
        if (*arg == '+') {
            ref = cur;
            arg++;
        }

        if (s->use_timestamps) {
            ret = av_parse_time(&points[i], arg, s->use_timestamps);
        } else {
            if (sscanf(arg, "%"SCNd64, &points[i]) != 1)
                ret = AVERROR(EINVAL);
        }

        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid splits supplied: %s\n", arg);
            return ret;
        }

        /* Apply the '+' offset BEFORE recording cur, so that chained
         * relative points ("100|+100|+100") accumulate on the absolute
         * position (100, 200, 300). The previous order stored the raw
         * delta in cur, yielding 100, 200, 200 for such chains. */
        points[i] += ref;
        cur = points[i];
    }

    return 0;
}
102 
/**
 * Shared init for the segment/asegment filters.
 *
 * Validates that exactly one of the "timestamps" / counts options was
 * supplied, parses it into the split-point array (terminated with an
 * INT64_MAX sentinel) and creates one output pad per segment.
 *
 * @param ctx  filter context
 * @param type media type (video or audio) assigned to the output pads
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int init(AVFilterContext *ctx, enum AVMediaType type)
{
    SegmentContext *s = ctx->priv;
    char *split_str;
    int ret;

    /* Exactly one of the two option strings must be set. */
    if (s->timestamps_str && s->points_str) {
        av_log(ctx, AV_LOG_ERROR, "Both timestamps and counts supplied.\n");
        return AVERROR(EINVAL);
    } else if (s->timestamps_str) {
        s->use_timestamps = 1;
        split_str = s->timestamps_str;
    } else if (s->points_str) {
        split_str = s->points_str;
    } else {
        av_log(ctx, AV_LOG_ERROR, "Neither timestamps nor durations nor counts supplied.\n");
        return AVERROR(EINVAL);
    }

    /* N split points delimit N+1 segments, hence N+1 outputs. */
    count_points(split_str, &s->nb_points);
    s->nb_points++;

    s->points = av_calloc(s->nb_points, sizeof(*s->points));
    if (!s->points)
        return AVERROR(ENOMEM);

    ret = parse_points(ctx, split_str, s->nb_points - 1, s->points);
    if (ret < 0)
        return ret;

    /* Sentinel: the final segment extends to the end of the stream. */
    s->points[s->nb_points - 1] = INT64_MAX;

    for (int i = 0; i < s->nb_points; i++) {
        AVFilterPad pad = { 0 };

        pad.type = type;
        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        /* Ownership of pad.name transfers; it is freed even on failure. */
        if ((ret = ff_append_outpad_free_name(ctx, &pad)) < 0)
            return ret;
    }

    return 0;
}
149 
/* Input link configured: split points parsed in timestamp mode are in
 * AV_TIME_BASE units and must be rescaled to the link's time base.
 * Count-based points need no conversion. The INT64_MAX sentinel in the
 * last slot is deliberately left untouched. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SegmentContext *s = ctx->priv;

    if (!s->use_timestamps)
        return 0;

    for (int i = 0; i < s->nb_points - 1; i++)
        s->points[i] = av_rescale_q(s->points[i], AV_TIME_BASE_Q, inlink->time_base);

    return 0;
}
163 
/* Return non-zero when the frame lies at or beyond the current split
 * point: by pts in timestamp mode, otherwise by the input link's
 * frame/sample counters (which already include this frame). */
static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
{
    SegmentContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int64_t split = s->points[s->current_point];

    if (s->use_timestamps)
        return frame->pts >= split;

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        /* frame_count_out was already bumped for this frame. */
        return inlink->frame_count_out - 1 >= split;
    case AVMEDIA_TYPE_AUDIO:
        /* Compare against the first sample of this frame. */
        return inlink->sample_count_out - frame->nb_samples >= split;
    }

    return 0;
}
185 
/**
 * Activation callback shared by segment/asegment.
 *
 * Pulls a frame (video) or a bounded run of samples (audio) from the
 * input, closes outputs whose segment has finished with EOF, and
 * forwards the frame to the output of the currently active segment.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    SegmentContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t max_samples;
    int64_t diff;
    int64_t pts;

    /* Forward status changes of all still-open outputs back upstream. */
    for (int i = s->current_point; i < s->nb_points; i++) {
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
    }

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_inlink_consume_frame(inlink, &frame);
        break;
    case AVMEDIA_TYPE_AUDIO:
        /* Distance from the consumed-sample counter to the current split
         * point; close every output whose point is already behind us. */
        diff = s->points[s->current_point] - inlink->sample_count_out;
        while (diff <= 0) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, s->last_pts);
            s->current_point++;
            if (s->current_point >= s->nb_points)
                return AVERROR(EINVAL);

            diff = s->points[s->current_point] - inlink->sample_count_out;
        }
        if (s->use_timestamps) {
            /* NOTE(review): mixing time-base points with a sample counter
             * only lines up when the audio link time base is
             * 1/sample_rate — presumably guaranteed here; confirm. */
            max_samples = av_rescale_q(diff, av_make_q(1, inlink->sample_rate), inlink->time_base);
        } else {
            max_samples = FFMAX(1, FFMIN(diff, INT_MAX));
        }
        /* Bound the read so a frame never straddles a split point; with
         * no usable bound, fall back to whole-frame consumption. */
        if (max_samples <= 0 || max_samples > INT_MAX)
            ret = ff_inlink_consume_frame(inlink, &frame);
        else
            ret = ff_inlink_consume_samples(inlink, 1, max_samples, &frame);
        break;
    default:
        return AVERROR_BUG;
    }

    if (ret > 0) {
        s->last_pts = frame->pts;
        /* Close every segment that ends at or before this frame. The
         * INT64_MAX sentinel keeps the loop from running off the end. */
        while (current_segment_finished(ctx, frame)) {
            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
            s->current_point++;
        }

        if (s->current_point >= s->nb_points) {
            av_frame_free(&frame);
            return AVERROR(EINVAL);
        }

        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
    }

    if (ret < 0) {
        return ret;
    } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        /* Input hit EOF/error: propagate to all remaining outputs. */
        for (int i = s->current_point; i < s->nb_points; i++)
            ff_outlink_set_status(ctx->outputs[i], status, pts);
        return 0;
    } else {
        /* Ask for more input if any open output wants a frame. */
        for (int i = s->current_point; i < s->nb_points; i++) {
            if (ff_outlink_frame_wanted(ctx->outputs[i]))
                ff_inlink_request_frame(inlink);
        }
        return 0;
    }
}
257 
uninit(AVFilterContext *ctx)258 static av_cold void uninit(AVFilterContext *ctx)
259 {
260     SegmentContext *s = ctx->priv;
261 
262     av_freep(&s->points);
263 }
264 
#define OFFSET(x) offsetof(SegmentContext, x)
/* Option shared by the video ("segment") and audio ("asegment") variants;
 * FLAGS is defined per-variant just before each options table. */
#define COMMON_OPTS \
    { "timestamps", "timestamps of input at which to split input", OFFSET(timestamps_str),  AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, \

269 #if CONFIG_SEGMENT_FILTER
270 
/* init() wrapper creating video output pads for the "segment" filter. */
static av_cold int video_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_VIDEO);
}
275 
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption segment_options[] = {
    COMMON_OPTS
    /* Count-based alternative to "timestamps": split at frame indices. */
    { "frames", "frames at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING,  { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(segment);

static const AVFilterPad segment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

/* Output pads are created dynamically in init(), one per segment. */
const AVFilter ff_vf_segment = {
    .name        = "segment",
    .description = NULL_IF_CONFIG_SMALL("Segment video stream."),
    .init        = video_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SegmentContext),
    .priv_class  = &segment_class,
    .activate    = activate,
    FILTER_INPUTS(segment_inputs),
    .outputs     = NULL,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
};
306 #endif // CONFIG_SEGMENT_FILTER
307 
308 #if CONFIG_ASEGMENT_FILTER
309 
/* init() wrapper creating audio output pads for the "asegment" filter. */
static av_cold int audio_init(AVFilterContext *ctx)
{
    return init(ctx, AVMEDIA_TYPE_AUDIO);
}
314 
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asegment_options[] = {
    COMMON_OPTS
    /* Count-based alternative to "timestamps": split at sample counts. */
    { "samples", "samples at which to split input", OFFSET(points_str), AV_OPT_TYPE_STRING,  { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(asegment);

static const AVFilterPad asegment_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

/* Output pads are created dynamically in init(), one per segment. */
const AVFilter ff_af_asegment = {
    .name        = "asegment",
    .description = NULL_IF_CONFIG_SMALL("Segment audio stream."),
    .init        = audio_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SegmentContext),
    .priv_class  = &asegment_class,
    .activate    = activate,
    FILTER_INPUTS(asegment_inputs),
    .outputs     = NULL,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_METADATA_ONLY,
};
345 #endif // CONFIG_ASEGMENT_FILTER
346