1 /*
2  * PGS subtitle decoder
3  * Copyright (c) 2009 Stephen Backway
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * PGS subtitle decoder
25  */
26 
27 #include "avcodec.h"
28 #include "bytestream.h"
29 #include "codec_internal.h"
30 #include "internal.h"
31 #include "mathops.h"
32 
33 #include "libavutil/colorspace.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/opt.h"
36 
37 #define RGBA(r,g,b,a) (((unsigned)(a) << 24) | ((r) << 16) | ((g) << 8) | (b))
38 #define MAX_EPOCH_PALETTES 8   // Max 8 allowed per PGS epoch
39 #define MAX_EPOCH_OBJECTS  64  // Max 64 allowed per PGS epoch
40 #define MAX_OBJECT_REFS    2   // Max objects per display set
41 
/* PGS segment type codes as they appear in the stream. */
enum SegmentType {
    PALETTE_SEGMENT      = 0x14,
    OBJECT_SEGMENT       = 0x15,
    PRESENTATION_SEGMENT = 0x16,
    WINDOW_SEGMENT       = 0x17,
    DISPLAY_SEGMENT      = 0x80,
};
49 
/* Reference from a presentation segment to an epoch object, with placement. */
typedef struct PGSSubObjectRef {
    int     id;                /* id of the epoch object this reference points at */
    int     window_id;
    uint8_t composition_flag;  /* bit 0x80: cropped, bit 0x40: forced subtitle */
    int     x;                 /* top-left placement on screen */
    int     y;
    int     crop_x;            /* crop rectangle; only read when 0x80 is set */
    int     crop_y;
    int     crop_w;
    int     crop_h;
} PGSSubObjectRef;
61 
/* State of the current display set, filled in from a presentation segment. */
typedef struct PGSSubPresentation {
    int id_number;                            /* composition number */
    int palette_id;                           /* palette used by this display set */
    int object_count;                         /* valid entries in objects[] */
    PGSSubObjectRef objects[MAX_OBJECT_REFS];
    int64_t pts;                              /* pts of the presentation segment */
} PGSSubPresentation;
69 
/* RLE bitmap data for a single epoch object. */
typedef struct PGSSubObject {
    int          id;
    int          w;   /* bitmap width in pixels */
    int          h;   /* bitmap height in pixels */
    uint8_t      *rle;  /* buffered RLE data, possibly assembled from several segments */
    unsigned int rle_buffer_size, rle_data_len;  /* allocated size / bytes received so far */
    unsigned int rle_remaining_len;  /* bytes still expected from follow-up object segments */
} PGSSubObject;
78 
/* All objects defined in the current epoch. */
typedef struct PGSSubObjects {
    int          count;  /* valid entries in object[] */
    PGSSubObject object[MAX_EPOCH_OBJECTS];
} PGSSubObjects;
83 
/* One palette of the current epoch: 256 RGBA entries (see the RGBA macro). */
typedef struct PGSSubPalette {
    int         id;
    uint32_t    clut[256];
} PGSSubPalette;
88 
/* All palettes defined in the current epoch. */
typedef struct PGSSubPalettes {
    int           count;  /* valid entries in palette[] */
    PGSSubPalette palette[MAX_EPOCH_PALETTES];
} PGSSubPalettes;
93 
/* Decoder private context: current display set plus per-epoch caches. */
typedef struct PGSSubContext {
    AVClass *class;
    PGSSubPresentation presentation;  /* most recent presentation segment */
    PGSSubPalettes     palettes;      /* palettes cached for the current epoch */
    PGSSubObjects      objects;       /* objects cached for the current epoch */
    int forced_subs_only;             /* AVOption: only output forced subtitles */
} PGSSubContext;
101 
/**
 * Release all cached epoch objects and palettes.
 *
 * Called on epoch boundaries and when the decoder is closed.
 */
static void flush_cache(AVCodecContext *avctx)
{
    PGSSubContext *ctx = avctx->priv_data;

    for (int i = 0; i < ctx->objects.count; i++) {
        PGSSubObject *obj = &ctx->objects.object[i];

        av_freep(&obj->rle);
        obj->rle_buffer_size   = 0;
        obj->rle_remaining_len = 0;
    }
    ctx->objects.count  = 0;
    ctx->palettes.count = 0;
}
115 
find_object(int id, PGSSubObjects *objects)116 static PGSSubObject * find_object(int id, PGSSubObjects *objects)
117 {
118     int i;
119 
120     for (i = 0; i < objects->count; i++) {
121         if (objects->object[i].id == id)
122             return &objects->object[i];
123     }
124     return NULL;
125 }
126 
find_palette(int id, PGSSubPalettes *palettes)127 static PGSSubPalette * find_palette(int id, PGSSubPalettes *palettes)
128 {
129     int i;
130 
131     for (i = 0; i < palettes->count; i++) {
132         if (palettes->palette[i].id == id)
133             return &palettes->palette[i];
134     }
135     return NULL;
136 }
137 
init_decoder(AVCodecContext *avctx)138 static av_cold int init_decoder(AVCodecContext *avctx)
139 {
140     avctx->pix_fmt     = AV_PIX_FMT_PAL8;
141 
142     return 0;
143 }
144 
close_decoder(AVCodecContext *avctx)145 static av_cold int close_decoder(AVCodecContext *avctx)
146 {
147     flush_cache(avctx);
148 
149     return 0;
150 }
151 
152 /**
153  * Decode the RLE data.
154  *
155  * The subtitle is stored as a Run Length Encoded image.
156  *
157  * @param avctx contains the current codec context
158  * @param sub pointer to the processed subtitle data
159  * @param buf pointer to the RLE data to process
160  * @param buf_size size of the RLE data to process
161  */
decode_rle(AVCodecContext *avctx, AVSubtitleRect *rect, const uint8_t *buf, unsigned int buf_size)162 static int decode_rle(AVCodecContext *avctx, AVSubtitleRect *rect,
163                       const uint8_t *buf, unsigned int buf_size)
164 {
165     const uint8_t *rle_bitmap_end;
166     int pixel_count, line_count;
167 
168     rle_bitmap_end = buf + buf_size;
169 
170     rect->data[0] = av_malloc_array(rect->w, rect->h);
171 
172     if (!rect->data[0])
173         return AVERROR(ENOMEM);
174 
175     pixel_count = 0;
176     line_count  = 0;
177 
178     while (buf < rle_bitmap_end && line_count < rect->h) {
179         uint8_t flags, color;
180         int run;
181 
182         color = bytestream_get_byte(&buf);
183         run   = 1;
184 
185         if (color == 0x00) {
186             flags = bytestream_get_byte(&buf);
187             run   = flags & 0x3f;
188             if (flags & 0x40)
189                 run = (run << 8) + bytestream_get_byte(&buf);
190             color = flags & 0x80 ? bytestream_get_byte(&buf) : 0;
191         }
192 
193         if (run > 0 && pixel_count + run <= rect->w * rect->h) {
194             memset(rect->data[0] + pixel_count, color, run);
195             pixel_count += run;
196         } else if (!run) {
197             /*
198              * New Line. Check if correct pixels decoded, if not display warning
199              * and adjust bitmap pointer to correct new line position.
200              */
201             if (pixel_count % rect->w > 0) {
202                 av_log(avctx, AV_LOG_ERROR, "Decoded %d pixels, when line should be %d pixels\n",
203                        pixel_count % rect->w, rect->w);
204                 if (avctx->err_recognition & AV_EF_EXPLODE) {
205                     return AVERROR_INVALIDDATA;
206                 }
207             }
208             line_count++;
209         }
210     }
211 
212     if (pixel_count < rect->w * rect->h) {
213         av_log(avctx, AV_LOG_ERROR, "Insufficient RLE data for subtitle\n");
214         return AVERROR_INVALIDDATA;
215     }
216 
217     ff_dlog(avctx, "Pixel Count = %d, Area = %d\n", pixel_count, rect->w * rect->h);
218 
219     return 0;
220 }
221 
222 /**
223  * Parse the picture segment packet.
224  *
225  * The picture segment contains details on the sequence id,
226  * width, height and Run Length Encoded (RLE) bitmap data.
227  *
228  * @param avctx contains the current codec context
229  * @param buf pointer to the packet to process
230  * @param buf_size size of packet to process
231  */
parse_object_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)232 static int parse_object_segment(AVCodecContext *avctx,
233                                   const uint8_t *buf, int buf_size)
234 {
235     PGSSubContext *ctx = avctx->priv_data;
236     PGSSubObject *object;
237 
238     uint8_t sequence_desc;
239     unsigned int rle_bitmap_len, width, height;
240     int id;
241 
242     if (buf_size <= 4)
243         return AVERROR_INVALIDDATA;
244     buf_size -= 4;
245 
246     id = bytestream_get_be16(&buf);
247     object = find_object(id, &ctx->objects);
248     if (!object) {
249         if (ctx->objects.count >= MAX_EPOCH_OBJECTS) {
250             av_log(avctx, AV_LOG_ERROR, "Too many objects in epoch\n");
251             return AVERROR_INVALIDDATA;
252         }
253         object = &ctx->objects.object[ctx->objects.count++];
254         object->id = id;
255     }
256 
257     /* skip object version number */
258     buf += 1;
259 
260     /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
261     sequence_desc = bytestream_get_byte(&buf);
262 
263     if (!(sequence_desc & 0x80)) {
264         /* Additional RLE data */
265         if (buf_size > object->rle_remaining_len)
266             return AVERROR_INVALIDDATA;
267 
268         memcpy(object->rle + object->rle_data_len, buf, buf_size);
269         object->rle_data_len += buf_size;
270         object->rle_remaining_len -= buf_size;
271 
272         return 0;
273     }
274 
275     if (buf_size <= 7)
276         return AVERROR_INVALIDDATA;
277     buf_size -= 7;
278 
279     /* Decode rle bitmap length, stored size includes width/height data */
280     rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
281 
282     if (buf_size > rle_bitmap_len) {
283         av_log(avctx, AV_LOG_ERROR,
284                "Buffer dimension %d larger than the expected RLE data %d\n",
285                buf_size, rle_bitmap_len);
286         return AVERROR_INVALIDDATA;
287     }
288 
289     /* Get bitmap dimensions from data */
290     width  = bytestream_get_be16(&buf);
291     height = bytestream_get_be16(&buf);
292 
293     /* Make sure the bitmap is not too large */
294     if (avctx->width < width || avctx->height < height || !width || !height) {
295         av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions (%dx%d) invalid.\n", width, height);
296         return AVERROR_INVALIDDATA;
297     }
298 
299     object->w = width;
300     object->h = height;
301 
302     av_fast_padded_malloc(&object->rle, &object->rle_buffer_size, rle_bitmap_len);
303 
304     if (!object->rle) {
305         object->rle_data_len = 0;
306         object->rle_remaining_len = 0;
307         return AVERROR(ENOMEM);
308     }
309 
310     memcpy(object->rle, buf, buf_size);
311     object->rle_data_len = buf_size;
312     object->rle_remaining_len = rle_bitmap_len - buf_size;
313 
314     return 0;
315 }
316 
317 /**
318  * Parse the palette segment packet.
319  *
320  * The palette segment contains details of the palette,
321  * a maximum of 256 colors can be defined.
322  *
323  * @param avctx contains the current codec context
324  * @param buf pointer to the packet to process
325  * @param buf_size size of packet to process
326  */
parse_palette_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)327 static int parse_palette_segment(AVCodecContext *avctx,
328                                   const uint8_t *buf, int buf_size)
329 {
330     PGSSubContext *ctx = avctx->priv_data;
331     PGSSubPalette *palette;
332 
333     const uint8_t *buf_end = buf + buf_size;
334     const uint8_t *cm      = ff_crop_tab + MAX_NEG_CROP;
335     int color_id;
336     int y, cb, cr, alpha;
337     int r, g, b, r_add, g_add, b_add;
338     int id;
339 
340     id  = bytestream_get_byte(&buf);
341     palette = find_palette(id, &ctx->palettes);
342     if (!palette) {
343         if (ctx->palettes.count >= MAX_EPOCH_PALETTES) {
344             av_log(avctx, AV_LOG_ERROR, "Too many palettes in epoch\n");
345             return AVERROR_INVALIDDATA;
346         }
347         palette = &ctx->palettes.palette[ctx->palettes.count++];
348         palette->id  = id;
349     }
350 
351     /* Skip palette version */
352     buf += 1;
353 
354     while (buf < buf_end) {
355         color_id  = bytestream_get_byte(&buf);
356         y         = bytestream_get_byte(&buf);
357         cr        = bytestream_get_byte(&buf);
358         cb        = bytestream_get_byte(&buf);
359         alpha     = bytestream_get_byte(&buf);
360 
361         /* Default to BT.709 colorspace. In case of <= 576 height use BT.601 */
362         if (avctx->height <= 0 || avctx->height > 576) {
363             YUV_TO_RGB1_CCIR_BT709(cb, cr);
364         } else {
365             YUV_TO_RGB1_CCIR(cb, cr);
366         }
367 
368         YUV_TO_RGB2_CCIR(r, g, b, y);
369 
370         ff_dlog(avctx, "Color %d := (%d,%d,%d,%d)\n", color_id, r, g, b, alpha);
371 
372         /* Store color in palette */
373         palette->clut[color_id] = RGBA(r,g,b,alpha);
374     }
375     return 0;
376 }
377 
378 /**
379  * Parse the presentation segment packet.
380  *
381  * The presentation segment contains details on the video
382  * width, video height, x & y subtitle position.
383  *
384  * @param avctx contains the current codec context
385  * @param buf pointer to the packet to process
386  * @param buf_size size of packet to process
387  * @todo TODO: Implement cropping
388  */
parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int64_t pts)389 static int parse_presentation_segment(AVCodecContext *avctx,
390                                       const uint8_t *buf, int buf_size,
391                                       int64_t pts)
392 {
393     PGSSubContext *ctx = avctx->priv_data;
394     int i, state, ret;
395     const uint8_t *buf_end = buf + buf_size;
396 
397     // Video descriptor
398     int w = bytestream_get_be16(&buf);
399     int h = bytestream_get_be16(&buf);
400 
401     ctx->presentation.pts = pts;
402 
403     ff_dlog(avctx, "Video Dimensions %dx%d\n",
404             w, h);
405     ret = ff_set_dimensions(avctx, w, h);
406     if (ret < 0)
407         return ret;
408 
409     /* Skip 1 bytes of unknown, frame rate */
410     buf++;
411 
412     // Composition descriptor
413     ctx->presentation.id_number = bytestream_get_be16(&buf);
414     /*
415      * state is a 2 bit field that defines pgs epoch boundaries
416      * 00 - Normal, previously defined objects and palettes are still valid
417      * 01 - Acquisition point, previous objects and palettes can be released
418      * 10 - Epoch start, previous objects and palettes can be released
419      * 11 - Epoch continue, previous objects and palettes can be released
420      *
421      * reserved 6 bits discarded
422      */
423     state = bytestream_get_byte(&buf) >> 6;
424     if (state != 0) {
425         flush_cache(avctx);
426     }
427 
428     /*
429      * skip palette_update_flag (0x80),
430      */
431     buf += 1;
432     ctx->presentation.palette_id = bytestream_get_byte(&buf);
433     ctx->presentation.object_count = bytestream_get_byte(&buf);
434     if (ctx->presentation.object_count > MAX_OBJECT_REFS) {
435         av_log(avctx, AV_LOG_ERROR,
436                "Invalid number of presentation objects %d\n",
437                ctx->presentation.object_count);
438         ctx->presentation.object_count = 2;
439         if (avctx->err_recognition & AV_EF_EXPLODE) {
440             return AVERROR_INVALIDDATA;
441         }
442     }
443 
444 
445     for (i = 0; i < ctx->presentation.object_count; i++)
446     {
447         PGSSubObjectRef *const object = &ctx->presentation.objects[i];
448 
449         if (buf_end - buf < 8) {
450             av_log(avctx, AV_LOG_ERROR, "Insufficent space for object\n");
451             ctx->presentation.object_count = i;
452             return AVERROR_INVALIDDATA;
453         }
454 
455         object->id               = bytestream_get_be16(&buf);
456         object->window_id        = bytestream_get_byte(&buf);
457         object->composition_flag = bytestream_get_byte(&buf);
458 
459         object->x = bytestream_get_be16(&buf);
460         object->y = bytestream_get_be16(&buf);
461 
462         // If cropping
463         if (object->composition_flag & 0x80) {
464             object->crop_x = bytestream_get_be16(&buf);
465             object->crop_y = bytestream_get_be16(&buf);
466             object->crop_w = bytestream_get_be16(&buf);
467             object->crop_h = bytestream_get_be16(&buf);
468         }
469 
470         ff_dlog(avctx, "Subtitle Placement x=%d, y=%d\n",
471                 object->x, object->y);
472 
473         if (object->x > avctx->width || object->y > avctx->height) {
474             av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
475                    object->x, object->y,
476                     avctx->width, avctx->height);
477             object->y = object->x = 0;
478             if (avctx->err_recognition & AV_EF_EXPLODE) {
479                 return AVERROR_INVALIDDATA;
480             }
481         }
482     }
483 
484     return 0;
485 }
486 
487 /**
488  * Parse the display segment packet.
489  *
490  * The display segment controls the updating of the display.
491  *
492  * @param avctx contains the current codec context
493  * @param data pointer to the data pertaining the subtitle to display
494  * @param buf pointer to the packet to process
495  * @param buf_size size of packet to process
496  */
display_end_segment(AVCodecContext *avctx, AVSubtitle *sub, const uint8_t *buf, int buf_size)497 static int display_end_segment(AVCodecContext *avctx, AVSubtitle *sub,
498                                const uint8_t *buf, int buf_size)
499 {
500     PGSSubContext *ctx = avctx->priv_data;
501     int64_t pts;
502     PGSSubPalette *palette;
503     int i, ret;
504 
505     pts = ctx->presentation.pts != AV_NOPTS_VALUE ? ctx->presentation.pts : sub->pts;
506     memset(sub, 0, sizeof(*sub));
507     sub->pts = pts;
508     ctx->presentation.pts = AV_NOPTS_VALUE;
509     sub->start_display_time = 0;
510     // There is no explicit end time for PGS subtitles.  The end time
511     // is defined by the start of the next sub which may contain no
512     // objects (i.e. clears the previous sub)
513     sub->end_display_time   = UINT32_MAX;
514     sub->format             = 0;
515 
516     // Blank if last object_count was 0.
517     if (!ctx->presentation.object_count)
518         return 1;
519     sub->rects = av_calloc(ctx->presentation.object_count, sizeof(*sub->rects));
520     if (!sub->rects) {
521         return AVERROR(ENOMEM);
522     }
523     palette = find_palette(ctx->presentation.palette_id, &ctx->palettes);
524     if (!palette) {
525         // Missing palette.  Should only happen with damaged streams.
526         av_log(avctx, AV_LOG_ERROR, "Invalid palette id %d\n",
527                ctx->presentation.palette_id);
528         avsubtitle_free(sub);
529         return AVERROR_INVALIDDATA;
530     }
531     for (i = 0; i < ctx->presentation.object_count; i++) {
532         AVSubtitleRect *const rect = av_mallocz(sizeof(*rect));
533         PGSSubObject *object;
534 
535         if (!rect)
536             return AVERROR(ENOMEM);
537         sub->rects[sub->num_rects++] = rect;
538         rect->type = SUBTITLE_BITMAP;
539 
540         /* Process bitmap */
541         object = find_object(ctx->presentation.objects[i].id, &ctx->objects);
542         if (!object) {
543             // Missing object.  Should only happen with damaged streams.
544             av_log(avctx, AV_LOG_ERROR, "Invalid object id %d\n",
545                    ctx->presentation.objects[i].id);
546             if (avctx->err_recognition & AV_EF_EXPLODE)
547                 return AVERROR_INVALIDDATA;
548             // Leaves rect empty with 0 width and height.
549             continue;
550         }
551         if (ctx->presentation.objects[i].composition_flag & 0x40)
552             rect->flags |= AV_SUBTITLE_FLAG_FORCED;
553 
554         rect->x    = ctx->presentation.objects[i].x;
555         rect->y    = ctx->presentation.objects[i].y;
556 
557         if (object->rle) {
558             rect->w    = object->w;
559             rect->h    = object->h;
560 
561             rect->linesize[0] = object->w;
562 
563             if (object->rle_remaining_len) {
564                 av_log(avctx, AV_LOG_ERROR, "RLE data length %u is %u bytes shorter than expected\n",
565                        object->rle_data_len, object->rle_remaining_len);
566                 if (avctx->err_recognition & AV_EF_EXPLODE)
567                     return AVERROR_INVALIDDATA;
568             }
569             ret = decode_rle(avctx, rect, object->rle, object->rle_data_len);
570             if (ret < 0) {
571                 if ((avctx->err_recognition & AV_EF_EXPLODE) ||
572                     ret == AVERROR(ENOMEM)) {
573                     return ret;
574                 }
575                 rect->w = 0;
576                 rect->h = 0;
577                 continue;
578             }
579         }
580         /* Allocate memory for colors */
581         rect->nb_colors = 256;
582         rect->data[1]   = av_mallocz(AVPALETTE_SIZE);
583         if (!rect->data[1])
584             return AVERROR(ENOMEM);
585 
586         if (!ctx->forced_subs_only || ctx->presentation.objects[i].composition_flag & 0x40)
587             memcpy(rect->data[1], palette->clut, rect->nb_colors * sizeof(uint32_t));
588     }
589     return 1;
590 }
591 
/**
 * Top-level decode callback: split the packet into PGS segments and
 * dispatch each one to its parser.  A subtitle is emitted when a
 * display (end) segment completes the current display set.
 */
static int decode(AVCodecContext *avctx, AVSubtitle *sub,
                  int *got_sub_ptr, const AVPacket *avpkt)
{
    const uint8_t *buf      = avpkt->data;
    const int      buf_size = avpkt->size;
    const uint8_t *buf_end;
    int ret;

    ff_dlog(avctx, "PGS sub packet:\n");

    /* Hex dump of the packet for debug builds. */
    for (int i = 0; i < buf_size; i++) {
        ff_dlog(avctx, "%02x ", buf[i]);
        if (i % 16 == 15)
            ff_dlog(avctx, "\n");
    }
    if (buf_size & 15)
        ff_dlog(avctx, "\n");

    *got_sub_ptr = 0;

    /* Need at least a segment type byte plus a 16-bit segment length. */
    if (buf_size < 3)
        return -1;

    buf_end = buf + buf_size;

    /* Step through buffer to identify segments */
    while (buf < buf_end) {
        uint8_t segment_type   = bytestream_get_byte(&buf);
        int     segment_length = bytestream_get_be16(&buf);

        ff_dlog(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);

        if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
            break;

        ret = 0;
        switch (segment_type) {
        case PALETTE_SEGMENT:
            ret = parse_palette_segment(avctx, buf, segment_length);
            break;
        case OBJECT_SEGMENT:
            ret = parse_object_segment(avctx, buf, segment_length);
            break;
        case PRESENTATION_SEGMENT:
            ret = parse_presentation_segment(avctx, buf, segment_length, sub->pts);
            break;
        case WINDOW_SEGMENT:
            /*
             * Window Segment Structure (No new information provided):
             *     2 bytes: Unknown,
             *     2 bytes: X position of subtitle,
             *     2 bytes: Y position of subtitle,
             *     2 bytes: Width of subtitle,
             *     2 bytes: Height of subtitle.
             */
            break;
        case DISPLAY_SEGMENT:
            if (*got_sub_ptr) {
                av_log(avctx, AV_LOG_ERROR, "Duplicate display segment\n");
                ret = AVERROR_INVALIDDATA;
                break;
            }
            ret = display_end_segment(avctx, sub, buf, segment_length);
            if (ret >= 0)
                *got_sub_ptr = ret;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            ret = AVERROR_INVALIDDATA;
            break;
        }
        if (ret < 0 && (ret == AVERROR(ENOMEM) ||
                        avctx->err_recognition & AV_EF_EXPLODE))
            return ret;

        buf += segment_length;
    }

    return buf_size;
}
678 
#define OFFSET(x) offsetof(PGSSubContext, x)
#define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* Decoder private options. */
static const AVOption options[] = {
    {"forced_subs_only", "Only show forced subtitles", OFFSET(forced_subs_only), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, SD},
    { NULL },
};
685 
/* AVClass so forced_subs_only is user-settable via the options API. */
static const AVClass pgsdec_class = {
    .class_name = "PGS subtitle decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
692 
/* Decoder registration for HDMV PGS (Blu-ray) subtitles. */
const FFCodec ff_pgssub_decoder = {
    .p.name         = "pgssub",
    .p.long_name    = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"),
    .p.type         = AVMEDIA_TYPE_SUBTITLE,
    .p.id           = AV_CODEC_ID_HDMV_PGS_SUBTITLE,
    .priv_data_size = sizeof(PGSSubContext),
    .init           = init_decoder,
    .close          = close_decoder,
    FF_CODEC_DECODE_SUB_CB(decode),
    .p.priv_class   = &pgsdec_class,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
705