1/* 2 * Raw Video Decoder 3 * Copyright (c) 2001 Fabrice Bellard 4 * 5 * This file is part of FFmpeg. 6 * 7 * FFmpeg is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2.1 of the License, or (at your option) any later version. 11 * 12 * FFmpeg is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with FFmpeg; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 20 */ 21 22/** 23 * @file 24 * Raw Video Decoder 25 */ 26 27#include "avcodec.h" 28#include "bswapdsp.h" 29#include "codec_internal.h" 30#include "decode.h" 31#include "get_bits.h" 32#include "internal.h" 33#include "raw.h" 34#include "libavutil/avassert.h" 35#include "libavutil/buffer.h" 36#include "libavutil/common.h" 37#include "libavutil/intreadwrite.h" 38#include "libavutil/imgutils.h" 39#include "libavutil/opt.h" 40 41typedef struct RawVideoContext { 42 AVClass *av_class; 43 AVBufferRef *palette; 44 int frame_size; /* size of the frame in bytes */ 45 int flip; 46 int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut 47 int is_mono; 48 int is_pal8; 49 int is_nut_mono; 50 int is_nut_pal8; 51 int is_yuv2; 52 int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16 53 int tff; 54 55 BswapDSPContext bbdsp; 56 void *bitstream_buf; 57 unsigned int bitstream_buf_size; 58} RawVideoContext; 59 60static const AVOption options[]={ 61{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM}, 62{NULL} 63}; 64 65static const AVClass rawdec_class = { 66 .class_name = "rawdec", 67 .option = options, 68 .version = LIBAVUTIL_VERSION_INT, 69}; 70 71static av_cold int raw_init_decoder(AVCodecContext *avctx) 72{ 73 RawVideoContext *context = avctx->priv_data; 74 const AVPixFmtDescriptor *desc; 75 76 ff_bswapdsp_init(&context->bbdsp); 77 78 if ( avctx->codec_tag == MKTAG('r','a','w',' ') 79 || avctx->codec_tag == MKTAG('N','O','1','6')) 80 avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_MOV, 81 avctx->bits_per_coded_sample); 82 else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W')) 83 avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI, 84 avctx->bits_per_coded_sample); 85 else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0)) 86 avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, avctx->codec_tag); 87 else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample) 88 avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI, 89 avctx->bits_per_coded_sample); 90 91 desc = av_pix_fmt_desc_get(avctx->pix_fmt); 92 if (!desc) { 93 av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n"); 94 return AVERROR(EINVAL); 95 } 96 97 if (desc->flags & AV_PIX_FMT_FLAG_PAL) { 98 context->palette = av_buffer_alloc(AVPALETTE_SIZE); 99 if (!context->palette) 100 return AVERROR(ENOMEM); 101 memset(context->palette->data, 0, AVPALETTE_SIZE); 102 if (avctx->bits_per_coded_sample == 1) 103 memset(context->palette->data, 0xff, 4); 104 } 105 106 if ((avctx->extradata_size >= 9 && 107 !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) || 108 avctx->codec_tag == MKTAG('c','y','u','v') || 109 avctx->codec_tag == MKTAG(3, 0, 0, 0) || 110 avctx->codec_tag == MKTAG('W','R','A','W')) 111 context->flip = 1; 112 113 if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE || 114 avctx->pix_fmt == AV_PIX_FMT_MONOBLACK) 115 context->is_mono = 1; 116 else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) 117 context->is_pal8 = 1; 118 119 if (avctx->codec_tag == MKTAG('B','1','W','0') || 120 avctx->codec_tag == MKTAG('B','0','W','1')) 121 context->is_nut_mono = 1; 122 else if (avctx->codec_tag == MKTAG('P','A','L',8)) 123 context->is_nut_pal8 = 1; 124 125 if (avctx->codec_tag == AV_RL32("yuv2") && 126 avctx->pix_fmt == AV_PIX_FMT_YUYV422) 127 context->is_yuv2 = 1; 128 129 return 0; 130} 131 132static void flip(AVCodecContext *avctx, AVFrame *frame) 133{ 134 frame->data[0] += frame->linesize[0] * (avctx->height - 1); 135 frame->linesize[0] *= -1; 136} 137 138/* 139 * Scale sample to 16-bit resolution 140 */ 141#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16))) 142 143/** 144 * Scale buffer to 16 bits per coded sample resolution 145 */ 146#define MKSCALE16(name, r16, w16) \ 147static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \ 148{ \ 149 int i; \ 150 if (!packed) { \ 151 for (i = 0; i + 1 < buf_size; i += 2) \ 152 w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \ 153 } else { \ 154 GetBitContext gb; \ 155 init_get_bits(&gb, buf, buf_size * 8); \ 156 for (i = 0; i < avctx->width * avctx->height; i++) { \ 157 int sample = get_bits(&gb, avctx->bits_per_coded_sample); \ 158 w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \ 159 } \ 160 } \ 161} 162 163MKSCALE16(scale16be, AV_RB16, AV_WB16) 164MKSCALE16(scale16le, AV_RL16, AV_WL16) 165 166static int raw_decode(AVCodecContext *avctx, AVFrame *frame, 167 int *got_frame, AVPacket *avpkt) 168{ 169 const AVPixFmtDescriptor *desc; 170 RawVideoContext *context = avctx->priv_data; 171 const uint8_t *buf = avpkt->data; 172 int buf_size = avpkt->size; 173 int linesize_align = 4; 174 int stride; 175 int res, len; 176 int need_copy; 177 178 if (avctx->width <= 0) { 179 av_log(avctx, AV_LOG_ERROR, "width is not set\n"); 180 return AVERROR_INVALIDDATA; 181 } 182 if (avctx->height <= 0) { 183 av_log(avctx, AV_LOG_ERROR, "height is not set\n"); 184 return AVERROR_INVALIDDATA; 185 } 186 187 if (context->is_nut_mono) 188 stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0); 189 else if (context->is_nut_pal8) 190 stride = avctx->width; 191 else 192 stride = avpkt->size / avctx->height; 193 194 av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride); 195 196 if (stride == 0 || avpkt->size < stride * avctx->height) { 197 av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size); 198 return AVERROR_INVALIDDATA; 199 } 200 201 desc = av_pix_fmt_desc_get(avctx->pix_fmt); 202 203 if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 || 204 avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 || 205 (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) && 206 (context->is_mono || context->is_pal8) && 207 (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') || 208 context->is_nut_mono || context->is_nut_pal8)) { 209 context->is_1_2_4_8_bpp = 1; 210 if (context->is_mono) { 211 int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0); 212 context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, 213 FFALIGN(row_bytes, 16) * 8, 214 avctx->height, 1); 215 } else 216 context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, 217 FFALIGN(avctx->width, 16), 218 avctx->height, 1); 219 } else { 220 context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16; 221 context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, 222 avctx->height, 1); 223 } 224 if (context->frame_size < 0) 225 return context->frame_size; 226 227 need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp; 228 229 frame->pict_type = AV_PICTURE_TYPE_I; 230 frame->key_frame = 1; 231 232 res = ff_decode_frame_props(avctx, frame); 233 if (res < 0) 234 return res; 235 236 frame->pkt_pos = avctx->internal->last_pkt_props->pos; 237 frame->pkt_duration = avctx->internal->last_pkt_props->duration; 238 239 if (context->tff >= 0) { 240 frame->interlaced_frame = 1; 241 frame->top_field_first = context->tff; 242 } 243 244 if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0) 245 return res; 246 247 if (need_copy) 248 frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size)); 249 else 250 frame->buf[0] = av_buffer_ref(avpkt->buf); 251 if (!frame->buf[0]) 252 return AVERROR(ENOMEM); 253 254 // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut 255 if (context->is_1_2_4_8_bpp) { 256 int i, j, row_pix = 0; 257 uint8_t *dst = frame->buf[0]->data; 258 buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0); 259 if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) { 260 int pix_per_byte = context->is_mono ? 8 : 1; 261 for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) { 262 dst[j] = buf[i]; 263 row_pix += pix_per_byte; 264 if (row_pix >= avctx->width) { 265 i += stride - (i % stride) - 1; 266 j += 16 - (j % 16) - 1; 267 row_pix = 0; 268 } 269 } 270 } else if (avctx->bits_per_coded_sample == 4) { 271 for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) { 272 dst[2 * j + 0] = buf[i] >> 4; 273 dst[2 * j + 1] = buf[i] & 15; 274 row_pix += 2; 275 if (row_pix >= avctx->width) { 276 i += stride - (i % stride) - 1; 277 j += 8 - (j % 8) - 1; 278 row_pix = 0; 279 } 280 } 281 } else if (avctx->bits_per_coded_sample == 2) { 282 for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) { 283 dst[4 * j + 0] = buf[i] >> 6; 284 dst[4 * j + 1] = buf[i] >> 4 & 3; 285 dst[4 * j + 2] = buf[i] >> 2 & 3; 286 dst[4 * j + 3] = buf[i] & 3; 287 row_pix += 4; 288 if (row_pix >= avctx->width) { 289 i += stride - (i % stride) - 1; 290 j += 4 - (j % 4) - 1; 291 row_pix = 0; 292 } 293 } 294 } else { 295 av_assert0(avctx->bits_per_coded_sample == 1); 296 for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) { 297 dst[8 * j + 0] = buf[i] >> 7; 298 dst[8 * j + 1] = buf[i] >> 6 & 1; 299 dst[8 * j + 2] = buf[i] >> 5 & 1; 300 dst[8 * j + 3] = buf[i] >> 4 & 1; 301 dst[8 * j + 4] = buf[i] >> 3 & 1; 302 dst[8 * j + 5] = buf[i] >> 2 & 1; 303 dst[8 * j + 6] = buf[i] >> 1 & 1; 304 dst[8 * j + 7] = buf[i] & 1; 305 row_pix += 8; 306 if (row_pix >= avctx->width) { 307 i += stride - (i % stride) - 1; 308 j += 2 - (j % 2) - 1; 309 row_pix = 0; 310 } 311 } 312 } 313 linesize_align = 16; 314 buf = dst; 315 } else if (context->is_lt_16bpp) { 316 uint8_t *dst = frame->buf[0]->data; 317 int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0); 318 int swap = avctx->codec_tag >> 24; 319 320 if (packed && swap) { 321 av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size); 322 if (!context->bitstream_buf) 323 return AVERROR(ENOMEM); 324 if (swap == 16) 325 context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2); 326 else if (swap == 32) 327 context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4); 328 else 329 return AVERROR_INVALIDDATA; 330 buf = context->bitstream_buf; 331 } 332 333 if (desc->flags & AV_PIX_FMT_FLAG_BE) 334 scale16be(avctx, dst, buf, buf_size, packed); 335 else 336 scale16le(avctx, dst, buf, buf_size, packed); 337 338 buf = dst; 339 } else if (need_copy) { 340 memcpy(frame->buf[0]->data, buf, buf_size); 341 buf = frame->buf[0]->data; 342 } 343 344 if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') || 345 avctx->codec_tag == MKTAG('A', 'V', 'u', 'p')) 346 buf += buf_size - context->frame_size; 347 348 len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0); 349 if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) { 350 av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len); 351 av_buffer_unref(&frame->buf[0]); 352 return AVERROR(EINVAL); 353 } 354 355 if ((res = av_image_fill_arrays(frame->data, frame->linesize, 356 buf, avctx->pix_fmt, 357 avctx->width, avctx->height, 1)) < 0) { 358 av_buffer_unref(&frame->buf[0]); 359 return res; 360 } 361 362 if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { 363 int ret; 364 365 if (!context->palette) 366 context->palette = av_buffer_alloc(AVPALETTE_SIZE); 367 if (!context->palette) { 368 av_buffer_unref(&frame->buf[0]); 369 return AVERROR(ENOMEM); 370 } 371 ret = av_buffer_make_writable(&context->palette); 372 if (ret < 0) { 373 av_buffer_unref(&frame->buf[0]); 374 return ret; 375 } 376 377 if (ff_copy_palette(context->palette->data, avpkt, avctx)) { 378 frame->palette_has_changed = 1; 379 } else if (context->is_nut_pal8) { 380 int vid_size = avctx->width * avctx->height; 381 int pal_size = avpkt->size - vid_size; 382 383 if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) { 384 const uint8_t *pal = avpkt->data + vid_size; 385 memcpy(context->palette->data, pal, pal_size); 386 frame->palette_has_changed = 1; 387 } 388 } 389 } 390 391 if ((avctx->pix_fmt==AV_PIX_FMT_RGB24 || 392 avctx->pix_fmt==AV_PIX_FMT_BGR24 || 393 avctx->pix_fmt==AV_PIX_FMT_GRAY8 || 394 avctx->pix_fmt==AV_PIX_FMT_RGB555LE || 395 avctx->pix_fmt==AV_PIX_FMT_RGB555BE || 396 avctx->pix_fmt==AV_PIX_FMT_RGB565LE || 397 avctx->pix_fmt==AV_PIX_FMT_MONOWHITE || 398 avctx->pix_fmt==AV_PIX_FMT_MONOBLACK || 399 avctx->pix_fmt==AV_PIX_FMT_PAL8) && 400 FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size) 401 frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align); 402 403 if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') && 404 FFALIGN(frame->linesize[0], linesize_align) * avctx->height + 405 FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) { 406 int la0 = FFALIGN(frame->linesize[0], linesize_align); 407 frame->data[1] += (la0 - frame->linesize[0]) * avctx->height; 408 frame->linesize[0] = la0; 409 frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align); 410 } 411 412 if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) { 413 frame->buf[1] = av_buffer_ref(context->palette); 414 if (!frame->buf[1]) { 415 av_buffer_unref(&frame->buf[0]); 416 return AVERROR(ENOMEM); 417 } 418 frame->data[1] = frame->buf[1]->data; 419 } 420 421 if (avctx->pix_fmt == AV_PIX_FMT_BGR24 && 422 ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size) 423 frame->linesize[0] = (frame->linesize[0] + 3) & ~3; 424 425 if (context->flip) 426 flip(avctx, frame); 427 428 if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') || 429 avctx->codec_tag == MKTAG('Y', 'V', '1', '6') || 430 avctx->codec_tag == MKTAG('Y', 'V', '2', '4') || 431 avctx->codec_tag == MKTAG('Y', 'V', 'U', '9')) 432 FFSWAP(uint8_t *, frame->data[1], frame->data[2]); 433 434 if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) { 435 frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height; 436 frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4; 437 } 438 439 if (avctx->codec_tag == AV_RL32("yuv2") && 440 avctx->pix_fmt == AV_PIX_FMT_YUYV422) { 441 int x, y; 442 uint8_t *line = frame->data[0]; 443 for (y = 0; y < avctx->height; y++) { 444 for (x = 0; x < avctx->width; x++) 445 line[2 * x + 1] ^= 0x80; 446 line += frame->linesize[0]; 447 } 448 } 449 450 if (avctx->codec_tag == AV_RL32("b64a") && 451 avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) { 452 uint8_t *dst = frame->data[0]; 453 uint64_t v; 454 int x, y; 455 for (y = 0; y < avctx->height; y++) { 456 for (x = 0; x >> 3 < avctx->width; x += 8) { 457 v = AV_RB64(&dst[x]); 458 AV_WB64(&dst[x], v << 16 | v >> 48); 459 } 460 dst += frame->linesize[0]; 461 } 462 } 463 464 if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */ 465 frame->interlaced_frame = 1; 466 if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB) 467 frame->top_field_first = 1; 468 } 469 470 *got_frame = 1; 471 return buf_size; 472} 473 474static av_cold int raw_close_decoder(AVCodecContext *avctx) 475{ 476 RawVideoContext *context = avctx->priv_data; 477 478 av_buffer_unref(&context->palette); 479 av_freep(&context->bitstream_buf); 480 return 0; 481} 482 483const FFCodec ff_rawvideo_decoder = { 484 .p.name = "rawvideo", 485 .p.long_name = NULL_IF_CONFIG_SMALL("raw video"), 486 .p.type = AVMEDIA_TYPE_VIDEO, 487 .p.id = AV_CODEC_ID_RAWVIDEO, 488 .priv_data_size = sizeof(RawVideoContext), 489 .init = raw_init_decoder, 490 .close = raw_close_decoder, 491 FF_CODEC_DECODE_CB(raw_decode), 492 .p.priv_class = &rawdec_class, 493 .p.capabilities = AV_CODEC_CAP_PARAM_CHANGE, 494 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, 495}; 496