/*
 * Ut Video decoder
 * Copyright (c) 2011 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Ut Video decoder
 */

#include <inttypes.h>
#include <stdlib.h>

#define CACHED_BITSTREAM_READER !ARCH_X86_32
#define UNCHECKED_BITSTREAM_READER 1

#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "thread.h"
#include "utvideo.h"

typedef struct HuffEntry {
    uint8_t  len;
    uint16_t sym;
} HuffEntry;

static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc,
                      int *fsym, unsigned nb_elems)
{
    int i;
    HuffEntry he[1024];
    uint8_t bits[1024];
    uint16_t codes_count[33] = { 0 };

    *fsym = -1;
    for (i = 0; i < nb_elems; i++) {
        if (src[i] == 0) {
            *fsym = i;
            return 0;
        } else if (src[i] == 255) {
            bits[i] = 0;
        } else if (src[i] <= 32) {
            bits[i] = src[i];
        } else
            return AVERROR_INVALIDDATA;

        codes_count[bits[i]]++;
    }
    if (codes_count[0] == nb_elems)
        return AVERROR_INVALIDDATA;

    /* For Ut Video, longer codes are to the left of the tree and
     * for codes with the same length the symbol is descending from
     * left to right. So after the next loop --codes_count[i] will
     * be the index of the first (lowest) symbol of length i when
     * indexed by the position in the tree with left nodes being first.
     */
    for (int i = 31; i >= 0; i--)
        codes_count[i] += codes_count[i + 1];

    for (unsigned i = 0; i < nb_elems; i++)
        he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i };

#define VLC_BITS 11
    return ff_init_vlc_from_lengths(vlc, VLC_BITS, codes_count[0],
                                    &he[0].len, sizeof(*he),
                                    &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx);
}
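
/* Decode one 10-bit plane. `huff` points at the plane's 1024-entry table of
 * code lengths (located by decode_frame() at the end of the plane's data),
 * while `src` points at the per-slice end offsets (c->slices little-endian
 * 32-bit values) followed by the slice data. With left prediction enabled,
 * samples accumulate from a 0x200 bias and wrap to 10 bits. */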
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;

    if ((ret = build_huff(c, huff, &vlc, &fsym, 1024)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices);
            dest   = dst + sstart * stride;

            prev = 0x200;
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF;
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices);
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x200;
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    prev &= 0x3FF;
                    pix   = prev;
                }
                dest[i] = pix;
            }
            dest += stride;
            if (get_bits_left(&gb) < 0) {
                av_log(c->avctx, AV_LOG_ERROR,
                       "Slice decoding ran out of bits\n");
                goto fail;
            }
        }
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
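
/* Mask used to round slice boundaries down: progressive 4:2:0 luma needs even
 * boundaries so each chroma line gets both of its luma lines, interlaced
 * content needs even boundaries (multiples of 4 for 4:2:0 luma) so field
 * pairs stay together; everything else is left unrounded. */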
static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
{
    const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;

    if (interlaced)
        return ~(1 + 2 * is_luma);

    return ~is_luma;
}

static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int ret, prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);
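
    /* Packed ("UMxx") planes bypass the Huffman path entirely: the control
     * stream holds one 3-bit code per group of 8 samples. Code 0 marks an
     * all-zero group; any other code means the 8 samples follow in the packed
     * stream, (code + 1) bits each, and the loop below removes their
     * (1 << code) bias. The resulting residuals are turned back into pixels
     * by the gradient restore applied later in decode_frame(). */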
    if (c->pack) {
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            GetBitContext cbit, pbit;
            uint8_t *dest, *p;

            ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
                return AVERROR_INVALIDDATA;

            for (p = dest; p < dst + send * stride; p += 8) {
                int bits = get_bits_le(&cbit, 3);

                if (bits == 0) {
                    *(uint64_t *) p = 0;
                } else {
                    uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
                    int k;

                    if ((bits + 1) * 8 > get_bits_left(&pbit))
                        return AVERROR_INVALIDDATA;

                    for (k = 0; k < 8; k++) {
                        p[k] = get_bits_le(&pbit, bits + 1);
                        add = (~p[k] & sub) << (8 - bits);
                        p[k] -= sub;
                        p[k] += add;
                    }
                }
            }
        }

        return 0;
    }

    if (build_huff(c, src, &vlc, &fsym, 256)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            prev = 0x80;
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += (unsigned)pix;
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    src += 256;

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices) & cmask;
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80;
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    pix   = prev;
                }
                dest[i] = pix;
            }
            if (get_bits_left(&gb) < 0) {
                av_log(c->avctx, AV_LOG_ERROR,
                       "Slice decoding ran out of bits\n");
                goto fail;
            }
            dest += stride;
        }
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}

#undef A
#undef B
#undef C
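
/* Median prediction restore: the first line of each slice uses plain left
 * prediction (with the implicit 0x80 bias), the first sample of the second
 * line uses the pixel above it, and every remaining sample adds
 * median(left, top, left + top - topleft) - the same spatial predictor used
 * by HuffYUV-style codecs. */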
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest use median
        C        = bsrc[-stride];
        bsrc[0] += C;
        A        = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP needs 16-byte alignment) */
            B        = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C        = B;
            A        = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
                                        bsrc + 16, width - 16, &A, &B);

        bsrc += stride;
        // the remaining lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride,
                                        bsrc, width, &A, &B);
            bsrc += stride;
        }
    }
}

/* UtVideo interlaced mode treats every two lines as a single one,
 * so the restoring function has to take care of possible padding between
 * the two parts of the same "line".
 */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start    = ((slice * height) / slices) & cmask;
        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
                         slice_start;
        slice_height >>= 1;
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest use median
        C        = bsrc[-stride2];
        bsrc[0] += C;
        A        = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP needs 16-byte alignment) */
            B        = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C        = B;
            A        = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                        bsrc + 16, width - 16, &A, &B);

        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        bsrc += stride2;
        // the remaining lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
            bsrc += stride2;
        }
    }
}
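
/* Gradient prediction restore: after the left-predicted first line, every
 * sample adds left + top - topleft (modulo 256), matching the
 * add_gradient_pred DSP routine used for the aligned tail of each row. */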
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                    int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest use gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* scalar loop (DSP needs 32-byte alignment) */
                A = bsrc[i - stride];
                B = bsrc[i - (stride + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
            bsrc += stride;
        }
    }
}

static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start    = ((slice * height) / slices) & cmask;
        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
                         slice_start;
        slice_height >>= 1;
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest use gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* scalar loop (DSP needs 32-byte alignment) */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);

            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}

static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    UtvideoContext *c = avctx->priv_data;
    int i, j;
    const uint8_t *plane_start[5];
    int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
    int ret;
    GetByteContext gb;

    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    /* parse plane structure to get frame flags and validate slice offsets */
    bytestream2_init(&gb, buf, buf_size);

    if (c->pack) {
        const uint8_t *packed_stream;
        const uint8_t *control_stream;
        GetByteContext pb;
        uint32_t nb_cbs;
        int left;

        c->frame_info = PRED_GRADIENT << 8;

        if (bytestream2_get_byte(&gb) != 1)
            return AVERROR_INVALIDDATA;
        bytestream2_skip(&gb, 3);
        c->offset = bytestream2_get_le32(&gb);

        if (buf_size <= c->offset + 8LL)
            return AVERROR_INVALIDDATA;

        bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);

        nb_cbs = bytestream2_get_le32(&pb);
        if (nb_cbs > c->offset)
            return AVERROR_INVALIDDATA;

        packed_stream = buf + 8;
        control_stream = packed_stream + (c->offset - nb_cbs);
        left = control_stream - packed_stream;
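
        /* The index block at buf + 8 + offset lists the byte size of every
         * packed stream and then of every control stream, plane-major and
         * slice-minor; the streams themselves are laid out in the same order
         * starting right after the 8-byte frame header. */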
        for (i = 0; i < c->planes; i++) {
            for (j = 0; j < c->slices; j++) {
                c->packed_stream[i][j] = packed_stream;
                c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
                if (c->packed_stream_size[i][j] > left)
                    return AVERROR_INVALIDDATA;
                left -= c->packed_stream_size[i][j];
                packed_stream += c->packed_stream_size[i][j];
            }
        }

        left = buf + buf_size - control_stream;

        for (i = 0; i < c->planes; i++) {
            for (j = 0; j < c->slices; j++) {
                c->control_stream[i][j] = control_stream;
                c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
                if (c->control_stream_size[i][j] > left)
                    return AVERROR_INVALIDDATA;
                left -= c->control_stream_size[i][j];
                control_stream += c->control_stream_size[i][j];
            }
        }
    } else if (c->pro) {
        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
            return AVERROR_INVALIDDATA;
        }
        c->frame_info = bytestream2_get_le32u(&gb);
        c->slices = ((c->frame_info >> 16) & 0xff) + 1;
        for (i = 0; i < c->planes; i++) {
            plane_start[i] = gb.buffer;
            if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                return AVERROR_INVALIDDATA;
            }
            slice_start = 0;
            slice_end   = 0;
            for (j = 0; j < c->slices; j++) {
                slice_end   = bytestream2_get_le32u(&gb);
                if (slice_end < 0 || slice_end < slice_start ||
                    bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                    return AVERROR_INVALIDDATA;
                }
                slice_size  = slice_end - slice_start;
                slice_start = slice_end;
                max_slice_size = FFMAX(max_slice_size, slice_size);
            }
            plane_size = slice_end;
            bytestream2_skipu(&gb, plane_size);
            bytestream2_skipu(&gb, 1024);
        }
        plane_start[c->planes] = gb.buffer;
    } else {
        for (i = 0; i < c->planes; i++) {
            plane_start[i] = gb.buffer;
            if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                return AVERROR_INVALIDDATA;
            }
            bytestream2_skipu(&gb, 256);
            slice_start = 0;
            slice_end   = 0;
            for (j = 0; j < c->slices; j++) {
                slice_end   = bytestream2_get_le32u(&gb);
                if (slice_end < 0 || slice_end < slice_start ||
                    bytestream2_get_bytes_left(&gb) < slice_end) {
                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                    return AVERROR_INVALIDDATA;
                }
                slice_size  = slice_end - slice_start;
                slice_start = slice_end;
                max_slice_size = FFMAX(max_slice_size, slice_size);
            }
            plane_size = slice_end;
            bytestream2_skipu(&gb, plane_size);
        }
        plane_start[c->planes] = gb.buffer;
        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
            return AVERROR_INVALIDDATA;
        }
        c->frame_info = bytestream2_get_le32u(&gb);
    }
    av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
           c->frame_info);

    c->frame_pred = (c->frame_info >> 8) & 3;

    max_slice_size += 4*avctx->width;

    if (!c->pack) {
        av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
                       max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);

        if (!c->slice_bits) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
            return AVERROR(ENOMEM);
        }
    }
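
    /* Planes are decoded and their prediction undone independently; for the
     * RGB formats the B and R planes are stored relative to G and folded
     * back by restore_rgb_planes()/restore_rgb_planes10() afterwards. */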
    switch (c->avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRAP:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane(c, i, frame->data[i],
                               frame->linesize[i], avctx->width,
                               avctx->height, plane_start[i],
                               c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame->data[i],
                                          frame->linesize[i], avctx->width,
                                          avctx->height, c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame->data[i],
                                             frame->linesize[i],
                                             avctx->width, avctx->height, c->slices,
                                             0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame->data[i],
                                            frame->linesize[i], avctx->width,
                                            avctx->height, c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame->data[i],
                                               frame->linesize[i],
                                               avctx->width, avctx->height, c->slices,
                                               0);
                }
            }
        }
        c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
                                    frame->linesize[2], frame->linesize[0], frame->linesize[1],
                                    avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_GBRAP10:
    case AV_PIX_FMT_GBRP10:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
                                 frame->linesize[i] / 2, avctx->width,
                                 avctx->height, plane_start[i],
                                 plane_start[i + 1] - 1024,
                                 c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
                                      frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
                                      avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_YUV420P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
                               avctx->width >> !!i, avctx->height >> !!i,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame->data[i], frame->linesize[i],
                                          avctx->width >> !!i, avctx->height >> !!i,
                                          c->slices, !i);
                } else {
                    restore_median_planar_il(c, frame->data[i], frame->linesize[i],
                                             avctx->width  >> !!i,
                                             avctx->height >> !!i,
                                             c->slices, !i);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame->data[i], frame->linesize[i],
                                            avctx->width >> !!i, avctx->height >> !!i,
                                            c->slices, !i);
                } else {
                    restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
                                               avctx->width  >> !!i,
                                               avctx->height >> !!i,
                                               c->slices, !i);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV422P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
                               avctx->width >> !!i, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame->data[i], frame->linesize[i],
                                          avctx->width >> !!i, avctx->height,
                                          c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame->data[i], frame->linesize[i],
                                             avctx->width >> !!i, avctx->height,
                                             c->slices, 0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame->data[i], frame->linesize[i],
                                            avctx->width >> !!i, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
                                               avctx->width >> !!i, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV444P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
                               avctx->width, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame->data[i], frame->linesize[i],
                                          avctx->width, avctx->height,
                                          c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame->data[i], frame->linesize[i],
                                             avctx->width, avctx->height,
                                             c->slices, 0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame->data[i], frame->linesize[i],
                                            avctx->width, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
                                               avctx->width, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV420P10:
        for (i = 0; i < 3; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
                                 avctx->width >> !!i, avctx->height >> !!i,
                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        break;
    case AV_PIX_FMT_YUV422P10:
        for (i = 0; i < 3; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
                                 avctx->width >> !!i, avctx->height,
                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        break;
    }

    frame->key_frame = 1;
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->interlaced_frame = !!c->interlaced;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    UtvideoContext * const c = avctx->priv_data;
    int h_shift, v_shift;

    c->avctx = avctx;

    ff_utvideodsp_init(&c->utdsp);
    ff_bswapdsp_init(&c->bdsp);
    ff_llviddsp_init(&c->llviddsp);

    c->slice_bits_size = 0;

    switch (avctx->codec_tag) {
    case MKTAG('U', 'L', 'R', 'G'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case MKTAG('U', 'L', 'R', 'A'):
        c->planes = 4;
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case MKTAG('U', 'L', 'Y', '0'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'L', 'Y', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'L', 'Y', '4'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'Q', 'Y', '0'):
        c->planes = 3;
        c->pro = 1;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
        break;
    case MKTAG('U', 'Q', 'Y', '2'):
        c->planes = 3;
        c->pro = 1;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        break;
    case MKTAG('U', 'Q', 'R', 'G'):
        c->planes = 3;
        c->pro = 1;
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        break;
    case MKTAG('U', 'Q', 'R', 'A'):
        c->planes = 4;
        c->pro = 1;
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        break;
    case MKTAG('U', 'L', 'H', '0'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'L', 'H', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'L', 'H', '4'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'M', 'Y', '2'):
        c->planes = 3;
        c->pack = 1;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'M', 'H', '2'):
        c->planes = 3;
        c->pack = 1;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'M', 'Y', '4'):
        c->planes = 3;
        c->pack = 1;
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'M', 'H', '4'):
        c->planes = 3;
        c->pack = 1;
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'M', 'R', 'G'):
        c->planes = 3;
        c->pack = 1;
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case MKTAG('U', 'M', 'R', 'A'):
        c->planes = 4;
        c->pack = 1;
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
               avctx->codec_tag);
        return AVERROR_INVALIDDATA;
    }

    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
    if ((avctx->width  & ((1<<h_shift)-1)) ||
        (avctx->height & ((1<<v_shift)-1))) {
        avpriv_request_sample(avctx, "Odd dimensions");
        return AVERROR_PATCHWELCOME;
    }
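
    /* Extradata layout (as read below): bytes 0-3 encoder version, 4-7 the
     * original pixel format FOURCC, then either the compression type and
     * slice count (packed "UMxx" files), the frame info size plus a flags
     * word whose top byte holds the slice count and bit 0x800 the interlace
     * flag (classic files), or nothing beyond the first 8 bytes (Pro files). */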
    if (c->pack && avctx->extradata_size >= 16) {
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->compression = avctx->extradata[8];
        if (c->compression != 2)
            avpriv_request_sample(avctx, "Unknown compression type");
        c->slices      = avctx->extradata[9] + 1;
    } else if (!c->pro && avctx->extradata_size >= 16) {
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->frame_info_size = AV_RL32(avctx->extradata + 8);
        c->flags           = AV_RL32(avctx->extradata + 12);

        if (c->frame_info_size != 4)
            avpriv_request_sample(avctx, "Frame info not 4 bytes");
        av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
        c->slices      = (c->flags >> 24) + 1;
        c->compression = c->flags & 1;
        c->interlaced  = c->flags & 0x800;
    } else if (c->pro && avctx->extradata_size == 8) {
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->interlaced  = 0;
        c->frame_info_size = 4;
    } else {
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient extradata size %d, should be at least 16\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    UtvideoContext * const c = avctx->priv_data;

    av_freep(&c->slice_bits);

    return 0;
}

const FFCodec ff_utvideo_decoder = {
    .p.name         = "utvideo",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Ut Video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_UTVIDEO,
    .priv_data_size = sizeof(UtvideoContext),
    .init           = decode_init,
    .close          = decode_end,
    FF_CODEC_DECODE_CB(decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};