/*
 * Duck/ON2 TrueMotion 2 Decoder
 * Copyright (c) 2005 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Duck TrueMotion2 decoder.
 */

#include <inttypes.h>

#include "avcodec.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "internal.h"

#define TM2_ESCAPE 0x80000000
#define TM2_DELTAS 64

/* Huffman-coded streams of different types of blocks */
enum TM2_STREAMS {
    TM2_C_HI = 0,
    TM2_C_LO,
    TM2_L_HI,
    TM2_L_LO,
    TM2_UPD,
    TM2_MOT,
    TM2_TYPE,
    TM2_NUM_STREAMS
};

/* Block types */
enum TM2_BLOCKS {
    TM2_HI_RES = 0,
    TM2_MED_RES,
    TM2_LOW_RES,
    TM2_NULL_RES,
    TM2_UPDATE,
    TM2_STILL,
    TM2_MOTION
};

typedef struct TM2Context {
    AVCodecContext *avctx;
    AVFrame *pic;

    GetBitContext gb;
    int error;
    BswapDSPContext bdsp;

    uint8_t *buffer;
    int buffer_size;

    /* TM2 streams */
    int *tokens[TM2_NUM_STREAMS];
    int tok_lens[TM2_NUM_STREAMS];
    int tok_ptrs[TM2_NUM_STREAMS];
    int deltas[TM2_NUM_STREAMS][TM2_DELTAS];
    /* for blocks decoding */
    int D[4];
    int CD[4];
    int *last;
    int *clast;

    /* data for current and previous frame */
    int *Y_base, *UV_base;
    int *Y1, *U1, *V1, *Y2, *U2, *V2;
    int y_stride, uv_stride;
    int cur;
} TM2Context;

/**
 * Huffman codes for each of the streams
 */
typedef struct TM2Codes {
    VLC vlc;     ///< table for FFmpeg bitstream reader
    int bits;
    int *recode; ///< table for converting from code indexes to values
    int length;
} TM2Codes;

/**
 * Structure for gathering Huffman code information
 */
typedef struct TM2Huff {
    int val_bits;  ///< length of literal
    int max_bits;  ///< maximum length of code
    int min_bits;  ///< minimum length of code
    int nodes;     ///< total number of nodes in tree
    int num;       ///< current number filled
    int max_num;   ///< total number of codes
    int *nums;     ///< literals
    uint8_t *lens; ///< codelengths
} TM2Huff;

/**
 * Read a Huffman tree description from the bitstream.
 *
 * @returns the length of the longest code or an AVERROR code
 */
static int tm2_read_tree(TM2Context *ctx, int length, TM2Huff *huff)
{
    int ret, ret2;
    if (length > huff->max_bits) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n",
               huff->max_bits);
        return AVERROR_INVALIDDATA;
    }

    if (!get_bits1(&ctx->gb)) { /* literal */
        if (length == 0) {
            length = 1;
        }
        if (huff->num >= huff->max_num) {
            av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
            return AVERROR_INVALIDDATA;
        }
        huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
        huff->lens[huff->num] = length;
        huff->num++;
        return length;
    } else { /* non-terminal node */
        if ((ret2 = tm2_read_tree(ctx, length + 1, huff)) < 0)
            return ret2;
        if ((ret = tm2_read_tree(ctx, length + 1, huff)) < 0)
            return ret;
    }
    return FFMAX(ret, ret2);
}

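/* Read the Huffman tree for one stream and build the VLC table plus the
 * code-index -> literal mapping used by tm2_get_token(). */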
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
{
    TM2Huff huff;
    int res = 0;

    huff.val_bits = get_bits(&ctx->gb, 5);
    huff.max_bits = get_bits(&ctx->gb, 5);
    huff.min_bits = get_bits(&ctx->gb, 5);
    huff.nodes    = get_bits(&ctx->gb, 17);
    huff.num      = 0;

    /* check for correct code parameters */
    if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
        (huff.max_bits < 0) || (huff.max_bits > 25)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
               "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
        return AVERROR_INVALIDDATA;
    }
    if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
               "nodes: %i\n", huff.nodes);
        return AVERROR_INVALIDDATA;
    }
    /* one-node tree */
    if (huff.max_bits == 0)
        huff.max_bits = 1;

    /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
    huff.max_num = (huff.nodes + 1) >> 1;
    huff.nums    = av_calloc(huff.max_num, sizeof(int));
    huff.lens    = av_mallocz(huff.max_num);

    if (!huff.nums || !huff.lens) {
        res = AVERROR(ENOMEM);
        goto out;
    }

    res = tm2_read_tree(ctx, 0, &huff);

    if (res >= 0 && res != huff.max_bits) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got less bits than expected: %i of %i\n",
               res, huff.max_bits);
        res = AVERROR_INVALIDDATA;
    }
    if (huff.num != huff.max_num) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
               huff.num, huff.max_num);
        res = AVERROR_INVALIDDATA;
    }

    /* convert codes to vlc_table */
    if (res >= 0) {
        res = ff_init_vlc_from_lengths(&code->vlc, huff.max_bits, huff.max_num,
                                       huff.lens, sizeof(huff.lens[0]),
                                       NULL, 0, 0, 0, 0, ctx->avctx);
        if (res < 0)
            av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
        else {
            code->bits   = huff.max_bits;
            code->length = huff.max_num;
            code->recode = huff.nums;
            huff.nums    = NULL;
        }
    }

out:
    /* free allocated memory */
    av_free(huff.nums);
    av_free(huff.lens);

    return res;
}

static void tm2_free_codes(TM2Codes *code)
{
    av_free(code->recode);
    if (code->vlc.table)
        ff_free_vlc(&code->vlc);
}

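/* Decode one VLC code from the bitstream and translate it to its literal value. */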
static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code)
{
    int val;
    val = get_vlc2(gb, code->vlc.table, code->bits, 1);
    if (val < 0)
        return -1;
    return code->recode[val];
}

#define TM2_OLD_HEADER_MAGIC 0x00000100
#define TM2_NEW_HEADER_MAGIC 0x00000101

static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
{
    uint32_t magic = AV_RL32(buf);

    switch (magic) {
    case TM2_OLD_HEADER_MAGIC:
        avpriv_request_sample(ctx->avctx, "Old TM2 header");
        return 0;
    case TM2_NEW_HEADER_MAGIC:
        return 0;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08"PRIX32"\n",
               magic);
        return AVERROR_INVALIDDATA;
    }
}

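/* Read the delta table for one stream: the number of deltas and their bit
 * width, followed by the sign-extended delta values themselves. */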
static int tm2_read_deltas(TM2Context *ctx, int stream_id)
{
    int d, mb;
    int i, v;

    d  = get_bits(&ctx->gb, 9);
    mb = get_bits(&ctx->gb, 5);

    av_assert2(mb < 32);
    if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < d; i++) {
        v = get_bits_long(&ctx->gb, mb);
        if (v & (1 << (mb - 1)))
            ctx->deltas[stream_id][i] = v - (1U << mb);
        else
            ctx->deltas[stream_id][i] = v;
    }
    for (; i < TM2_DELTAS; i++)
        ctx->deltas[stream_id][i] = 0;

    return 0;
}

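/* Parse one Huffman-coded token stream: 32-bit length in dwords, optional
 * delta table, Huffman tree description and finally the coded tokens.
 * Returns the number of bytes consumed on success. */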
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
{
    int i, ret;
    int skip = 0;
    int len, toks, pos;
    TM2Codes codes;
    GetByteContext gb;

    if (buf_size < 4) {
        av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
        return AVERROR_INVALIDDATA;
    }

    /* get stream length in dwords */
    bytestream2_init(&gb, buf, buf_size);
    len = bytestream2_get_be32(&gb);

    if (len == 0)
        return 4;

    if (len >= INT_MAX / 4 - 1 || len < 0 || len * 4 + 4 > buf_size) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
        return AVERROR_INVALIDDATA;
    }
    skip = len * 4 + 4;

    toks = bytestream2_get_be32(&gb);
    if (toks & 1) {
        len = bytestream2_get_be32(&gb);
        if (len == TM2_ESCAPE) {
            len = bytestream2_get_be32(&gb);
        }
        if (len > 0) {
            pos = bytestream2_tell(&gb);
            if (skip <= pos)
                return AVERROR_INVALIDDATA;
            init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
            if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
                return ret;
            bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
        }
    }
    /* skip unused fields */
    len = bytestream2_get_be32(&gb);
    if (len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
        bytestream2_skip(&gb, 8); /* unused by decoder */
    } else {
        bytestream2_skip(&gb, 4); /* unused by decoder */
    }

    pos = bytestream2_tell(&gb);
    if (skip <= pos)
        return AVERROR_INVALIDDATA;
    init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
    if ((ret = tm2_build_huff_table(ctx, &codes)) < 0)
        return ret;
    bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);

    toks >>= 1;
    /* check if we have sane number of tokens */
    if ((toks < 0) || (toks > 0xFFFFFF)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    ret = av_reallocp_array(&ctx->tokens[stream_id], toks, sizeof(int));
    if (ret < 0) {
        ctx->tok_lens[stream_id] = 0;
        goto end;
    }
    ctx->tok_lens[stream_id] = toks;
    len = bytestream2_get_be32(&gb);
    if (len > 0) {
        pos = bytestream2_tell(&gb);
        if (skip <= pos) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
        for (i = 0; i < toks; i++) {
            if (get_bits_left(&ctx->gb) <= 0) {
                av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
            ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
            if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS || ctx->tokens[stream_id][i] < 0) {
                av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
                       ctx->tokens[stream_id][i], stream_id, i);
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
        }
    } else {
        if (len < 0) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        for (i = 0; i < toks; i++) {
            ctx->tokens[stream_id][i] = codes.recode[0];
            if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
                av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
                       ctx->tokens[stream_id][i], stream_id, i);
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
        }
    }

    ret = skip;

end:
    tm2_free_codes(&codes);
    return ret;
}

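/* Fetch the next token from the given stream; for the delta streams
 * (TM2_C_HI .. TM2_MOT) the token is an index into the stream's delta table. */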
static inline int GET_TOK(TM2Context *ctx, int type)
{
    if (ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
        ctx->error = 1;
        return 0;
    }
    if (type <= TM2_MOT) {
        if (ctx->tokens[type][ctx->tok_ptrs[type]] >= TM2_DELTAS) {
            av_log(ctx->avctx, AV_LOG_ERROR, "token %d is too large\n", ctx->tokens[type][ctx->tok_ptrs[type]]);
            return 0;
        }
        return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
    }
    return ctx->tokens[type][ctx->tok_ptrs[type]++];
}

/* blocks decoding routines */

/* common Y, U, V pointers initialisation */
#define TM2_INIT_POINTERS() \
    int *last, *clast; \
    int *Y, *U, *V;\
    int Ystride, Ustride, Vstride;\
\
    Ystride = ctx->y_stride;\
    Vstride = ctx->uv_stride;\
    Ustride = ctx->uv_stride;\
    Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
    V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
    U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
    last = ctx->last + bx * 4;\
    clast = ctx->clast + bx * 4;

#define TM2_INIT_POINTERS_2() \
    unsigned *Yo, *Uo, *Vo;\
    int oYstride, oUstride, oVstride;\
\
    TM2_INIT_POINTERS();\
    oYstride = Ystride;\
    oVstride = Vstride;\
    oUstride = Ustride;\
    Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
    Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
    Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;

/* recalculate last and delta values for next blocks */
#define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
    CD[0] = (unsigned)CHR[ 1] - (unsigned)last[1];\
    CD[1] = (unsigned)CHR[stride + 1] - (unsigned) CHR[1];\
    last[0] = (int)CHR[stride + 0];\
    last[1] = (int)CHR[stride + 1];}

/* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
{
    unsigned ct, d;
    int i, j;

    for (j = 0; j < 4; j++) {
        ct = ctx->D[j];
        for (i = 0; i < 4; i++) {
            d = deltas[i + j * 4];
            ct += d;
            last[i] += ct;
            Y[i] = av_clip_uint8(last[i]);
        }
        Y += stride;
        ctx->D[j] = ct;
    }
}

static inline void tm2_high_chroma(int *data, int stride, int *last, unsigned *CD, int *deltas)
{
    int i, j;
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            CD[j] += deltas[i + j * 2];
            last[i] += CD[j];
            data[i] = last[i];
        }
        data += stride;
    }
}

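/* Chroma for low-resolution blocks: only one delta is coded per 2x2 block;
 * rebalance the two row accumulators and interpolate the left predictor
 * from the neighbouring block before applying the deltas. */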
static inline void tm2_low_chroma(int *data, int stride, int *clast, unsigned *CD, int *deltas, int bx)
{
    int t;
    int l;
    int prev;

    if (bx > 0)
        prev = clast[-3];
    else
        prev = 0;
    t        = (int)(CD[0] + CD[1]) >> 1;
    l        = (int)(prev - CD[0] - CD[1] + clast[1]) >> 1;
    CD[1]    = CD[0] + CD[1] - t;
    CD[0]    = t;
    clast[0] = l;

    tm2_high_chroma(data, stride, clast, CD, deltas);
}

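/* Full-resolution block: per-pixel deltas for the 2x2 chroma and 4x4 luma samples. */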
static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* hi-res chroma */
    for (i = 0; i < 4; i++) {
        deltas[i]     = GET_TOK(ctx, TM2_C_HI);
        deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
    }
    tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
    tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);

    /* hi-res luma */
    for (i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

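/* Medium-resolution block: one delta per chroma plane, per-pixel luma deltas. */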
static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* hi-res luma */
    for (i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

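/* Low-resolution block: one delta per chroma plane and one luma delta per
 * 2x2 quadrant; the luma predictors are interpolated from neighbouring values. */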
static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int t1, t2;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* low-res luma */
    for (i = 0; i < 16; i++)
        deltas[i] = 0;

    deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
    deltas[10] = GET_TOK(ctx, TM2_L_LO);

    if (bx > 0)
        last[0] = (int)((unsigned)last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
    else
        last[0] = (int)((unsigned)last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3]) >> 1;
    last[2] = (int)((unsigned)last[1] + last[3]) >> 1;

    t1 = ctx->D[0] + (unsigned)ctx->D[1];
    ctx->D[0] = t1 >> 1;
    ctx->D[1] = t1 - (t1 >> 1);
    t2 = ctx->D[2] + (unsigned)ctx->D[3];
    ctx->D[2] = t2 >> 1;
    ctx->D[3] = t2 - (t2 >> 1);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

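/* Null block: no deltas are coded; the luma predictors are interpolated
 * between the left and right predictor values. */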
static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int ct;
    unsigned left, right;
    int diff;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* null chroma */
    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* null luma */
    for (i = 0; i < 16; i++)
        deltas[i] = 0;

    ct = (unsigned)ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];

    if (bx > 0)
        left = last[-1] - (unsigned)ct;
    else
        left = 0;

    right   = last[3];
    diff    = right - left;
    last[0] = left + (diff >> 2);
    last[1] = left + (diff >> 1);
    last[2] = right - (diff >> 2);
    last[3] = right;
    {
        unsigned tp = left;

        ctx->D[0] = (tp + (ct >> 2)) - left;
        left     += ctx->D[0];
        ctx->D[1] = (tp + (ct >> 1)) - left;
        left     += ctx->D[1];
        ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
        left     += ctx->D[2];
        ctx->D[3] = (tp + ct) - left;
    }
    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

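/* Still block: copy the co-located block from the previous frame and
 * rebuild the predictors from the copied samples. */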
static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
            Y[i]    = Yo[i];
            last[i] = Yo[i];
        }
        Y  += Ystride;
        Yo += oYstride;
    }
}

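/* Update block: previous-frame samples plus a coded delta for every pixel. */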
static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    unsigned d;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
            V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
        }
        U  += Ustride;
        V  += Vstride;
        Uo += oUstride;
        Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for (j = 0; j < 4; j++) {
        d = last[3];
        for (i = 0; i < 4; i++) {
            Y[i]    = Yo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
            last[i] = Y[i];
        }
        ctx->D[j] = last[3] - d;
        Y  += Ystride;
        Yo += oYstride;
    }
}

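/* Motion block: copy a block from the previous frame at the coded motion
 * vector; chroma uses the vector halved. */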
static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int mx, my;
    TM2_INIT_POINTERS_2();

    mx = GET_TOK(ctx, TM2_MOT);
    my = GET_TOK(ctx, TM2_MOT);
    mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width  - bx * 4);
    my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);

    if (4 * bx + mx < 0 || 4 * by + my < 0 || 4 * bx + mx + 4 > ctx->avctx->width || 4 * by + my + 4 > ctx->avctx->height) {
        av_log(ctx->avctx, AV_LOG_ERROR, "MV out of picture\n");
        return;
    }

    Yo += my * oYstride + mx;
    Uo += (my >> 1) * oUstride + (mx >> 1);
    Vo += (my >> 1) * oVstride + (mx >> 1);

    /* copy chroma */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U  += Ustride;
        V  += Vstride;
        Uo += oUstride;
        Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* copy luma */
    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
            Y[i] = Yo[i];
        }
        Y  += Ystride;
        Yo += oYstride;
    }
    /* calculate deltas */
    Y -= Ystride * 4;
    ctx->D[0] = (unsigned)Y[3] - last[3];
    ctx->D[1] = (unsigned)Y[3 + Ystride] - Y[3];
    ctx->D[2] = (unsigned)Y[3 + Ystride * 2] - Y[3 + Ystride];
    ctx->D[3] = (unsigned)Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
    for (i = 0; i < 4; i++)
        last[i] = Y[i + Ystride * 3];
}

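/* Decode all blocks of one frame and convert the internal Y/U/V planes to
 * packed BGR24 (B = Y + V, G = Y, R = Y + U), extending the plane borders
 * of the internal buffers. Returns 1 if the frame used only intra block
 * types (i.e. it is a keyframe), 0 otherwise. */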
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
{
    int i, j;
    int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
    int type;
    int keyframe = 1;
    int *Y, *U, *V;
    uint8_t *dst;

    for (i = 0; i < TM2_NUM_STREAMS; i++)
        ctx->tok_ptrs[i] = 0;

    if (ctx->tok_lens[TM2_TYPE] < bw * bh) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got %i tokens for %i blocks\n", ctx->tok_lens[TM2_TYPE], bw * bh);
        return AVERROR_INVALIDDATA;
    }

    memset(ctx->last,  0, 4 * bw * sizeof(int));
    memset(ctx->clast, 0, 4 * bw * sizeof(int));

    for (j = 0; j < bh; j++) {
        memset(ctx->D,  0, 4 * sizeof(int));
        memset(ctx->CD, 0, 4 * sizeof(int));
        for (i = 0; i < bw; i++) {
            type = GET_TOK(ctx, TM2_TYPE);
            switch (type) {
            case TM2_HI_RES:
                tm2_hi_res_block(ctx, p, i, j);
                break;
            case TM2_MED_RES:
                tm2_med_res_block(ctx, p, i, j);
                break;
            case TM2_LOW_RES:
                tm2_low_res_block(ctx, p, i, j);
                break;
            case TM2_NULL_RES:
                tm2_null_res_block(ctx, p, i, j);
                break;
            case TM2_UPDATE:
                tm2_update_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_STILL:
                tm2_still_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_MOTION:
                tm2_motion_block(ctx, p, i, j);
                keyframe = 0;
                break;
            default:
                av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
            }
            if (ctx->error)
                return AVERROR_INVALIDDATA;
        }
    }

    /* copy data from our buffer to AVFrame */
    Y = (ctx->cur ? ctx->Y2 : ctx->Y1);
    U = (ctx->cur ? ctx->U2 : ctx->U1);
    V = (ctx->cur ? ctx->V2 : ctx->V1);
    dst = p->data[0];
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            unsigned y = Y[i], u = U[i >> 1], v = V[i >> 1];
            dst[3 * i + 0] = av_clip_uint8(y + v);
            dst[3 * i + 1] = av_clip_uint8(y);
            dst[3 * i + 2] = av_clip_uint8(y + u);
        }

        /* horizontal edge extension */
        Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
        Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];

        /* vertical edge extension */
        if (j == 0) {
            memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
        } else if (j == h - 1) {
            memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
        }

        Y += ctx->y_stride;
        if (j & 1) {
            /* horizontal edge extension */
            U[-2] = U[-1] = U[0];
            V[-2] = V[-1] = V[0];
            U[cw + 1] = U[cw] = U[cw - 1];
            V[cw + 1] = V[cw] = V[cw - 1];

            /* vertical edge extension */
            if (j == 1) {
                memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
                memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
            } else if (j == h - 1) {
                memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
                memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
            }

            U += ctx->uv_stride;
            V += ctx->uv_stride;
        }
        dst += p->linesize[0];
    }

    return keyframe;
}

static const int tm2_stream_order[TM2_NUM_STREAMS] = {
    TM2_C_HI, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE
};

#define TM2_HEADER_SIZE 40

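/* A TM2 frame consists of a 40-byte header followed by the seven token
 * streams in the order given by tm2_stream_order[]. */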
static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
                        int *got_frame, AVPacket *avpkt)
{
    TM2Context * const l = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size & ~3;
    AVFrame * const p = l->pic;
    int offset = TM2_HEADER_SIZE;
    int i, t, ret;

    l->error = 0;

    av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
    if (!l->buffer) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
        return AVERROR(ENOMEM);
    }

    if ((ret = ff_reget_buffer(avctx, p, 0)) < 0)
        return ret;

    l->bdsp.bswap_buf((uint32_t *) l->buffer, (const uint32_t *) buf,
                      buf_size >> 2);

    if ((ret = tm2_read_header(l, l->buffer)) < 0) {
        return ret;
    }

    for (i = 0; i < TM2_NUM_STREAMS; i++) {
        if (offset >= buf_size) {
            av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
            return AVERROR_INVALIDDATA;
        }

        t = tm2_read_stream(l, l->buffer + offset, tm2_stream_order[i],
                            buf_size - offset);
        if (t < 0) {
            int j = tm2_stream_order[i];
            if (l->tok_lens[j])
                memset(l->tokens[j], 0, sizeof(**l->tokens) * l->tok_lens[j]);
            return t;
        }
        offset += t;
    }
    p->key_frame = tm2_decode_blocks(l, p);
    if (p->key_frame)
        p->pict_type = AV_PICTURE_TYPE_I;
    else
        p->pict_type = AV_PICTURE_TYPE_P;

    l->cur = !l->cur;
    *got_frame = 1;
    ret = av_frame_ref(rframe, l->pic);

    return (ret < 0) ? ret : buf_size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    TM2Context * const l = avctx->priv_data;
    int w = avctx->width, h = avctx->height;

    if ((avctx->width & 3) || (avctx->height & 3)) {
        av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
        return AVERROR(EINVAL);
    }

    l->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_BGR24;

    l->pic = av_frame_alloc();
    if (!l->pic)
        return AVERROR(ENOMEM);

    ff_bswapdsp_init(&l->bdsp);

    l->last = av_malloc_array(w, 2 * sizeof(*l->last));
    if (!l->last)
        return AVERROR(ENOMEM);
    l->clast = l->last + w;

    w += 8;
    h += 8;
    l->Y_base = av_calloc(w * h, 2 * sizeof(*l->Y_base));
    if (!l->Y_base)
        return AVERROR(ENOMEM);
    l->y_stride = w;
    l->Y1 = l->Y_base + l->y_stride * 4 + 4;
    l->Y2 = l->Y1 + w * h;
    w = (w + 1) >> 1;
    h = (h + 1) >> 1;
    l->UV_base = av_calloc(w * h, 4 * sizeof(*l->UV_base));
    if (!l->UV_base)
        return AVERROR(ENOMEM);
    l->uv_stride = w;
    l->U1 = l->UV_base + l->uv_stride * 2 + 2;
    l->U2 = l->U1 + w * h;
    l->V1 = l->U2 + w * h;
    l->V2 = l->V1 + w * h;

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    TM2Context * const l = avctx->priv_data;
    int i;

    av_freep(&l->last);
    for (i = 0; i < TM2_NUM_STREAMS; i++)
        av_freep(&l->tokens[i]);

    av_freep(&l->Y_base);
    av_freep(&l->UV_base);
    av_freep(&l->buffer);
    l->buffer_size = 0;

    av_frame_free(&l->pic);

    return 0;
}

const FFCodec ff_truemotion2_decoder = {
    .p.name         = "truemotion2",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_TRUEMOTION2,
    .priv_data_size = sizeof(TM2Context),
    .init           = decode_init,
    .close          = decode_end,
    FF_CODEC_DECODE_CB(decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};