/*
 * RV40 decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder
 */

#include "config.h"

#include "libavutil/imgutils.h"
#include "libavutil/thread.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodec.h"
#include "golomb.h"

#include "rv34.h"
#include "rv40vlc2.h"
#include "rv40data.h"

static VLC aic_top_vlc;
static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM];
static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS];

static av_cold void rv40_init_table(VLC *vlc, unsigned *offset, int nb_bits,
                                    int nb_codes, const uint8_t (*tab)[2])
{
    static VLCElem vlc_buf[11776];

    vlc->table           = &vlc_buf[*offset];
    vlc->table_allocated = 1 << nb_bits;
    *offset             += 1 << nb_bits;

    ff_init_vlc_from_lengths(vlc, nb_bits, nb_codes,
                             &tab[0][1], 2, &tab[0][0], 2, 1,
                             0, INIT_VLC_USE_NEW_STATIC, NULL);
}

/**
 * Initialize all tables.
 */
static av_cold void rv40_init_tables(void)
{
    int i, offset = 0;
    static VLCElem aic_mode2_table[11814];

    rv40_init_table(&aic_top_vlc, &offset, AIC_TOP_BITS, AIC_TOP_SIZE,
                    rv40_aic_top_vlc_tab);
    for(i = 0; i < AIC_MODE1_NUM; i++){
        // Every tenth VLC table is empty
        if((i % 10) == 9) continue;
        rv40_init_table(&aic_mode1_vlc[i], &offset, AIC_MODE1_BITS,
                        AIC_MODE1_SIZE, aic_mode1_vlc_tabs[i]);
    }
    for (unsigned i = 0, offset = 0; i < AIC_MODE2_NUM; i++){
        uint16_t syms[AIC_MODE2_SIZE];

        for (int j = 0; j < AIC_MODE2_SIZE; j++) {
            int first  = aic_mode2_vlc_syms[i][j] >> 4;
            int second = aic_mode2_vlc_syms[i][j] & 0xF;
            if (HAVE_BIGENDIAN)
                syms[j] = (first << 8) | second;
            else
                syms[j] = first | (second << 8);
        }
        aic_mode2_vlc[i].table           = &aic_mode2_table[offset];
        aic_mode2_vlc[i].table_allocated = FF_ARRAY_ELEMS(aic_mode2_table) - offset;
        ff_init_vlc_from_lengths(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE,
                                 aic_mode2_vlc_bits[i], 1,
                                 syms, 2, 2, 0, INIT_VLC_STATIC_OVERLONG, NULL);
        offset += aic_mode2_vlc[i].table_size;
    }
    for(i = 0; i < NUM_PTYPE_VLCS; i++){
        rv40_init_table(&ptype_vlc[i], &offset, PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
                        ptype_vlc_tabs[i]);
    }
    for(i = 0; i < NUM_BTYPE_VLCS; i++){
        rv40_init_table(&btype_vlc[i], &offset, BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
                        btype_vlc_tabs[i]);
    }
}

/**
 * Get stored dimension from bitstream.
 *
 * If the width/height is the standard one then it's coded as a 3-bit index.
 * Otherwise it is coded as escaped 8-bit portions.
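 *
 * Illustrative example (not part of the original comment): if the 3-bit
 * code selects a zero table entry, escape bytes follow; each byte adds
 * (byte << 2) and reading stops at the first byte that is not 0xFF, so the
 * bytes 0xFF 0x28 would yield (0xFF + 0x28) << 2 = 1180.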
 */
static int get_dimension(GetBitContext *gb, const int *dim)
{
    int t   = get_bits(gb, 3);
    int val = dim[t];
    if(val < 0)
        val = dim[get_bits1(gb) - val];
    if(!val){
        do{
            if (get_bits_left(gb) < 8)
                return AVERROR_INVALIDDATA;
            t = get_bits(gb, 8);
            val += t << 2;
        }while(t == 0xFF);
    }
    return val;
}

/**
 * Get encoded picture size - usually this is called from rv40_parse_slice_header.
 */
static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
{
    *w = get_dimension(gb, rv40_standard_widths);
    *h = get_dimension(gb, rv40_standard_heights);
}

static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
{
    int mb_bits;
    int w = r->s.width, h = r->s.height;
    int mb_size;
    int ret;

    memset(si, 0, sizeof(SliceInfo));
    if(get_bits1(gb))
        return AVERROR_INVALIDDATA;
    si->type  = get_bits(gb, 2);
    if(si->type == 1) si->type = 0;
    si->quant = get_bits(gb, 5);
    if(get_bits(gb, 2))
        return AVERROR_INVALIDDATA;
    si->vlc_set = get_bits(gb, 2);
    skip_bits1(gb);
    si->pts = get_bits(gb, 13);
    if(!si->type || !get_bits1(gb))
        rv40_parse_picture_size(gb, &w, &h);
    if ((ret = av_image_check_size(w, h, 0, r->s.avctx)) < 0)
        return ret;
    si->width  = w;
    si->height = h;
    mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
    mb_bits = ff_rv34_get_start_offset(gb, mb_size);
    si->start = get_bits(gb, mb_bits);

    return 0;
}

/**
 * Decode 4x4 intra types array.
 */
static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
    MpegEncContext *s = &r->s;
    int i, j, k, v;
    int A, B, C;
    int pattern;
    int8_t *ptr;

    for(i = 0; i < 4; i++, dst += r->intra_types_stride){
        if(!i && s->first_slice_line){
            pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1);
            dst[0] = (pattern >> 2) & 2;
            dst[1] = (pattern >> 1) & 2;
            dst[2] =  pattern       & 2;
            dst[3] = (pattern << 1) & 2;
            continue;
        }
        ptr = dst;
        for(j = 0; j < 4; j++){
            /* Coefficients are read using VLC chosen by the prediction pattern
             * The first one (used for retrieving a pair of coefficients) is
             * constructed from the top, top right and left coefficients
             * The second one (used for retrieving only one coefficient) is
             * top + 10 * left.
             */
            A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row
            B = ptr[-r->intra_types_stride];
            C = ptr[-1];
            pattern = A + B * (1 << 4) + C * (1 << 8);
            for(k = 0; k < MODE2_PATTERNS_NUM; k++)
                if(pattern == rv40_aic_table_index[k])
                    break;
            if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients
                AV_WN16(ptr, get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2));
                ptr += 2;
                j++;
            }else{
                if(B != -1 && C != -1)
                    v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1);
                else{ // tricky decoding
                    v = 0;
                    switch(C){
                    case -1: // code 0 -> 1, 1 -> 0
                        if(B < 2)
                            v = get_bits1(gb) ^ 1;
                        break;
                    case  0:
                    case  2: // code 0 -> 2, 1 -> 0
                        v = (get_bits1(gb) ^ 1) << 1;
                        break;
                    }
                }
                *ptr++ = v;
            }
        }
    }
    return 0;
}

/**
 * Decode macroblock information.
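 *
 * The macroblock type is coded with a VLC whose context is derived from the
 * neighbouring macroblocks: when the top row is available, the most frequent
 * type among the left, top, top-right and top-left neighbours is used,
 * otherwise the type of the left neighbour (if available) is used directly.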
 */
static int rv40_decode_mb_info(RV34DecContext *r)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q, i;
    int prev_type = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if(!r->s.mb_skip_run) {
        r->s.mb_skip_run = get_interleaved_ue_golomb(gb) + 1;
        if(r->s.mb_skip_run > (unsigned)s->mb_num)
            return -1;
    }

    if(--r->s.mb_skip_run)
        return RV34_MB_SKIP;

    if(r->avail_cache[6-4]){
        int blocks[RV34_MB_TYPES] = {0};
        int count = 0;
        if(r->avail_cache[6-1])
            blocks[r->mb_type[mb_pos - 1]]++;
        blocks[r->mb_type[mb_pos - s->mb_stride]]++;
        if(r->avail_cache[6-2])
            blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
        if(r->avail_cache[6-5])
            blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
        for(i = 0; i < RV34_MB_TYPES; i++){
            if(blocks[i] > count){
                count = blocks[i];
                prev_type = i;
                if(count>1)
                    break;
            }
        }
    } else if (r->avail_cache[6-1])
        prev_type = r->mb_type[mb_pos - 1];

    if(s->pict_type == AV_PICTURE_TYPE_P){
        prev_type = block_num_to_ptype_vlc_num[prev_type];
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
    }else{
        prev_type = block_num_to_btype_vlc_num[prev_type];
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
    }
    return 0;
}

enum RV40BlockPos{
    POS_CUR,
    POS_TOP,
    POS_LEFT,
    POS_BOTTOM,
};

#define MASK_CUR          0x0001
#define MASK_RIGHT        0x0008
#define MASK_BOTTOM       0x0010
#define MASK_TOP          0x1000
#define MASK_Y_TOP_ROW    0x000F
#define MASK_Y_LAST_ROW   0xF000
#define MASK_Y_LEFT_COL   0x1111
#define MASK_Y_RIGHT_COL  0x8888
#define MASK_C_TOP_ROW    0x0003
#define MASK_C_LAST_ROW   0x000C
#define MASK_C_LEFT_COL   0x0005
#define MASK_C_RIGHT_COL  0x000A

static const int neighbour_offs_x[4] = { 0,  0, -1, 0 };
static const int neighbour_offs_y[4] = { 0, -1,  0, 1 };

static void rv40_adaptive_loop_filter(RV34DSPContext *rdsp,
                                      uint8_t *src, int stride, int dmode,
                                      int lim_q1, int lim_p1,
                                      int alpha, int beta, int beta2,
                                      int chroma, int edge, int dir)
{
    int filter_p1, filter_q1;
    int strong;
    int lims;

    strong = rdsp->rv40_loop_filter_strength[dir](src, stride, beta, beta2,
                                                  edge, &filter_p1, &filter_q1);

    lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;

    if (strong) {
        rdsp->rv40_strong_loop_filter[dir](src, stride, alpha,
                                           lims, dmode, chroma);
    } else if (filter_p1 & filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, 1, 1, alpha, beta,
                                         lims, lim_q1, lim_p1);
    } else if (filter_p1 | filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, filter_p1, filter_q1,
                                         alpha, beta, lims >> 1, lim_q1 >> 1,
                                         lim_p1 >> 1);
    }
}

/**
 * RV40 loop filtering function
 */
static void rv40_loop_filter(RV34DecContext *r, int row)
{
    MpegEncContext *s = &r->s;
    int mb_pos, mb_x;
    int i, j, k;
    uint8_t *Y, *C;
    int alpha, beta, betaY, betaC;
    int q;
    int mbtype[4];   ///< current macroblock and its neighbours types
    /**
     * flags indicating that macroblock can be filtered with strong filter
     * it is set only for intra coded MB and MB with DCs coded separately
     */
    int mb_strong[4];
    int clip[4];     ///< MB filter clipping value calculated from filtering strength
    /**
     * coded block patterns for luma part of current macroblock and its neighbours
     * Format:
     * LSB corresponds to the top left block,
     * each nibble represents one row of subblocks.
     */
    int cbp[4];
    /**
     * coded block patterns for chroma part of current macroblock and its neighbours
     * Format is the same as for luma with two subblocks in a row.
     */
    int uvcbp[4][2];
    /**
     * This mask represents the pattern of luma subblocks that should be filtered
     * in addition to the coded ones because they lie at the edge of
     * 8x8 block with different enough motion vectors
     */
    unsigned mvmasks[4];

    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int mbtype = s->current_picture_ptr->mb_type[mb_pos];
        if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
            r->cbp_luma  [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
        if(IS_INTRA(mbtype))
            r->cbp_chroma[mb_pos] = 0xFF;
    }
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int y_h_deblock, y_v_deblock;
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        unsigned y_to_deblock;
        int c_to_deblock[2];

        q = s->current_picture_ptr->qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
        beta  = rv40_beta_tab [q];
        betaY = betaC = beta * 3;
        if(s->width * s->height <= 176*144)
            betaY += beta;

        avail[0] = 1;
        avail[1] = row;
        avail[2] = mb_x;
        avail[3] = row < s->mb_height - 1;
        for(i = 0; i < 4; i++){
            if(avail[i]){
                int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
                mvmasks[i] = r->deblock_coefs[pos];
                mbtype [i] = s->current_picture_ptr->mb_type[pos];
                cbp    [i] = r->cbp_luma[pos];
                uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
                uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
            }else{
                mvmasks[i] = 0;
                mbtype [i] = mbtype[0];
                cbp    [i] = 0;
                uvcbp[i][0] = uvcbp[i][1] = 0;
            }
            mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
            clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
        }
        y_to_deblock =  mvmasks[POS_CUR]
                     | (mvmasks[POS_BOTTOM] << 16);
        /* This pattern contains bits signalling that horizontal edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_h_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                    <<  4) & ~MASK_Y_TOP_ROW)
                    | ((cbp[POS_TOP] & MASK_Y_LAST_ROW) >> 12);
        /* This pattern contains bits signalling that vertical edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
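         *
         * The bit layout matches cbp[]: the LSB corresponds to the top-left
         * 4x4 subblock and each nibble holds one row. Shifting the current
         * CBP left by one marks the left edge of every subblock whose left
         * neighbour is coded, and the left macroblock's rightmost column is
         * shifted right by three bits onto the current leftmost column.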
         */
        y_v_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                      << 1) & ~MASK_Y_LEFT_COL)
                    | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
        if(!mb_x)
            y_v_deblock &= ~MASK_Y_LEFT_COL;
        if(!row)
            y_h_deblock &= ~MASK_Y_TOP_ROW;
        if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
            y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
        /* Calculating chroma patterns is similar and easier since there is
         * no motion vector pattern for them.
         */
        for(i = 0; i < 2; i++){
            c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
            c_v_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
                           | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
            c_h_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2)
                           |  (uvcbp[POS_CUR][i]                    << 2);
            if(!mb_x)
                c_v_deblock[i] &= ~MASK_C_LEFT_COL;
            if(!row)
                c_h_deblock[i] &= ~MASK_C_TOP_ROW;
            if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
                c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
        }

        for(j = 0; j < 16; j += 4){
            Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
            for(i = 0; i < 4; i++, Y += 4){
                int ij = i + j;
                int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                int dither = j ? ij : i*4;

                // if bottom block is coded then we can filter its top edge
                // (or bottom edge of this block, which is the same)
                if(y_h_deblock & (MASK_BOTTOM << ij)){
                    rv40_adaptive_loop_filter(&r->rdsp, Y+4*s->linesize,
                                              s->linesize, dither,
                                              y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
                                              clip_cur, alpha, beta, betaY,
                                              0, 0, 0);
                }
                // filter left block edge in ordinary mode (with low filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                    if(!i)
                        clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    else
                        clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 0, 1);
                }
                // filter top edge of the current macroblock when filtering strength is high
                if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
                                              alpha, beta, betaY, 0, 1, 0);
                }
                // filter left block edge in edge mode (with high filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                    clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 1, 1);
                }
            }
        }
        for(k = 0; k < 2; k++){
            for(j = 0; j < 2; j++){
                C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                for(i = 0; i < 2; i++, C += 4){
                    int ij = i + j*2;
                    int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                    if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
                        int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C+4*s->uvlinesize, s->uvlinesize, i*8,
                                                  clip_bot,
                                                  clip_cur,
                                                  alpha, beta, betaC, 1, 0, 0);
                    }
                    if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                        if(!i)
                            clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        else
                            clip_left = c_to_deblock[k]    & (MASK_CUR << (ij-1))  ? clip[POS_CUR]  : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 0, 1);
                    }
                    if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                        int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, i*8,
                                                  clip_cur,
                                                  clip_top,
                                                  alpha, beta, betaC, 1, 1, 0);
                    }
                    if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                        clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 1, 1);
                    }
                }
            }
        }
    }
}

/**
 * Initialize decoder.
 */
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    RV34DecContext *r = avctx->priv_data;
    int ret;

    r->rv30 = 0;
    if ((ret = ff_rv34_decode_init(avctx)) < 0)
        return ret;
    r->parse_slice_header = rv40_parse_slice_header;
    r->decode_intra_types = rv40_decode_intra_types;
    r->decode_mb_info     = rv40_decode_mb_info;
    r->loop_filter        = rv40_loop_filter;
    r->luma_dc_quant_i = rv40_luma_dc_quant[0];
    r->luma_dc_quant_p = rv40_luma_dc_quant[1];
    ff_rv40dsp_init(&r->rdsp);
    ff_thread_once(&init_static_once, rv40_init_tables);
    return 0;
}

const FFCodec ff_rv40_decoder = {
    .p.name                = "rv40",
    .p.long_name           = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
    .p.type                = AVMEDIA_TYPE_VIDEO,
    .p.id                  = AV_CODEC_ID_RV40,
    .priv_data_size        = sizeof(RV34DecContext),
    .init                  = rv40_decode_init,
    .close                 = ff_rv34_decode_end,
    FF_CODEC_DECODE_CB(ff_rv34_decode_frame),
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
                             AV_CODEC_CAP_FRAME_THREADS,
    .flush                 = ff_mpeg_flush,
    .p.pix_fmts            = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NONE
    },
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_update_thread_context),
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE |
                             FF_CODEC_CAP_ALLOCATE_PROGRESS,
};