Lines matching refs:sl (references to the H264SliceContext *sl)
37 static int get_scale_factor(H264SliceContext *sl,
40 int poc0 = sl->ref_list[0][i].poc;
45 avpriv_request_sample(sl->h264->avctx, "pocdiff overflow");
47 if (td == 0 || sl->ref_list[0][i].parent->long_ref) {
55 av_log(sl->h264->avctx, AV_LOG_DEBUG, "pocdiff0 overflow\n");
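Lines 37-55 above are from get_scale_factor(), which derives the temporal-direct distance scale factor from picture order counts and returns 256 (unity in 8-bit fixed point) when td is zero or the reference is long-term (line 47). A minimal standalone sketch of that arithmetic, assuming the standard H.264 clip ranges; clip() and dist_scale_factor() are local names for illustration, not FFmpeg API:

#include <stdlib.h>

/* Sketch of the scale-factor arithmetic implemented by get_scale_factor(). */
int clip(int v, int lo, int hi) { return v < lo ? lo : v > hi ? hi : v; }

int dist_scale_factor(int poc, int poc0, int poc1, int long_term)
{
    int td = clip(poc1 - poc0, -128, 127);   /* distance list-0 ref -> list-1 ref */
    int tb = clip(poc  - poc0, -128, 127);   /* distance list-0 ref -> current    */
    int tx;

    if (td == 0 || long_term)
        return 256;                          /* identity scale */
    tx = (16384 + (abs(td) >> 1)) / td;
    return clip((tb * tx + 32) >> 6, -1024, 1023);
}

The table-filling code that follows (lines 62-79) calls this once per list-0 reference, and once per field and reference for MBAFF content.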
62 H264SliceContext *sl)
66 const int poc1 = sl->ref_list[1][0].poc;
72 const int poc1 = sl->ref_list[1][0].parent->field_poc[field];
73 for (i = 0; i < 2 * sl->ref_count[0]; i++)
74 sl->dist_scale_factor_field[field][i ^ field] =
75 get_scale_factor(sl, poc, poc1, i + 16);
78 for (i = 0; i < sl->ref_count[0]; i++)
79 sl->dist_scale_factor[i] = get_scale_factor(sl, poc, poc1, i);
82 static void fill_colmap(const H264Context *h, H264SliceContext *sl,
86 H264Picture *const ref1 = sl->ref_list[1][0].parent;
89 int end = mbafi ? 16 + 2 * sl->ref_count[0] : sl->ref_count[0];
106 if (4 * sl->ref_list[0][j].parent->frame_num +
107 (sl->ref_list[0][j].reference & 3) == poc) {
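fill_colmap() (lines 82-107) builds sl->map_col_to_list0: for each reference index the co-located (list-1) picture used, it searches the current slice's list 0 for the entry naming the same physical reference, comparing the key 4 * frame_num + (reference & 3) that ff_h264_direct_ref_list_init() stores below (lines 131-132). A simplified sketch of that matching step; the helper name and the flattened key array are illustrative assumptions:

/* cur_keys[j] holds 4*frame_num + (reference & 3) for the current slice's
 * list-0 entry j; col_key is the value the co-located picture recorded for
 * one of its own references. */
int map_col_ref_to_list0(const int *cur_keys, int n_refs, int col_key)
{
    for (int j = 0; j < n_refs; j++)
        if (cur_keys[j] == col_key)
            return j;   /* same reference picture, seen from the current list 0 */
    return -1;          /* no matching entry in the current list 0 */
}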
120 void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
122 H264Ref *const ref1 = &sl->ref_list[1][0];
128 for (list = 0; list < sl->list_count; list++) {
129 cur->ref_count[sidx][list] = sl->ref_count[list];
130 for (j = 0; j < sl->ref_count[list]; j++)
131 cur->ref_poc[sidx][list][j] = 4 * sl->ref_list[list][j].parent->frame_num +
132 (sl->ref_list[list][j].reference & 3);
146 sl->col_fieldoff = 0;
148 if (sl->list_count != 2 || !sl->ref_count[1])
153 int *col_poc = sl->ref_list[1][0].parent->field_poc;
156 sl->col_parity = 1;
158 sl->col_parity = (FFABS(col_poc[0] - (int64_t)cur_poc) >=
161 sidx = sl->col_parity;
163 } else if (!(h->picture_structure & sl->ref_list[1][0].reference) &&
164 !sl->ref_list[1][0].parent->mbaff) {
165 sl->col_fieldoff = 2 * sl->ref_list[1][0].reference - 3;
168 if (sl->slice_type_nos != AV_PICTURE_TYPE_B || sl->direct_spatial_mv_pred)
172 fill_colmap(h, sl, sl->map_col_to_list0, list, sidx, ref1sidx, 0);
175 fill_colmap(h, sl, sl->map_col_to_list0_field[field], list, field,
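ff_h264_direct_ref_list_init() (lines 120-175) records those reference keys, precomputes the distance scale factors and col-to-list0 maps, and sets up the field bookkeeping: col_parity (lines 153-161) remembers which field of the co-located picture is temporally closer to the current picture, used later when a frame-coded macroblock reads a field-coded co-located macroblock (lines 288-290 and 506-508), while col_fieldoff (lines 163-165) shifts the co-located macroblock address when current and co-located fields have opposite parity (lines 293-294 and 511-512). A tiny sketch of the parity choice, with a hypothetical helper name and 0 = top, 1 = bottom:

#include <stdlib.h>

/* Pick the field of the co-located picture whose POC is closer to the
 * current POC; >= mirrors line 158, so ties go to the bottom field. */
int choose_col_parity(int col_poc_top, int col_poc_bottom, int cur_poc)
{
    return abs(col_poc_top - cur_poc) >= abs(col_poc_bottom - cur_poc);
}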
199 static void pred_spatial_direct_motion(const H264Context *const h, H264SliceContext *sl,
204 int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
215 assert(sl->ref_list[1][0].reference & 3);
217 await_reference_mb_row(h, &sl->ref_list[1][0],
218 sl->mb_y + !!IS_INTERLACED(*mb_type));
225 int left_ref = sl->ref_cache[list][scan8[0] - 1];
226 int top_ref = sl->ref_cache[list][scan8[0] - 8];
227 int refc = sl->ref_cache[list][scan8[0] - 8 + 4];
228 const int16_t *C = sl->mv_cache[list][scan8[0] - 8 + 4];
230 refc = sl->ref_cache[list][scan8[0] - 8 - 1];
231 C = sl->mv_cache[list][scan8[0] - 8 - 1];
239 const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];
240 const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];
258 av_assert2(ref[list] < (sl->ref_count[list] << !!FRAME_MBAFF(h)));
276 fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
277 fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
278 fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
279 fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
286 if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
288 mb_y = (sl->mb_y & ~1) + sl->col_parity;
289 mb_xy = sl->mb_x +
290 ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
293 mb_y += sl->col_fieldoff;
294 mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity
299 mb_y = sl->mb_y & ~1;
300 mb_xy = (sl->mb_y & ~1) * h->mb_stride + sl->mb_x;
301 mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
302 mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
322 mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];
342 await_reference_mb_row(h, &sl->ref_list[1][0], mb_y);
344 l1mv0 = (void*)&sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
345 l1mv1 = (void*)&sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
346 l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
347 l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
349 if (sl->mb_y & 1) {
366 if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
368 sl->sub_mb_type[i8] = sub_mb_type;
370 fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
372 fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
374 if (!IS_INTRA(mb_type_col[y8]) && !sl->ref_list[1][0].parent->long_ref &&
393 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4);
394 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4);
403 fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
404 fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
405 if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref &&
422 fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
423 fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
430 if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
432 sl->sub_mb_type[i8] = sub_mb_type;
434 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, mv[0], 4);
435 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, mv[1], 4);
436 fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
438 fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
443 if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref &&
453 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2,
456 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2,
467 AV_ZERO32(sl->mv_cache[0][scan8[i8 * 4 + i4]]);
469 AV_ZERO32(sl->mv_cache[1][scan8[i8 * 4 + i4]]);
474 sl->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8;
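pred_spatial_direct_motion() (lines 199-474) is the spatial variant. For each list the direct reference index is the spec's MinPositive over the left, top and top-right neighbour references (top-left as fallback, lines 230-231) and the vector is the median of the corresponding neighbour vectors; partitions whose co-located block is effectively stationary against a short-term reference 0 (the tests at lines 374, 405 and 443) have their vectors forced to zero, and if neither list yields a reference both fall back to index 0, with an all-zero prediction taking the early 16x16 fill at lines 276-279. A sketch of the reference pick only; min_positive3() is an illustrative name (FFmpeg does this more compactly by casting the indices to unsigned so that unavailable entries lose the comparison):

/* Smallest non-negative neighbour reference wins; if none is non-negative,
 * the list contributes no reference (-1). */
int min_positive3(int a, int b, int c)
{
    int r = -1;
    if (a >= 0)                     r = a;
    if (b >= 0 && (r < 0 || b < r)) r = b;
    if (c >= 0 && (r < 0 || c < r)) r = c;
    return r;
}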
486 static void pred_temp_direct_motion(const H264Context *const h, H264SliceContext *sl,
491 int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
499 assert(sl->ref_list[1][0].reference & 3);
501 await_reference_mb_row(h, &sl->ref_list[1][0],
502 sl->mb_y + !!IS_INTERLACED(*mb_type));
504 if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
506 mb_y = (sl->mb_y & ~1) + sl->col_parity;
507 mb_xy = sl->mb_x +
508 ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
511 mb_y += sl->col_fieldoff;
512 mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity
517 mb_y = sl->mb_y & ~1;
518 mb_xy = sl->mb_x + (sl->mb_y & ~1) * h->mb_stride;
519 mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
520 mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
543 mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];
566 await_reference_mb_row(h, &sl->ref_list[1][0], mb_y);
568 l1mv0 = (void*)&sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
569 l1mv1 = (void*)&sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
570 l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
571 l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
573 if (sl->mb_y & 1) {
582 const int *map_col_to_list0[2] = { sl->map_col_to_list0[0],
583 sl->map_col_to_list0[1] };
584 const int *dist_scale_factor = sl->dist_scale_factor;
588 map_col_to_list0[0] = sl->map_col_to_list0_field[sl->mb_y & 1][0];
589 map_col_to_list0[1] = sl->map_col_to_list0_field[sl->mb_y & 1][1];
590 dist_scale_factor = sl->dist_scale_factor_field[sl->mb_y & 1];
592 ref_offset = (sl->ref_list[1][0].parent->mbaff << 4) & (mb_type_col[0] >> 3);
604 if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
606 sl->sub_mb_type[i8] = sub_mb_type;
608 fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
610 fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
611 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
612 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
625 fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
633 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
635 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
647 fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
662 fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
663 fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
664 fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
672 if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
674 sl->sub_mb_type[i8] = sub_mb_type;
675 fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
677 fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
678 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
679 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
693 fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
699 fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
701 fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
707 int16_t *mv_l0 = sl->mv_cache[0][scan8[i8 * 4 + i4]];
710 AV_WN32A(sl->mv_cache[1][scan8[i8 * 4 + i4]],
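pred_temp_direct_motion() (lines 486-710) is the temporal variant: list 1 always uses reference 0 (lines 608, 647, 675), the co-located list-0 reference is remapped into the current list 0 through map_col_to_list0 (lines 582-589), and each co-located vector is scaled by the per-reference distance scale factor (lines 584 and 590) before being written into mv_cache (lines 693-710). A sketch of the vector derivation, assuming the 8-bit fixed-point scale from get_scale_factor(); the function name and plain int arrays are illustrative:

/* mvL0 is the co-located vector scaled by DistScaleFactor; mvL1 is the
 * remainder, which points back at the co-located picture. */
void temporal_direct_mv(int dist_scale_factor,
                        const int mv_col[2], int mv_l0[2], int mv_l1[2])
{
    mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
    mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
    mv_l1[0] = mv_l0[0] - mv_col[0];
    mv_l1[1] = mv_l0[1] - mv_col[1];
}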
720 void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
723 if (sl->direct_spatial_mv_pred)
724 pred_spatial_direct_motion(h, sl, mb_type);
726 pred_temp_direct_motion(h, sl, mb_type);
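ff_h264_pred_direct_motion() (lines 720-726) is the entry point the rest of the decoder calls for direct-predicted macroblocks: it simply dispatches to the spatial or temporal variant according to sl->direct_spatial_mv_pred, which reflects the direct_spatial_mv_pred_flag parsed from the B-slice header.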