/*
 * H.26L/H.264/AVC/JVT/14496-10/... loop filter
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 loop filter.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem_internal.h"
#include "avcodec.h"
#include "h264dec.h"
#include "h264_ps.h"
#include "mpegutils.h"

/* Deblocking filter (p153) */
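/*
 * Note on the table layout (an explanatory note, derived from the a/b setup
 * in h264_filter_mb_fast_internal() and ff_h264_filter_mb() below): each
 * table holds 52*3 entries instead of the 52 defined by the spec.  The index
 * used is qp + a (resp. qp + b), where a/b are pre-biased by
 * 52 + slice offset - qp_bd_offset, so the middle 52 entries correspond to
 * indexA/indexB = 0..51 and the 52 padding entries on either side simply
 * repeat the edge values, which avoids clipping the index on the hot path.
 */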
static const uint8_t alpha_table[52*3] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
     7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
    25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
    80, 90,101,113,127,144,162,182,203,226,
   255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
};
static const uint8_t beta_table[52*3] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
     3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
     8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
    13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
    18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
static const uint8_t tc0_table[52*3][4] = {
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};

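/*
 * The filter_mb_edge*() helpers below all follow the same pattern: look up
 * alpha/beta from the pre-biased QP index, bail out early when either
 * threshold is 0 (no filtering possible for this edge), and otherwise either
 * gather four tc0 values for a normal edge or call the *_intra dsp variant
 * for a bS==4 edge.  The chroma helpers pass tc0+1 because the chroma filter
 * always uses tc = tc0 + 1; the mbaff variants take an extra stride (bsi)
 * into the bS array so one implementation covers both field and frame
 * pairings of the left edge.
 */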
/* intra: 0 if this loopfilter call is guaranteed to be inter (bS < 4), 1 if it might be intra (bS == 4) */
static av_always_inline void filter_mb_edgev(uint8_t *pix, int stride,
                                             const int16_t bS[4],
                                             unsigned int qp, int a, int b,
                                             const H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]];
        tc[1] = tc0_table[index_a][bS[1]];
        tc[2] = tc0_table[index_a][bS[2]];
        tc[3] = tc0_table[index_a][bS[3]];
        h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_edgecv(uint8_t *pix, int stride,
                                              const int16_t bS[4],
                                              unsigned int qp, int a, int b,
                                              const H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]]+1;
        tc[1] = tc0_table[index_a][bS[1]]+1;
        tc[2] = tc0_table[index_a][bS[2]]+1;
        tc[3] = tc0_table[index_a][bS[3]]+1;
        h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_mbaff_edgev(const H264Context *h, uint8_t *pix,
                                                   int stride,
                                                   const int16_t bS[7], int bsi,
                                                   int qp, int a, int b,
                                                   int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0*bsi]];
        tc[1] = tc0_table[index_a][bS[1*bsi]];
        tc[2] = tc0_table[index_a][bS[2*bsi]];
        tc[3] = tc0_table[index_a][bS[3*bsi]];
        h->h264dsp.h264_h_loop_filter_luma_mbaff(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_mbaff_edgecv(const H264Context *h,
                                                    uint8_t *pix, int stride,
                                                    const int16_t bS[7],
                                                    int bsi, int qp, int a,
                                                    int b, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0*bsi]] + 1;
        tc[1] = tc0_table[index_a][bS[1*bsi]] + 1;
        tc[2] = tc0_table[index_a][bS[2*bsi]] + 1;
        tc[3] = tc0_table[index_a][bS[3*bsi]] + 1;
        h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_edgeh(uint8_t *pix, int stride,
                                             const int16_t bS[4],
                                             unsigned int qp, int a, int b,
                                             const H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]];
        tc[1] = tc0_table[index_a][bS[1]];
        tc[2] = tc0_table[index_a][bS[2]];
        tc[3] = tc0_table[index_a][bS[3]];
        h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_edgech(uint8_t *pix, int stride,
                                              const int16_t bS[4],
                                              unsigned int qp, int a, int b,
                                              const H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]]+1;
        tc[1] = tc0_table[index_a][bS[1]]+1;
        tc[2] = tc0_table[index_a][bS[2]]+1;
        tc[3] = tc0_table[index_a][bS[3]]+1;
        h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
    }
}

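/*
 * Fast per-macroblock deblocking path.  It is only reached from
 * ff_h264_filter_mb_fast() below, when an optimized
 * h264_loop_filter_strength implementation is available and
 * pps->chroma_qp_diff is not set; the inter boundary strengths are then
 * computed in one go by the dsp helper instead of per block pair with
 * check_mv() as in ff_h264_filter_mb().
 */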
static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
                                                          H264SliceContext *sl,
                                                          int mb_x, int mb_y,
                                                          uint8_t *img_y,
                                                          uint8_t *img_cb,
                                                          uint8_t *img_cr,
                                                          unsigned int linesize,
                                                          unsigned int uvlinesize,
                                                          int pixel_shift)
{
    int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    int mb_xy = sl->mb_xy;
    int left_type = sl->left_type[LTOP];
    int top_type = sl->top_type;

    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
    int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
    int b = 52 + sl->slice_beta_offset - qp_bd_offset;

    int mb_type = h->cur_pic.mb_type[mb_xy];
    int qp      = h->cur_pic.qscale_table[mb_xy];
    int qp0     = h->cur_pic.qscale_table[mb_xy - 1];
    int qp1     = h->cur_pic.qscale_table[sl->top_mb_xy];
    int qpc  = get_chroma_qp(h->ps.pps, 0, qp);
    int qpc0 = get_chroma_qp(h->ps.pps, 0, qp0);
    int qpc1 = get_chroma_qp(h->ps.pps, 0, qp1);
    qp0  = (qp + qp0 + 1) >> 1;
    qp1  = (qp + qp1 + 1) >> 1;
    qpc0 = (qpc + qpc0 + 1) >> 1;
    qpc1 = (qpc + qpc1 + 1) >> 1;
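    /*
     * Edges on the macroblock border are filtered with the average of the
     * current QP and the left/top neighbour's QP (qp0/qp1, and qpc0/qpc1 for
     * chroma); internal edges use the current macroblock's QP only.
     */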

    if( IS_INTRA(mb_type) ) {
        static const int16_t bS4[4] = {4,4,4,4};
        static const int16_t bS3[4] = {3,3,3,3};
        const int16_t *bSH = FIELD_PICTURE(h) ? bS3 : bS4;
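        /*
         * Intra MB: internal edges get bS=3 and the MB-border edges bS=4,
         * except that in field pictures the horizontal MB-border edge uses
         * bS=3 (bSH), matching the standard's rule for field coding.
         */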
        if(left_type)
            filter_mb_edgev( &img_y[4*0<<pixel_shift], linesize, bS4, qp0, a, b, h, 1);
        if( IS_8x8DCT(mb_type) ) {
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
        } else {
            filter_mb_edgev( &img_y[4*1<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*3<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, a, b, h, 0);
        }
        if(chroma){
            if(chroma444){
                if(left_type){
                    filter_mb_edgev( &img_cb[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgev( &img_cr[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                }
                if( IS_8x8DCT(mb_type) ) {
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                    }
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                } else {
                    filter_mb_edgev( &img_cb[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                    }
                    filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                }
            }else if(chroma422){
                if(left_type){
                    filter_mb_edgecv(&img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv(&img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv(&img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv(&img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech(&img_cb[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech(&img_cr[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech(&img_cb[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }else{
                if(left_type){
                    filter_mb_edgecv( &img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv( &img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv( &img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv( &img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }
        }
        return;
    } else {
        LOCAL_ALIGNED(8, int16_t, bS, [2], [4][4]);
        int edges;
        if( IS_8x8DCT(mb_type) && (sl->cbp&7) == 7 && !chroma444 ) {
            edges = 4;
            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
        } else {
            int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
            int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
            int step =  1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1;
            edges = 4 - 3*((mb_type>>3) & !(sl->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
            h->h264dsp.h264_loop_filter_strength(bS, sl->non_zero_count_cache, sl->ref_cache, sl->mv_cache,
                                                 sl->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE(h));
        }
        if( IS_INTRA(left_type) )
            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
        if( IS_INTRA(top_type) )
            AV_WN64A(bS[1][0], FIELD_PICTURE(h) ? 0x0003000300030003ULL : 0x0004000400040004ULL);

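        /*
         * FILTER(hv, dir, edge, intra) filters one luma edge and, when chroma
         * is enabled, the matching chroma edges: 4:4:4 chroma shares the luma
         * geometry, while subsampled chroma is only filtered on even edges
         * with the edgec* helpers.  Edge 0 is the MB border and therefore
         * uses the averaged QPs computed above.
         */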
#define FILTER(hv,dir,edge,intra)\
        if(AV_RN64A(bS[dir][edge])) {                                   \
            filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qp : qp##dir, a, b, h, intra );\
            if(chroma){\
                if(chroma444){\
                    filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                    filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                } else if(!(edge&1)) {\
                    filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                    filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                }\
            }\
        }
        if(left_type)
            FILTER(v,0,0,1);
        if( edges == 1 ) {
            if(top_type)
                FILTER(h,1,0,1);
        } else if( IS_8x8DCT(mb_type) ) {
            FILTER(v,0,2,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,2,0);
        } else {
            FILTER(v,0,1,0);
            FILTER(v,0,2,0);
            FILTER(v,0,3,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,1,0);
            FILTER(h,1,2,0);
            FILTER(h,1,3,0);
        }
#undef FILTER
    }
}

void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl,
                            int mb_x, int mb_y, uint8_t *img_y,
                            uint8_t *img_cb, uint8_t *img_cr,
                            unsigned int linesize, unsigned int uvlinesize)
{
    av_assert2(!FRAME_MBAFF(h));
    if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
        ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
        return;
    }

#if CONFIG_SMALL
    h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, h->pixel_shift);
#else
    if(h->pixel_shift){
        h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 1);
    }else{
        h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 0);
    }
#endif
}

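/*
 * Returns nonzero if the motion of the two 4x4 blocks at b_idx and bn_idx
 * differs enough to force a boundary strength of 1: different references, a
 * horizontal MV difference of 4 or more quarter-pel units, or a vertical
 * difference of at least mvy_limit (4 for frame MBs, 2 for field MBs).  The
 * "x - y + 3 >= 7U" idiom is an unsigned-compare way of writing
 * FFABS(x - y) >= 4 without a branch; for two reference lists both the
 * parallel and the crossed list-0/list-1 pairings are checked.
 */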
static int check_mv(H264SliceContext *sl, long b_idx, long bn_idx, int mvy_limit)
{
    int v;

    v = sl->ref_cache[0][b_idx] != sl->ref_cache[0][bn_idx];
    if (!v && sl->ref_cache[0][b_idx] != -1)
        v = sl->mv_cache[0][b_idx][0] - sl->mv_cache[0][bn_idx][0] + 3 >= 7U |
            FFABS(sl->mv_cache[0][b_idx][1] - sl->mv_cache[0][bn_idx][1]) >= mvy_limit;

    if (sl->list_count == 2) {
        if(!v)
            v = sl->ref_cache[1][b_idx] != sl->ref_cache[1][bn_idx] |
                sl->mv_cache[1][b_idx][0] - sl->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS(sl->mv_cache[1][b_idx][1] - sl->mv_cache[1][bn_idx][1]) >= mvy_limit;

        if(v){
            if (sl->ref_cache[0][b_idx] != sl->ref_cache[1][bn_idx] |
                sl->ref_cache[1][b_idx] != sl->ref_cache[0][bn_idx])
                return 1;
            return
                sl->mv_cache[0][b_idx][0] - sl->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS(sl->mv_cache[0][b_idx][1] - sl->mv_cache[1][bn_idx][1]) >= mvy_limit |
                sl->mv_cache[1][b_idx][0] - sl->mv_cache[0][bn_idx][0] + 3 >= 7U |
                FFABS(sl->mv_cache[1][b_idx][1] - sl->mv_cache[0][bn_idx][1]) >= mvy_limit;
        }
    }

    return v;
}

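/*
 * Filters all edges of one direction (dir==0: vertical edges, dir==1:
 * horizontal edges) of a single macroblock for the general path: first the
 * macroblock-border edge against the left/top neighbour, using the averaged
 * QP of the two macroblocks, then the internal edges 1..3 using the current
 * macroblock's QP.
 */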
static av_always_inline void filter_mb_dir(const H264Context *h, H264SliceContext *sl,
                                           int mb_x, int mb_y,
                                           uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                                           unsigned int linesize, unsigned int uvlinesize,
                                           int mb_xy, int mb_type, int mvy_limit,
                                           int first_vertical_edge_done, int a, int b,
                                           int chroma, int dir)
{
    int edge;
    int chroma_qp_avg[2];
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);
    const int mbm_xy = dir == 0 ? mb_xy - 1 : sl->top_mb_xy;
    const int mbm_type = dir == 0 ? sl->left_type[LTOP] : sl->top_type;

    // how often to recheck mv-based bS when iterating between edges
    static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1},
                                              {0,3,1,1,3,3,3,3}};
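    /*
     * mask_edge_tab is indexed by direction and by the partition-shape bits
     * of mb_type ((mb_type >> 3) & 7 covers MB_TYPE_16x16/16x8/8x16): 3 means
     * the motion cannot change across internal edges in this direction (mv
     * checks skipped for all of them), 1 means it can only change at the
     * middle edge, and 0 means every internal edge needs its own mv check.
     */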
    const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7];
    const int edges = mask_edge == 3 && !(sl->cbp&15) ? 1 : 4;

    // how often to recheck mv-based bS when iterating along each edge
    const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir));

    if(mbm_type && !first_vertical_edge_done){

        if (FRAME_MBAFF(h) && (dir == 1) && ((mb_y&1) == 0)
            && IS_INTERLACED(mbm_type&~mb_type)
           ) {
            // This is a special case in the norm where the filtering must
            // be done twice (once for each field) even if we are in a
            // frame macroblock.
            //
            unsigned int tmp_linesize   = 2 *   linesize;
            unsigned int tmp_uvlinesize = 2 * uvlinesize;
            int mbn_xy = mb_xy - 2 * h->mb_stride;
            int j;

            for(j=0; j<2; j++, mbn_xy += h->mb_stride){
                LOCAL_ALIGNED(8, int16_t, bS, [4]);
                int qp;
                if (IS_INTRA(mb_type | h->cur_pic.mb_type[mbn_xy])) {
                    AV_WN64A(bS, 0x0003000300030003ULL);
                } else {
                    if (!CABAC(h) && IS_8x8DCT(h->cur_pic.mb_type[mbn_xy])) {
                        bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000) || sl->non_zero_count_cache[scan8[0]+0]);
                        bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000) || sl->non_zero_count_cache[scan8[0]+1]);
                        bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000) || sl->non_zero_count_cache[scan8[0]+2]);
                        bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000) || sl->non_zero_count_cache[scan8[0]+3]);
                    }else{
                        const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4;
                        int i;
                        for( i = 0; i < 4; i++ ) {
                            bS[i] = 1 + !!(sl->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
                        }
                    }
                }
                // Do not use s->qscale as the luma quantizer because it does
                // not have the same value in IPCM macroblocks.
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
                ff_tlog(h->avctx, "filter mb:%d/%d dir:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, qp, tmp_linesize, tmp_uvlinesize);
                { int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
                filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
                chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    } else {
                        filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    }
                }
            }
        }else{
            LOCAL_ALIGNED(8, int16_t, bS, [4]);
            int qp;

            if( IS_INTRA(mb_type|mbm_type)) {
                AV_WN64A(bS, 0x0003000300030003ULL);
                if (   (!IS_INTERLACED(mb_type|mbm_type))
                    || ((FRAME_MBAFF(h) || (h->picture_structure != PICT_FRAME)) && (dir == 0))
                   )
                    AV_WN64A(bS, 0x0004000400040004ULL);
            } else {
                int i;
                int mv_done;

                if( dir && FRAME_MBAFF(h) && IS_INTERLACED(mb_type ^ mbm_type)) {
                    AV_WN64A(bS, 0x0001000100010001ULL);
                    mv_done = 1;
                }
                else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
                    int b_idx  = 8 + 4;
                    int bn_idx = b_idx - (dir ? 8:1);

                    bS[0] = bS[1] = bS[2] = bS[3] = check_mv(sl, 8 + 4, bn_idx, mvy_limit);
                    mv_done = 1;
                }
                else
                    mv_done = 0;

                for( i = 0; i < 4; i++ ) {
                    int x = dir == 0 ? 0 : i;
                    int y = dir == 0 ? i : 0;
                    int b_idx  = 8 + 4 + x + 8*y;
                    int bn_idx = b_idx - (dir ? 8:1);

                    if (sl->non_zero_count_cache[b_idx] |
                        sl->non_zero_count_cache[bn_idx]) {
                        bS[i] = 2;
                    }
                    else if(!mv_done)
                    {
                        bS[i] = check_mv(sl, b_idx, bn_idx, mvy_limit);
                    }
                }
            }

            /* Filter edge */
            // Do not use s->qscale as the luma quantizer because it does not
            // have the same value in IPCM macroblocks.
            if(bS[0]+bS[1]+bS[2]+bS[3]){
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
                //ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
                ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
                //{ int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
                chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                } else {
                    filter_mb_edgeh( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                }
            }
        }
    }

    /* Calculate bS */
    for( edge = 1; edge < edges; edge++ ) {
        LOCAL_ALIGNED(8, int16_t, bS, [4]);
        int qp;
        const int deblock_edge = !IS_8x8DCT(mb_type & (edge<<24)); // (edge&1) && IS_8x8DCT(mb_type)

        if (!deblock_edge && (!chroma422 || dir == 0))
            continue;

        if( IS_INTRA(mb_type)) {
            AV_WN64A(bS, 0x0003000300030003ULL);
        } else {
            int i;
            int mv_done;

            if( edge & mask_edge ) {
                AV_ZERO64(bS);
                mv_done = 1;
            }
            else if( mask_par0 ) {
                int b_idx  = 8 + 4 + edge * (dir ? 8:1);
                int bn_idx = b_idx - (dir ? 8:1);

                bS[0] = bS[1] = bS[2] = bS[3] = check_mv(sl, b_idx, bn_idx, mvy_limit);
                mv_done = 1;
            }
            else
                mv_done = 0;

            for( i = 0; i < 4; i++ ) {
                int x = dir == 0 ? edge : i;
                int y = dir == 0 ? i : edge;
                int b_idx  = 8 + 4 + x + 8*y;
                int bn_idx = b_idx - (dir ? 8:1);

                if (sl->non_zero_count_cache[b_idx] |
                    sl->non_zero_count_cache[bn_idx]) {
                    bS[i] = 2;
                }
                else if(!mv_done)
                {
                    bS[i] = check_mv(sl, b_idx, bn_idx, mvy_limit);
                }
            }

            if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
                continue;
        }

        /* Filter edge */
        // Do not use s->qscale as the luma quantizer because it does not
        // have the same value in IPCM macroblocks.
        qp = h->cur_pic.qscale_table[mb_xy];
        //ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
        ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
        //{ int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
        if( dir == 0 ) {
            filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
            if (chroma) {
                if (chroma444) {
                    filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                } else if( (edge&1) == 0 ) {
                    filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                }
            }
        } else {
            if (chroma422) {
                if (deblock_edge)
                    filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    filter_mb_edgech(&img_cb[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgech(&img_cr[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                }
            } else {
                filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                    } else if ((edge&1) == 0) {
                        filter_mb_edgech(&img_cb[2*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[2*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                    }
                }
            }
        }
    }
}

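/*
 * General per-macroblock deblocking entry point.  It handles the MBAFF left
 * edge as a special case (8 boundary strengths and two QP pairs, since that
 * edge can border both macroblocks of the left pair) and then filters the
 * vertical and horizontal edges through filter_mb_dir().
 */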
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
                       int mb_x, int mb_y,
                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                       unsigned int linesize, unsigned int uvlinesize)
{
    const int mb_xy = mb_x + mb_y*h->mb_stride;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
    int first_vertical_edge_done = 0;
    int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
    int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
    int b = 52 + sl->slice_beta_offset - qp_bd_offset;

    if (FRAME_MBAFF(h)
        // and current and left pair do not have the same interlaced type
        && IS_INTERLACED(mb_type ^ sl->left_type[LTOP])
        // and the left MB is available to us
        && sl->left_type[LTOP]) {
        /* First vertical edge is different in MBAFF frames
         * There are 8 different bS to compute and 2 different Qp
         */
        LOCAL_ALIGNED(8, int16_t, bS, [8]);
        int qp[2];
        int bqp[2];
        int rqp[2];
        int mb_qp, mbn0_qp, mbn1_qp;
        int i;
        first_vertical_edge_done = 1;

        if( IS_INTRA(mb_type) ) {
            AV_WN64A(&bS[0], 0x0004000400040004ULL);
            AV_WN64A(&bS[4], 0x0004000400040004ULL);
        } else {
            static const uint8_t offset[2][2][8]={
                {
                    {3+4*0, 3+4*0, 3+4*0, 3+4*0, 3+4*1, 3+4*1, 3+4*1, 3+4*1},
                    {3+4*2, 3+4*2, 3+4*2, 3+4*2, 3+4*3, 3+4*3, 3+4*3, 3+4*3},
                },{
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                }
            };
            const uint8_t *off= offset[MB_FIELD(sl)][mb_y&1];
            for( i = 0; i < 8; i++ ) {
                int j= MB_FIELD(sl) ? i>>2 : i&1;
                int mbn_xy = sl->left_mb_xy[LEFT(j)];
                int mbn_type = sl->left_type[LEFT(j)];

                if( IS_INTRA( mbn_type ) )
                    bS[i] = 4;
                else{
                    bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] |
                         ((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ?
                            (h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                            :
                            h->non_zero_count[mbn_xy][ off[i] ]));
                }
            }
        }

        mb_qp   = h->cur_pic.qscale_table[mb_xy];
        mbn0_qp = h->cur_pic.qscale_table[sl->left_mb_xy[0]];
        mbn1_qp = h->cur_pic.qscale_table[sl->left_mb_xy[1]];
        qp[0]  = ( mb_qp + mbn0_qp + 1 ) >> 1;
        bqp[0] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
                  get_chroma_qp(h->ps.pps, 0, mbn0_qp) + 1) >> 1;
        rqp[0] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
                  get_chroma_qp(h->ps.pps, 1, mbn0_qp) + 1) >> 1;
        qp[1]  = ( mb_qp + mbn1_qp + 1 ) >> 1;
        bqp[1] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
                  get_chroma_qp(h->ps.pps, 0, mbn1_qp) + 1 ) >> 1;
        rqp[1] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
                  get_chroma_qp(h->ps.pps, 1, mbn1_qp) + 1 ) >> 1;

        /* Filter edge */
        ff_tlog(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
        { int i; for (i = 0; i < 8; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
        if (MB_FIELD(sl)) {
            filter_mb_mbaff_edgev ( h, img_y                ,   linesize, bS  , 1, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  + 8* linesize ,   linesize, bS+4, 1, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444(h)) {
                    filter_mb_mbaff_edgev ( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                } else if (CHROMA422(h)) {
                    filter_mb_mbaff_edgecv(h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1);
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                }
            }
        }else{
            filter_mb_mbaff_edgev ( h, img_y              , 2*  linesize, bS  , 2, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  +   linesize, 2*  linesize, bS+1, 2, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444(h)) {
                    filter_mb_mbaff_edgev ( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }
            }
        }
    }

#if CONFIG_SMALL
    {
        int dir;
        for (dir = 0; dir < 2; dir++)
            filter_mb_dir(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize,
                          uvlinesize, mb_xy, mb_type, mvy_limit,
                          dir ? 0 : first_vertical_edge_done, a, b,
                          chroma, dir);
    }
#else
    filter_mb_dir(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, a, b, chroma, 0);
    filter_mb_dir(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, a, b, chroma, 1);
#endif
}