/*
 * Copyright (c) 2016 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <string.h>

#include "libavcodec/avcodec.h"
#include "libavcodec/vp8dsp.h"

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem_internal.h"

#include "checkasm.h"

#define PIXEL_STRIDE 16

#define randomize_buffers(src, dst, stride, coef)                 \
    do {                                                          \
        int x, y;                                                 \
        for (y = 0; y < 4; y++) {                                 \
            AV_WN32A((src) + y * (stride), rnd());                \
            AV_WN32A((dst) + y * (stride), rnd());                \
            for (x = 0; x < 4; x++)                               \
                (coef)[y * 4 + x] = (src)[y * (stride) + x] -     \
                                    (dst)[y * (stride) + x];      \
        }                                                         \
    } while (0)

// Forward 4x4 transform (the inverse of the idct being tested), used to
// produce realistic input coefficients from random pixel differences.
static void dct4x4(int16_t *coef)
{
    int i;
    for (i = 0; i < 4; i++) {
        const int a1 = (coef[i*4 + 0] + coef[i*4 + 3]) * 8;
        const int b1 = (coef[i*4 + 1] + coef[i*4 + 2]) * 8;
        const int c1 = (coef[i*4 + 1] - coef[i*4 + 2]) * 8;
        const int d1 = (coef[i*4 + 0] - coef[i*4 + 3]) * 8;
        coef[i*4 + 0] =  a1 + b1;
        coef[i*4 + 1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12;
        coef[i*4 + 2] =  a1 - b1;
        coef[i*4 + 3] = (d1 * 2217 - c1 * 5352 +  7500) >> 12;
    }
    for (i = 0; i < 4; i++) {
        const int a1 = coef[i + 0*4] + coef[i + 3*4];
        const int b1 = coef[i + 1*4] + coef[i + 2*4];
        const int c1 = coef[i + 1*4] - coef[i + 2*4];
        const int d1 = coef[i + 0*4] - coef[i + 3*4];
        coef[i + 0*4] =  (a1 + b1 + 7) >> 4;
        coef[i + 1*4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + !!d1;
        coef[i + 2*4] =  (a1 - b1 + 7) >> 4;
        coef[i + 3*4] =  (d1 * 2217 - c1 * 5352 + 51000) >> 16;
    }
}

// Forward Walsh-Hadamard transform of the 16 luma DC coefficients.
static void wht4x4(int16_t *coef)
{
    int i;
    for (i = 0; i < 4; i++) {
        int a1 = coef[0 * 4 + i];
        int b1 = coef[1 * 4 + i];
        int c1 = coef[2 * 4 + i];
        int d1 = coef[3 * 4 + i];
        int e1;
        a1 += b1;
        d1 -= c1;
        e1 = (a1 - d1) >> 1;
        b1 = e1 - b1;
        c1 = e1 - c1;
        a1 -= c1;
        d1 += b1;
        coef[0 * 4 + i] = a1;
        coef[1 * 4 + i] = c1;
        coef[2 * 4 + i] = d1;
        coef[3 * 4 + i] = b1;
    }
    for (i = 0; i < 4; i++) {
        int a1 = coef[i * 4 + 0];
        int b1 = coef[i * 4 + 1];
        int c1 = coef[i * 4 + 2];
        int d1 = coef[i * 4 + 3];
        int e1;
        a1 += b1;
        d1 -= c1;
        e1 = (a1 - d1) >> 1;
        b1 = e1 - b1;
        c1 = e1 - c1;
        a1 -= c1;
        d1 += b1;
        coef[i * 4 + 0] = a1 * 2;
        coef[i * 4 + 1] = c1 * 2;
        coef[i * 4 + 2] = d1 * 2;
        coef[i * 4 + 3] = b1 * 2;
    }
}

static void check_idct(void)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, coef, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [4 * 4]);
    VP8DSPContext d;
    int dc;
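    // Signature shared by vp8_idct_add and vp8_idct_dc_add, both of which are
    // exercised below with coefficients produced by dct4x4().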
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t *block, ptrdiff_t stride);

    ff_vp8dsp_init(&d);
    randomize_buffers(src, dst, 4, coef);

    dct4x4(coef);

    for (dc = 0; dc <= 1; dc++) {
        void (*idct)(uint8_t *, int16_t *, ptrdiff_t) = dc ? d.vp8_idct_dc_add : d.vp8_idct_add;

        if (check_func(idct, "vp8_idct_%sadd", dc ? "dc_" : "")) {
            if (dc) {
                memset(subcoef0, 0, 4 * 4 * sizeof(int16_t));
                subcoef0[0] = coef[0];
            } else {
                memcpy(subcoef0, coef, 4 * 4 * sizeof(int16_t));
            }
            memcpy(dst0, dst, 4 * 4);
            memcpy(dst1, dst, 4 * 4);
            memcpy(subcoef1, subcoef0, 4 * 4 * sizeof(int16_t));
            // Note, this uses a pixel stride of 4, even though the real decoder uses a stride
            // that is a multiple of 16. If optimizations want to take advantage of that, this
            // test needs to be updated to make it more like the h264dsp tests.
            call_ref(dst0, subcoef0, 4);
            call_new(dst1, subcoef1, 4);
            if (memcmp(dst0, dst1, 4 * 4) ||
                memcmp(subcoef0, subcoef1, 4 * 4 * sizeof(int16_t)))
                fail();

            bench_new(dst1, subcoef1, 4);
        }
    }
}

static void check_idct_dc4(void)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [4 * 4 * 4]);
    LOCAL_ALIGNED_16(int16_t, coef, [4], [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [4], [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [4], [4 * 4]);
    VP8DSPContext d;
    int i, chroma;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);

    ff_vp8dsp_init(&d);

    for (chroma = 0; chroma <= 1; chroma++) {
        void (*idct4dc)(uint8_t *, int16_t[4][16], ptrdiff_t) = chroma ? d.vp8_idct_dc_add4uv : d.vp8_idct_dc_add4y;
        if (check_func(idct4dc, "vp8_idct_dc_add4%s", chroma ? "uv" : "y")) {
            ptrdiff_t stride = chroma ? 8 : 16;
            int w = chroma ? 2 : 4;
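            // Generate four DC-only blocks, laid out as the functions expect them:
            // a 2x2 arrangement in the 8-wide chroma plane, a single row of four
            // blocks in the 16-wide luma plane.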
            for (i = 0; i < 4; i++) {
                int blockx = 4 * (i % w);
                int blocky = 4 * (i / w);
                randomize_buffers(src + stride * blocky + blockx, dst + stride * blocky + blockx, stride, coef[i]);
                dct4x4(coef[i]);
                memset(&coef[i][1], 0, 15 * sizeof(int16_t));
            }

            memcpy(dst0, dst, 4 * 4 * 4);
            memcpy(dst1, dst, 4 * 4 * 4);
            memcpy(subcoef0, coef, 4 * 4 * 4 * sizeof(int16_t));
            memcpy(subcoef1, coef, 4 * 4 * 4 * sizeof(int16_t));
            call_ref(dst0, subcoef0, stride);
            call_new(dst1, subcoef1, stride);
            if (memcmp(dst0, dst1, 4 * 4 * 4) ||
                memcmp(subcoef0, subcoef1, 4 * 4 * 4 * sizeof(int16_t)))
                fail();
            bench_new(dst1, subcoef1, stride);
        }
    }
}

static void check_luma_dc_wht(void)
{
    LOCAL_ALIGNED_16(int16_t, dc,  [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, dc0, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, dc1, [4 * 4]);
    int16_t block[4][4][16];
    LOCAL_ALIGNED_16(int16_t, block0, [4], [4][16]);
    LOCAL_ALIGNED_16(int16_t, block1, [4], [4][16]);
    VP8DSPContext d;
    int dc_only;
    int blockx, blocky;
    declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t block[4][4][16], int16_t dc[16]);

    ff_vp8dsp_init(&d);

    for (blocky = 0; blocky < 4; blocky++) {
        for (blockx = 0; blockx < 4; blockx++) {
            uint8_t src[16], dst[16];
            randomize_buffers(src, dst, 4, block[blocky][blockx]);

            dct4x4(block[blocky][blockx]);
            dc[blocky * 4 + blockx] = block[blocky][blockx][0];
            block[blocky][blockx][0] = rnd();
        }
    }
    wht4x4(dc);

    for (dc_only = 0; dc_only <= 1; dc_only++) {
        void (*idct)(int16_t [4][4][16], int16_t [16]) = dc_only ? d.vp8_luma_dc_wht_dc : d.vp8_luma_dc_wht;

        if (check_func(idct, "vp8_luma_dc_wht%s", dc_only ? "_dc" : "")) {
            if (dc_only) {
                memset(dc0, 0, 16 * sizeof(int16_t));
                dc0[0] = dc[0];
            } else {
                memcpy(dc0, dc, 16 * sizeof(int16_t));
            }
            memcpy(dc1, dc0, 16 * sizeof(int16_t));
            memcpy(block0, block, 4 * 4 * 16 * sizeof(int16_t));
            memcpy(block1, block, 4 * 4 * 16 * sizeof(int16_t));
            call_ref(block0, dc0);
            call_new(block1, dc1);
            if (memcmp(block0, block1, 4 * 4 * 16 * sizeof(int16_t)) ||
                memcmp(dc0, dc1, 16 * sizeof(int16_t)))
                fail();
            bench_new(block1, dc1);
        }
    }
}

#define SRC_BUF_STRIDE 32
#define SRC_BUF_SIZE (((size << (size < 16)) + 5) * SRC_BUF_STRIDE)
// The mc subpixel interpolation filter needs the 2 previous pixels in either
// direction; the +1 is to make sure the actual load addresses are always
// unaligned.
#define src (buf + 2 * SRC_BUF_STRIDE + 2 + 1)

#undef randomize_buffers
#define randomize_buffers()                        \
    do {                                           \
        int k;                                     \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) {    \
            AV_WN32A(buf + k, rnd());              \
        }                                          \
    } while (0)

static void check_mc(void)
{
    LOCAL_ALIGNED_16(uint8_t, buf,  [32 * 32]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [16 * 16]);
    VP8DSPContext d;
    int type, k, dx, dy;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t, uint8_t *, ptrdiff_t, int, int, int);

    ff_vp78dsp_init(&d);

    for (type = 0; type < 2; type++) {
        vp8_mc_func (*tab)[3][3] = type ? d.put_vp8_bilinear_pixels_tab : d.put_vp8_epel_pixels_tab;
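        // k enumerates the block sizes: 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 and 4x4.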
        for (k = 1; k < 8; k++) {
            int hsize  = k / 3;
            int size   = 16 >> hsize;
            int height = (size << 1) >> (k % 3);
            for (dy = 0; dy < 3; dy++) {
                for (dx = 0; dx < 3; dx++) {
                    char str[100];
                    if (dx || dy) {
                        if (type == 0) {
                            static const char *dx_names[] = { "", "h4", "h6" };
                            static const char *dy_names[] = { "", "v4", "v6" };
                            snprintf(str, sizeof(str), "epel%d_%s%s", size, dx_names[dx], dy_names[dy]);
                        } else {
                            snprintf(str, sizeof(str), "bilin%d_%s%s", size, dx ? "h" : "", dy ? "v" : "");
                        }
                    } else {
                        snprintf(str, sizeof(str), "pixels%d", size);
                    }
                    if (check_func(tab[hsize][dy][dx], "vp8_put_%s", str)) {
                        int mx, my;
                        int i;
                        if (type == 0) {
                            mx = dx == 2 ? 2 + 2 * (rnd() % 3) : dx == 1 ? 1 + 2 * (rnd() % 4) : 0;
                            my = dy == 2 ? 2 + 2 * (rnd() % 3) : dy == 1 ? 1 + 2 * (rnd() % 4) : 0;
                        } else {
                            mx = dx ? 1 + (rnd() % 7) : 0;
                            my = dy ? 1 + (rnd() % 7) : 0;
                        }
                        randomize_buffers();
                        for (i = -2; i <= 3; i++) {
                            int val = (i == -1 || i == 2) ? 0 : 0xff;
                            // Set pixels in the first row and column to the maximum pattern,
                            // to test for potential overflows in the filter.
                            src[i                 ] = val;
                            src[i * SRC_BUF_STRIDE] = val;
                        }
                        call_ref(dst0, size, src, SRC_BUF_STRIDE, height, mx, my);
                        call_new(dst1, size, src, SRC_BUF_STRIDE, height, mx, my);
                        if (memcmp(dst0, dst1, size * height))
                            fail();
                        bench_new(dst1, size, src, SRC_BUF_STRIDE, height, mx, my);
                    }
                }
            }
        }
    }
}

#undef randomize_buffers

#define setpx(a, b, c) buf[(a) + (b) * jstride] = av_clip_uint8(c)
// Set the pixel to c +/- [0,d]
#define setdx(a, b, c, d) setpx(a, b, c - (d) + (rnd() % ((d) * 2 + 1)))
// Set the pixel to c +/- [d,d+e] (making sure it won't be clipped)
#define setdx2(a, b, o, c, d, e) setpx(a, b, o = c + ((d) + (rnd() % (e))) * (c >= 128 ? -1 : 1))

static void randomize_loopfilter_buffers(int lineoff, int str,
                                         int dir, int flim_E, int flim_I,
                                         int hev_thresh, uint8_t *buf,
                                         int force_hev)
{
    uint32_t mask = 0xff;
    int off = dir ? lineoff : lineoff * str;
    int istride = dir ? 1 : str;
    int jstride = dir ? str : 1;
    int i;
    for (i = 0; i < 8; i += 2) {
        // Row 0 will trigger hev for q0/q1, row 2 will trigger hev for p0/p1,
        // rows 4 and 6 will not trigger hev.
        // force_hev 1 will make sure all rows trigger hev, while force_hev -1
        // makes none of them trigger it.
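        // p0..p3 are the pixels on one side of the filtered edge (offsets -1..-4),
        // q0..q3 the ones on the other side (offsets 0..3).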
        int idx = off + i * istride, p2, p1, p0, q0, q1, q2;
        setpx(idx,  0, q0 = rnd() & mask);
        if ((i == 0 && force_hev >= 0) || force_hev > 0)
            setdx2(idx,  1, q1, q0, hev_thresh + 1, flim_I - hev_thresh - 1);
        else
            setdx(idx,  1, q1 = q0, hev_thresh);
        setdx(idx,  2, q2 = q1, flim_I);
        setdx(idx,  3, q2,      flim_I);
        setdx(idx, -1, p0 = q0, flim_E >> 2);
        if ((i == 2 && force_hev >= 0) || force_hev > 0)
            setdx2(idx, -2, p1, p0, hev_thresh + 1, flim_I - hev_thresh - 1);
        else
            setdx(idx, -2, p1 = p0, hev_thresh);
        setdx(idx, -3, p2 = p1, flim_I);
        setdx(idx, -4, p2,      flim_I);
    }
}

// Fill the buffer with random pixels
static void fill_loopfilter_buffers(uint8_t *buf, ptrdiff_t stride, int w, int h)
{
    int x, y;
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            buf[y * stride + x] = rnd() & 0xff;
}

#define randomize_buffers(buf, lineoff, str, force_hev) \
    randomize_loopfilter_buffers(lineoff, str, dir, flim_E, flim_I, hev_thresh, buf, force_hev)

static void check_loopfilter_16y(void)
{
    LOCAL_ALIGNED_16(uint8_t, base0, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1, [32 + 16 * 16]);
    VP8DSPContext d;
    int dir, edge, force_hev;
    int flim_E = 20, flim_I = 10, hev_thresh = 7;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t, int, int, int);

    ff_vp8dsp_init(&d);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0 = base0 + midoff_aligned;
        uint8_t *buf1 = base1 + midoff_aligned;
        for (edge = 0; edge < 2; edge++) {
            void (*func)(uint8_t *, ptrdiff_t, int, int, int) = NULL;
            switch (dir << 1 | edge) {
            case (0 << 1) | 0: func = d.vp8_h_loop_filter16y;       break;
            case (1 << 1) | 0: func = d.vp8_v_loop_filter16y;       break;
            case (0 << 1) | 1: func = d.vp8_h_loop_filter16y_inner; break;
            case (1 << 1) | 1: func = d.vp8_v_loop_filter16y_inner; break;
            }
            if (check_func(func, "vp8_loop_filter16y%s_%s", edge ? "_inner" : "", dir ? "v" : "h")) {
                for (force_hev = -1; force_hev <= 1; force_hev++) {
                    fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
                    randomize_buffers(buf0, 0, 16, force_hev);
                    randomize_buffers(buf0, 8, 16, force_hev);
                    memcpy(buf1 - midoff, buf0 - midoff, 16 * 16);
                    call_ref(buf0, 16, flim_E, flim_I, hev_thresh);
                    call_new(buf1, 16, flim_E, flim_I, hev_thresh);
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16))
                        fail();
                }
                fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
                randomize_buffers(buf0, 0, 16, 0);
                randomize_buffers(buf0, 8, 16, 0);
                bench_new(buf0, 16, flim_E, flim_I, hev_thresh);
            }
        }
    }
}

static void check_loopfilter_8uv(void)
{
    LOCAL_ALIGNED_16(uint8_t, base0u, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base0v, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1u, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1v, [32 + 16 * 16]);
    VP8DSPContext d;
    int dir, edge, force_hev;
    int flim_E = 20, flim_I = 10, hev_thresh = 7;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, uint8_t *, ptrdiff_t, int, int, int);

    ff_vp8dsp_init(&d);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
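        // For the horizontal filters a larger aligned offset is used, so the
        // plane pointers stay 16-byte aligned while 4 pixels of history remain
        // before the filtered edge.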
        uint8_t *buf0u = base0u + midoff_aligned;
        uint8_t *buf0v = base0v + midoff_aligned;
        uint8_t *buf1u = base1u + midoff_aligned;
        uint8_t *buf1v = base1v + midoff_aligned;
        for (edge = 0; edge < 2; edge++) {
            void (*func)(uint8_t *, uint8_t *, ptrdiff_t, int, int, int) = NULL;
            switch (dir << 1 | edge) {
            case (0 << 1) | 0: func = d.vp8_h_loop_filter8uv;       break;
            case (1 << 1) | 0: func = d.vp8_v_loop_filter8uv;       break;
            case (0 << 1) | 1: func = d.vp8_h_loop_filter8uv_inner; break;
            case (1 << 1) | 1: func = d.vp8_v_loop_filter8uv_inner; break;
            }
            if (check_func(func, "vp8_loop_filter8uv%s_%s", edge ? "_inner" : "", dir ? "v" : "h")) {
                for (force_hev = -1; force_hev <= 1; force_hev++) {
                    fill_loopfilter_buffers(buf0u - midoff, 16, 16, 16);
                    fill_loopfilter_buffers(buf0v - midoff, 16, 16, 16);
                    randomize_buffers(buf0u, 0, 16, force_hev);
                    randomize_buffers(buf0v, 0, 16, force_hev);
                    memcpy(buf1u - midoff, buf0u - midoff, 16 * 16);
                    memcpy(buf1v - midoff, buf0v - midoff, 16 * 16);

                    call_ref(buf0u, buf0v, 16, flim_E, flim_I, hev_thresh);
                    call_new(buf1u, buf1v, 16, flim_E, flim_I, hev_thresh);
                    if (memcmp(buf0u - midoff, buf1u - midoff, 16 * 16) ||
                        memcmp(buf0v - midoff, buf1v - midoff, 16 * 16))
                        fail();
                }
                fill_loopfilter_buffers(buf0u - midoff, 16, 16, 16);
                fill_loopfilter_buffers(buf0v - midoff, 16, 16, 16);
                randomize_buffers(buf0u, 0, 16, 0);
                randomize_buffers(buf0v, 0, 16, 0);
                bench_new(buf0u, buf0v, 16, flim_E, flim_I, hev_thresh);
            }
        }
    }
}

static void check_loopfilter_simple(void)
{
    LOCAL_ALIGNED_16(uint8_t, base0, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1, [32 + 16 * 16]);
    VP8DSPContext d;
    int dir;
    int flim_E = 20, flim_I = 30, hev_thresh = 0;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t, int);

    ff_vp8dsp_init(&d);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0 = base0 + midoff_aligned;
        uint8_t *buf1 = base1 + midoff_aligned;
        void (*func)(uint8_t *, ptrdiff_t, int) = dir ? d.vp8_v_loop_filter_simple : d.vp8_h_loop_filter_simple;
        if (check_func(func, "vp8_loop_filter_simple_%s", dir ? "v" : "h")) {
            fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
            randomize_buffers(buf0, 0, 16, -1);
            randomize_buffers(buf0, 8, 16, -1);
            memcpy(buf1 - midoff, buf0 - midoff, 16 * 16);
            call_ref(buf0, 16, flim_E);
            call_new(buf1, 16, flim_E);
            if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16))
                fail();
            bench_new(buf0, 16, flim_E);
        }
    }
}

void checkasm_check_vp8dsp(void)
{
    check_idct();
    check_idct_dc4();
    check_luma_dc_wht();
    report("idct");
    check_mc();
    report("mc");
    check_loopfilter_16y();
    check_loopfilter_8uv();
    check_loopfilter_simple();
    report("loopfilter");
}