Lines matching refs:flat
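
These matches come from a VP9 loop-filter implementation written with LoongArch LSX intrinsics; the leading numbers are line numbers in the file being cross-referenced. Throughout, flat is a per-column byte mask: an all-ones lane marks a pixel column whose neighborhood is flat enough to take the stronger 8-tap filter output, while an all-zero lane keeps the 4-tap result.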

237 __m128i mask, hev, flat, thresh, b_limit, limit;
251 hev, mask, flat);
270 __m128i mask, hev, flat, thresh0, b_limit0;
293 hev, mask, flat);
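
In the first two groups (the 4-tap-only paths) flat is produced by the shared mask macro but is not used for blending afterwards. As a reference for what the three outputs of that macro mean, here is a minimal scalar sketch of the per-column tests used by the VP9 in-loop filter; the function names are hypothetical, and the LSX code performs the same comparisons on 16 byte lanes at once.

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical scalar reference for one pixel column.
 * p3..p0 are samples before the edge, q0..q3 after it. */
static int filter_mask_px(uint8_t limit, uint8_t b_limit,
                          uint8_t p3, uint8_t p2, uint8_t p1, uint8_t p0,
                          uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3)
{
    int ok = abs(p3 - p2) <= limit && abs(p2 - p1) <= limit &&
             abs(p1 - p0) <= limit && abs(q1 - q0) <= limit &&
             abs(q2 - q1) <= limit && abs(q3 - q2) <= limit &&
             abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= b_limit;
    return ok ? 0xff : 0;   /* all-ones byte: this column gets filtered */
}

/* hev ("high edge variance") switches the 4-tap filter to its sharper
 * variant on strong edges. */
static int hev_mask_px(uint8_t thresh,
                       uint8_t p1, uint8_t p0, uint8_t q0, uint8_t q1)
{
    return (abs(p1 - p0) > thresh || abs(q1 - q0) > thresh) ? 0xff : 0;
}
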
310 __m128i mask, hev, flat, thresh, b_limit, limit;
329 hev, mask, flat);
330 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
334 flat = __lsx_vilvl_d(zero, flat);
336 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
337 if (__lsx_bz_v(flat)) {
358 p2_out = __lsx_vbitsel_v(p2, p2_filter8, flat);
359 p1_out = __lsx_vbitsel_v(p1_out, p1_filter8, flat);
360 p0_out = __lsx_vbitsel_v(p0_out, p0_filter8, flat);
361 q0_out = __lsx_vbitsel_v(q0_out, q0_filter8, flat);
362 q1_out = __lsx_vbitsel_v(q1_out, q1_filter8, flat);
363 q2_out = __lsx_vbitsel_v(q2, q2_filter8, flat);
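
The 8-tap groups all follow one pattern: VP9_FLAT4 widens the flatness test to p3/p2 and q2/q3 (the |p1-p0| and |q1-q0| differences appear to be folded in already by the mask macro, which seeds flat with their maximum), the mask is then narrowed to the lanes this variant actually processes, and a branch skips filter8 entirely when no lane survives. A minimal LSX sketch of that flow, assuming the macro semantics just described (flat4_mask is a hypothetical name, not the file's macro body):

#include <lsxintrin.h>

/* flat lane = all-ones iff |p2-p0|, |q2-q0|, |p3-p0|, |q3-q0| and the
 * seeded |p1-p0|/|q1-q0| maxima are all <= 1, and the filter mask is set. */
static __m128i flat4_mask(__m128i flat_in, /* max(|p1-p0|, |q1-q0|) */
                          __m128i p3, __m128i p2, __m128i p0,
                          __m128i q0, __m128i q2, __m128i q3,
                          __m128i mask)
{
    __m128i one  = __lsx_vreplgr2vr_b(1);
    __m128i diff = __lsx_vmax_bu(__lsx_vabsd_bu(p2, p0),
                                 __lsx_vabsd_bu(q2, q0));
    __m128i flat = __lsx_vmax_bu(flat_in, diff);

    diff = __lsx_vmax_bu(__lsx_vabsd_bu(p3, p0),
                         __lsx_vabsd_bu(q3, q0));
    flat = __lsx_vmax_bu(flat, diff);

    flat = __lsx_vsle_bu(flat, one);    /* every tested difference <= 1 */
    return __lsx_vand_v(flat, mask);    /* only filterable columns count */
}

__lsx_vilvl_d(zero, flat) then clears the high 64 bits so that the __lsx_bz_v(flat) test considers only the 8 columns this variant processes, and __lsx_vbitsel_v(a, b, flat) merges the results: each output byte comes from the filter8 value b where flat is all-ones, and from the 4-tap value a elsewhere.
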
384 __m128i flat, mask, hev, tmp, thresh, b_limit, limit;
414 hev, mask, flat);
415 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
419 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
420 if (__lsx_bz_v(flat)) {
448 p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
449 p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
450 p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
451 q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
452 q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
453 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
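
The *_filt8_l values blended here are the 8-tap ("filter8") outputs, computed in widened 16-bit lanes; the _l suffix marks the low half of the 16 columns. For reference, a scalar model of VP9's filter8 taps (names hypothetical):

#include <stdint.h>

#define ROUND3(x) (((x) + 4) >> 3)

/* Scalar reference for the six filter8 outputs on one column.
 * p[0]=p0 .. p[3]=p3 and q[0]=q0 .. q[3]=q3; each output is a
 * round-to-nearest average over eight taps. */
static void filter8_px(const uint8_t p[4], const uint8_t q[4],
                       uint8_t out_p[3], uint8_t out_q[3])
{
    out_p[2] = ROUND3(3 * p[3] + 2 * p[2] + p[1] + p[0] + q[0]);
    out_p[1] = ROUND3(2 * p[3] + p[2] + 2 * p[1] + p[0] + q[0] + q[1]);
    out_p[0] = ROUND3(p[3] + p[2] + p[1] + 2 * p[0] + q[0] + q[1] + q[2]);
    out_q[0] = ROUND3(p[2] + p[1] + p[0] + 2 * q[0] + q[1] + q[2] + q[3]);
    out_q[1] = ROUND3(p[1] + p[0] + q[0] + 2 * q[1] + q[2] + 2 * q[3]);
    out_q[2] = ROUND3(p[0] + q[0] + q[1] + 2 * q[2] + 3 * q[3]);
}
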
475 __m128i flat, mask, hev, tmp, thresh, b_limit, limit;
502 hev, mask, flat);
503 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
507 flat = __lsx_vilvl_d(zero, flat);
509 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
510 if (__lsx_bz_v(flat)) {
531 p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
532 p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
533 p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
534 q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
535 q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
536 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
557 __m128i flat, mask, hev, tmp, thresh, b_limit, limit;
584 hev, mask, flat);
585 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
589 flat = __lsx_vilvh_d(flat, zero);
591 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
592 if (__lsx_bz_v(flat)) {
613 p2_out = __lsx_vbitsel_v(p2, p2_filt8_h, flat);
614 p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_h, flat);
615 p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_h, flat);
616 q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_h, flat);
617 q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_h, flat);
618 q2_out = __lsx_vbitsel_v(q2, q2_filt8_h, flat);
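
The two groups above are the mixed variants that run filter8 on only one half of the 16 columns. Note the operand order: interleaving with a zero vector keeps the surviving lanes in place while clearing the other half, so the same __lsx_bz_v early exit and __lsx_vbitsel_v blend work unchanged. A short sketch of the assumed semantics:

#include <lsxintrin.h>

/* Restrict a 16-lane byte mask to one 8-column half (assumed semantics
 * of the vilvl.d / vilvh.d interleaves with a zero vector). */
static __m128i keep_low_half(__m128i flat)
{
    __m128i zero = __lsx_vldi(0);
    return __lsx_vilvl_d(zero, flat);   /* d[0] = flat.d[0], d[1] = 0 */
}

static __m128i keep_high_half(__m128i flat)
{
    __m128i zero = __lsx_vldi(0);
    return __lsx_vilvh_d(flat, zero);   /* d[0] = 0, d[1] = flat.d[1] */
}
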
640 __m128i flat, mask, hev, thresh, b_limit, limit;
662 hev, mask, flat);
663 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
667 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
668 if (__lsx_bz_v(flat)) {
697 p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
698 p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
699 p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
700 q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
701 q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
702 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
710 __lsx_vst(flat, filter48, 96);
725 __m128i flat, flat2, filter8;
738 flat = __lsx_vld(filter48, 96);
753 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
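
The 16-tap paths are split into two stages that communicate through a scratch buffer: the t4/t8 stage stores its blended outputs and the flat mask (here at byte offset 96 of filter48), and the t16 stage reloads them and extends the flatness test to p7..p4 and q4..q7 with VP9_FLAT5, producing flat2. For reference, a scalar model of the flat2 condition; the function name is hypothetical:

#include <stdint.h>
#include <stdlib.h>

/* flat2 can only be set where flat already is, hence the final AND.
 * p[0]=p0 .. p[7]=p7 and likewise for q; flat is 0 or 0xff. */
static int flat2_mask_px(const uint8_t p[8], const uint8_t q[8], int flat)
{
    int ok = 1;
    for (int i = 4; i < 8; i++)         /* p4..p7 and q4..q7 vs p0/q0 */
        ok = ok && abs(p[i] - p[0]) <= 1 && abs(q[i] - q[0]) <= 1;
    return (ok ? 0xff : 0) & flat;
}
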
1107 __m128i flat2, mask, hev, flat, thresh, b_limit, limit;
1129 hev, mask, flat);
1130 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1134 flat = __lsx_vilvl_d(zero, flat);
1136 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
1137 if (__lsx_bz_v(flat)) {
1160 p2_out = __lsx_vbitsel_v(p2, p2_filter8, flat);
1161 p1_out = __lsx_vbitsel_v(p1_out, p1_filter8, flat);
1162 p0_out = __lsx_vbitsel_v(p0_out, p0_filter8, flat);
1163 q0_out = __lsx_vbitsel_v(q0_out, q0_filter8, flat);
1164 q1_out = __lsx_vbitsel_v(q1_out, q1_filter8, flat);
1165 q2_out = __lsx_vbitsel_v(q2, q2_filter8, flat);
1173 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
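
This function chains VP9_FLAT4 and VP9_FLAT5 directly, giving the usual three-level cascade of the VP9 loop filter. Conceptually, per column (a sketch with hypothetical names; the SIMD code applies each level with __lsx_vbitsel_v and skips a level early when __lsx_bz_v reports an all-zero mask):

#include <stdint.h>

static uint8_t select_output_px(uint8_t orig, uint8_t f4, uint8_t f8,
                                uint8_t f16,
                                int mask, int flat, int flat2)
{
    if (!mask)  return orig;    /* edge not filtered at all */
    if (!flat)  return f4;      /* 4-tap filter only */
    if (!flat2) return f8;      /* filter8 */
    return f16;                 /* full 16-tap smoothing */
}
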
1360 __m128i mask, hev, flat, limit, thresh, b_limit;
1378 hev, mask, flat);
1405 __m128i mask, hev, flat;
1445 hev, mask, flat);
1487 __m128i flat, mask, hev, thresh, b_limit, limit;
1512 hev, mask, flat);
1514 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1519 flat = __lsx_vilvl_d(zero, flat);
1521 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
1522 if (__lsx_bz_v(flat)) {
1554 p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
1555 p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
1556 p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
1557 q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
1558 q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
1559 q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
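
In this and the next few groups the blended results are written back into the sample registers p2..q2 themselves rather than into separate *_out variables; these appear to be the vertical-edge (transposed) variants, which need the filtered samples back in register form so the block can be re-transposed before it is stored.
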
1605 __m128i flat, mask, hev, thresh, b_limit, limit;
1651 hev, mask, flat);
1653 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1658 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
1659 if (__lsx_bz_v(flat)) {
1712 p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
1713 p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
1714 p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
1715 q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
1716 q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
1717 q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
1790 __m128i flat, mask, hev, thresh, b_limit, limit;
1833 hev, mask, flat);
1835 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1840 flat = __lsx_vilvl_d(zero, flat);
1842 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
1843 if (__lsx_bz_v(flat)) {
1887 p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
1888 p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
1889 p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
1890 q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
1891 q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
1892 q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
1965 __m128i flat, mask, hev, thresh, b_limit, limit;
2008 hev, mask, flat);
2010 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
2015 flat = __lsx_vilvh_d(flat, zero);
2017 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
2018 if (__lsx_bz_v(flat)) {
2063 p2 = __lsx_vbitsel_v(p2, p2_filt8_h, flat);
2064 p1 = __lsx_vbitsel_v(p1_out, p1_filt8_h, flat);
2065 p0 = __lsx_vbitsel_v(p0_out, p0_filt8_h, flat);
2066 q0 = __lsx_vbitsel_v(q0_out, q0_filt8_h, flat);
2067 q1 = __lsx_vbitsel_v(q1_out, q1_filt8_h, flat);
2068 q2 = __lsx_vbitsel_v(q2, q2_filt8_h, flat);
2270 __m128i flat, mask, hev, thresh, b_limit, limit;
2288 hev, mask, flat);
2290 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
2295 flat = __lsx_vilvl_d(zero, flat);
2297 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
2298 if (__lsx_bz_v(flat)) {
2337 p2_out = __lsx_vbitsel_v(p2, p2_l, flat);
2338 p1_out = __lsx_vbitsel_v(p1_out, p1_l, flat);
2339 p0_out = __lsx_vbitsel_v(p0_out, p0_l, flat);
2340 q0_out = __lsx_vbitsel_v(q0_out, q0_l, flat);
2341 q1_out = __lsx_vbitsel_v(q1_out, q1_l, flat);
2342 q2_out = __lsx_vbitsel_v(q2, q2_l, flat);
2350 __lsx_vst(flat, filter48, 96);
2361 __m128i filter8, flat, flat2;
2379 flat = __lsx_vld(filter48, 96);
2382 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
2648 __m128i flat, mask, hev, thresh, b_limit, limit;
2669 hev, mask, flat);
2671 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
2676 /* if flat is zero for all pixels, then there is no need to calculate the other filter */
2677 if (__lsx_bz_v(flat)) {
2730 p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
2731 p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
2732 p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
2733 q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
2734 q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
2735 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
2743 __lsx_vst(flat, filter48, 96);
2754 __m128i flat, flat2, filter8;
2768 flat = __lsx_vld(filter48, 96);
2777 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);