/third_party/ffmpeg/libavcodec/mips/
idctdsp_msa.c
    28  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in put_pixels_clamped_msa() local
    30  LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7);   in put_pixels_clamped_msa()
    31  CLIP_SH8_0_255(in0, in1, in2, in3, in4, in5, in6, in7);   in put_pixels_clamped_msa()
    33  PCKEV_B4_SH(in4, in4, in5, in5, in6, in6, in7, in7, in4, in5, in6, in7);   in put_pixels_clamped_msa()
    40  in5_d = __msa_copy_u_d((v2i64) in5, 0);   in put_pixels_clamped_msa()
    52  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in put_signed_pixels_clamped_msa() local
    54  LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7);   in put_signed_pixels_clamped_msa()
    61  in5 …   in put_signed_pixels_clamped_msa()
    86  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   add_pixels_clamped_msa() local
    [all...]
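The hits above trace a load / clamp / pack / store pipeline: LD_SH8 pulls eight rows of int16 coefficients, CLIP_SH8_0_255 saturates them to pixel range, PCKEV_B4_SH packs the even bytes, and __msa_copy_u_d extracts 64-bit lanes for the store. A minimal scalar sketch of the operation being vectorized, assuming FFmpeg's usual put_pixels_clamped contract (an 8x8 int16 block written to an 8-bit destination); the function name here is illustrative:

    #include <stdint.h>
    #include <stddef.h>

    static void put_pixels_clamped_scalar(const int16_t *block, uint8_t *pixels,
                                          ptrdiff_t line_size)
    {
        for (int i = 0; i < 8; i++) {
            for (int j = 0; j < 8; j++) {
                int v = block[i * 8 + j];
                /* CLIP_SH8_0_255 performs this saturation on whole vectors */
                pixels[j] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
            }
            pixels += line_size;
        }
    }
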
hevc_idct_msa.c
    94  #define HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, shift)  \
   104      ILVR_H4_SH(in4, in0, in6, in2, in5, in1, in3, in7,  \
   106      ILVL_H4_SH(in4, in0, in6, in2, in5, in1, in3, in7,  \
   185      PCKEV_H2_SH(sum1_l, sum1_r, sum2_l, sum2_r, in2, in5);  \
   334  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_idct_8x8_msa() local
   336  LD_SH8(coeffs, 8, in0, in1, in2, in3, in4, in5, in6, in7);   in hevc_idct_8x8_msa()
   337  HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, 7);   in hevc_idct_8x8_msa()
   338  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,   in hevc_idct_8x8_msa()
   339                     in0, in1, in2, in3, in4, in5, in6, in7);   in hevc_idct_8x8_msa()
   340  HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in…   in hevc_idct_8x8_msa()
   353  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_16x16_msa() local
   443  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_8x32_column_msa() local
   604  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_transpose_32x8_to_8x32() local
   617  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_transpose_8x32_to_32x8() local
   740  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_addblk_8x8_msa() local
   775  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_addblk_16x16_msa() local
   842  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_addblk_32x32_msa() local
   [all...]
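hevc_idct_8x8_msa shows the canonical two-pass layout: a 1-D column transform at shift 7, an in-register transpose, then the same transform again (the second call at line 340 is truncated; for 8-bit HEVC the second-pass shift would be 20 - bitdepth = 12). A scalar sketch of what TRANSPOSE8x8_SH_SH achieves with interleave instructions:

    #include <stdint.h>

    /* Swap m[i][j] with m[j][i]; the MSA macro gets the same result from
     * rounds of ILVR/ILVL interleaves instead of scalar swaps. */
    static void transpose_8x8_s16(int16_t m[8][8])
    {
        for (int i = 0; i < 8; i++) {
            for (int j = i + 1; j < 8; j++) {
                int16_t t = m[i][j];
                m[i][j] = m[j][i];
                m[j][i] = t;
            }
        }
    }
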
mpegvideoencdsp_msa.c
    27  v16u8 in0, in1, in2, in3, in4, in5, in6, in7;   in sum_u8src_16width_msa() local
    30  LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7);   in sum_u8src_16width_msa()
    35  HADD_UB4_UB(in4, in5, in6, in7, in4, in5, in6, in7);   in sum_u8src_16width_msa()
    44  sum += HADD_UH_U32(in5);   in sum_u8src_16width_msa()
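sum_u8src_16width_msa loads eight 16-byte rows, pairwise-adds bytes into halfwords (HADD_UB4_UB), then folds each vector into a scalar accumulator (HADD_UH_U32). The scalar equivalent is a plain block sum; a sketch assuming one 16-wide, 8-row tile per iteration (the batch size the LD_UB8 suggests):

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t sum_u8_16x8(const uint8_t *src, ptrdiff_t stride)
    {
        uint32_t sum = 0;
        for (int row = 0; row < 8; row++, src += stride)
            for (int col = 0; col < 16; col++)
                sum += src[col];   /* the HADD_* macros do this reduction in-register */
        return sum;
    }
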
simple_idct_msa.c
    28  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in simple_idct_msa() local
    41  LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7);   in simple_idct_msa()
    46  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,   in simple_idct_msa()
    47                     in0, in1, in2, in3, in4, in5, in6, in7);   in simple_idct_msa()
    48  select_vec = in1 | in2 | in3 | in4 | in5 | in6 | in7;   in simple_idct_msa()
    81  ILVRL_H2_SW(in5, in7, temp0_r, temp0_l);   in simple_idct_msa()
   118  in5 = (v8i16) __msa_bmnz_v((v16u8) a2_r, (v16u8) temp, (v16u8) select_vec);   in simple_idct_msa()
   121  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,   in simple_idct_msa()
   122                     in0, in1, in2, in3, in4, in5, in6, in7);   in simple_idct_msa()
   160  ILVRL_H2_SW(in5, in…   in simple_idct_msa()
   191  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   simple_idct_put_msa() local
   367  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   simple_idct_add_msa() local
   [all...]
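The interesting line is 48: select_vec ORs rows 1 through 7 per lane, so a lane that comes out zero carries only DC energy and the full 1-D transform can be bypassed there; line 118 then uses __msa_bmnz_v to pick, bit by bit, between the full result and the cheap shortcut. A scalar sketch of the selection idea (names illustrative, not the file's API):

    #include <stdint.h>

    static void select_dc_shortcut(const int16_t full[8], const int16_t cheap[8],
                                   const int16_t rows[8][8], int16_t out[8])
    {
        for (int lane = 0; lane < 8; lane++) {
            int16_t acc = 0;
            for (int r = 1; r < 8; r++)   /* mirrors in1 | in2 | ... | in7 */
                acc |= rows[r][lane];
            out[lane] = acc ? full[lane] : cheap[lane];
        }
    }
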
vc1dsp_msa.c
    30  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in ff_vc1_inv_trans_8x8_msa() local
    44  LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7);   in ff_vc1_inv_trans_8x8_msa()
    50  UNPCK_SH_SW(in5, in_r5, in_l5);   in ff_vc1_inv_trans_8x8_msa()
   136      in4, in5, in6, in7);   in ff_vc1_inv_trans_8x8_msa()
   137  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, block, 8);   in ff_vc1_inv_trans_8x8_msa()
   142  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in ff_vc1_inv_trans_4x8_msa() local
   159  LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7);   in ff_vc1_inv_trans_4x8_msa()
   165  UNPCK_R_SH_SW(in5, in_r5);   in ff_vc1_inv_trans_4x8_msa()
   233  v4i32 in0, in1, in2, in3, in4, in5, in6, in7;   in ff_vc1_inv_trans_8x4_msa() local
   250  UNPCK_SH_SW(t2, in1, in5);   in ff_vc1_inv_trans_8x4_msa()
   [all...]
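UNPCK_SH_SW widens a v8i16 into two sign-extended v4i32 halves so the VC-1 inverse transform can accumulate at 32-bit precision. A scalar sketch of that widening only (not the transform itself):

    #include <stdint.h>

    static void unpack_s16_to_s32(const int16_t in[8], int32_t lo[4], int32_t hi[4])
    {
        for (int i = 0; i < 4; i++) {
            lo[i] = in[i];       /* right (low) half, sign-extended */
            hi[i] = in[i + 4];   /* left (high) half */
        }
    }
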
compute_antialias_float.h
    68  float in1, in2, in3, in4, in5, in6, in7, in8;   in compute_antialias_mips_float() local
    92  "lwc1   %[in5], -2*4(%[ptr])        \t\n"   in compute_antialias_mips_float()
   100  "mul.s  %[out3], %[in5], %[in6]     \t\n"   in compute_antialias_mips_float()
   101  "mul.s  %[out4], %[in5], %[in7]     \t\n"   in compute_antialias_mips_float()
   113  "lwc1   %[in5], -4*4(%[ptr])        \t\n"   in compute_antialias_mips_float()
   119  "mul.s  %[out3], %[in5], %[in6]     \t\n"   in compute_antialias_mips_float()
   121  "mul.s  %[out4], %[in5], %[in7]     \t\n"   in compute_antialias_mips_float()
   134  "lwc1   %[in5], -6*4(%[ptr])        \t\n"   in compute_antialias_mips_float()
   140  "mul.s  %[out3], %[in5], %[in6]     \t\n"   in compute_antialias_mips_float()
   141  "mul.s  %[out4], %[in5], …   in compute_antialias_mips_float()
   [all...]
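The lwc1/mul.s sequences hand-schedule the MP3 antialias butterfly: sample pairs straddling a subband boundary are rotated by cs/ca coefficient pairs (the in5..in8 registers are just the loaded operands). A scalar sketch of one butterfly, assuming the usual antialias formula:

    static void antialias_butterfly(float *lo, float *hi, float cs, float ca)
    {
        float l = *lo, h = *hi;
        *lo = l * cs - h * ca;   /* the mul.s pairs feed adds/subs like these */
        *hi = h * cs + l * ca;
    }
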
mpegaudiodsp_mips_float.c
    75  float in1, in2, in3, in4, in5, in6, in7, in8;   in ff_mpadsp_apply_window_mips_float() local
    99  "lwc1    %[in5], 128*4(%[window])           \t\n"   in ff_mpadsp_apply_window_mips_float()
   106  "madd.s  %[sum], %[sum], %[in5], %[in6]     \t\n"   in ff_mpadsp_apply_window_mips_float()
   109  "lwc1    %[in5], 384*4(%[window])           \t\n"   in ff_mpadsp_apply_window_mips_float()
   120  "madd.s  %[sum], %[sum], %[in5], %[in6]     \t\n"   in ff_mpadsp_apply_window_mips_float()
   121  "lwc1    %[in5], 160*4(%[window])           \t\n"   in ff_mpadsp_apply_window_mips_float()
   132  "nmsub.s %[sum], %[sum], %[in5], %[in6]     \t\n"   in ff_mpadsp_apply_window_mips_float()
   133  "lwc1    %[in5], 416*4(%[window])           \t\n"   in ff_mpadsp_apply_window_mips_float()
   144  "nmsub.s %[sum], %[sum], %[in5], %[in6]     \t\n"   in ff_mpadsp_apply_window_mips_float()
   157  "lwc1    %[in5], 6…   in ff_mpadsp_apply_window_mips_float()
   799  float in1, in2, in3, in4, in5, in6;   imdct36_mips_float() local
   [all...]
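madd.s d,r,s,t computes d = r + s*t and nmsub.s computes d = r - s*t, so the window loop above is one long alternating-sign dot product over strided window/sample taps. A minimal sketch of that shape (tap layout deliberately simplified; the real code walks the window at the fixed byte offsets shown above):

    static float dot_alternating(const float *a, const float *b, int n)
    {
        float sum = 0.0f;
        for (int i = 0; i < n; i++)   /* even taps accumulate, odd taps subtract */
            sum = (i & 1) ? sum - a[i] * b[i] : sum + a[i] * b[i];
        return sum;
    }
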
vp9_idct_msa.c
    86  #define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7,  \
    92      DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5,  \
   116  #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,  \
   146      ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);  \
   150          cnst1_m, cnst2_m, cnst3_m, in5, in2,  \
   152      BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);  \
   172      ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);  \
   323  #define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in…
   620  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   vp9_idct8x8_12_colcol_addblk_msa() local
   684  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   vp9_idct8x8_colcol_addblk_msa() local
   711  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   vp9_iadst8x8_colcol_addblk_msa() local
   837  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   vp9_iadst_idct_8x8_add_msa() local
   864  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   vp9_idct_iadst_8x8_add_msa() local
   [all...]
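BUTTERFLY_4, used at line 152, is the generic MSA sum/difference macro: the outer and inner input pairs are added, then subtracted in mirrored order. A scalar sketch of that convention, assuming the usual definition (out0 = in0 + in3, out1 = in1 + in2, out2 = in1 - in2, out3 = in0 - in3):

    #include <stdint.h>

    static void butterfly_4(int16_t in0, int16_t in1, int16_t in2, int16_t in3,
                            int16_t out[4])
    {
        out[0] = in0 + in3;
        out[1] = in1 + in2;
        out[2] = in1 - in2;   /* differences mirror the sums */
        out[3] = in0 - in3;
    }
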
hevc_mc_bi_msa.c
   142  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_bi_copy_6w_msa() local
   154  LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in7);   in hevc_bi_copy_6w_msa()
   164  HEVC_BI_RND_CLIP4_MAX_SATU(in4, in5, in6, in7, dst4, dst5, dst6, dst7,   in hevc_bi_copy_6w_msa()
   193  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_bi_copy_8w_msa() local
   227  LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);   in hevc_bi_copy_8w_msa()
   232  HEVC_BI_RND_CLIP2_MAX_SATU(in4, in5, dst4, dst5, 7, dst4, dst5);   in hevc_bi_copy_8w_msa()
   252  LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, …   in hevc_bi_copy_8w_msa()
   259  HEVC_BI_RND_CLIP4_MAX_SATU(in4, in5, in6, in7, dst4, dst5, dst6, …   in hevc_bi_copy_8w_msa()
   281  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_bi_copy_12w_msa() local
   289  LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in…   in hevc_bi_copy_12w_msa()
   319  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_bi_copy_16w_msa() local
   358  v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11;   hevc_bi_copy_24w_msa() local
   405  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_bi_copy_32w_msa() local
   449  v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11;   hevc_bi_copy_48w_msa() local
   503  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_bi_copy_64w_msa() local
   544  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_bi_8t_4w_msa() local
   989  v8i16 in0, in1, in2, in3, in4, in5;   hevc_hz_bi_8t_48w_msa() local
  1187  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_bi_8t_4w_msa() local
  1350  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_bi_8t_12w_msa() local
  2283  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_bi_4t_4x8multiple_msa() local
  2470  v8i16 in0, in1, in2, in3, in4, in5;   hevc_hz_bi_4t_8x6_msa() local
  2615  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_bi_4t_12w_msa() local
  2750  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_bi_4t_24w_msa() local
  3012  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_bi_4t_4x8multiple_msa() local
  3248  v8i16 in0, in1, in2, in3, in4, in5;   hevc_vt_bi_4t_8x6_msa() local
  3393  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_bi_4t_12w_msa() local
  3562  v8i16 in0, in1, in2, in3, in4, in5;   hevc_vt_bi_4t_24w_msa() local
  3686  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_bi_4t_32w_msa() local
  4101  v8i16 in4 = { 0 }, in5 = { 0 };   hevc_hv_bi_4t_6w_msa() local
  4401  v8i16 in0, in1, in2, in3, in4, in5;   hevc_hv_bi_4t_8x6_msa() local
   [all...]
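Every hevc_bi_copy_* variant ends in the same round-and-clip: the two 16-bit prediction sources are added, rounded by the shift the macro receives (7, visible in the HEVC_BI_RND_CLIP2_MAX_SATU call at line 232), and clamped to pixel range. A per-sample sketch for 8-bit output, assuming SRARI-style rounding (add 1 << (shift - 1) before the shift):

    #include <stdint.h>

    static uint8_t bi_rnd_clip(int16_t src0, int16_t src1)
    {
        int v = (src0 + src1 + 64) >> 7;   /* 64 = 1 << (7 - 1) */
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }
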
hevc_mc_biw_msa.c
   243  v8i16 in0, in1, in2, in3, in4, in5;   in hevc_biwgt_copy_8w_msa() local
   278  LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);   in hevc_biwgt_copy_8w_msa()
   284  HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec,   in hevc_biwgt_copy_8w_msa()
   331  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_biwgt_copy_12w_msa() local
   347  LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7);   in hevc_biwgt_copy_12w_msa()
   350  ILVR_D2_SH(in5, in4, in7, in6, in4, in5);   in hevc_biwgt_copy_12w_msa()
   363  HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec,   in hevc_biwgt_copy_12w_msa()
   390  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_biwgt_copy_16w_msa() local
   406  LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in…   in hevc_biwgt_copy_16w_msa()
   445  v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11;   hevc_biwgt_copy_24w_msa() local
   509  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_biwgt_copy_32w_msa() local
   570  v8i16 dst0, dst1, dst2, dst3, dst4, dst5, in0, in1, in2, in3, in4, in5;   hevc_biwgt_copy_48w_msa() local
   622  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_biwgt_copy_64w_msa() local
  1419  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_biwgt_8t_4w_msa() local
  2774  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_biwgt_4t_4x8multiple_msa() local
  3004  v8i16 in0, in1, in2, in3, in4, in5;   hevc_hz_biwgt_4t_8x6_msa() local
  3181  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_biwgt_4t_12w_msa() local
  3266  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_biwgt_4t_16w_msa() local
  3360  v8i16 in0, in1, in2, in3, in4, in5;   hevc_hz_biwgt_4t_24w_msa() local
  3647  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_biwgt_4t_4x8multiple_msa() local
  3896  v8i16 in0, in1, in2, in3, in4, in5;   hevc_vt_biwgt_4t_8x6_msa() local
  4074  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_biwgt_4t_12w_msa() local
  4261  v8i16 in0, in1, in2, in3, in4, in5;   hevc_vt_biwgt_4t_24w_msa() local
  4398  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_biwgt_4t_32w_msa() local
  4877  v8i16 in4 = { 0 }, in5 = { 0 };   hevc_hv_biwgt_4t_6w_msa() local
  5289  v8i16 in0, in1, in2, in3, in4, in5;   hevc_hv_biwgt_4t_8x6_msa() local
   [all...]
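The biwgt variants fold explicit weights and offsets into that same step; weight_vec and rnd_vec in the calls above carry the pre-packed terms. A per-sample sketch following the HEVC weighted bi-prediction formula from the spec (the macros' exact operand packing is not reproduced here):

    #include <stdint.h>

    static uint8_t biwgt_sample(int16_t s0, int16_t s1, int w0, int w1,
                                int o0, int o1, int log2Wd)
    {
        int v = (s0 * w0 + s1 * w1 + ((o0 + o1 + 1) << log2Wd)) >> (log2Wd + 1);
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }
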
h263dsp_msa.c
    32  v16u8 in0, in1, in2, in3, in4, in5, in6, in7;   in h263_h_loop_filter_msa() local
    38  LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7);   in h263_h_loop_filter_msa()
    39  TRANSPOSE8x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,   in h263_h_loop_filter_msa()
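Here the filter runs across a vertical block edge, so TRANSPOSE8x4_UB_UB turns eight rows of the four pixels spanning the edge into four vectors, letting the same filter kernel serve both orientations. A scalar sketch of that gather (illustrative):

    #include <stdint.h>
    #include <stddef.h>

    static void gather_8x4(const uint8_t *src, ptrdiff_t stride, uint8_t out[4][8])
    {
        for (int row = 0; row < 8; row++)
            for (int col = 0; col < 4; col++)
                out[col][row] = src[row * stride + col];   /* a column becomes a vector */
    }
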
/third_party/ffmpeg/libavutil/mips/
generic_macros_msa.h
   379  #define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride)  \
   382      ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride);  \
   386  #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
   389      ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);  \
   520  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
   527  #define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
   540      out5_m = __msa_copy_u_d((v2i64) in5, 0);  \
   549      out13_m = __msa_copy_u_w((v4i32) in5, 2);  \
   597  #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
   601      AVER_UB2(RTYPE, in4, in5, in…
   [all...]
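ST_V6/ST_V8 show the header's composition style: every wide store is built from two narrower stores, with the destination advanced by 4 * stride for the second group. The same shape in plain C, with scalar stores standing in for the vector ones (a sketch of the pattern, not the header's types):

    #define ST1(p, v)              (*(p) = (v))
    #define ST2(p, s, v0, v1)      do { ST1((p), v0); ST1((p) + (s), v1); } while (0)
    #define ST4(p, s, v0, v1, v2, v3) \
        do { ST2((p), (s), v0, v1); ST2((p) + 2 * (s), (s), v2, v3); } while (0)
    #define ST8(p, s, v0, v1, v2, v3, v4, v5, v6, v7) \
        do { ST4((p), (s), v0, v1, v2, v3); \
             ST4((p) + 4 * (s), (s), v4, v5, v6, v7); } while (0)
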
/third_party/ffmpeg/libavcodec/loongarch/
hevc_idct_lsx.c
   101  #define HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, shift)  \
   111      DUP4_ARG2(__lsx_vilvl_h, in4, in0, in6, in2, in5, in1, in3, in7,  \
   113      DUP4_ARG2(__lsx_vilvh_h, in4, in0, in6, in2, in5, in1, in3, in7,  \
   192      in5 = __lsx_vssrarni_h_w(sum2_l, sum2_r, shift);  \
   363  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   in ff_hevc_idct_8x8_lsx() local
   368      coeffs, 112, in4, in5, in6, in7);   in ff_hevc_idct_8x8_lsx()
   369  HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, 7);   in ff_hevc_idct_8x8_lsx()
   370  LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7,   in ff_hevc_idct_8x8_lsx()
   371                     in0, in1, in2, in3, in4, in5, in6, in7);   in ff_hevc_idct_8x8_lsx()
   372  HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in…   in ff_hevc_idct_8x8_lsx()
   393  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   ff_hevc_idct_16x16_lsx() local
   544  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_8x32_column_lsx() local
   764  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_transpose_32x8_to_8x32() local
   789  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   hevc_idct_transpose_8x32_to_32x8() local
   [all...]
vp9_idct_lsx.c
   132  #define VP9_ILVLTRANS4x8_H(in0, in1, in2, in3, in4, in5, in6, in7,  \
   139      DUP4_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, in5, in4, in7, in6,  \
   194  #define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,  \
   207      VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5);  \
   208      DUP2_ARG2(__lsx_vsub_h, in1, in3, in7, in5, res0_m, res1_m);  \
   221      tp7_m = __lsx_vadd_h(in7, in5);  \
   253  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   in vp9_idct8x8_12_colcol_addblk_lsx() local
   262      in4, in5, in6, in7);   in vp9_idct8x8_12_colcol_addblk_lsx()
   271  DUP4_ARG2(__lsx_vilvl_d, in1, in0, in3, in2, in5, in…   in vp9_idct8x8_12_colcol_addblk_lsx()
   336  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   vp9_idct8x8_colcol_addblk_lsx() local
   [all...]
vc1dsp_lasx.c
   142  __m256i in0, in1, in2, in3, in4, in5, in6, in7;   in ff_vc1_inv_trans_8x8_dc_lasx() local
   153      0, dst + stride3, 0, in4, in5, in6, in7);   in ff_vc1_inv_trans_8x8_dc_lasx()
   155  DUP4_ARG2(__lasx_xvilvl_d, in1, in0, in3, in2, in5, in4, in7, in6,   in ff_vc1_inv_trans_8x8_dc_lasx()
   297  __m256i in0, in1, in2, in3, in4, in5, in6, in7;   in ff_vc1_inv_trans_4x8_dc_lasx() local
   307      0, dst + stride3, 0, in4, in5, in6, in7);   in ff_vc1_inv_trans_4x8_dc_lasx()
   309  DUP4_ARG2(__lasx_xvilvl_w, in1, in0, in3, in2, in5, in4, in7, in6,   in ff_vc1_inv_trans_4x8_dc_lasx()
   808  __m256i in0, in1, in2, in3, in4, in5, in6, in7,   in put_vc1_mspel_mc_h_lasx() local
   838  DUP2_ARG2(__lasx_xvldx, _src, stride, _src, stride2, in5, in6);   in put_vc1_mspel_mc_h_lasx()
   848  DUP4_ARG2(__lasx_xvilvl_b, in2, in0, in3, in1, in6, in4, in7, in5,   in put_vc1_mspel_mc_h_lasx()
   865  DUP4_ARG2(__lasx_xvilvh_b, in2, in0, in3, in1, in6, in4, in7, in5,   in put_vc1_mspel_mc_h_lasx()
   [all...]
hevc_mc_bi_lsx.c
   148  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_bi_copy_6w_lsx() local
   172      src2_stride_2x, in5, in6);   in hevc_bi_copy_6w_lsx()
   183  out2 = hevc_bi_rnd_clip(in4, dst4, in5, dst5);   in hevc_bi_copy_6w_lsx()
   244  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_bi_copy_8w_lsx() local
   274      src2_stride_2x, in5, in6);   in hevc_bi_copy_8w_lsx()
   279  out2 = hevc_bi_rnd_clip(in4, dst4, in5, dst5);   in hevc_bi_copy_8w_lsx()
   328  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   in hevc_bi_copy_12w_lsx() local
   344      in5, in6);   in hevc_bi_copy_12w_lsx()
   348  DUP2_ARG2(__lsx_vilvl_d, in5, in4, in7, in6, in4, in5);   in hevc_bi_copy_12w_lsx()
   386  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   hevc_bi_copy_16w_lsx() local
  1073  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   hevc_hz_4t_24w_lsx() local
  1221  __m128i in0, in1, in2, in3, in4, in5, in6, in7;   hevc_vt_4t_12w_lsx() local
  1374  __m128i in0, in1, in2, in3, in4, in5;   hevc_vt_4t_24w_lsx() local
  1877  __m128i in0, in1, in2, in3, in4, in5;   hevc_hv_4t_8x6_lsx() local
   [all...]
/third_party/skia/third_party/externals/libwebp/src/dsp/
common_sse41.h
    40      __m128i* const in3, __m128i* const in4, __m128i* const in5) {   in VP8PlanarTo24b_SSE41()
    77  WEBP_SSE41_SHUFF(B, in4, in5)   in VP8PlanarTo24b_SSE41()
    93  *in5 = _mm_or_si128(RG5, B5);   in VP8PlanarTo24b_SSE41()
    38  VP8PlanarTo24b_SSE41(__m128i* const in0, __m128i* const in1, __m128i* const in2, __m128i* const in3, __m128i* const in4, __m128i* const in5)   argument
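VP8PlanarTo24b_SSE41 interleaves separate planes into packed 3-byte pixels using shuffles over the six registers in0..in5, writing the result back through the same registers. The scalar equivalent of the data movement (a sketch of the concept only, not libwebp's register layout):

    #include <stdint.h>

    static void planar_to_24b(const uint8_t *r, const uint8_t *g, const uint8_t *b,
                              uint8_t *rgb, int n)
    {
        for (int i = 0; i < n; i++) {
            rgb[3 * i + 0] = r[i];
            rgb[3 * i + 1] = g[i];
            rgb[3 * i + 2] = b[i];
        }
    }
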
msa_macro.h
   310  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
   313      ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride);  \
   873  #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
   876      ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);  \
   899  #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
   902      ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3);  \
   923  #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
   926      ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3);  \
   984  #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
   987      PCKEV_B2(RTYPE, in4, in5, in…
   [all...]
/third_party/node/deps/openssl/openssl/crypto/aes/asm/
aesp8-ppc.pl
   678  my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7) = map("v$_", (0..3, 10..13));
   789  lvx_u           $in5,$x50,$inp
   797  le?vperm        $in5,$in5,$in5,$inpperm
   802  vxor            $out5,$in5,$rndkey0
   909  vxor            $in5,$in5,v31
   927  vncipherlast    $out6,$out6,$in5
   929  lvx_u           $in5,…
   [all...]
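The perl-asm keeps eight blocks ($in0..$in7, $out0..$out7) in flight because CBC decryption, unlike encryption, parallelizes: each block decrypts independently, and the chaining XOR uses the previous ciphertext, which is already known (vncipherlast $out6,$out6,$in5 fuses that XOR into the last round). A dataflow sketch in C, with a placeholder standing in for the AES block primitive:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Placeholder for the per-block AES decryption the vncipher chain performs. */
    static void decrypt_block(const uint8_t in[16], uint8_t out[16])
    {
        memcpy(out, in, 16);   /* stand-in only */
    }

    static void cbc_decrypt(const uint8_t *ct, uint8_t *pt, size_t nblocks,
                            const uint8_t iv[16])
    {
        uint8_t prev[16];
        memcpy(prev, iv, 16);
        for (size_t i = 0; i < nblocks; i++) {
            uint8_t tmp[16];
            decrypt_block(ct + 16 * i, tmp);          /* independent per block */
            for (int j = 0; j < 16; j++)
                pt[16 * i + j] = tmp[j] ^ prev[j];    /* chain with prior ciphertext */
            memcpy(prev, ct + 16 * i, 16);
        }
    }
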
/third_party/openssl/crypto/aes/asm/
aesp8-ppc.pl
   678  my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7) = map("v$_", (0..3, 10..13));
   789  lvx_u           $in5,$x50,$inp
   797  le?vperm        $in5,$in5,$in5,$inpperm
   802  vxor            $out5,$in5,$rndkey0
   909  vxor            $in5,$in5,v31
   927  vncipherlast    $out6,$out6,$in5
   929  lvx_u           $in5,…
   [all...]
/third_party/lame/mpglib/
layer3.c
  1499  in5 = in[5*3];  \   in dct12()
  1500  in5 += (in4 = in[4*3]);  \   in dct12()
  1506  in5 += in3; in3 += in1;  \   in dct12()
  1517  in1 += in5 * COS6_2;  \   in dct12()
  1519  in5 = (in1 + in3) * tfcos12[0];  \   in dct12()
  1522  in3 = in4 + in5;  \   in dct12()
  1523  in4 -= in5;  \   in dct12()
  1530  real in0,in1,in2,in3,in4,in5;   in dct12() local
  1540  real tmp2 = (in1 - in5) * tfcos12[1];   in dct12()
  1566  real in0,in1,in2,in3,in4,in5;   in dct12() local
  1600  real in0,in1,in2,in3,in4,in5;   dct12() local
   [all...]
/third_party/ffmpeg/libavcodec/aarch64/
hevcdsp_idct_neon.S
   280  .macro tr_8x4 shift, in0,in0t, in1,in1t, in2,in2t, in3,in3t, in4,in4t, in5,in5t, in6,in6t, in7,in7t, p1, p2
   290      sum_sub v30.4s, \in5\in5t, v0.h[7], +, \p2
   291      sum_sub v28.4s, \in5\in5t, v0.h[6], +, \p2
   292      sum_sub v29.4s, \in5\in5t, v0.h[4], -, \p2
   305      sum_sub v31.4s, \in5\in5t, v0.h[5], +, \p2
   307      fixsqrshrn \in5,\in5t, v26, \shift
   411  .macro butterfly16 in0, in1, in2, in3, in4, in5, in6, in7
   416      add \in3, \in4, \in5
   417      sub \in4, \in4, \in5
   418      add \in5, \in…
   [all...]
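butterfly16 forms in-place sum/difference pairs (add \in3, \in4, \in5 followed by sub \in4, \in4, \in5, and so on down the register list), the core step of the even/odd IDCT decomposition. One such pair in scalar form:

    #include <stdint.h>

    static inline void butterfly(int32_t *a, int32_t *b)
    {
        int32_t sum  = *a + *b;
        int32_t diff = *a - *b;
        *a = sum;
        *b = diff;
    }
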
/third_party/ffmpeg/libavcodec/
mpegaudiodec_template.c
   326  SUINTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;   in imdct12() local
   333  in5 = in[5*3] + in[4*3];   in imdct12()
   334  in5 += in3;   in imdct12()
   341  t2 = MULH3(in1 - in5, C4, 2);   in imdct12()
   350  in5 += 2*in1;   in imdct12()
   351  in1 = MULH3(in5 + in3, C5, 1);   in imdct12()
   358  in5 = MULH3(in5 - in3, C6, 2);   in imdct12()
   360  out[ 5] = in0 - in5;   in imdct12()
   362  out[11] = in0 + in5;   in imdct12()
   [all...]
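imdct12 reads every third sample (the three short blocks are interleaved) and first builds running sums so the later butterflies share work: each term becomes the sum of adjacent taps, then the odd terms absorb the odd term two positions below (in5 += in3; in3 += in1, visible at lines 333-334 and in layer3.c above). A sketch of that accumulation with an array in place of the in0..in5 locals (illustrative):

    static void imdct12_accumulate(const float *in, float s[6])
    {
        for (int i = 0; i < 6; i++)
            s[i] = in[i * 3];
        for (int i = 5; i >= 1; i--)   /* in5 = x5 + x4, in4 = x4 + x3, ... */
            s[i] += s[i - 1];
        s[5] += s[3];                  /* in5 += in3 */
        s[3] += s[1];                  /* in3 += in1 */
    }
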
/third_party/node/deps/openssl/openssl/crypto/bn/asm/
ia64-mont.pl
   144  zxt4    num=in5 };;
   433  cmp4.le p4,p5=3,in5 }
   436  cmp4.le p6,p7=4,in5 };;
   440  cmp4.le p8,p9=5,in5 }
   443  cmp4.le p10,p11=6,in5 }
   446  cmp4.le p12,p13=7,in5 }
   449  cmp4.le p14,p15=8,in5 }
   452  addp4   r28=-1,in5 }
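On IA-64, in5 is simply the register carrying the sixth incoming argument; for bn_mul_mont that is num, the modulus length in words (zxt4 zero-extends it, and the cmp4.le chain dispatches to an unrolled path for num = 3..8). What every path computes is word-serial Montgomery multiplication; below is a hedged C sketch of the textbook CIOS form, not OpenSSL's exact bn_mul_mont contract (assumes a compiler with unsigned __int128 and num <= 8):

    #include <stdint.h>
    #include <string.h>

    /* rp = ap * bp * R^-1 mod np, with R = 2^(64*num) and n0 = -np^-1 mod 2^64. */
    static void mont_mul(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                         const uint64_t *np, uint64_t n0, int num)
    {
        uint64_t t[10] = { 0 };                      /* num + 2 working words */
        for (int i = 0; i < num; i++) {
            unsigned __int128 c = 0;
            for (int j = 0; j < num; j++) {          /* t += ap * bp[i] */
                c += (unsigned __int128)ap[j] * bp[i] + t[j];
                t[j] = (uint64_t)c;
                c >>= 64;
            }
            c += t[num];
            t[num]     = (uint64_t)c;
            t[num + 1] = (uint64_t)(c >> 64);

            uint64_t m = t[0] * n0;                  /* makes the low word cancel */
            c = ((unsigned __int128)m * np[0] + t[0]) >> 64;
            for (int j = 1; j < num; j++) {          /* t = (t + m*np) / 2^64 */
                c += (unsigned __int128)m * np[j] + t[j];
                t[j - 1] = (uint64_t)c;
                c >>= 64;
            }
            c += t[num];
            t[num - 1] = (uint64_t)c;
            t[num]     = t[num + 1] + (uint64_t)(c >> 64);
        }
        uint64_t r[8], borrow = 0;                   /* conditional t - np */
        for (int j = 0; j < num; j++) {
            unsigned __int128 d = (unsigned __int128)t[j] - np[j] - borrow;
            r[j] = (uint64_t)d;
            borrow = (uint64_t)(d >> 64) & 1;
        }
        memcpy(rp, (t[num] == 0 && borrow) ? t : r, num * sizeof(uint64_t));
    }
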
/third_party/openssl/crypto/bn/asm/
ia64-mont.pl
   144  zxt4    num=in5 };;
   433  cmp4.le p4,p5=3,in5 }
   436  cmp4.le p6,p7=4,in5 };;
   440  cmp4.le p8,p9=5,in5 }
   443  cmp4.le p10,p11=6,in5 }
   446  cmp4.le p12,p13=7,in5 }
   449  cmp4.le p14,p15=8,in5 }
   452  addp4   r28=-1,in5 }