
Searched refs:in4 (Results 1 - 25 of 64) sorted by relevance


/third_party/ffmpeg/libavcodec/mips/
idctdsp_msa.c
28 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in put_pixels_clamped_msa() local
30 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in put_pixels_clamped_msa()
31 CLIP_SH8_0_255(in0, in1, in2, in3, in4, in5, in6, in7); in put_pixels_clamped_msa()
33 PCKEV_B4_SH(in4, in4, in5, in5, in6, in6, in7, in7, in4, in5, in6, in7); in put_pixels_clamped_msa()
39 in4_d = __msa_copy_u_d((v2i64) in4, 0); in put_pixels_clamped_msa()
52 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in put_signed_pixels_clamped_msa() local
54 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in put_signed_pixels_clamped_msa()
60 in4 in put_signed_pixels_clamped_msa()
86 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; add_pixels_clamped_msa() local
[all...]
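The idctdsp_msa.c hits above vectorize FFmpeg's pixel-clamping stores: eight rows of 16-bit IDCT output are loaded with LD_SH8, clamped to the pixel range, packed to bytes and written out. As a reference point, here is a minimal plain-C sketch of the same operation; the helper name and parameters follow the usual FFmpeg convention but are illustrative, not the file's exact code.

```c
#include <stddef.h>
#include <stdint.h>

/* Clamp an 8x8 block of int16_t IDCT output to 0..255 and store it as bytes.
 * The signed variant additionally biases the values by +128 before storing. */
static void put_pixels_clamped_ref(const int16_t *block, uint8_t *dest,
                                   ptrdiff_t line_size)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            dest[j] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;  /* CLIP_*_0_255 */
        }
        dest += line_size;
    }
}
```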
hevc_idct_msa.c
94 #define HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, shift) \
104 ILVR_H4_SH(in4, in0, in6, in2, in5, in1, in3, in7, \
106 ILVL_H4_SH(in4, in0, in6, in2, in5, in1, in3, in7, \
146 PCKEV_H2_SH(sum1_l, sum1_r, sum2_l, sum2_r, in3, in4); \
334 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_idct_8x8_msa() local
336 LD_SH8(coeffs, 8, in0, in1, in2, in3, in4, in5, in6, in7); in hevc_idct_8x8_msa()
337 HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, 7); in hevc_idct_8x8_msa()
338 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in hevc_idct_8x8_msa()
339 in0, in1, in2, in3, in4, in5, in6, in7); in hevc_idct_8x8_msa()
340 HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in in hevc_idct_8x8_msa()
353 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_16x16_msa() local
443 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_8x32_column_msa() local
604 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_transpose_32x8_to_8x32() local
617 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_transpose_8x32_to_32x8() local
740 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_addblk_8x8_msa() local
775 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_addblk_16x16_msa() local
842 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_addblk_32x32_msa() local
[all...]
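hevc_idct_8x8_msa above follows the usual two-pass structure: a 1-D column transform (HEVC_IDCT8x8_COL) with shift 7, an 8x8 transpose, and a second column pass whose shift depends on the bit depth. TRANSPOSE8x8_SH_SH is the glue between the passes; a scalar sketch of that step is shown below (the helper name is hypothetical).

```c
#include <stdint.h>
#include <string.h>

/* Transpose an 8x8 block of int16_t in place, so the second IDCT pass can
 * reuse the same column-transform code on what were originally rows. */
static void transpose_8x8_s16(int16_t blk[64])
{
    int16_t tmp[64];
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            tmp[j * 8 + i] = blk[i * 8 + j];
    memcpy(blk, tmp, sizeof(tmp));
}
```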
mpegvideoencdsp_msa.c
27 v16u8 in0, in1, in2, in3, in4, in5, in6, in7; in sum_u8src_16width_msa() local
30 LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7); in sum_u8src_16width_msa()
35 HADD_UB4_UB(in4, in5, in6, in7, in4, in5, in6, in7); in sum_u8src_16width_msa()
43 sum += HADD_UH_U32(in4); in sum_u8src_16width_msa()
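sum_u8src_16width_msa accumulates the pixel values of a 16-pixel-wide block using horizontal-add reductions (HADD_UB4_UB, HADD_UH_U32). A scalar sketch of the same reduction follows; the explicit height parameter is an assumption, since the MSA file handles it through its own loop structure.

```c
#include <stddef.h>
#include <stdint.h>

/* Sum all pixel values of a 16-wide block; the vector version does the same
 * with LD_UB8 loads followed by horizontal-add reductions. */
static int sum_u8src_16width_ref(const uint8_t *src, ptrdiff_t stride, int height)
{
    int sum = 0;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 16; x++)
            sum += src[x];
        src += stride;
    }
    return sum;
}
```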
simple_idct_msa.c
28 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in simple_idct_msa() local
41 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in simple_idct_msa()
46 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in simple_idct_msa()
47 in0, in1, in2, in3, in4, in5, in6, in7); in simple_idct_msa()
48 select_vec = in1 | in2 | in3 | in4 | in5 | in6 | in7; in simple_idct_msa()
66 UNPCK_SH_SW(in4, temp0_r, temp0_l); in simple_idct_msa()
117 in4 = (v8i16) __msa_bmnz_v((v16u8) a3_r, (v16u8) temp, (v16u8) select_vec); in simple_idct_msa()
121 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in simple_idct_msa()
122 in0, in1, in2, in3, in4, in5, in6, in7); in simple_idct_msa()
139 UNPCK_SH_SW(in4, temp0_ in simple_idct_msa()
191 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; simple_idct_put_msa() local
367 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; simple_idct_add_msa() local
[all...]
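The `select_vec = in1 | in2 | ... | in7` line and the later __msa_bmnz_v selects implement the classic simple_idct shortcut: a row whose AC coefficients are all zero transforms to a constant, so the full row pass can be skipped for it. A scalar sketch of the idea is below; the DC scaling shown is a placeholder, not the file's exact constant.

```c
#include <stdint.h>

/* If a row has no AC energy, its 1-D IDCT collapses to a constant derived
 * from the DC coefficient alone; otherwise the full transform is needed. */
static void idct_row_cond_dc_sketch(int16_t row[8])
{
    int ac = row[1] | row[2] | row[3] | row[4] | row[5] | row[6] | row[7];
    if (!ac) {
        int dc = row[0] * 8;              /* placeholder scaling */
        for (int i = 0; i < 8; i++)
            row[i] = (int16_t)dc;
        return;
    }
    /* ... full 1-D row transform would go here ... */
}
```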
vc1dsp_msa.c
30 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_8x8_msa() local
44 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in ff_vc1_inv_trans_8x8_msa()
49 UNPCK_SH_SW(in4, in_r4, in_l4); in ff_vc1_inv_trans_8x8_msa()
136 in4, in5, in6, in7); in ff_vc1_inv_trans_8x8_msa()
137 ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, block, 8); in ff_vc1_inv_trans_8x8_msa()
142 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_4x8_msa() local
159 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in ff_vc1_inv_trans_4x8_msa()
164 UNPCK_R_SH_SW(in4, in_r4); in ff_vc1_inv_trans_4x8_msa()
233 v4i32 in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_8x4_msa() local
249 UNPCK_SH_SW(t1, in0, in4); in ff_vc1_inv_trans_8x4_msa()
[all...]
compute_antialias_float.h
68 float in1, in2, in3, in4, in5, in6, in7, in8; in compute_antialias_mips_float() local
91 "lwc1 %[in4], 0(%[ptr]) \t\n" in compute_antialias_mips_float()
98 "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n" in compute_antialias_mips_float()
99 "madd.s %[out2], %[out2], %[in2], %[in4] \t\n" in compute_antialias_mips_float()
111 "lwc1 %[in4], 2*4(%[ptr]) \t\n" in compute_antialias_mips_float()
117 "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n" in compute_antialias_mips_float()
120 "madd.s %[out2], %[out2], %[in2], %[in4] \t\n" in compute_antialias_mips_float()
130 "lwc1 %[in4], 4*4(%[ptr]) \t\n" in compute_antialias_mips_float()
137 "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n" in compute_antialias_mips_float()
138 "madd.s %[out2], %[out2], %[in2], %[in4] \ in compute_antialias_mips_float()
[all...]
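The madd.s/nmsub.s pairs above carry the MP3 antialias butterflies: on pre-R6 MIPS FPUs, madd.s d, r, s, t accumulates r + s*t while nmsub.s yields r - s*t, so each pair computes one rotation-style butterfly across a subband boundary. A plain-C sketch of one butterfly follows; the cs/ca naming and the exact indexing are assumptions based on the usual mpegaudio layout, not read from this header.

```c
/* One antialias butterfly across a subband boundary (scalar form).
 * lo/hi are the samples on either side of the boundary, cs/ca the antialias
 * coefficients; all names here are illustrative. */
static inline void antialias_butterfly(float *lo, float *hi, float cs, float ca)
{
    float a = *lo, b = *hi;
    *lo = a * cs - b * ca;   /* maps onto the nmsub.s accumulation */
    *hi = b * cs + a * ca;   /* maps onto the madd.s accumulation */
}
```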
mpegaudiodsp_mips_float.c
75 float in1, in2, in3, in4, in5, in6, in7, in8; in ff_mpadsp_apply_window_mips_float() local
96 "lwc1 %[in4], 80*4(%[synth_buf]) \t\n" in ff_mpadsp_apply_window_mips_float()
102 "madd.s %[sum], %[sum], %[in3], %[in4] \t\n" in ff_mpadsp_apply_window_mips_float()
108 "lwc1 %[in4], 336*4(%[synth_buf]) \t\n" in ff_mpadsp_apply_window_mips_float()
117 "madd.s %[sum], %[sum], %[in3], %[in4] \t\n" in ff_mpadsp_apply_window_mips_float()
119 "lwc1 %[in4], 112*4(%[synth_buf]) \t\n" in ff_mpadsp_apply_window_mips_float()
129 "nmsub.s %[sum], %[sum], %[in3], %[in4] \t\n" in ff_mpadsp_apply_window_mips_float()
131 "lwc1 %[in4], 368*4(%[synth_buf]) \t\n" in ff_mpadsp_apply_window_mips_float()
140 "nmsub.s %[sum], %[sum], %[in3], %[in4] \t\n" in ff_mpadsp_apply_window_mips_float()
156 "lwc1 %[in4], 6 in ff_mpadsp_apply_window_mips_float()
799 float in1, in2, in3, in4, in5, in6; imdct36_mips_float() local
[all...]
vp9_idct_msa.c
86 #define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \
92 DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, \
116 #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \
134 ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
137 in4, in3); \
163 ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
323 #define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, \
330 ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, \
479 #define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in
620 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; vp9_idct8x8_12_colcol_addblk_msa() local
684 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; vp9_idct8x8_colcol_addblk_msa() local
711 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; vp9_iadst8x8_colcol_addblk_msa() local
837 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; vp9_iadst_idct_8x8_add_msa() local
864 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; vp9_idct_iadst_8x8_add_msa() local
[all...]
iirfilter_mips.c
94 float in1, in2, in3, in4; in iir_filter_flt_mips() local
113 "mul.s %[in4], %[src0_3], %[gain] \n\t" in iir_filter_flt_mips()
118 "madd.s %[in4], %[in4], $f0, $f7 \n\t" in iir_filter_flt_mips()
132 "madd.s %[in4], %[in4], $f1, %[in1] \n\t" in iir_filter_flt_mips()
137 "madd.s %[in4], %[in4], $f2, %[in2] \n\t" in iir_filter_flt_mips()
142 "madd.s %[in4], %[in4], in iir_filter_flt_mips()
[all...]
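The mul.s/madd.s chain above builds each of four output samples as gain * src plus a weighted sum of filter state held in FPU registers. A generic direct-form accumulation sketch is shown below; the tap count, coefficient layout and state handling are assumptions for illustration, not taken from iirfilter_mips.c.

```c
/* Accumulate one output sample of a 4-tap recursive section: start from
 * gain * input (mul.s), then chain fused multiply-adds (madd.s). */
static inline float iir_accumulate_sketch(float in, float gain,
                                          const float coef[4],
                                          const float state[4])
{
    float acc = in * gain;
    for (int k = 0; k < 4; k++)
        acc += coef[k] * state[k];
    return acc;
}
```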
hevc_mc_bi_msa.c
142 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_6w_msa() local
154 LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in7); in hevc_bi_copy_6w_msa()
164 HEVC_BI_RND_CLIP4_MAX_SATU(in4, in5, in6, in7, dst4, dst5, dst6, dst7, in hevc_bi_copy_6w_msa()
193 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_8w_msa() local
227 LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5); in hevc_bi_copy_8w_msa()
232 HEVC_BI_RND_CLIP2_MAX_SATU(in4, in5, dst4, dst5, 7, dst4, dst5); in hevc_bi_copy_8w_msa()
252 LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in hevc_bi_copy_8w_msa()
259 HEVC_BI_RND_CLIP4_MAX_SATU(in4, in5, in6, in7, dst4, dst5, dst6, in hevc_bi_copy_8w_msa()
281 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_12w_msa() local
289 LD_SH4(src1_ptr + 8, src2_stride, in4, in in hevc_bi_copy_12w_msa()
319 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_bi_copy_16w_msa() local
358 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11; hevc_bi_copy_24w_msa() local
405 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_bi_copy_32w_msa() local
449 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11; hevc_bi_copy_48w_msa() local
503 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_bi_copy_64w_msa() local
544 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_bi_8t_4w_msa() local
989 v8i16 in0, in1, in2, in3, in4, in5; hevc_hz_bi_8t_48w_msa() local
1187 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_bi_8t_4w_msa() local
1350 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_bi_8t_12w_msa() local
2283 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_bi_4t_4x8multiple_msa() local
2470 v8i16 in0, in1, in2, in3, in4, in5; hevc_hz_bi_4t_8x6_msa() local
2615 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_bi_4t_12w_msa() local
2750 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_bi_4t_24w_msa() local
3012 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_bi_4t_4x8multiple_msa() local
3248 v8i16 in0, in1, in2, in3, in4, in5; hevc_vt_bi_4t_8x6_msa() local
3393 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_bi_4t_12w_msa() local
3562 v8i16 in0, in1, in2, in3, in4, in5; hevc_vt_bi_4t_24w_msa() local
3686 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_bi_4t_32w_msa() local
4101 v8i16 in4 = { 0 }, in5 = { 0 }; hevc_hv_bi_4t_6w_msa() local
4401 v8i16 in0, in1, in2, in3, in4, in5; hevc_hv_bi_4t_8x6_msa() local
[all...]
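The HEVC_BI_RND_CLIP* macros combine the two prediction sources with rounding and clipping. For 8-bit content the bi-prediction "copy" path amounts to the formula sketched below, following the HEVC bi-prediction equations (6-bit promotion of the pel source, offset 64, shift 7); treat the helper as an illustration rather than the file's exact code.

```c
#include <stdint.h>

/* 8-bit HEVC bi-prediction copy path: the pel from list 0 is promoted to the
 * 14-bit intermediate domain (<< 6), added to the 14-bit prediction buffer
 * from list 1, rounded and clipped back to 8 bits. */
static inline uint8_t hevc_bi_rnd_clip_px(uint8_t src0_pel, int16_t src1_14bit)
{
    int v = ((int)src0_pel << 6) + src1_14bit + 64;  /* offset = 1 << (shift - 1) */
    v >>= 7;                                         /* shift = 15 - bitdepth   */
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}
```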
hevc_mc_biw_msa.c
243 v8i16 in0, in1, in2, in3, in4, in5; in hevc_biwgt_copy_8w_msa() local
278 LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5); in hevc_biwgt_copy_8w_msa()
284 HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec, in hevc_biwgt_copy_8w_msa()
331 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_biwgt_copy_12w_msa() local
347 LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7); in hevc_biwgt_copy_12w_msa()
350 ILVR_D2_SH(in5, in4, in7, in6, in4, in5); in hevc_biwgt_copy_12w_msa()
363 HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec, in hevc_biwgt_copy_12w_msa()
390 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_biwgt_copy_16w_msa() local
406 LD_SH4(src1_ptr + 8, src2_stride, in4, in in hevc_biwgt_copy_16w_msa()
445 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11; hevc_biwgt_copy_24w_msa() local
509 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_biwgt_copy_32w_msa() local
570 v8i16 dst0, dst1, dst2, dst3, dst4, dst5, in0, in1, in2, in3, in4, in5; hevc_biwgt_copy_48w_msa() local
622 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_biwgt_copy_64w_msa() local
1419 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_biwgt_8t_4w_msa() local
2774 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_biwgt_4t_4x8multiple_msa() local
3004 v8i16 in0, in1, in2, in3, in4, in5; hevc_hz_biwgt_4t_8x6_msa() local
3181 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_biwgt_4t_12w_msa() local
3266 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_biwgt_4t_16w_msa() local
3360 v8i16 in0, in1, in2, in3, in4, in5; hevc_hz_biwgt_4t_24w_msa() local
3647 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_biwgt_4t_4x8multiple_msa() local
3896 v8i16 in0, in1, in2, in3, in4, in5; hevc_vt_biwgt_4t_8x6_msa() local
4074 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_biwgt_4t_12w_msa() local
4261 v8i16 in0, in1, in2, in3, in4, in5; hevc_vt_biwgt_4t_24w_msa() local
4398 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_biwgt_4t_32w_msa() local
4877 v8i16 in4 = { 0 }, in5 = { 0 }; hevc_hv_biwgt_4t_6w_msa() local
5289 v8i16 in0, in1, in2, in3, in4, in5; hevc_hv_biwgt_4t_8x6_msa() local
[all...]
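The weighted variant (HEVC_BIW_RND_CLIP*) folds explicit per-list weights and offsets into the same combine step. A scalar sketch following the HEVC explicit weighted bi-prediction formula is given below; the parameter names are illustrative and log2Wd is assumed to already include the bit-depth shift.

```c
#include <stdint.h>

/* Explicit weighted bi-prediction, 8-bit: combine the two 14-bit prediction
 * samples with per-list weights w0/w1 and offsets o0/o1. */
static inline uint8_t hevc_biw_rnd_clip_px(int16_t s0, int16_t s1,
                                           int w0, int w1,
                                           int o0, int o1, int log2Wd)
{
    int v = (s0 * w0 + s1 * w1 + ((o0 + o1 + 1) << log2Wd)) >> (log2Wd + 1);
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}
```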
h263dsp_msa.c
32 v16u8 in0, in1, in2, in3, in4, in5, in6, in7; in h263_h_loop_filter_msa() local
38 LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7); in h263_h_loop_filter_msa()
39 TRANSPOSE8x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in h263_h_loop_filter_msa()
fft_mips.c
331 const FFTSample *in1, *in2, *in3, *in4; in ff_imdct_half_mips() local
349 in4 = input + n2 - 3; in ff_imdct_half_mips()
364 "lwc1 %[temp5], 0(%[in4]) \t\n" in ff_imdct_half_mips()
381 PTR_ADDIU " %[in4], %[in4], -16 \t\n" in ff_imdct_half_mips()
391 [in3]"+r"(in3), [in4]"+r"(in4) in ff_imdct_half_mips()
/third_party/ffmpeg/libavutil/mips/
generic_macros_msa.h
379 #define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride) \
382 ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride); \
386 #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
389 ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
520 Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
527 #define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
539 out4_m = __msa_copy_u_d((v2i64) in4, 0); \
548 out12_m = __msa_copy_u_w((v4i32) in4, 2); \
597 #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
601 AVER_UB2(RTYPE, in4, in
[all...]
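ST12x8_UB above shows how a 12-byte-wide store is synthesized from MSA registers: for each row, an 8-byte lane is extracted with __msa_copy_u_d and a 4-byte lane with __msa_copy_u_w, then the two pieces are written separately. The scalar equivalent is simply a split copy per row, sketched here with illustrative names.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Store a 12x8 block from a 16-byte-per-row source: 8 bytes plus 4 bytes per
 * row, mirroring the copy_u_d / copy_u_w split in ST12x8_UB. */
static void st12x8_ref(const uint8_t src[8][16], uint8_t *dst, ptrdiff_t stride)
{
    for (int i = 0; i < 8; i++) {
        memcpy(dst, src[i], 8);          /* 64-bit lane */
        memcpy(dst + 8, src[i] + 8, 4);  /* 32-bit lane */
        dst += stride;
    }
}
```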
/third_party/ffmpeg/libavcodec/loongarch/
hevc_idct_lsx.c
101 #define HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, shift) \
111 DUP4_ARG2(__lsx_vilvl_h, in4, in0, in6, in2, in5, in1, in3, in7, \
113 DUP4_ARG2(__lsx_vilvh_h, in4, in0, in6, in2, in5, in1, in3, in7, \
153 in4 = __lsx_vssrarni_h_w(sum2_l, sum2_r, shift); \
363 __m128i in0, in1, in2, in3, in4, in5, in6, in7; in ff_hevc_idct_8x8_lsx() local
368 coeffs, 112, in4, in5, in6, in7); in ff_hevc_idct_8x8_lsx()
369 HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, 7); in ff_hevc_idct_8x8_lsx()
370 LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7, in ff_hevc_idct_8x8_lsx()
371 in0, in1, in2, in3, in4, in5, in6, in7); in ff_hevc_idct_8x8_lsx()
372 HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in in ff_hevc_idct_8x8_lsx()
393 __m128i in0, in1, in2, in3, in4, in5, in6, in7; ff_hevc_idct_16x16_lsx() local
544 __m128i in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_8x32_column_lsx() local
764 __m128i in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_transpose_32x8_to_8x32() local
789 __m128i in0, in1, in2, in3, in4, in5, in6, in7; hevc_idct_transpose_8x32_to_32x8() local
[all...]
vp9_idct_lsx.c
132 #define VP9_ILVLTRANS4x8_H(in0, in1, in2, in3, in4, in5, in6, in7, \
139 DUP4_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, in5, in4, in7, in6, \
194 #define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \
224 VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, \
225 in0, in4, in2, in6); \
226 LSX_BUTTERFLY_4_H(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \
253 __m128i in0, in1, in2, in3, in4, in5, in6, in7; in vp9_idct8x8_12_colcol_addblk_lsx() local
262 in4, in5, in6, in7); in vp9_idct8x8_12_colcol_addblk_lsx()
271 DUP4_ARG2(__lsx_vilvl_d,in1, in0, in3, in2, in5, in4, in7, in vp9_idct8x8_12_colcol_addblk_lsx()
315 in0, in1, in2, in3, in4, in in vp9_idct8x8_12_colcol_addblk_lsx()
336 __m128i in0, in1, in2, in3, in4, in5, in6, in7; vp9_idct8x8_colcol_addblk_lsx() local
[all...]
vc1dsp_lasx.c
142 __m256i in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_8x8_dc_lasx() local
153 0, dst + stride3, 0, in4, in5, in6, in7); in ff_vc1_inv_trans_8x8_dc_lasx()
155 DUP4_ARG2(__lasx_xvilvl_d, in1, in0, in3, in2, in5, in4, in7, in6, in ff_vc1_inv_trans_8x8_dc_lasx()
297 __m256i in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_4x8_dc_lasx() local
307 0, dst + stride3, 0, in4, in5, in6, in7); in ff_vc1_inv_trans_4x8_dc_lasx()
309 DUP4_ARG2(__lasx_xvilvl_w, in1, in0, in3, in2, in5, in4, in7, in6, in ff_vc1_inv_trans_4x8_dc_lasx()
808 __m256i in0, in1, in2, in3, in4, in5, in6, in7, in put_vc1_mspel_mc_h_lasx() local
837 in4 = __lasx_xvld(_src, 0); in put_vc1_mspel_mc_h_lasx()
848 DUP4_ARG2(__lasx_xvilvl_b, in2, in0, in3, in1, in6, in4, in7, in5, in put_vc1_mspel_mc_h_lasx()
865 DUP4_ARG2(__lasx_xvilvh_b, in2, in0, in3, in1, in6, in4, in in put_vc1_mspel_mc_h_lasx()
[all...]
hevc_mc_bi_lsx.c
148 __m128i in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_6w_lsx() local
170 in4 = __lsx_vld(src1_ptr, 0); in hevc_bi_copy_6w_lsx()
183 out2 = hevc_bi_rnd_clip(in4, dst4, in5, dst5); in hevc_bi_copy_6w_lsx()
244 __m128i in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_8w_lsx() local
272 in4 = __lsx_vld(src1_ptr, 0); in hevc_bi_copy_8w_lsx()
279 out2 = hevc_bi_rnd_clip(in4, dst4, in5, dst5); in hevc_bi_copy_8w_lsx()
328 __m128i in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_12w_lsx() local
342 in4 = __lsx_vld(_src1, 0); in hevc_bi_copy_12w_lsx()
348 DUP2_ARG2(__lsx_vilvl_d, in5, in4, in7, in6, in4, in in hevc_bi_copy_12w_lsx()
386 __m128i in0, in1, in2, in3, in4, in5, in6, in7; hevc_bi_copy_16w_lsx() local
1073 __m128i in0, in1, in2, in3, in4, in5, in6, in7; hevc_hz_4t_24w_lsx() local
1221 __m128i in0, in1, in2, in3, in4, in5, in6, in7; hevc_vt_4t_12w_lsx() local
1374 __m128i in0, in1, in2, in3, in4, in5; hevc_vt_4t_24w_lsx() local
1877 __m128i in0, in1, in2, in3, in4, in5; hevc_hv_4t_8x6_lsx() local
[all...]
/third_party/skia/third_party/externals/libwebp/src/dsp/
common_sse41.h
40 __m128i* const in3, __m128i* const in4, __m128i* const in5) { in VP8PlanarTo24b_SSE41()
77 WEBP_SSE41_SHUFF(B, in4, in5) in VP8PlanarTo24b_SSE41()
92 *in4 = _mm_or_si128(RG4, B4); in VP8PlanarTo24b_SSE41()
38 VP8PlanarTo24b_SSE41( __m128i* const in0, __m128i* const in1, __m128i* const in2, __m128i* const in3, __m128i* const in4, __m128i* const in5) VP8PlanarTo24b_SSE41() argument
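VP8PlanarTo24b_SSE41 interleaves six 128-bit planar registers (two each of R, G and B) into packed 24-bit RGB using byte shuffles and ORs. Stripped of the SIMD, the data movement is plain planar-to-packed interleaving, sketched below with illustrative buffer names.

```c
#include <stdint.h>

/* Planar R/G/B to packed RGB24: out[3i] = R[i], out[3i+1] = G[i], out[3i+2] = B[i].
 * The SSE4.1 version performs the same movement 32 pixels at a time. */
static void planar_to_rgb24_ref(const uint8_t *r, const uint8_t *g,
                                const uint8_t *b, uint8_t *out, int n)
{
    for (int i = 0; i < n; i++) {
        out[3 * i + 0] = r[i];
        out[3 * i + 1] = g[i];
        out[3 * i + 2] = b[i];
    }
}
```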
msa_macro.h
310 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
313 ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
873 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
876 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
899 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
902 ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
923 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
926 ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
984 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
987 PCKEV_B2(RTYPE, in4, in
[all...]
/third_party/node/deps/openssl/openssl/crypto/aes/asm/
aesv8-armx.pl
433 my ($dat4,$in4,$tmp4);
435 ($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
709 vld1.8 {$in4},[$inp],#16
722 vorr $dat4,$in4,$in4
738 vorr $dat2,$in4,$in4
1029 vld1.8 {$in4},[$inp],#16
1042 vorr $dat4,$in4,$in4
[all...]
aesp8-ppc.pl
678 my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
682 my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
786 lvx_u $in4,$x40,$inp
793 le?vperm $in4,$in4,$in4,$inpperm
800 vxor $out4,$in4,$rndkey0
907 vxor $in4,$in4,v31
924 vncipherlast $out5,$out5,$in4
[all...]
/third_party/openssl/crypto/aes/asm/
aesv8-armx.pl
449 my ($dat4,$in4,$tmp4);
451 ($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
725 vld1.8 {$in4},[$inp],#16
738 vorr $dat4,$in4,$in4
754 vorr $dat2,$in4,$in4
1045 vld1.8 {$in4},[$inp],#16
1058 vorr $dat4,$in4,$in4
[all...]
aesp8-ppc.pl
678 my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
682 my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
786 lvx_u $in4,$x40,$inp
793 le?vperm $in4,$in4,$in4,$inpperm
800 vxor $out4,$in4,$rndkey0
907 vxor $in4,$in4,v31
924 vncipherlast $out5,$out5,$in4
[all...]
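In both OpenSSL copies above, aesv8-armx.pl and aesp8-ppc.pl load several input blocks up front ($in0..$in7, $dat4, etc.) and push them through the hardware AES round instructions side by side, so the latency of one round instruction is hidden behind work on the other blocks. A structural C sketch of that software pipelining follows; aes_round_block() is a hypothetical per-block round primitive supplied by the caller, not an OpenSSL API.

```c
/* Process NBLK independent 16-byte blocks in lock-step: apply each round to
 * every block before advancing to the next round, so per-block round
 * instructions from different blocks interleave in the pipeline. */
enum { NBLK = 8, BLKSZ = 16 };

static void aes_rounds_interleaved(unsigned char blk[NBLK][BLKSZ],
                                   const unsigned char *round_keys, int rounds,
                                   void (*aes_round_block)(unsigned char *block,
                                                           const unsigned char *rk))
{
    for (int r = 0; r < rounds; r++)
        for (int b = 0; b < NBLK; b++)
            aes_round_block(blk[b], round_keys + r * BLKSZ);
}
```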
/third_party/lame/mpglib/
layer3.c
1500 in5 += (in4 = in[4*3]); \ in dct12()
1501 in4 += (in3 = in[3*3]); \ in dct12()
1512 in0 += in4 * COS6_2; \ in dct12()
1514 in4 = in0 + in2; \ in dct12()
1522 in3 = in4 + in5; \ in dct12()
1523 in4 -= in5; \ in dct12()
1530 real in0,in1,in2,in3,in4,in5; in dct12() local
1538 real tmp0,tmp1 = (in0 - in4); in dct12()
1559 ts[(6+2)*SBLIMIT] = out1[6+2] + in4 * wi[2]; in dct12()
1560 ts[(11-2)*SBLIMIT] = out1[11-2] + in4 * w in dct12()
1566 real in0,in1,in2,in3,in4,in5; dct12() local
1600 real in0,in1,in2,in3,in4,in5; dct12() local
[all...]
