/third_party/ffmpeg/libavcodec/mips/
idctdsp_msa.c
    35  in0_d = __msa_copy_u_d((v2i64) in0, 0);    in put_pixels_clamped_msa()
    36  in1_d = __msa_copy_u_d((v2i64) in1, 0);    in put_pixels_clamped_msa()
    37  in2_d = __msa_copy_u_d((v2i64) in2, 0);    in put_pixels_clamped_msa()
    38  in3_d = __msa_copy_u_d((v2i64) in3, 0);    in put_pixels_clamped_msa()
    39  in4_d = __msa_copy_u_d((v2i64) in4, 0);    in put_pixels_clamped_msa()
    40  in5_d = __msa_copy_u_d((v2i64) in5, 0);    in put_pixels_clamped_msa()
    41  in6_d = __msa_copy_u_d((v2i64) in6, 0);    in put_pixels_clamped_msa()
    42  in7_d = __msa_copy_u_d((v2i64) in7, 0);    in put_pixels_clamped_msa()
    69  in0_d = __msa_copy_u_d((v2i64) in0, 0);    in put_signed_pixels_clamped_msa()
    70  in1_d = __msa_copy_u_d((v2i64) in1, 0);    in put_signed_pixels_clamped_msa()
    [all...]
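All of these matches follow the same store idiom: the IDCT output is clamped in vector registers, then each 64-bit lane is moved to a scalar register for an 8-byte store. A minimal sketch of that pattern for a MIPS MSA build; store_8px_rows, dst and stride are hypothetical names, not taken from the FFmpeg source:

#include <msa.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Store the two 64-bit lanes of one 16-byte vector as two 8-pixel rows. */
static void store_8px_rows(v16u8 pix, uint8_t *dst, ptrdiff_t stride)
{
    uint64_t row0 = __msa_copy_u_d((v2i64) pix, 0); /* lane 0: pixels 0..7  */
    uint64_t row1 = __msa_copy_u_d((v2i64) pix, 1); /* lane 1: pixels 8..15 */
    memcpy(dst,          &row0, 8); /* memcpy keeps the stores alignment-safe */
    memcpy(dst + stride, &row1, 8);
}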
qpeldsp_msa.c
   334  inp0 = (v16u8) __msa_insve_d((v2i64) inp0, 1, (v2i64) inp1);    in horiz_mc_qpel_aver_src0_8width_msa()
   335  inp2 = (v16u8) __msa_insve_d((v2i64) inp2, 1, (v2i64) inp3);    in horiz_mc_qpel_aver_src0_8width_msa()
   485  inp0 = (v16u8) __msa_insve_d((v2i64) inp0, 1, (v2i64) inp1);    in horiz_mc_qpel_aver_src1_8width_msa()
   486  inp2 = (v16u8) __msa_insve_d((v2i64) inp2, 1, (v2i64) inp3);    in horiz_mc_qpel_aver_src1_8width_msa()
   563  inp0 = (v16u8) __msa_insve_d((v2i64) inp0, 1, (v2i64) inp1);    in horiz_mc_qpel_no_rnd_aver_src0_8width_msa()
   [all...]
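The 8-width qpel helpers process two 8-pixel rows per 16-lane vector: insve.d splices lane 0 of the second row into lane 1 of the first, so one averaging instruction covers both rows. A sketch of just that packing step; pack_rows is a hypothetical name and the surrounding load/average code is omitted:

#include <msa.h>

/* Merge the low 8 bytes of inp1 into the high half of inp0. */
static v16u8 pack_rows(v16u8 inp0, v16u8 inp1)
{
    /* insve.d: copy doubleword 0 of inp1 into doubleword 1 of inp0 */
    return (v16u8) __msa_insve_d((v2i64) inp0, 1, (v2i64) inp1);
}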
vp9_lpf_msa.c
   217  p1_d = __msa_copy_u_d((v2i64) p1_out, 0);    in ff_loop_filter_v_4_8_msa()
   218  p0_d = __msa_copy_u_d((v2i64) p0_out, 0);    in ff_loop_filter_v_4_8_msa()
   219  q0_d = __msa_copy_u_d((v2i64) q0_out, 0);    in ff_loop_filter_v_4_8_msa()
   220  q1_d = __msa_copy_u_d((v2i64) q1_out, 0);    in ff_loop_filter_v_4_8_msa()
   238  thresh0 = (v16u8) __msa_ilvr_d((v2i64) thresh1, (v2i64) thresh0);    in ff_loop_filter_v_44_16_msa()
   242  b_limit0 = (v16u8) __msa_ilvr_d((v2i64) b_limit1, (v2i64) b_limit0);    in ff_loop_filter_v_44_16_msa()
   246  limit0 = (v16u8) __msa_ilvr_d((v2i64) limit1, (v2i64) limit0);    in ff_loop_filter_v_44_16_msa()
   [all...]
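The _44_16 variant filters two adjacent 8-pixel edges in one pass, so the per-edge thresh/b_limit/limit parameters are merged into a single vector: ilvr.d places the low doubleword of each operand side by side. A hedged sketch of that merge (merge_thresholds is a hypothetical name; the real code fills the vectors earlier):

#include <msa.h>

/* Lanes 0..7 get edge 0's threshold, lanes 8..15 get edge 1's. */
static v16u8 merge_thresholds(int t0, int t1)
{
    v16u8 thresh0 = (v16u8) __msa_fill_b(t0); /* broadcast each scalar */
    v16u8 thresh1 = (v16u8) __msa_fill_b(t1);
    /* ilvr.d: doubleword 0 from the 2nd operand, doubleword 1 from the 1st */
    return (v16u8) __msa_ilvr_d((v2i64) thresh1, (v2i64) thresh0);
}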
h264_deblock_msa.c
   147  tmp_2 = (v8i16)__msa_ilvl_d((v2i64)tmp_0, (v2i64)tmp_0);    in ff_h264_loop_filter_strength_msa()
   148  tmp_3 = (v8i16)__msa_ilvl_d((v2i64)tmp_1, (v2i64)tmp_1);    in ff_h264_loop_filter_strength_msa()
   150  tmp_0 = (v16i8)__msa_ilvr_d((v2i64)tmp_3, (v2i64)tmp_2);    in ff_h264_loop_filter_strength_msa()
   151  tmp_1 = (v16i8)__msa_ilvr_d((v2i64)tmp_5, (v2i64)tmp_4);    in ff_h264_loop_filter_strength_msa()
vp8_lpf_msa.c
   347  p2_d = __msa_copy_u_d((v2i64) p2, 0);    in ff_vp8_v_loop_filter8uv_msa()
   348  p1_d = __msa_copy_u_d((v2i64) p1, 0);    in ff_vp8_v_loop_filter8uv_msa()
   349  p0_d = __msa_copy_u_d((v2i64) p0, 0);    in ff_vp8_v_loop_filter8uv_msa()
   350  q0_d = __msa_copy_u_d((v2i64) q0, 0);    in ff_vp8_v_loop_filter8uv_msa()
   351  q1_d = __msa_copy_u_d((v2i64) q1, 0);    in ff_vp8_v_loop_filter8uv_msa()
   352  q2_d = __msa_copy_u_d((v2i64) q2, 0);    in ff_vp8_v_loop_filter8uv_msa()
   360  p2_d = __msa_copy_u_d((v2i64) p2, 1);    in ff_vp8_v_loop_filter8uv_msa()
   361  p1_d = __msa_copy_u_d((v2i64) p1, 1);    in ff_vp8_v_loop_filter8uv_msa()
   362  p0_d = __msa_copy_u_d((v2i64) p0, 1);    in ff_vp8_v_loop_filter8uv_msa()
   363  q0_d = __msa_copy_u_d((v2i64) q0, 1);    in ff_vp8_v_loop_filter8uv_msa()
   [all...]
hevc_lpf_sao_msa.c
    44  v2i64 cmp0, cmp1, cmp2, p_is_pcm_vec, q_is_pcm_vec;    in hevc_loopfilter_luma_hor_msa()
    45  v2i64 cmp3;    in hevc_loopfilter_luma_hor_msa()
    79  cmp0 = (v2i64) __msa_fill_w(d0030);    in hevc_loopfilter_luma_hor_msa()
    80  cmp1 = (v2i64) __msa_fill_w(d0434);    in hevc_loopfilter_luma_hor_msa()
    81  cmp3 = (v2i64) __msa_ilvev_w((v4i32) cmp1, (v4i32) cmp0);    in hevc_loopfilter_luma_hor_msa()
    82  cmp3 = (v2i64) __msa_ceqi_w((v4i32) cmp3, 0);    in hevc_loopfilter_luma_hor_msa()
   103  cmp0 = (v2i64) __msa_fill_h(tc0);    in hevc_loopfilter_luma_hor_msa()
   104  cmp1 = (v2i64) __msa_fill_h(tc4);    in hevc_loopfilter_luma_hor_msa()
   128  cmp0 = (v2i64) __msa_fill_w(flag0);    in hevc_loopfilter_luma_hor_msa()
   129  cmp1 = (v2i64) __msa_fill_w(flag1);    in hevc_loopfilter_luma_hor_msa()
   [all...]
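The cmp/ceqi sequence converts scalar decision variables into all-ones or all-zeros lane masks, which lets the luma filter select between filtered and unfiltered pixels without branching. A minimal sketch of the mask build (flags_to_mask is a hypothetical name):

#include <msa.h>

/* Word lanes holding a zero flag become all-ones, the rest become zero. */
static v2i64 flags_to_mask(int flag0, int flag1)
{
    v4i32 c0  = __msa_fill_w(flag0);     /* broadcast the scalars          */
    v4i32 c1  = __msa_fill_w(flag1);
    v4i32 mix = __msa_ilvev_w(c1, c0);   /* {flag0, flag1, flag0, flag1}   */
    return (v2i64) __msa_ceqi_w(mix, 0); /* compare-equal-to-immediate 0   */
}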
vp3dsp_idct_msa.c
   545  a0 = (v4i32) __msa_pckev_d((v2i64)c1, (v2i64)c0);    in ff_put_no_rnd_pixels_l2_msa()
   546  a2 = (v4i32) __msa_pckod_d((v2i64)c1, (v2i64)c0);    in ff_put_no_rnd_pixels_l2_msa()
   547  a1 = (v4i32) __msa_pckev_d((v2i64)c3, (v2i64)c2);    in ff_put_no_rnd_pixels_l2_msa()
   548  a3 = (v4i32) __msa_pckod_d((v2i64)c3, (v2i64)c2);    in ff_put_no_rnd_pixels_l2_msa()
   553  b0 = (v4i32) __msa_pckev_d((v2i64)c1, (v2i64)c0);    in ff_put_no_rnd_pixels_l2_msa()
   [all...]
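pckev.d/pckod.d regroup a vector pair by even and odd doublewords, which is how these matches separate interleaved row data. Sketch (split_even_odd is a hypothetical name):

#include <msa.h>

/* *even = {c0[0], c1[0]}, *odd = {c0[1], c1[1]} */
static void split_even_odd(v2i64 c0, v2i64 c1, v2i64 *even, v2i64 *odd)
{
    *even = __msa_pckev_d(c1, c0); /* pick doubleword 0 of each input */
    *odd  = __msa_pckod_d(c1, c0); /* pick doubleword 1 of each input */
}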
hpeldsp_msa.c
  1092  out0 = __msa_copy_u_d((v2i64) src0, 0);    in copy_width8_msa()
  1093  out1 = __msa_copy_u_d((v2i64) src1, 0);    in copy_width8_msa()
  1094  out2 = __msa_copy_u_d((v2i64) src2, 0);    in copy_width8_msa()
  1095  out3 = __msa_copy_u_d((v2i64) src3, 0);    in copy_width8_msa()
  1096  out4 = __msa_copy_u_d((v2i64) src4, 0);    in copy_width8_msa()
  1097  out5 = __msa_copy_u_d((v2i64) src5, 0);    in copy_width8_msa()
  1098  out6 = __msa_copy_u_d((v2i64) src6, 0);    in copy_width8_msa()
  1099  out7 = __msa_copy_u_d((v2i64) src7, 0);    in copy_width8_msa()
  1109  out0 = __msa_copy_u_d((v2i64) src0, 0);    in copy_width8_msa()
  1110  out1 = __msa_copy_u_d((v2i64) src1, 0);    in copy_width8_msa()
  [all...]
h263dsp_msa.c
   146  res0 = __msa_copy_u_d((v2i64) in0, 0);    in h263_v_loop_filter_msa()
   147  res1 = __msa_copy_u_d((v2i64) in3, 0);    in h263_v_loop_filter_msa()
   148  res2 = __msa_copy_u_d((v2i64) in2, 0);    in h263_v_loop_filter_msa()
   149  res3 = __msa_copy_u_d((v2i64) in1, 0);    in h263_v_loop_filter_msa()
simple_idct_msa.c
   346  tmp0 = __msa_copy_u_d((v2i64) in0, 1);    in simple_idct_put_msa()
   347  tmp1 = __msa_copy_u_d((v2i64) in1, 1);    in simple_idct_put_msa()
   348  tmp2 = __msa_copy_u_d((v2i64) in2, 1);    in simple_idct_put_msa()
   349  tmp3 = __msa_copy_u_d((v2i64) in3, 1);    in simple_idct_put_msa()
   354  tmp3 = __msa_copy_u_d((v2i64) in4, 1);    in simple_idct_put_msa()
   355  tmp2 = __msa_copy_u_d((v2i64) in5, 1);    in simple_idct_put_msa()
   356  tmp1 = __msa_copy_u_d((v2i64) in6, 1);    in simple_idct_put_msa()
   357  tmp0 = __msa_copy_u_d((v2i64) in7, 1);    in simple_idct_put_msa()
   517  tmp0 = __msa_copy_u_d((v2i64) in0, 1);    in simple_idct_add_msa()
   518  tmp1 = __msa_copy_u_d((v2i64) in1, 1);    in simple_idct_add_msa()
   [all...]
h264pred_msa.c
   113  uint64_t out = __msa_copy_u_d((v2i64) store, 0); \
   148  v2i64 sum;    in intra_predict_plane_8x8_msa()
   194  out0 = __msa_copy_s_d((v2i64) vec10, 0);    in intra_predict_plane_8x8_msa()
   195  out1 = __msa_copy_s_d((v2i64) vec11, 0);    in intra_predict_plane_8x8_msa()
   358  out0 = __msa_copy_u_d((v2i64) res0, 0);    in intra_predict_vert_dc_8x8_msa()
   640  sum = (v2u64) __msa_srari_d((v2i64) sum, 4);    in ff_h264_intra_pred_dc_top_16x16_msa()
   654  out = __msa_copy_u_d((v2i64) store, 0);    in ff_h264_intra_pred_dc_128_8x8_msa()
hevc_mc_uni_msa.c
  1095  out0 = __msa_copy_u_d((v2i64) tmp0, 0);    in common_vt_8t_12w_msa()
  1096  out1 = __msa_copy_u_d((v2i64) tmp1, 0);    in common_vt_8t_12w_msa()
  1105  out0 = __msa_copy_u_d((v2i64) tmp2, 0);    in common_vt_8t_12w_msa()
  1106  out1 = __msa_copy_u_d((v2i64) tmp3, 0);    in common_vt_8t_12w_msa()
  1396  dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);    in hevc_hv_uni_8t_4w_msa()
  1426  dst117 = (v8i16) __msa_splati_d((v2i64) dst117, 1);    in hevc_hv_uni_8t_4w_msa()
  1465  dst66 = (v8i16) __msa_splati_d((v2i64) dst1410, 1);    in hevc_hv_uni_8t_4w_msa()
  1781  dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);    in hevc_hv_uni_8t_12w_msa()
  1811  dst117 = (v8i16) __msa_splati_d((v2i64) dst117, 1);    in hevc_hv_uni_8t_12w_msa()
  1850  dst66 = (v8i16) __msa_splati_d((v2i64) dst1410, 1);    in hevc_hv_uni_8t_12w_msa()
  [all...]
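The recurring dst66 = __msa_splati_d(..., 1) lines carry the last filtered column of one loop iteration into the next: splati.d replicates the high doubleword across the whole register. Sketch of just that step (carry_high_lane is a hypothetical name):

#include <msa.h>

/* Both doublewords of the result hold doubleword 1 of the input. */
static v8i16 carry_high_lane(v8i16 dst63)
{
    return (v8i16) __msa_splati_d((v2i64) dst63, 1);
}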
/third_party/ffmpeg/libavutil/mips/
generic_macros_msa.h
   488  out0_m = __msa_copy_u_d((v2i64) in, idx);    \
   494  out0_m = __msa_copy_u_d((v2i64) in, idx0);   \
   495  out1_m = __msa_copy_u_d((v2i64) in, idx1);   \
   502  out0_m = __msa_copy_u_d((v2i64) in0, idx0);  \
   503  out1_m = __msa_copy_u_d((v2i64) in0, idx1);  \
   504  out2_m = __msa_copy_u_d((v2i64) in1, idx2);  \
   505  out3_m = __msa_copy_u_d((v2i64) in1, idx3);  \
   535  out0_m = __msa_copy_u_d((v2i64) in0, 0);     \
   536  out1_m = __msa_copy_u_d((v2i64) in1, 0);     \
   537  out2_m = __msa_copy_u_d((v2i64) in2, 0);     \
   [all...]
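These macros all hoist 64-bit lanes into scalars and store them 8 bytes at a time. A standalone analogue of that shape; ST_D2_SKETCH is a hypothetical name, not FFmpeg's macro, and the lane indices must be compile-time constants (as FFmpeg's call sites ensure) because __msa_copy_u_d requires an immediate:

#include <msa.h>
#include <stdint.h>
#include <string.h>

#define ST_D2_SKETCH(in, idx0, idx1, pdst, stride)               \
    do {                                                         \
        uint64_t out0_m = __msa_copy_u_d((v2i64) (in), (idx0));  \
        uint64_t out1_m = __msa_copy_u_d((v2i64) (in), (idx1));  \
        memcpy((pdst),            &out0_m, 8);                   \
        memcpy((pdst) + (stride), &out1_m, 8);                   \
    } while (0)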
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp
   210  // v2i64/v4i64 mul is custom lowered as a series of long:    in getArithmeticInstrCost()
   215  { ISD::MUL, MVT::v2i64, 17 },    in getArithmeticInstrCost()
   217  { ISD::ADD, MVT::v2i64, 4 },    in getArithmeticInstrCost()
   218  { ISD::SUB, MVT::v2i64, 4 },    in getArithmeticInstrCost()
   304  { ISD::SRA, MVT::v2i64, 1 },    in getArithmeticInstrCost()
   468  { ISD::SHL, MVT::v2i64, 1 }, // psllq.    in getArithmeticInstrCost()
   472  { ISD::SRL, MVT::v2i64, 1 }, // psrlq.    in getArithmeticInstrCost()
   487  { ISD::MUL, MVT::v2i64, 1 },    in getArithmeticInstrCost()
   532  { ISD::SRA, MVT::v2i64, 1 },    in getArithmeticInstrCost()
   565  { ISD::SHL, MVT::v2i64, ...    in getArithmeticInstrCost()
   [all...]
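Every MVT::v2i64 row here is an entry in a flat per-ISA cost table that getArithmeticInstrCost scans by (opcode, type). A self-contained re-creation of that lookup pattern: the enums are illustrative stand-ins for LLVM's ISD opcodes and MVTs, and only the 17/4/1 costs are taken from the lines above.

namespace sketch {

enum Opcode { MUL, ADD, SUB, SHL };
enum Type   { v2i64, v4i32 };

struct CostEntry { Opcode Op; Type Ty; int Cost; };

// One table per ISA level; the SSE2 numbers mirror the matches above.
const CostEntry SSE2Table[] = {
    { MUL, v2i64, 17 }, // custom lowered as a sequence of scalar ops
    { ADD, v2i64,  4 },
    { SUB, v2i64,  4 },
    { SHL, v2i64,  1 }, // single psllq
};

int lookupCost(Opcode Op, Type Ty, int Fallback) {
    for (const CostEntry &E : SSE2Table)
        if (E.Op == Op && E.Ty == Ty)
            return E.Cost;
    return Fallback; // fall through to the generic cost model
}

} // namespace sketch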
/third_party/skia/third_party/externals/libwebp/src/dsp/
msa_macro.h
   589  const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);    in func_hadd_sw_s32()
   590  const v2i64 res1_m = __msa_splati_d(res0_m, 1);    in func_hadd_sw_s32()
   591  const v2i64 out = res0_m + res1_m;    in func_hadd_sw_s32()
   606  const v2i64 res0 = __msa_hadd_s_d(res, res);    in func_hadd_sh_s32()
   607  const v2i64 res1 = __msa_splati_d(res0, 1);    in func_hadd_sh_s32()
   608  const v2i64 res2 = res0 + res1;    in func_hadd_sh_s32()
   625  v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);    in func_hadd_uh_u32()
   699  out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
   700  out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
   720  #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
   [all...]
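The func_hadd_* helpers reduce a vector to one scalar with a pairwise widening add followed by a lane fold, exactly as lines 589-591 show. Sketch of the 4 x i32 case (hadd_s32 is a hypothetical name):

#include <msa.h>
#include <stdint.h>

/* Sum four signed 32-bit lanes into one 64-bit scalar. */
static int64_t hadd_s32(v4i32 in)
{
    v2i64 pairs = __msa_hadd_s_d(in, in);   /* {in0+in1, in2+in3}, widened */
    v2i64 high  = __msa_splati_d(pairs, 1); /* replicate the high lane     */
    v2i64 total = pairs + high;             /* lane 0 now holds the sum    */
    return __msa_copy_s_d(total, 0);
}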
dec_msa.c
   499  p2_d = __msa_copy_s_d((v2i64)p2, 0);    in VFilter8()
   500  p1_d = __msa_copy_s_d((v2i64)p1, 0);    in VFilter8()
   501  p0_d = __msa_copy_s_d((v2i64)p0, 0);    in VFilter8()
   502  q0_d = __msa_copy_s_d((v2i64)q0, 0);    in VFilter8()
   503  q1_d = __msa_copy_s_d((v2i64)q1, 0);    in VFilter8()
   504  q2_d = __msa_copy_s_d((v2i64)q2, 0);    in VFilter8()
   511  p2_d = __msa_copy_s_d((v2i64)p2, 1);    in VFilter8()
   512  p1_d = __msa_copy_s_d((v2i64)p1, 1);    in VFilter8()
   513  p0_d = __msa_copy_s_d((v2i64)p0, 1);    in VFilter8()
   514  q0_d = __msa_copy_s_d((v2i64)q0, 1);    in VFilter8()
   [all...]
enc_msa.c
   127  out0 = __msa_copy_s_d((v2i64)t0, 0);    in FTransform_MSA()
   128  out1 = __msa_copy_s_d((v2i64)t0, 1);    in FTransform_MSA()
   129  out2 = __msa_copy_s_d((v2i64)t1, 0);    in FTransform_MSA()
   130  out3 = __msa_copy_s_d((v2i64)t1, 1);    in FTransform_MSA()
   265  const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m);    in VE4()
   299  const v16u8 A1 = (v16u8)__msa_insert_d((v2i64)A2, 0, val_m);    in RD4()
   319  const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m);    in LD4()
   564  const v2i64 temp2 = (v2i64)__msa_hadd_u_d(temp1, temp1); \
   565  const v2i64 temp...
   [all...]
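The VE4/RD4/LD4 predictors begin by pulling 8 context bytes into lane 0 of a vector with insert.d. A sketch assuming a 64-bit-capable MSA toolchain (load_8bytes_low is a hypothetical name):

#include <msa.h>
#include <stdint.h>
#include <string.h>

/* Place src[0..7] in the low doubleword of an otherwise-zero vector. */
static v16u8 load_8bytes_low(const uint8_t *src)
{
    uint64_t val_m;
    memcpy(&val_m, src, 8);  /* unaligned-safe scalar load */
    v2i64 zero = { 0, 0 };
    return (v16u8) __msa_insert_d(zero, 0, (int64_t) val_m);
}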
lossless_msa.c
    50  pix_d = __msa_copy_s_d((v2i64)dst1, 0); \
    57  uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0); \
   323  const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);    in TransformColorInverse_MSA()
   328  const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);    in TransformColorInverse_MSA()
lossless_enc_msa.c
    82  const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);    in TransformColor_MSA()
    87  const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);    in TransformColor_MSA()
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
AArch64ISelDAGToDAG.cpp
   645  case MVT::v2i64:    in tryMULLV64LaneV128()
   656  case MVT::v2i64:    in tryMULLV64LaneV128()
  3203  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3230  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3257  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3284  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3311  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3338  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3365  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  3392  } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {    in Select()
  [all...]
AArch64TargetTransformInfo.cpp
   328  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },    in getCastInstrCost()
   331  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },    in getCastInstrCost()
   336  { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },    in getCastInstrCost()
   339  { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },    in getCastInstrCost()
   369  { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },    in getCastInstrCost()
   372  { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },    in getCastInstrCost()
   374  // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).    in getCastInstrCost()
   375  { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },    in getCastInstrCost()
   378  { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },    in getCastInstrCost()
   968  { TTI::SK_Broadcast, MVT::v2i64, ...    in getShuffleCost()
   [all...]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/
ARMTargetTransformInfo.cpp
   221  { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },    in getCastInstrCost()
   222  { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },    in getCastInstrCost()
   369  { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },    in getCastInstrCost()
   370  { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },    in getCastInstrCost()
   373  { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },    in getCastInstrCost()
   374  { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },    in getCastInstrCost()
   375  { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },    in getCastInstrCost()
   376  { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },    in getCastInstrCost()
   583  {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},    in getShuffleCost()
   605  {ISD::VECTOR_SHUFFLE, MVT::v2i64, ...    in getShuffleCost()
   [all...]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/WebAssembly/
WebAssemblyISelLowering.cpp
    66  addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);    in WebAssemblyTargetLowering()
   122  setOperationAction(Op, MVT::v2i64, Expand);    in WebAssemblyTargetLowering()
   136  for (auto T : {MVT::v2i64, MVT::v2f64})    in WebAssemblyTargetLowering()
   143  for (auto T : {MVT::v2i64, MVT::v2f64})    in WebAssemblyTargetLowering()
   151  setOperationAction(Op, MVT::v2i64, Custom);    in WebAssemblyTargetLowering()
   159  for (auto T : {MVT::v2i64, MVT::v2f64})    in WebAssemblyTargetLowering()
   164  setOperationAction(ISD::MUL, MVT::v2i64, Expand);    in WebAssemblyTargetLowering()
   171  for (auto T : {MVT::v2i64, MVT::v2f64})    in WebAssemblyTargetLowering()
   181  setOperationAction(Op, MVT::v2i64, Expand);    in WebAssemblyTargetLowering()
   203  setCondCodeAction(static_cast<ISD::CondCode>(CC), MVT::v2i64, Custom);    in WebAssemblyTargetLowering()
   [all...]
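The constructor records, per (opcode, type) pair, how the legalizer must treat v2i64: Legal where wasm SIMD has a native i64x2 instruction, Expand or Custom where it does not (MUL is expanded at line 164 above). A standalone sketch of that bookkeeping with illustrative enums, not LLVM's real types:

#include <map>
#include <utility>

namespace sketch {

enum Action { Legal, Expand, Custom };
enum Op     { MUL, SHL, SETCC };
enum VT     { v2i64, v2f64 };

class OpActions {
    std::map<std::pair<Op, VT>, Action> Table;
public:
    void setOperationAction(Op O, VT T, Action A) { Table[{O, T}] = A; }
    Action getOperationAction(Op O, VT T) const {
        auto It = Table.find({O, T});
        return It == Table.end() ? Legal : It->second; // default: Legal
    }
};

// Mirrors the pattern of the matches above.
inline void configure(OpActions &TL) {
    TL.setOperationAction(MUL, v2i64, Expand); // no native i64x2 multiply here
    TL.setOperationAction(SHL, v2i64, Custom); // shifts get custom lowering
}

} // namespace sketch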
/third_party/skia/third_party/externals/libpng/mips/
filter_msa_intrinsics.c
   490  dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);    in png_read_filter_row_sub4_msa()
   532  out0 = __msa_copy_s_d((v2i64) dst0, 0);    in png_read_filter_row_sub3_msa()
   586  dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);    in png_read_filter_row_avg4_msa()
   644  out0 = __msa_copy_s_d((v2i64) dst0, 0);    in png_read_filter_row_avg3_msa()
   718  dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);    in png_read_filter_row_paeth4_msa()
   796  out0 = __msa_copy_s_d((v2i64) dst0, 0);    in png_read_filter_row_paeth3_msa()
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/
MachineValueType.h
   108  v2i64 = 58,   // 2 x i64
   352  SimpleTy == MVT::v2i64 || SimpleTy == MVT::v1i128 ||    in is128BitVector()
   496  case v2i64:    in getVectorElementType()
   640  case v2i64:    in getVectorNumElements()
   762  case v2i64:    in getSizeInBits()
   975  if (NumElements == 2) return MVT::v2i64;    in getVectorVT()
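These switches wire v2i64 into each MVT helper: element type i64, two elements, 128 bits, and getVectorVT(i64, 2) maps back to the same simple type. A tiny check program, assuming LLVM headers are on the include path (the cast keeps the size printf-friendly, since getSizeInBits may return a TypeSize object depending on LLVM version):

#include <llvm/Support/MachineValueType.h>
#include <cstdio>

int main()
{
    llvm::MVT VT = llvm::MVT::getVectorVT(llvm::MVT::i64, 2);
    std::printf("is_v2i64=%d elems=%u bits=%u elem_is_i64=%d\n",
                VT == llvm::MVT::v2i64,
                VT.getVectorNumElements(),
                (unsigned) VT.getSizeInBits(),
                VT.getVectorElementType() == llvm::MVT::i64);
    return 0;
}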