/third_party/vixl/examples/aarch64/
add4-double.cc
    43  __ Ucvtf(d2, x0);  in GenerateAdd4Double()
    44  __ Ucvtf(d3, x1);  in GenerateAdd4Double()
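The example above converts the unsigned 64-bit values in x0 and x1 into the double registers d2 and d3. A minimal sketch of the same pattern with the vixl MacroAssembler (the helper name and register choices are illustrative, not taken from the example file, and it assumes vixl's src/ directory is on the include path):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    // Illustrative helper: treat x0 as an unsigned 64-bit integer and
    // place its double-precision value in d0.
    void GenerateUnsignedToDouble(MacroAssembler* masm) {
      __ Ucvtf(d0, x0);  // d0 = (double)(uint64_t)x0
      __ Ret();
    }

    #undef __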

/third_party/vixl/test/aarch64/
test-disasm-neon-aarch64.cc
    3924  COMPARE_MACRO(Ucvtf(v5.V2S(), v3.V2S()),  in TEST()
    3927  COMPARE_MACRO(Ucvtf(v6.V4S(), v4.V4S()),  in TEST()
    3930  COMPARE_MACRO(Ucvtf(v7.V2D(), v5.V2D()),  in TEST()
    3933  COMPARE_MACRO(Ucvtf(s8, s6), "ucvtf s8, s6");  in TEST()
    3934  COMPARE_MACRO(Ucvtf(d8, d6), "ucvtf d8, d6");  in TEST()
    3992  COMPARE_2REGMISC_FP16(Ucvtf, "ucvtf");  in TEST()
    4428  COMPARE_MACRO(Ucvtf(v5.V4H(), v3.V4H(), 11), "ucvtf v5.4h, v3.4h, #11");  in TEST()
    4429  COMPARE_MACRO(Ucvtf(v6.V8H(), v4.V8H(), 12), "ucvtf v6.8h, v4.8h, #12");  in TEST()
    4430  COMPARE_MACRO(Ucvtf(v5.V2S(), v3.V2S(), 11), "ucvtf v5.2s, v3.2s, #11");  in TEST()
    4431  COMPARE_MACRO(Ucvtf(v  in TEST()
    [all...]
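These disassembly tests cover the NEON forms: with a VRegister source, each unsigned integer lane is converted in place, and a trailing immediate selects the fixed-point variant. A small sketch under those assumptions (register choices are arbitrary):

    // Lane-wise conversion: every unsigned 32-bit lane of v3 becomes a
    // single-precision float lane of v5.
    __ Ucvtf(v5.V4S(), v3.V4S());

    // Fixed-point variant: each converted lane is scaled by 2^-11,
    // i.e. lane = (float)src_lane / 2048.0f.
    __ Ucvtf(v5.V2S(), v3.V2S(), 11);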
test-assembler-fp-aarch64.cc
    4637  __ Ucvtf(d1, x10);
    4639  __ Ucvtf(d3, w11);
    4648  __ Ucvtf(d1, x10, fbits);
    4650  __ Ucvtf(d3, w11, fbits);
    4661  __ Ucvtf(d1, x10, fbits);
    4792  __ Ucvtf(s1, x10);
    4794  __ Ucvtf(s3, w11);
    4803  __ Ucvtf(s1, x10, fbits);
    4805  __ Ucvtf(s3, w11, fbits);
    4816  __ Ucvtf(s
    [all...]
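These assembler tests exercise the optional fbits argument: a non-zero value tells ucvtf to treat the source as an unsigned fixed-point number, so the converted result is divided by 2^fbits. A minimal sketch of that behaviour (values and registers chosen for illustration):

    __ Mov(x10, 3);          // raw integer bits
    __ Ucvtf(d1, x10);       // fbits == 0: d1 = 3.0
    __ Ucvtf(d2, x10, 1);    // fbits == 1: d2 = 3 / 2^1 = 1.5
    __ Ucvtf(d3, x10, 8);    // fbits == 8: d3 = 3 / 256 = 0.01171875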
test-assembler-sve-aarch64.cc
    15942  __ Ucvtf(zd_ucvtf_all_active.WithLaneSize(dst_type_size_in_bits),
    15972  __ Ucvtf(zd_ucvtf_merged.WithLaneSize(dst_type_size_in_bits),
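The SVE tests use the predicated overload, where a governing predicate selects which lanes are converted and the merging form leaves inactive destination lanes unchanged. A sketch of a direct call, assuming double-word lanes (register names are illustrative):

    // Convert the unsigned 64-bit lanes of z1 to double in z0, but only
    // where p0 is true; inactive lanes of z0 keep their previous value.
    __ Ucvtf(z0.VnD(), p0.Merging(), z1.VnD());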

/third_party/node/deps/v8/src/wasm/baseline/arm64/
liftoff-assembler-arm64.h
    1416  Ucvtf(fp_cmp, dst.gp().W()); // i32 -> f64.  in emit_type_conversion()
    1494  Ucvtf(dst.fp().S(), src.gp().W());  in emit_type_conversion()
    1500  Ucvtf(dst.fp().S(), src.gp().X());  in emit_type_conversion()
    1512  Ucvtf(dst.fp().D(), src.gp().W());  in emit_type_conversion()
    1518  Ucvtf(dst.fp().D(), src.gp().X());  in emit_type_conversion()
    1911  Ucvtf(dst.fp().V2D(), dst.fp().V2D());  in emit_f64x2_convert_low_i32x4_u()
    2931  Ucvtf(dst.fp().V4S(), src.fp().V4S());  in emit_i8x16_bitmask()
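In Liftoff, each of WebAssembly's unsigned integer-to-float conversions maps onto a single Ucvtf with the matching register widths. The value semantics the instruction has to produce are just a C-style cast from the unsigned source, as in this standalone sketch (function names here are illustrative, not V8 API):

    #include <cstdint>

    // f32.convert_i32_u: one Ucvtf(dst.fp().S(), src.gp().W()).
    float ConvertI32U(uint32_t x) { return static_cast<float>(x); }

    // f64.convert_i64_u: one Ucvtf(dst.fp().D(), src.gp().X()).
    double ConvertI64U(uint64_t x) { return static_cast<double>(x); }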

/third_party/node/deps/v8/src/compiler/backend/arm64/
code-generator-arm64.cc
    1807  __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));  in AssembleArchInstruction()
    1810  __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));  in AssembleArchInstruction()
    1813  __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));  in AssembleArchInstruction()
    1816  __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));  in AssembleArchInstruction()
    2216  __ Ucvtf(dst, dst);  in AssembleArchInstruction()
    2300  SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);  in AssembleArchInstruction()
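Lines 1807-1816 cover the four scalar source/destination width combinations the TurboFan backend can emit. Written out directly in macro-assembler form (a sketch; the register names are placeholders):

    __ Ucvtf(s0, w1);  // uint32 -> float32   (ucvtf s0, w1)
    __ Ucvtf(d0, w1);  // uint32 -> float64   (ucvtf d0, w1)
    __ Ucvtf(s0, x1);  // uint64 -> float32   (ucvtf s0, x1)
    __ Ucvtf(d0, x1);  // uint64 -> float64   (ucvtf d0, x1)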

/third_party/node/deps/v8/src/codegen/arm64/
macro-assembler-arm64.h
    1137  inline void Ucvtf(const VRegister& fd, const Register& rn,
    1139  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {  in Ucvtf() function in v8::internal::TurboAssembler
macro-assembler-arm64-inl.h
    991  void TurboAssembler::Ucvtf(const VRegister& fd, const Register& rn,  in Ucvtf() function in v8::internal::TurboAssembler

/third_party/vixl/src/aarch64/
macro-assembler-aarch64.h
    2723  void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {  in Ucvtf() function in vixl::aarch64::MacroAssembler
    3501  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {  in Ucvtf() function in vixl::aarch64::MacroAssembler
    6266  void Ucvtf(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) {  in Ucvtf() function in vixl::aarch64::MacroAssembler
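The vixl MacroAssembler exposes three overload families, one per match above: a general-purpose source register (scalar integer to floating point, with an optional fbits fixed-point scale), a vector source (lane-wise conversion, also with optional fbits), and a predicated SVE form. One call of each, as a sketch with arbitrary registers:

    __ Ucvtf(d0, x1, 4);                          // scalar GP source, fixed-point /2^4
    __ Ucvtf(v0.V4S(), v1.V4S());                 // NEON lane-wise conversion
    __ Ucvtf(z0.VnS(), p0.Merging(), z1.VnS());   // SVE, merging predication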