/third_party/vixl/test/aarch64/ |
H A D | test-trace-aarch64.cc |
  2420  __ ursra(d27, d16, 45);               in GenerateTestSequenceNEON()
  2421  __ ursra(v18.V16B(), v17.V16B(), 3);  in GenerateTestSequenceNEON()
  2422  __ ursra(v26.V2D(), v28.V2D(), 58);   in GenerateTestSequenceNEON()
  2423  __ ursra(v8.V2S(), v22.V2S(), 31);    in GenerateTestSequenceNEON()
  2424  __ ursra(v31.V4H(), v4.V4H(), 7);     in GenerateTestSequenceNEON()
  2425  __ ursra(v31.V4S(), v15.V4S(), 2);    in GenerateTestSequenceNEON()
  2426  __ ursra(v3.V8B(), v1.V8B(), 5);      in GenerateTestSequenceNEON()
  2427  __ ursra(v18.V8H(), v14.V8H(), 13);   in GenerateTestSequenceNEON()
|
H A D | test-cpu-features-aarch64.cc |
  2683  TEST_NEON(ursra_0, ursra(v0.V8B(), v1.V8B(), 3))
  2684  TEST_NEON(ursra_1, ursra(v0.V16B(), v1.V16B(), 6))
  2685  TEST_NEON(ursra_2, ursra(v0.V4H(), v1.V4H(), 12))
  2686  TEST_NEON(ursra_3, ursra(v0.V8H(), v1.V8H(), 7))
  2687  TEST_NEON(ursra_4, ursra(v0.V2S(), v1.V2S(), 6))
  2688  TEST_NEON(ursra_5, ursra(v0.V4S(), v1.V4S(), 6))
  2689  TEST_NEON(ursra_6, ursra(v0.V2D(), v1.V2D(), 26))
  2690  TEST_NEON(ursra_7, ursra(d0, d1, 20))
|
H A D | test-disasm-sve-aarch64.cc |
  6431  COMPARE(ursra(z0.VnB(), z8.VnB(), 1), "ursra z0.b, z8.b, #1");  in TEST()
  6432  COMPARE(ursra(z0.VnB(), z8.VnB(), 2), "ursra z0.b, z8.b, #2");  in TEST()
  6433  COMPARE(ursra(z0.VnB(), z8.VnB(), 5), "ursra z0.b, z8.b, #5");  in TEST()
  6434  COMPARE(ursra(z0.VnB(), z8.VnB(), 8), "ursra z0.b, z8.b, #8");  in TEST()
  6435  COMPARE(ursra(z0.VnH(), z8.VnH(), 1), "ursra z                  in TEST()
  [all...]
|
H A D | test-simulator-aarch64.cc |
  2470  // test for shift and accumulate instructions (srsra/ssra/usra/ursra).  in Test2OpImmNEON_Helper()
  4791  DEFINE_TEST_NEON_2OPIMM(ursra, Basic, TypeWidth)
  4823  DEFINE_TEST_NEON_2OPIMM_SCALAR_D(ursra, Basic, TypeWidth)
|
H A D | test-api-movprfx-aarch64.cc |
  2391  __ ursra(z0.VnB(), z8.VnB(), 1);  in TEST()
  3135  __ ursra(z0.VnB(), z8.VnB(), 1);  in TEST()
  3566  __ ursra(z0.VnB(), z0.VnB(), 1);  in TEST()
|
/third_party/node/deps/v8/src/codegen/arm64/ |
H A D | assembler-arm64.h | 1369 void ursra(const VRegister& vd, const VRegister& vn, int shift);
|
H A D | macro-assembler-arm64.h | 1246 V(ursra, Ursra) \
|
H A D | assembler-arm64.cc | 1734 void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) { in ursra() function in v8::internal::Assembler
|
/third_party/vixl/src/aarch64/ |
H A D | macro-assembler-sve-aarch64.cc | 2243 ShiftRightAccumulate(&Assembler::ursra, zd, za, zn, shift); in Ursra()
|
H A D | assembler-aarch64.h |
  3240  void ursra(const VRegister& vd, const VRegister& vn, int shift);
  6838  void ursra(const ZRegister& zda, const ZRegister& zn, int shift);
|
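For orientation, here is a minimal sketch (an illustration written for this page, not code from the tree) of how the two declarations above are called: the NEON form takes VRegisters with a lane arrangement, the SVE2 form takes ZRegisters with a lane size. The include path, the register constants (v0, v1, z0, z8) and the lane helpers (V4S(), VnB()) are assumed to be the usual VIXL ones, matching the test files listed earlier.

  // Sketch only: drive the NEON and SVE2 forms of ursra declared above.
  #include "aarch64/assembler-aarch64.h"

  using namespace vixl::aarch64;

  void EmitUrsra(Assembler* assm) {
    // NEON: each 32-bit lane of v0 += RoundingShiftRight(v1 lane, 3), unsigned.
    assm->ursra(v0.V4S(), v1.V4S(), 3);
    // SVE2: each byte lane of z0 += RoundingShiftRight(z8 lane, 1), unsigned.
    assm->ursra(z0.VnB(), z8.VnB(), 1);
  }

The immediate shift is constrained to 1 through the lane size in bits, which is why the SVE disassembly tests above stop at #8 for byte lanes.
|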
H A D | simulator-aarch64.cc |
  3235  ursra(vform, zd, zn, shift_dist);  in Simulator()
  9360  ursra(vf, rd, rn, right_shift);    in Simulator()
  9463  ursra(vf, rd, rn, right_shift);    in Simulator()
|
H A D | simulator-aarch64.h | 4208 LogicVRegister ursra(VectorFormat vform,
|
H A D | assembler-aarch64.cc | 5682 void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
|
H A D | assembler-sve-aarch64.cc | 9577 void Assembler::ursra(const ZRegister& zda, const ZRegister& zn, int shift) { in ursra() function in vixl::aarch64::Assembler
|
H A D | logic-aarch64.cc | 1793 LogicVRegister Simulator::ursra(VectorFormat vform,
|
H A D | macro-assembler-aarch64.h | 3187 V(ursra, Ursra) \
|
/third_party/node/deps/v8/src/execution/arm64/ |
H A D | simulator-arm64.h | 1923 LogicVRegister ursra(VectorFormat vform, LogicVRegister dst,
|
H A D | simulator-arm64.cc |
  5777  ursra(vf, rd, rn, right_shift);
  5876  ursra(vf, rd, rn, right_shift);
|
H A D | simulator-logic-arm64.cc | 1493 LogicVRegister Simulator::ursra(VectorFormat vform, LogicVRegister dst, in ursra() function in v8::internal::Simulator
|
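The Simulator::ursra helpers listed above model the instruction's per-lane behaviour. As a rough reference, a single unsigned lane can be described as below; this is an approximation written for this page (the function name UrsraLane is hypothetical), not the simulators' actual code.

  #include <cstdint>

  // Approximate per-lane model of URSRA: shift the unsigned source right by
  // 'shift' with rounding (ties round up), then accumulate into the
  // destination, wrapping at the lane width. 1 <= shift <= lane_bits <= 64.
  uint64_t UrsraLane(uint64_t dst, uint64_t src, int shift, int lane_bits) {
    // Rounding is (src + (1 << (shift - 1))) >> shift, written here in a form
    // that avoids overflowing a 64-bit intermediate.
    uint64_t rounded;
    if (shift >= 64) {
      rounded = (src >> 63) & 1;  // Only the rounding bit survives.
    } else {
      rounded = (src >> shift) + ((src >> (shift - 1)) & 1);
    }
    uint64_t mask =
        (lane_bits == 64) ? ~uint64_t{0} : ((uint64_t{1} << lane_bits) - 1);
    return (dst + rounded) & mask;
  }
|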