/third_party/vixl/benchmarks/aarch64/ |
bench-utils.cc | 228 __ Mvn(PickR(size), Operand(PickR(size))); in GenerateOperandSequence()
|
/third_party/vixl/test/aarch32/ |
test-simulator-cond-rd-operand-const-a32.cc | 120 M(Mvn) \
|
test-disasm-a32.cc |
  2350 TEST_SHIFT_T32(Mvn, "mvn", 0x0000000a) in TEST()
  2376 // For Mvn and Mvns, we don't allow PC as a destination. in TEST()
  2377 TEST_WIDE_IMMEDIATE(Mvn, "mvn", 0x0000000e); in TEST()
  2379 MUST_FAIL_TEST_BOTH(Mvn(pc, 0xbadbeef), "Ill-formed 'mvn' instruction.\n"); in TEST()
  2380 MUST_FAIL_TEST_BOTH(Mvn(eq, pc, 0xbadbeef), in TEST()
  3532 COMPARE_T32(Mvn(eq, r4, r6), in TEST()
  3536 COMPARE_T32(Mvn(eq, r8, r6), in TEST()
  4165 CHECK_T32_16(Mvn(DontCare, r6, r7), "mvns r6, r7\n"); in TEST()
  4167 CHECK_T32_16_IT_BLOCK(Mvn(DontCare, eq, r6, r7), in TEST()
|
test-simulator-cond-rd-operand-const-t32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-a32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-t32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-shift-amount-1to31-a32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-shift-amount-1to31-t32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-shift-amount-1to32-a32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-shift-amount-1to32-t32.cc | 120 M(Mvn) \
|
test-simulator-cond-rd-operand-rn-shift-rs-a32.cc | 120 M(Mvn) \
|
/third_party/node/deps/v8/src/codegen/arm64/ |
macro-assembler-arm64.h |
  292 V(mvn, Mvn) \
  658 inline void Mvn(const Register& rd, uint64_t imm);
  659 void Mvn(const Register& rd, const Operand& operand);
|
macro-assembler-arm64.cc |
  211 Mvn(rd, rn); in LogicalMacro()
  374 // could also be achieved using an orr instruction (like orn used by Mvn), in Mov()
  579 void TurboAssembler::Mvn(const Register& rd, const Operand& operand) { in Mvn() function in v8::internal::TurboAssembler
|
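The comment hit at line 374 above notes that the register form of Mvn is built on orn (OR-NOT against the zero register). A small stand-alone check of that identity in plain C++ (the orn helper below is only an illustration, not a V8 or VIXL API):

    #include <cassert>
    #include <cstdint>

    // orn rd, rn, rm computes rn | ~rm; with rn = xzr (always zero) the result
    // is ~rm, which is exactly what mvn rd, rm produces.
    static uint64_t orn(uint64_t rn, uint64_t rm) { return rn | ~rm; }

    int main() {
      const uint64_t xzr = 0;
      const uint64_t x9 = 0x0123456789abcdefULL;
      assert(orn(xzr, x9) == ~x9);  // mvn rd, x9  ==  orn rd, xzr, x9
      return 0;
    }
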
macro-assembler-arm64-inl.h | 233 void TurboAssembler::Mvn(const Register& rd, uint64_t imm) { in Mvn() function in v8::internal::TurboAssembler
|
/third_party/vixl/test/aarch64/ |
test-disasm-aarch64.cc |
  86 COMPARE_MACRO(Mvn(w0, Operand(0x101)), "mov w0, #0xfffffefe"); in TEST()
  87 COMPARE_MACRO(Mvn(x1, Operand(0xfff1)), "mov x1, #0xffffffffffff000e"); in TEST()
  88 COMPARE_MACRO(Mvn(w2, Operand(w3)), "mvn w2, w3"); in TEST()
  89 COMPARE_MACRO(Mvn(x4, Operand(x5)), "mvn x4, x5"); in TEST()
  90 COMPARE_MACRO(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12"); in TEST()
  91 COMPARE_MACRO(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63"); in TEST()
  99 // Mvn uses the destination register as a scratch if it can. This only occurs in TEST()
  101 COMPARE_MACRO(Mvn(x0, Operand(w1, SXTW)), in TEST()
  104 COMPARE_MACRO(Mvn(x0, Operand(x0, SXTW)), in TEST()
|
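The first two COMPARE_MACRO hits above show Mvn with an immediate disassembling as a mov of the bitwise-inverted value. A quick stand-alone check of that arithmetic (plain C++, independent of any assembler API):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Mvn(w0, 0x101) prints as "mov w0, #0xfffffefe": ~0x101 in 32 bits.
      uint32_t w = ~UINT32_C(0x101);
      // Mvn(x1, 0xfff1) prints as "mov x1, #0xffffffffffff000e": ~0xfff1 in 64 bits.
      uint64_t x = ~UINT64_C(0xfff1);
      std::printf("w = %#" PRIx32 "\n", w);  // 0xfffffefe
      std::printf("x = %#" PRIx64 "\n", x);  // 0xffffffffffff000e
      return 0;
    }
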
test-assembler-aarch64.cc |
  178 __ Mvn(w0, 0xfff); in TEST()
  179 __ Mvn(x1, 0xfff); in TEST()
  180 __ Mvn(w2, Operand(w0, LSL, 1)); in TEST()
  181 __ Mvn(x3, Operand(x1, LSL, 2)); in TEST()
  182 __ Mvn(w4, Operand(w0, LSR, 3)); in TEST()
  183 __ Mvn(x5, Operand(x1, LSR, 4)); in TEST()
  184 __ Mvn(w6, Operand(w0, ASR, 11)); in TEST()
  185 __ Mvn(x7, Operand(x1, ASR, 12)); in TEST()
  186 __ Mvn(w8, Operand(w0, ROR, 13)); in TEST()
  187 __ Mvn(x in TEST() [all...]
|
test-assembler-neon-aarch64.cc |
  6836 __ Mvn(v16.V16B(), v0.V16B()); in TEST()
  6837 __ Mvn(v17.V8H(), v0.V8H()); in TEST()
  6838 __ Mvn(v18.V4S(), v0.V4S()); in TEST()
  6839 __ Mvn(v19.V2D(), v0.V2D()); in TEST()
  6841 __ Mvn(v24.V8B(), v0.V8B()); in TEST()
  6842 __ Mvn(v25.V4H(), v0.V4H()); in TEST()
  6843 __ Mvn(v26.V2S(), v0.V2S()); in TEST()
|
test-disasm-neon-aarch64.cc |
  3519 COMPARE_MACRO(Mvn(v4.V8B(), v5.V8B()), in TEST()
  3522 COMPARE_MACRO(Mvn(v4.V16B(), v5.V16B()), in TEST()
|
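The NEON hits above use the vector form of Mvn, a plain bitwise NOT of the source register. A minimal sketch of emitting the two byte arrangements with VIXL's aarch64 MacroAssembler (the include path and the EmitVectorNot helper are assumptions; the Mvn calls themselves mirror the tests above):

    #include "aarch64/macro-assembler-aarch64.h"  // assumed VIXL include path

    // Emits the 128-bit and 64-bit vector NOT forms exercised by the tests above.
    void EmitVectorNot(vixl::aarch64::MacroAssembler* masm) {
      using namespace vixl::aarch64;
      masm->Mvn(v16.V16B(), v0.V16B());  // 128-bit: every byte of v16 = ~v0
      masm->Mvn(v24.V8B(), v0.V8B());    // 64-bit: low half only
    }
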
/third_party/vixl/src/aarch64/ |
macro-assembler-aarch64.cc |
  935 Mvn(rd, rn); in Emit()
  1005 // could also be achieved using an orr instruction (like orn used by Mvn), in Emit()
  1208 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { in Emit() function in vixl::aarch64::MacroAssembler
  1215 Mvn(rd, operand.GetImmediate()); in Emit()
|
macro-assembler-aarch64.h |
  849 void Mvn(const Register& rd, uint64_t imm) { in Mvn() function in vixl::aarch64::MacroAssembler
  852 void Mvn(const Register& rd, const Operand& operand);
  3042 V(mvn, Mvn) \
|
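macro-assembler-aarch64.h above declares two scalar overloads, one taking a raw uint64_t immediate and one taking a general Operand, and the hit at line 1215 shows the Operand form forwarding to the immediate form when the operand is an immediate. A minimal usage sketch (the include path and the EmitScalarNot helper are assumptions; the calls mirror the test-assembler hits earlier in the listing):

    #include "aarch64/macro-assembler-aarch64.h"  // assumed VIXL include path

    // Emits one example of each scalar Mvn overload declared above.
    void EmitScalarNot(vixl::aarch64::MacroAssembler* masm) {
      using namespace vixl::aarch64;
      masm->Mvn(w0, 0xfff);                // immediate form: w0 = ~0xfff = 0xfffff000
      masm->Mvn(x3, Operand(x1, LSL, 2));  // operand form: x3 = ~(x1 << 2)
    }
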
/third_party/node/deps/v8/src/compiler/backend/arm64/ |
code-generator-arm64.cc |
  1363 __ Mvn(i.OutputRegister(), i.InputOperand(0)); in AssembleArchInstruction()
  1366 __ Mvn(i.OutputRegister32(), i.InputOperand32(0)); in AssembleArchInstruction()
  2269 __ Mvn(dst, dst); in AssembleArchInstruction()
  2433 __ Mvn(dst, dst); in AssembleArchInstruction()
  2638 SIMD_UNOP_CASE(kArm64S128Not, Mvn, 16B); in AssembleArchInstruction()
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/ |
IceInstARM32.h |
  406 Mvn, enumerator
  1054 using InstARM32Mvn = InstARM32UnaryopGPR<InstARM32::Mvn, false>;
|
IceInstARM32.cpp | 3449 template class InstARM32UnaryopGPR<InstARM32::Mvn, false>;
|
/third_party/vixl/src/aarch32/ |
macro-assembler-aarch32.h |
  3077 void Mvn(Condition cond, Register rd, const Operand& operand) { in MacroAssembler() function in vixl::aarch32::MacroAssembler
  3094 void Mvn(Register rd, const Operand& operand) { Mvn(al, rd, operand); } in MacroAssembler() function in vixl::aarch32::MacroAssembler
  3095 void Mvn(FlagsUpdate flags, in MacroAssembler() function in vixl::aarch32::MacroAssembler
  3101 Mvn(cond, rd, operand); in MacroAssembler()
  3113 Mvn(cond, rd, operand); in MacroAssembler()
  3118 void Mvn(FlagsUpdate flags, Register rd, const Operand& operand) { in MacroAssembler() function in vixl::aarch32::MacroAssembler
  3119 Mvn(flags, al, rd, operand); in MacroAssembler()
|
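The aarch32 MacroAssembler above layers condition and flag handling on top of the same operation: Mvn(rd, operand) forwards to Mvn(al, rd, operand), and the FlagsUpdate overloads forward to the conditional form. A minimal usage sketch (the include path and the EmitNot helper are assumptions; registers, conditions, and FlagsUpdate values follow the aarch32 test hits earlier in the listing):

    #include "aarch32/macro-assembler-aarch32.h"  // assumed VIXL include path

    // Exercises the overloads declared above.
    void EmitNot(vixl::aarch32::MacroAssembler* masm) {
      using namespace vixl::aarch32;
      masm->Mvn(r0, Operand(r1));                    // unconditional, flags preserved
      masm->Mvn(eq, r4, r6);                         // conditional (an IT block is inserted for T32)
      masm->Mvn(DontCare, r6, r7);                   // DontCare may pick the narrow T32 "mvns" encoding
      masm->Mvn(SetFlags, r2, Operand(r3, LSL, 1));  // explicit flag update: "mvns r2, r3, lsl #1"
    }
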