/third_party/node/deps/v8/src/regexp/arm64/

regexp-macro-assembler-arm64.cc
  271   Operand(current_input_offset(), SXTW));  in CheckCharacters()
  354   Operand(capture_start_offset, SXTW));  in CheckNotBackReferenceIgnoreCase()
  357   Operand(capture_length, SXTW));  in CheckNotBackReferenceIgnoreCase()
  360   Operand(current_input_offset(), SXTW));  in CheckNotBackReferenceIgnoreCase()
  364   Operand(capture_length, SXTW));  in CheckNotBackReferenceIgnoreCase()
  401   Operand(capture_length, SXTW));  in CheckNotBackReferenceIgnoreCase()
  404   __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));  in CheckNotBackReferenceIgnoreCase()  local
  423   __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));  in CheckNotBackReferenceIgnoreCase()
  427   __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));  in CheckNotBackReferenceIgnoreCase()
  429   __ Sub(x1, x1, Operand(capture_length, SXTW));  in CheckNotBackReferenceIgnoreCase()
  533   __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));  in CheckNotBackReference()  local
  [all...]
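All of these hits share one pattern: the regexp engine keeps string positions as 32-bit offsets in W registers (typically negative, relative to the end of the input) and tags them SXTW so they are sign-extended before any 64-bit address arithmetic. A minimal sketch of the semantics in plain C++ (names are illustrative, not from the V8 sources):

    #include <cstdint>

    // What "__ Add(x0, input_end(), Operand(offset_w, SXTW))" computes:
    // the 32-bit offset is sign-extended to 64 bits before the add, so a
    // negative offset correctly steps backwards from the end of the input.
    uint64_t AddressForOffset(uint64_t input_end, int32_t offset_w) {
      return input_end + static_cast<int64_t>(offset_w);  // SXTW, then 64-bit add
    }
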
/third_party/ffmpeg/libswscale/aarch64/ |
yuv2rgb_neon.S
  105   add x6, x6, w16, SXTW   // srcC += incC
  116   add x6, x6, w16, SXTW   // srcU += incU
  117   add x13, x13, w17, SXTW // srcV += incV
  121   add x6, x6, w7, SXTW    // srcU += incU
  122   add x13, x13, w14, SXTW // srcV += incV
  192   add x2, x2, w3, SXTW    // dst += padding
  193   add x4, x4, w5, SXTW    // srcY += paddingY
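Each of these lines adds a signed 32-bit stride held in a W register to a 64-bit pointer in a single instruction; the SXTW extend folds the sign extension into the add. A hedged C++ illustration (GCC/Clang inline asm, AArch64 only; register choices are the compiler's, not ffmpeg's):

    #include <cstdint>

    // Equivalent of "add xD, xD, wS, sxtw": sign-extend the 32-bit stride,
    // then do a 64-bit add. The inline-asm path shows the single-instruction
    // form; the portable path shows the same arithmetic.
    static inline uint8_t* AdvanceRow(uint8_t* p, int32_t stride) {
    #if defined(__aarch64__)
      asm("add %0, %0, %w1, sxtw" : "+r"(p) : "r"(stride));
      return p;
    #else
      return p + stride;
    #endif
    }
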
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/MCTargetDesc/ |
AArch64AddressingModes.h
  47    SXTW,  enumerator
  66    case AArch64_AM::SXTW: return "sxtw";  in getShiftExtendName()
  133   case 6: return AArch64_AM::SXTW;  in getExtendType()
  160   case AArch64_AM::SXTW: return 6; break;  in getExtendEncoding()
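The helpers above round-trip between the enumerator and the 3-bit <option> field of AArch64 extended-register encodings, in which SXTW is 0b110 (6). A self-contained sketch of that mapping (the enumerator values mirror the hardware encoding, which is why the conversions in the hits are trivial):

    #include <cassert>

    enum ExtendType {
      UXTB = 0, UXTH = 1, UXTW = 2, UXTX = 3,
      SXTB = 4, SXTH = 5, SXTW = 6, SXTX = 7,
    };

    int main() {
      assert(static_cast<ExtendType>(6) == SXTW);  // getExtendType(6)
      assert(static_cast<unsigned>(SXTW) == 6u);   // getExtendEncoding(SXTW)
      return 0;
    }
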
/third_party/vixl/test/aarch64/ |
test-disasm-aarch64.cc
  84    COMPARE_MACRO(Mov(x16, Operand(x17, SXTW, 3)), "sbfiz x16, x17, #3, #32");  in TEST()
  101   COMPARE_MACRO(Mvn(x0, Operand(w1, SXTW)),  in TEST()
  104   COMPARE_MACRO(Mvn(x0, Operand(x0, SXTW)),  in TEST()
  340   COMPARE(add(x24, x25, Operand(x26, SXTW, 1)), "add x24, x25, w26, sxtw #1");  in TEST()
  366   COMPARE(sub(x24, x25, Operand(x26, SXTW, 1)), "sub x24, x25, w26, sxtw #1");  in TEST()
  1106  COMPARE(ldr(w12, MemOperand(x13, w14, SXTW)), "ldr w12, [x13, w14, sxtw]");  in TEST()
  1107  COMPARE(ldr(w15, MemOperand(x16, w17, SXTW, 2)),  in TEST()
  1116  COMPARE(ldr(x12, MemOperand(x13, w14, SXTW)), "ldr x12, [x13, w14, sxtw]");  in TEST()
  1117  COMPARE(ldr(x15, MemOperand(x16, w17, SXTW, 3)),  in TEST()
  1127  COMPARE(str(w12, MemOperand(x13, w14, SXTW)), "st  in TEST()
  [all...]
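The COMPARE lines double as a reference for how the VIXL operand API maps to syntax: Operand(reg, SXTW, shift) yields the extended-register form and MemOperand(base, offset, SXTW, shift) the register-offset addressing mode. A minimal sketch, assuming the VIXL headers and a constructed MacroAssembler (the buffer is only assembled, never executed):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitSxtwExamples(MacroAssembler* masm) {
      // add x24, x25, w26, sxtw #1 (extended-register data processing).
      masm->Add(x24, x25, Operand(x26, SXTW, 1));
      // ldr w12, [x13, w14, sxtw] (register-offset load, unscaled).
      masm->Ldr(w12, MemOperand(x13, w14, SXTW));
      // ldr x15, [x16, w17, sxtw #3] (register-offset load, scaled by 8).
      masm->Ldr(x15, MemOperand(x16, w17, SXTW, 3));
    }
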
test-disasm-neon-aarch64.cc
  207   COMPARE(ldr(b2, MemOperand(x3, w4, SXTW)), "ldr b2, [x3, w4, sxtw]");  in TEST()
  215   COMPARE(ldr(h2, MemOperand(x3, w4, SXTW)), "ldr h2, [x3, w4, sxtw]");  in TEST()
  217   COMPARE(ldr(h4, MemOperand(x5, w5, SXTW, 1)), "ldr h4, [x5, w5, sxtw #1]");  in TEST()
  219   COMPARE(ldr(h31, MemOperand(sp, wzr, SXTW, 1)),  in TEST()
  224   COMPARE(ldr(s2, MemOperand(x3, w4, SXTW)), "ldr s2, [x3, w4, sxtw]");  in TEST()
  226   COMPARE(ldr(s4, MemOperand(x5, w5, SXTW, 2)), "ldr s4, [x5, w5, sxtw #2]");  in TEST()
  228   COMPARE(ldr(s31, MemOperand(sp, wzr, SXTW, 2)),  in TEST()
  233   COMPARE(ldr(d2, MemOperand(x3, w4, SXTW)), "ldr d2, [x3, w4, sxtw]");  in TEST()
  235   COMPARE(ldr(d4, MemOperand(x5, w5, SXTW, 3)), "ldr d4, [x5, w5, sxtw #3]");  in TEST()
  237   COMPARE(ldr(d31, MemOperand(sp, wzr, SXTW,  in TEST()
  [all...]
test-assembler-sve-aarch64.cc
  8876  VIXL_ASSERT((static_cast<int>(mod) == SXTW) ||
  9086  ldff1_32_scaled_offset_helper(kHRegSize, ldff1h, ld1h, SXTW);
  9091  ldff1_32_scaled_offset_helper(kSRegSize, ldff1w, ld1w, SXTW);
  9096  ldff1_32_scaled_offset_helper(kHRegSize, ldff1sh, ld1sh, SXTW);
  9115  ldff1_32_unscaled_offset_helper(kBRegSize, ldff1b, ld1b, SXTW);
  9120  ldff1_32_unscaled_offset_helper(kHRegSize, ldff1h, ld1h, SXTW);
  9125  ldff1_32_unscaled_offset_helper(kSRegSize, ldff1w, ld1w, SXTW);
  9130  ldff1_32_unscaled_offset_helper(kBRegSize, ldff1sb, ld1sb, SXTW);
  9135  ldff1_32_unscaled_offset_helper(kHRegSize, ldff1sh, ld1sh, SXTW);
  9155  ldff1_32_unpacked_scaled_offset_helper(kHRegSize, ldff1h, ld1h, SXTW);
  [all...]
test-disasm-sve-aarch64.cc
  129   COMPARE(adr(z19.VnD(), SVEMemOperand(z22.VnD(), z11.VnD(), SXTW)),  in TEST()
  131   COMPARE(adr(z19.VnD(), SVEMemOperand(z22.VnD(), z11.VnD(), SXTW, 1)),  in TEST()
  133   COMPARE(adr(z19.VnD(), SVEMemOperand(z22.VnD(), z11.VnD(), SXTW, 2)),  in TEST()
  135   COMPARE(adr(z19.VnD(), SVEMemOperand(z22.VnD(), z11.VnD(), SXTW, 3)),  in TEST()
  3409  COMPARE(ld1b(z9.VnS(), p5.Zeroing(), SVEMemOperand(x2, z1.VnS(), SXTW)),  in TEST()
  3413  COMPARE(ld1h(z17.VnS(), p2.Zeroing(), SVEMemOperand(x11, z24.VnS(), SXTW)),  in TEST()
  3419  COMPARE(ld1sb(z22.VnS(), p3.Zeroing(), SVEMemOperand(x23, z23.VnS(), SXTW)),  in TEST()
  3427  COMPARE(ld1w(z0.VnS(), p6.Zeroing(), SVEMemOperand(x28, z21.VnS(), SXTW, 2)),  in TEST()
  3429  COMPARE(ld1sh(z11.VnS(), p4.Zeroing(), SVEMemOperand(sp, z0.VnS(), SXTW, 1)),  in TEST()
  3438  COMPARE(ldff1w(z12.VnS(), p3.Zeroing(), SVEMemOperand(x25, z27.VnS(), SXTW)),  in TEST()
  [all...]
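In the SVE scalar-plus-vector forms, SXTW applies per element: each active 32-bit element of the offset vector is sign-extended (and optionally scaled) before being added to the scalar base. A hedged VIXL sketch, assuming an assembler configured with SVE support:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitSveGather(MacroAssembler* masm) {
      // ld1w {z0.s}, p6/z, [x28, z21.s, sxtw #2]: every active element of
      // z21 is an independent signed word offset, widened and scaled by 4.
      masm->Ld1w(z0.VnS(), p6.Zeroing(), SVEMemOperand(x28, z21.VnS(), SXTW, 2));
    }
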
test-api-aarch64.cc
  1010  // UXTW and SXTW could be treated as plain registers in 32-bit contexts, but  in TEST()
  1013  VIXL_CHECK(!Operand(w16, SXTW).IsPlainRegister());  in TEST()
  1083  VIXL_CHECK(!SVEMemOperand(x2, z3.VnD(), SXTW).IsEquivalentToScalar());  in TEST()
  1097  VIXL_CHECK(SVEMemOperand(x8, z1.VnS(), SXTW).IsScalarPlusVector());  in TEST()
  1117  VIXL_CHECK(SVEMemOperand(x8, z1.VnS(), SXTW).IsScatterGather());  in TEST()
test-cpu-features-aarch64.cc
  235   TEST_NONE(cmn_2, cmn(x0, Operand(w1, SXTW, 3)))
  299   TEST_NONE(ldrb_4, ldrb(w0, MemOperand(x1, w2, SXTW, 0)))
  316   TEST_NONE(ldrsb_10, ldrsb(x0, MemOperand(x1, w2, SXTW, 0)))
  326   TEST_NONE(ldrsh_8, ldrsh(x0, MemOperand(x1, w2, SXTW, 0)))
  332   TEST_NONE(ldrsw_4, ldrsw(x0, MemOperand(x1, w2, SXTW, 0)))
  342   TEST_NONE(ldr_8, ldr(w0, MemOperand(x1, w2, SXTW, 0)))
  404   TEST_NONE(prfm_2, prfm(PLDL1KEEP, MemOperand(x0, w1, SXTW, 0)))
  455   TEST_NONE(strb_4, strb(w0, MemOperand(x1, w2, SXTW, 0)))
  468   TEST_NONE(str_6, str(w0, MemOperand(x1, w2, SXTW, 2)))
  489   TEST_NONE(sub_0, sub(w0, w1, Operand(w2, SXTW,
  [all...]

test-assembler-aarch64.cc
  193   __ Mvn(x15, Operand(w2, SXTW, 4));  in TEST()
  428   __ Mov(x30, Operand(x12, SXTW, 1));  in TEST()
  503   __ Orr(x12, x0, Operand(x1, SXTW, 2));  in TEST()
  597   __ Orn(x12, x0, Operand(x1, SXTW, 2));  in TEST()
  664   __ And(x12, x0, Operand(x1, SXTW, 2));  in TEST()
  810   __ Bic(x12, x0, Operand(x1, SXTW, 2));  in TEST()
  942   __ Eor(x12, x0, Operand(x1, SXTW, 2));  in TEST()
  1009  __ Eon(x12, x0, Operand(x1, SXTW, 2));  in TEST()
  3568  __ Ldr(w3, MemOperand(x18, x27, SXTW));
  3569  __ Ldr(w4, MemOperand(x18, x28, SXTW,
  [all...]

test-assembler-neon-aarch64.cc
  201   __ Ldr(b1, MemOperand(x17, x19, SXTW));  in TEST()
  205   __ Ldr(h4, MemOperand(x17, x19, SXTW, 1));  in TEST()
  210   __ Ldr(s18, MemOperand(x17, x19, SXTW, 2));  in TEST()
  215   __ Ldr(d22, MemOperand(x17, x19, SXTW, 3));  in TEST()
  220   __ Ldr(q26, MemOperand(x17, x19, SXTW, 4));  in TEST()
  227   __ Str(s27, MemOperand(x20, x19, SXTW, 2));  in TEST()
  231   __ Str(q27, MemOperand(x20, x19, SXTW, 4));  in TEST()
/third_party/vixl/src/aarch64/ |
operands-aarch64.cc
  261   VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));  in MemOperand()
  322   VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));  in MemOperand()
operands-aarch64.h
  325   // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  612   // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike  in IsScalarPlusVector()
  699   if (extend == SXTW) return SVE_SXTW;  in GetSVEOffsetModifierFor()
constants-aarch64.h
  365   SXTW = 6,  enumerator
simulator-aarch64.cc
  1091  case SXTW:  in Simulator()
  4269  VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));  in Simulator()
/third_party/node/deps/v8/src/codegen/arm64/ |
assembler-arm64-inl.h
  395   DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));  in MemOperand()
  447   DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));  in MemOperand()
constants-arm64.h
  377   SXTW = 6,  enumerator
macro-assembler-arm64.cc
  1995  Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift));  in LoadEntryFromBuiltinIndex()
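This operand folds Smi-untagging, scaling to pointer size, and sign extension into one SXTW-with-shift. A hedged sketch of the equivalent arithmetic in plain C++ (the constant values are illustrative stand-ins; V8 defines kSystemPointerSizeLog2 and kSmiShift per build configuration):

    #include <cstdint>

    constexpr unsigned kSystemPointerSizeLog2 = 3;  // assumed: 8-byte pointers
    constexpr unsigned kSmiShift = 1;               // assumed: 31-bit Smis

    uint64_t EntryAddress(uint64_t table_base, int32_t smi_index) {
      // Net shift encoded in the SXTW operand: scale minus the Smi tag shift.
      unsigned shift = kSystemPointerSizeLog2 - kSmiShift;
      return table_base + (static_cast<int64_t>(smi_index) << shift);
    }
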
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/Utils/ |
AArch64BaseInfo.h
  464   SXTW,  enumerator
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
AArch64FastISel.cpp
  750   Addr.setExtendType(AArch64_AM::SXTW);  in computeAddress()
  834   Addr.setExtendType(AArch64_AM::SXTW);  in computeAddress()
  892   Addr.setExtendType(AArch64_AM::SXTW);  in computeAddress()
  1069  if (Addr.getExtendType() == AArch64_AM::SXTW ||  in simplifyAddress()
  1085  else if (Addr.getExtendType() == AArch64_AM::SXTW)  in simplifyAddress()
  1146  bool IsSigned = Addr.getExtendType() == AArch64_AM::SXTW ||  in addLoadStoreOperands()
  1858  Addr.getExtendType() == AArch64_AM::SXTW)  in emitLoad()
  2149  Addr.getExtendType() == AArch64_AM::SXTW)  in emitStore()
AArch64ISelDAGToDAG.cpp
  501   return AArch64_AM::SXTW;  in getExtendTypeForNode()
  936   SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,  in SelectExtendedSHL()
  1005  SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,  in SelectAddrModeWRO()
  1017  SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,  in SelectAddrModeWRO()
AArch64InstructionSelector.cpp
  4362  SignExtend = Ext == AArch64_AM::SXTW;
  4541  unsigned SignExtend = Ext == AArch64_AM::SXTW;
  4728  return AArch64_AM::SXTW;
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/AsmParser/ |
AArch64AsmParser.cpp
  1100  ShiftExtendTy == AArch64_AM::SXTW) &&  in isSVEDataVectorRegWithShiftExtend()
  1253  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||  in isExtend()
  1266  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;  in isExtend64()
  1291  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&  in isMemWExtend()
  1753  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;  in addMemExtendOperands()
  1765  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;  in addMemExtend8Operands()
  2756  .Case("sxtw", AArch64_AM::SXTW)  in tryParseOptionalShiftExtend()
/third_party/node/deps/v8/src/compiler/backend/arm64/ |
code-generator-arm64.cc
  134   return Operand(InputRegister32(index), SXTW);  in InputOperand2_32()
  164   return Operand(InputRegister64(index), SXTW);  in InputOperand2_64()
/third_party/node/deps/v8/src/execution/arm64/ |
simulator-arm64.cc
  1149  case SXTW:
  1983  DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));