Searched refs:UXTW (Results 1 - 25 of 37) sorted by relevance

/third_party/ffmpeg/libswscale/aarch64/
hscale.S
57 add x17, x3, w8, UXTW // srcp + filterPos[0]
58 add x8, x3, w0, UXTW // srcp + filterPos[1]
59 add x0, x3, w11, UXTW // srcp + filterPos[2]
60 add x11, x3, w9, UXTW // srcp + filterPos[3]
127 ldr w8, [x3, w8, UXTW] // src[filterPos[idx + 0]][0..3]
128 ldr w9, [x3, w9, UXTW] // src[filterPos[idx + 1]][0..3]
129 ldr w10, [x3, w10, UXTW] // src[filterPos[idx + 2]][0..3]
130 ldr w11, [x3, w11, UXTW] // src[filterPos[idx + 3]][0..3]
131 ldr w12, [x3, w12, UXTW] // src[filterPos[idx + 4]][0..3]
132 ldr w13, [x3, w13, UXTW] // sr
[all...]
/third_party/ffmpeg/libavfilter/aarch64/
vf_nlmeans_neon.S
38 sub x3, x3, w6, UXTW // s1 padding (s1_linesize - w)
39 sub x5, x5, w6, UXTW // s2 padding (s2_linesize - w)
40 sub x9, x0, w1, UXTW #2 // dst_top
41 sub x1, x1, w6, UXTW // dst padding (dst_linesize_32 - w)
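The two ffmpeg entries above use UXTW as an extended-register operand: the 32-bit W register is zero-extended to 64 bits and, when a shift amount is given (UXTW #2 in vf_nlmeans_neon.S), shifted left before the add or sub. A minimal C++ model of those two computations follows; the function and parameter names are illustrative, not taken from the ffmpeg sources.

    #include <cstdint>

    // Model of "add x17, x3, w8, UXTW": x17 = x3 + zero_extend(w8).
    uint64_t add_uxtw(uint64_t base, uint32_t index) {
        return base + static_cast<uint64_t>(index);
    }

    // Model of "sub x9, x0, w1, UXTW #2": x9 = x0 - (zero_extend(w1) << 2),
    // which is how the vf_nlmeans code forms dst_top = dst - 4 * dst_linesize.
    uint64_t sub_uxtw_lsl2(uint64_t base, uint32_t linesize) {
        return base - (static_cast<uint64_t>(linesize) << 2);
    }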
/third_party/vixl/src/aarch64/
operands-aarch64.cc
192 // __ Add(w0, w0, Operand(w1, UXTW)); in IsPlainRegister()
194 // __ Add(x0, x0, Operand(w1, UXTW)); in IsPlainRegister()
221 return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); in ToExtendedRegister()
261 VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); in MemOperand()
322 VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); in MemOperand()
operands-aarch64.h
325 // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
434 // or [x0, wzr, UXTW #3].
528 // "scalar-plus-vector", like [x0, z1.d, UXTW]
568 // "vector-plus-vector", like [z0.d, z1.d, UXTW]
592 // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
612 // The modifier can be LSL or an an extend mode (UXTW or SXTW) here. Unlike in IsScalarPlusVector()
698 if (extend == UXTW) return SVE_UXTW; in GetSVEOffsetModifierFor()
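The VIXL hits show UXTW used both in arithmetic Operands and in MemOperand addressing modes, plus the ToExtendedRegister()/IsPlainRegister() helpers that reason about when the extend is redundant. A small sketch against the VIXL macro-assembler API, assuming the VIXL headers are on the include path; EmitUxtwExamples is an illustrative name, not part of the library.

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitUxtwExamples(MacroAssembler* masm) {
        // add x0, x1, w2, uxtw #2 : extended-register arithmetic operand.
        masm->Add(x0, x1, Operand(w2, UXTW, 2));
        // ldr w3, [x4, w5, uxtw #2] : extended-register addressing mode.
        masm->Ldr(w3, MemOperand(x4, w5, UXTW, 2));
    }

As the operands-aarch64.cc comment quoted above points out, Operand(w1, UXTW) only behaves like a plain register when the destination is 32-bit, which is why the API tests further down expect IsPlainRegister() to be false for it.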
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/MCTargetDesc/
AArch64AddressingModes.h
42 UXTW, enumerator
62 case AArch64_AM::UXTW: return "uxtw"; in getShiftExtendName()
129 case 2: return AArch64_AM::UXTW; in getExtendType()
156 case AArch64_AM::UXTW: return 2; break; in getExtendEncoding()
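In the LLVM MC layer, UXTW is one value of the AArch64_AM::ShiftExtendType enum, its textual name is "uxtw", and it encodes as 2 in the arithmetic extend field. A simplified, self-contained restatement of that round trip is below; the real enum also covers LSL/LSR/ASR/ROR/MSL, and the names and values here are only a sketch of the subset these hits show.

    #include <cassert>

    // Illustrative subset of the extend <-> field-value mapping above:
    // UXTB=0, UXTH=1, UXTW=2, UXTX=3, SXTB=4, SXTH=5, SXTW=6, SXTX=7.
    enum class ArithExtend { UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX };

    unsigned EncodeExtend(ArithExtend e) { return static_cast<unsigned>(e); }

    ArithExtend DecodeExtend(unsigned imm3) {
        assert(imm3 <= 7 && "extend field is 3 bits");
        return static_cast<ArithExtend>(imm3);
    }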
AArch64InstPrinter.cpp
998 // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at in printArithExtend()
1000 if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) { in printArithExtend()
1006 ExtType == AArch64_AM::UXTW) ) { in printArithExtend()
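The printArithExtend lines describe a disassembly alias: when the destination or first source of an extended-register add/sub is WSP/SP and the extend is the matching UXTW/UXTX, the extend is printed as LSL, and a zero shift is not printed at all. A hedged sketch of that rule (the name, signature, and parameters are illustrative, not the LLVM API):

    #include <string>

    std::string FormatArithExtend(bool sp_operand_matches_extend,
                                  bool is_uxtw_or_uxtx,
                                  const std::string& extend_name,
                                  unsigned shift) {
        if (is_uxtw_or_uxtx && sp_operand_matches_extend) {
            // "add wsp, wsp, w4, lsl #2" rather than "... w4, uxtw #2".
            return shift == 0 ? "" : ", lsl #" + std::to_string(shift);
        }
        std::string out = ", " + extend_name;
        if (shift != 0) out += " #" + std::to_string(shift);
        return out;
    }

This is also why the VIXL disassembly tests below expect "add wsp, wsp, w4, lsl #2" for Operand(w4, UXTW, 2).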
/third_party/vixl/test/aarch64/
test-api-aarch64.cc
1010 // UXTW and SXTW could be treated as plain registers in 32-bit contexts, but in TEST()
1012 VIXL_CHECK(!Operand(w15, UXTW).IsPlainRegister()); in TEST()
1025 VIXL_CHECK(!MemOperand(x5, wzr, UXTW).IsPlainRegister()); in TEST()
1026 VIXL_CHECK(!MemOperand(x6, wzr, UXTW, 3).IsPlainRegister()); in TEST()
1044 VIXL_CHECK(MemOperand(x5, wzr, UXTW).IsEquivalentToPlainRegister()); in TEST()
1045 VIXL_CHECK(MemOperand(x6, wzr, UXTW, 3).IsEquivalentToPlainRegister()); in TEST()
1067 VIXL_CHECK(!SVEMemOperand(x2, z3.VnS(), UXTW).IsPlainScalar()); in TEST()
1098 VIXL_CHECK(SVEMemOperand(x9, z2.VnD(), UXTW).IsScalarPlusVector()); in TEST()
1118 VIXL_CHECK(SVEMemOperand(x9, z2.VnD(), UXTW).IsScatterGather()); in TEST()
test-disasm-neon-aarch64.cc
206 COMPARE(ldr(b1, MemOperand(x2, w3, UXTW)), "ldr b1, [x2, w3, uxtw]"); in TEST()
211 COMPARE(ldr(b31, MemOperand(sp, wzr, UXTW)), "ldr b31, [sp, wzr, uxtw]"); in TEST()
214 COMPARE(ldr(h1, MemOperand(x2, w3, UXTW)), "ldr h1, [x2, w3, uxtw]"); in TEST()
216 COMPARE(ldr(h3, MemOperand(x4, w5, UXTW, 1)), "ldr h3, [x4, w5, uxtw #1]"); in TEST()
223 COMPARE(ldr(s1, MemOperand(x2, w3, UXTW)), "ldr s1, [x2, w3, uxtw]"); in TEST()
225 COMPARE(ldr(s3, MemOperand(x4, w5, UXTW, 2)), "ldr s3, [x4, w5, uxtw #2]"); in TEST()
232 COMPARE(ldr(d1, MemOperand(x2, w3, UXTW)), "ldr d1, [x2, w3, uxtw]"); in TEST()
234 COMPARE(ldr(d3, MemOperand(x4, w5, UXTW, 3)), "ldr d3, [x4, w5, uxtw #3]"); in TEST()
241 COMPARE(ldr(q1, MemOperand(x2, w3, UXTW)), "ldr q1, [x2, w3, uxtw]"); in TEST()
243 COMPARE(ldr(q3, MemOperand(x4, w5, UXTW, in TEST()
[all...]
test-assembler-sve-aarch64.cc
8877 (static_cast<int>(mod) == UXTW));
8882 offs_is_unsigned = (static_cast<int>(mod) == UXTW) ? true : false;
9085 ldff1_32_scaled_offset_helper(kHRegSize, ldff1h, ld1h, UXTW);
9090 ldff1_32_scaled_offset_helper(kSRegSize, ldff1w, ld1w, UXTW);
9095 ldff1_32_scaled_offset_helper(kHRegSize, ldff1sh, ld1sh, UXTW);
9114 ldff1_32_unscaled_offset_helper(kBRegSize, ldff1b, ld1b, UXTW);
9119 ldff1_32_unscaled_offset_helper(kHRegSize, ldff1h, ld1h, UXTW);
9124 ldff1_32_unscaled_offset_helper(kSRegSize, ldff1w, ld1w, UXTW);
9129 ldff1_32_unscaled_offset_helper(kBRegSize, ldff1sb, ld1sb, UXTW);
9134 ldff1_32_unscaled_offset_helper(kHRegSize, ldff1sh, ld1sh, UXTW);
[all...]
test-disasm-aarch64.cc
335 COMPARE(adds(x9, x10, Operand(x11, UXTW, 3)), "adds x9, x10, w11, uxtw #3"); in TEST()
347 COMPARE(add(wsp, wsp, Operand(w4, UXTW, 2)), "add wsp, wsp, w4, lsl #2"); in TEST()
361 COMPARE(subs(x9, x10, Operand(x11, UXTW, 3)), "subs x9, x10, w11, uxtw #3"); in TEST()
373 COMPARE(sub(wsp, wsp, Operand(w4, UXTW, 2)), "sub wsp, wsp, w4, lsl #2"); in TEST()
1102 COMPARE(ldr(w0, MemOperand(x1, w2, UXTW)), "ldr w0, [x1, w2, uxtw]"); in TEST()
1103 COMPARE(ldr(w3, MemOperand(x4, w5, UXTW, 2)), "ldr w3, [x4, w5, uxtw #2]"); in TEST()
1112 COMPARE(ldr(x0, MemOperand(x1, w2, UXTW)), "ldr x0, [x1, w2, uxtw]"); in TEST()
1113 COMPARE(ldr(x3, MemOperand(x4, w5, UXTW, 3)), "ldr x3, [x4, w5, uxtw #3]"); in TEST()
1123 COMPARE(str(w0, MemOperand(x1, w2, UXTW)), "str w0, [x1, w2, uxtw]"); in TEST()
1124 COMPARE(str(w3, MemOperand(x4, w5, UXTW, in TEST()
[all...]
test-simulator-aarch64.cc
298 __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, n_index_shift)); in Test1Op_Helper()
444 __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, index_shift)); in Test2Op_Helper()
448 __ Ldr(fm, MemOperand(inputs_base, index_m, UXTW, index_shift)); in Test2Op_Helper()
588 __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, index_shift)); in Test3Op_Helper()
592 __ Ldr(fm, MemOperand(inputs_base, index_m, UXTW, index_shift)); in Test3Op_Helper()
596 __ Ldr(fa, MemOperand(inputs_base, index_a, UXTW, index_shift)); in Test3Op_Helper()
737 __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, index_shift)); in TestCmp_Helper()
741 __ Ldr(fm, MemOperand(inputs_base, index_m, UXTW, index_shift)); in TestCmp_Helper()
876 __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, index_shift)); in TestCmpZero_Helper()
1019 __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, n_index_shif in TestFPToFixed_Helper()
[all...]
test-disasm-sve-aarch64.cc
137 COMPARE(adr(z30.VnD(), SVEMemOperand(z14.VnD(), z16.VnD(), UXTW)), in TEST()
139 COMPARE(adr(z30.VnD(), SVEMemOperand(z14.VnD(), z16.VnD(), UXTW, 1)), in TEST()
141 COMPARE(adr(z30.VnD(), SVEMemOperand(z14.VnD(), z16.VnD(), UXTW, 2)), in TEST()
143 COMPARE(adr(z30.VnD(), SVEMemOperand(z14.VnD(), z16.VnD(), UXTW, 3)), in TEST()
3411 COMPARE(ld1b(z9.VnS(), p5.Zeroing(), SVEMemOperand(sp, z1.VnS(), UXTW)), in TEST()
3415 COMPARE(ld1w(z22.VnS(), p6.Zeroing(), SVEMemOperand(sp, z5.VnS(), UXTW)), in TEST()
3417 COMPARE(ld1sb(z12.VnS(), p7.Zeroing(), SVEMemOperand(x17, z23.VnS(), UXTW)), in TEST()
3421 COMPARE(ld1sh(z11.VnS(), p2.Zeroing(), SVEMemOperand(x18, z10.VnS(), UXTW)), in TEST()
3425 COMPARE(ld1h(z9.VnS(), p3.Zeroing(), SVEMemOperand(sp, z4.VnS(), UXTW, 1)), in TEST()
3434 COMPARE(ldff1b(z18.VnS(), p6.Zeroing(), SVEMemOperand(x27, z24.VnS(), UXTW)), in TEST()
[all...]
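The SVE hits use UXTW in the scalar-plus-vector (gather/scatter) forms: each vector lane holds a 32-bit unsigned offset that is zero-extended and optionally scaled before being added to the scalar base. A C++ model of the per-lane address computation, with illustrative names:

    #include <cstdint>
    #include <vector>

    // Per active lane of e.g. "ld1h { z.s }, p/z, [x0, z.s, uxtw #1]":
    // address[i] = base + (zero_extend_32(offset[i]) << shift).
    std::vector<uint64_t> GatherAddresses(uint64_t base,
                                          const std::vector<uint32_t>& offsets,
                                          unsigned shift) {
        std::vector<uint64_t> addrs;
        addrs.reserve(offsets.size());
        for (uint32_t off : offsets) {
            addrs.push_back(base + (static_cast<uint64_t>(off) << shift));
        }
        return addrs;
    }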
test-cpu-features-aarch64.cc
177 TEST_NONE(add_0, add(w0, w1, Operand(w2, UXTW, 3)))
304 TEST_NONE(ldrh_3, ldrh(w0, MemOperand(x1, w2, UXTW, 1)))
313 TEST_NONE(ldrsb_7, ldrsb(w0, MemOperand(x1, w2, UXTW, 0)))
324 TEST_NONE(ldrsh_6, ldrsh(w0, MemOperand(x1, w2, UXTW, 0)))
344 TEST_NONE(ldr_10, ldr(x0, MemOperand(x1, w2, UXTW, 3)))
460 TEST_NONE(strh_3, strh(w0, MemOperand(x1, w2, UXTW, 0)))
470 TEST_NONE(str_8, str(x0, MemOperand(x1, w2, UXTW, 0)))
674 TEST_FP(ldr_13, ldr(h0, MemOperand(x1, w2, UXTW, 1)))
708 TEST_FP(str_11, str(h0, MemOperand(x1, w2, UXTW, 1)))
710 TEST_FP(str_13, str(s0, MemOperand(x1, w2, UXTW,
[all...]
test-assembler-aarch64.cc
192 __ Mvn(x14, Operand(w2, UXTW, 4)); in TEST()
366 __ Mov(x27, Operand(w13, UXTW, 4)); in TEST()
427 __ Mov(x29, Operand(x12, UXTW, 1)); in TEST()
499 __ Orr(w8, w0, Operand(w1, UXTW, 2)); in TEST()
593 __ Orn(w8, w0, Operand(w1, UXTW, 2)); in TEST()
660 __ And(w8, w0, Operand(w1, UXTW, 2)); in TEST()
806 __ Bic(w8, w0, Operand(w1, UXTW, 2)); in TEST()
938 __ Eor(w8, w0, Operand(w1, UXTW, 2)); in TEST()
1005 __ Eon(w8, w0, Operand(w1, UXTW, 2)); in TEST()
4607 __ Prfm(op, MemOperand(x0, input, UXTW));
[all...]
/third_party/node/deps/v8/src/codegen/arm64/
assembler-arm64-inl.h
306 return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); in ToExtendedRegister()
395 DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); in MemOperand()
447 DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); in MemOperand()
/third_party/ffmpeg/libavcodec/aarch64/
h264cmc_neon.S
41 add x6, x6, w9, UXTW
211 add x6, x6, w9, UXTW
h264idct_neon.S
390 ldrb w9, [x4, w9, UXTW]
/third_party/node/deps/v8/src/regexp/arm64/
regexp-macro-assembler-arm64.cc
214 __ Add(x10, code_pointer(), Operand(w10, UXTW)); in Backtrack()
639 __ Ldrb(w11, MemOperand(x11, w10, UXTW)); in CheckBitInTable()
720 __ Ldrb(w10, MemOperand(x10, current_character(), UXTW)); in CheckSpecialCharacterClass()
733 __ Ldrb(w10, MemOperand(x10, current_character(), UXTW)); in CheckSpecialCharacterClass()
/third_party/vixl/benchmarks/aarch64/
bench-utils.cc
245 __ Peek(PickR(size), Operand(claim.W(), UXTW)); in GenerateOperandSequence()
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/Utils/
AArch64BaseInfo.h
459 UXTW, enumerator
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
AArch64FastISel.cpp
744 Addr.setExtendType(AArch64_AM::UXTW); in computeAddress()
768 Addr.setExtendType(AArch64_AM::UXTW); in computeAddress()
828 Addr.setExtendType(AArch64_AM::UXTW); in computeAddress()
865 Addr.setExtendType(AArch64_AM::UXTW); in computeAddress()
887 Addr.setExtendType(AArch64_AM::UXTW); in computeAddress()
1070 Addr.getExtendType() == AArch64_AM::UXTW ) in simplifyAddress()
1081 if (Addr.getExtendType() == AArch64_AM::UXTW) in simplifyAddress()
1857 if (Addr.getExtendType() == AArch64_AM::UXTW || in emitLoad()
2148 if (Addr.getExtendType() == AArch64_AM::UXTW || in emitStore()
AArch64ISelDAGToDAG.cpp
513 return AArch64_AM::UXTW; in getExtendTypeForNode()
531 return AArch64_AM::UXTW; in getExtendTypeForNode()
667 /// Instructions that accept extend modifiers like UXTW expect the register
711 if (Ext == AArch64_AM::UXTW && in SelectArithExtendedRegister()
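The FastISel and ISelDAGToDAG hits select UXTW when an address index is a 32-bit value zero-extended to 64 bits, so the extension folds into the load or store instead of costing a separate instruction. A tiny C++ function that typically lowers this way on AArch64 (the exact assembly depends on the compiler and optimization level):

    #include <cstdint>

    // At -O1 or higher this commonly becomes "ldr w0, [x0, w1, uxtw #2]":
    // the unsigned 32-bit index is zero-extended and scaled by the element size.
    uint32_t load_u32(const uint32_t* table, uint32_t index) {
        return table[index];
    }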
/third_party/node/deps/v8/src/wasm/baseline/arm64/
liftoff-assembler-arm64.h
142 : MemOperand(effective_addr, offset.W(), UXTW); in GetMemOp()
157 assm->Add(tmp, addr, Operand(offset, UXTW)); in GetEffectiveAddress()
496 Operand offset_op = offset_reg.is_valid() ? Operand(offset_reg.W(), UXTW) in StoreTaggedPointer()
860 Cmp(result.gp().W(), Operand(expected.gp().W(), UXTW)); in AtomicCompareExchange()
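The Liftoff hits keep WebAssembly memory offsets in W registers and attach them with UXTW; wasm32 offsets are unsigned 32-bit values, and zero-extension keeps an offset above 2 GiB from being misread as a negative displacement. A small self-contained demonstration of the difference (the base and offset values are made up):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t base = 0x400000000000;  // hypothetical linear-memory base
        uint32_t offset = 0x80000000;    // > 2 GiB, so the top bit is set

        uint64_t uxtw = base + static_cast<uint64_t>(offset);       // UXTW: base + 0x80000000
        uint64_t sxtw = base + static_cast<int64_t>(
                                   static_cast<int32_t>(offset));   // SXTW: base - 0x80000000

        std::printf("uxtw: %#llx\nsxtw: %#llx\n",
                    (unsigned long long)uxtw, (unsigned long long)sxtw);
        return 0;
    }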
/third_party/node/deps/v8/src/baseline/arm64/
baseline-assembler-arm64-inl.h
555 __ Add(temp, temp, Operand(reg, UXTW, entry_size_log2)); in Switch()
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/AsmParser/
AArch64AsmParser.cpp
1099 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW || in isSVEDataVectorRegWithShiftExtend()
1253 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW || in isExtend()
1266 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW; in isExtend64()
1291 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) && in isMemWExtend()
1737 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW; in addExtendOperands()
2752 .Case("uxtw", AArch64_AM::UXTW) in tryParseOptionalShiftExtend()

Completed in 70 milliseconds
