/third_party/vixl/test/aarch64/
test-simulator-sve2-aarch64.cc
  191   ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  335   ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  479   ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  623   ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  867   ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  1111  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  1255  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  1399  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  1543  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  1715  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize)  in TEST_SVE()
  [all...]

test-simulator-sve-aarch64.cc
  182  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()
  266  ASSERT_EQUAL_64(expected_hashes[core.GetSVELaneCount(kQRegSize) - 1], x0);  in TEST_SVE()

test-api-aarch64.cc
  250   VIXL_CHECK(VRegister(4, kQRegSize).Is(q4));  in TEST()
  257   VIXL_CHECK(VRegister(4, kQRegSize, 1).Is(q4));  in TEST()
  266   VIXL_CHECK(VRegister(5, kQRegSize, 2).Is(v5.V2D()));  in TEST()
  267   VIXL_CHECK(VRegister(6, kQRegSize, 4).Is(v6.V4S()));  in TEST()
  268   VIXL_CHECK(VRegister(7, kQRegSize, 8).Is(v7.V8H()));  in TEST()
  269   VIXL_CHECK(VRegister(8, kQRegSize, 16).Is(v8.V16B()));  in TEST()
  330   VIXL_CHECK(CPURegister(6, kQRegSize, CPURegister::kVRegister).Is(q6));  in TEST()
  331   VIXL_CHECK(CPURegister(7, kQRegSize, CPURegister::kVRegister).Is(v7));  in TEST()
  1164  VRegister temp = temps.AcquireVRegisterOfSize(kQRegSize);  in TEST()

test-simulator-aarch64.cc
  1691  VRegister vdstr = VRegister(0, kQRegSize);  in Test1OpAcrossNEON_Helper()
  2183  VIXL_ASSERT((vm_bits * vm_subvector_count) <= kQRegSize);  in TestByElementNEON_Helper()

test-assembler-sve-aarch64.cc
  5713   if (vl > (index[4] * kQRegSize)) {
  11787  constexpr int s = kQRegSize / kSRegSize;
  11821  constexpr int d = kQRegSize / kDRegSize;
  11851  constexpr int s = kQRegSize / kSRegSize;
  11885  constexpr int d = kQRegSize / kDRegSize;

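The constexpr divisions in test-assembler-sve-aarch64.cc above compute how many lanes of a given size fit in one 128-bit Q register (equivalently, one SVE vector segment). A minimal standalone sketch of that arithmetic, using the bit widths vixl assigns to these constants:

```cpp
#include <cassert>

// Register widths in bits, as defined in vixl's instructions-aarch64.h.
const unsigned kQRegSize = 128;
const unsigned kDRegSize = 64;
const unsigned kSRegSize = 32;
const unsigned kHRegSize = 16;
const unsigned kBRegSize = 8;

int main() {
  // Lanes per Q register (or per 128-bit SVE segment).
  assert(kQRegSize / kDRegSize == 2);   // 2 x 64-bit D lanes
  assert(kQRegSize / kSRegSize == 4);   // 4 x 32-bit S lanes
  assert(kQRegSize / kHRegSize == 8);   // 8 x 16-bit H lanes
  assert(kQRegSize / kBRegSize == 16);  // 16 x 8-bit B lanes
  return 0;
}
```

The same per-segment counts show up in logic-aarch64.cc below (b_per_segment, s_per_segment).
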
test-trace-aarch64.cc
  3085  for (unsigned chunk = 0; chunk < (vl_in_bits / kQRegSize); chunk++) {

/third_party/vixl/benchmarks/aarch64/
bench-utils.cc
  125  // - We need at least 4 * kQRegSize bytes for Ld4/St4.  in GeneratePrologue()
  129  __ Claim((4 * kQRegSize) + (16 * GetRandomBits(3)));  in GeneratePrologue()
  387  VRegister vt(16 + static_cast<unsigned>(GetRandomBits(4)), kQRegSize);  in GenerateNEONSequence()
  388  VRegister vt2((vt.GetCode() + 1) % kNumberOfVRegisters, kQRegSize);  in GenerateNEONSequence()
  389  VRegister vt3((vt.GetCode() + 2) % kNumberOfVRegisters, kQRegSize);  in GenerateNEONSequence()
  390  VRegister vt4((vt.GetCode() + 3) % kNumberOfVRegisters, kQRegSize);  in GenerateNEONSequence()

bench-utils.h
  235  unsigned size_in_bits = vixl::aarch64::kQRegSize);

/third_party/vixl/src/aarch64/
registers-aarch64.h
  483  case kQRegSize:
  587  explicit VRegister(int code, int size_in_bits = kQRegSize, int lanes = 1)
  655  ZRegister VnQ() const { return ZRegister(GetCode(), kQRegSize); }
  809  V(QRegister, kQRegSize, VRegister) \

instructions-aarch64.h
  75  const unsigned kQRegSize = 128;  member
  77  const unsigned kQRegSizeInBytes = kQRegSize / 8;

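In vixl, then, kQRegSize is a width in bits: a Q register is 128 bits, and the byte count is derived once as kQRegSizeInBytes. A small sketch of the relationship, with the values copied from the header lines above:

```cpp
// From vixl's instructions-aarch64.h (lines 75-77 above).
const unsigned kQRegSize = 128;                   // Q register width in bits.
const unsigned kQRegSizeInBytes = kQRegSize / 8;  // 16 bytes.

static_assert(kQRegSizeInBytes == 16,
              "a 128-bit Q register occupies 16 bytes");
```
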
simulator-aarch64.cc
  1517  case kQRegSize:  in Simulator()
  1591  VIXL_ASSERT((vl % kQRegSize) == 0);  in Simulator()
  1592  for (unsigned i = 0; i < (vl / kQRegSize); i++) {  in Simulator()
  1603  VIXL_ASSERT((vl % kQRegSize) == 0);  in Simulator()
  1604  for (unsigned i = 0; i < (vl / kQRegSize); i++) {  in Simulator()
  1615  VIXL_ASSERT((vl % kQRegSize) == 0);  in Simulator()
  1617  for (unsigned i = 0; i < (vl / kQRegSize); i++) {  in Simulator()
  1630  VIXL_ASSERT((q_index * kQRegSize) < GetVectorLengthInBits());  in Simulator()
  1644  int lsb = q_index * kQRegSize;  in Simulator()
  1645  int msb = lsb + kQRegSize  in Simulator()
  [all...]

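The simulator matches above (vl % kQRegSize, vl / kQRegSize, q_index * kQRegSize) all follow from the architectural rule that an SVE vector length is a multiple of 128 bits, so a Z register can be processed as a sequence of Q-sized chunks. A hedged sketch of that loop shape; ProcessQChunk is a hypothetical stand-in for the simulator's per-chunk printing or hashing:

```cpp
#include <cassert>
#include <cstdio>

const unsigned kQRegSize = 128;  // bits, vixl convention

// Hypothetical per-chunk hook; the real simulator reads or prints the
// 128-bit slice [lsb, msb] of a Z register here.
void ProcessQChunk(unsigned q_index) {
  unsigned lsb = q_index * kQRegSize;
  unsigned msb = lsb + kQRegSize - 1;
  std::printf("chunk %u: bits [%u, %u]\n", q_index, lsb, msb);
}

void WalkZRegister(unsigned vl) {  // vl: SVE vector length in bits.
  assert((vl % kQRegSize) == 0);   // SVE VL is always a multiple of 128.
  for (unsigned i = 0; i < (vl / kQRegSize); i++) ProcessQChunk(i);
}

int main() { WalkZRegister(512); }  // e.g. a 512-bit SVE implementation.
```
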
instructions-aarch64.cc
  1249  return kQRegSize;  in RegisterSizeInBitsFromFormat()

operands-aarch64.h
  279  if (type == CPURegister::kVRegister) size = kQRegSize;  in GetDefaultSizeFor()

logic-aarch64.cc
  2874  int elements_per_segment = kQRegSize / LaneSizeInBitsFromFormat(vform);
  2930  int segment_size = std::min(kQRegSize, RegisterSizeInBitsFromFormat(vform));
  7634  int lanes_per_segment = kQRegSize / LaneSizeInBitsFromFormat(vform);
  7770  int b_per_segment = kQRegSize / kBRegSize;
  7771  int s_per_segment = kQRegSize / kSRegSize;

disasm-aarch64.cc
  6500  reg_size = kQRegSize;  in Disassembler()

/third_party/vixl/examples/aarch64/
neon-matrix-multiply.cc
  50  VRegister v_out = VRegister(out_column, kQRegSize, 4);  in GenerateMultiplyColumn()

/third_party/node/deps/v8/src/execution/arm64/ |
simulator-arm64.h
  374   using SimVRegister = SimRegisterBase<kQRegSize>;  // v0-v31
  517   for (unsigned i = size; i < kQRegSize; i++) {  in ClearForWrite()
  638   Saturation saturated_[kQRegSize];
  641   bool round_[kQRegSize];
  998   uint8_t val[kQRegSize];
  1006  (sizeof(T) == kQRegSize),  in vreg()
  1068  (sizeof(value) == kQRegSize),  in set_vreg()
  1120  (sizeof(value) == kDRegSize) || (sizeof(value) == kQRegSize));  in set_vreg_no_log()
  1385  void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSize,

simulator-arm64.cc
  1204  case kQRegSize:
  1216  case kQRegSize:
  1418  int byte = kQRegSize - 1;
  1457  DCHECK_LE(msb, static_cast<unsigned>(kQRegSize));
  1471  if (msb < (kQRegSize - 1)) {
  2223  DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
  2259  DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));

/third_party/node/deps/v8/src/codegen/arm64/ |
constants-arm64.h
  68  const int kQRegSize = kQRegSizeInBits >> 3;  member
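Note the unit clash between the two copies of this constant in the tree: vixl's kQRegSize (instructions-aarch64.h above) is a bit width (128), while v8's kQRegSize here is a byte width, derived as kQRegSizeInBits >> 3. A small sketch of the relationship, assuming kQRegSizeInBits is 128 as elsewhere in these headers:

```cpp
// v8 convention (constants-arm64.h above): kQRegSize counts bytes.
const int kQRegSizeInBits = 128;             // assumed from the header
const int kQRegSize = kQRegSizeInBits >> 3;  // 16 bytes

static_assert(kQRegSize == 16, "v8's kQRegSize is bytes, not bits");

// This is why the v8 simulator's backing store for a V register
// (simulator-arm64.h:998 above) is declared as uint8_t val[kQRegSize].
```
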
assembler-arm64.cc
  2554  DCHECK_LT(lane, kQRegSize / lane_size);  in LoadStoreStructSingle()