/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/
  MVEGatherScatterLowering.cpp
      80  // returning the base directly and the offsets indirectly using the Offsets
      82  Value *checkGEP(Value *&Offsets, Type *Ty, Value *Ptr, IRBuilder<> Builder);
     118  Value *MVEGatherScatterLowering::checkGEP(Value *&Offsets, Type *Ty, Value *Ptr, …  [in checkGEP(), argument]
     139  Offsets = GEP->getOperand(1);  [in checkGEP()]
     142  if (ZExtInst *ZextOffs = dyn_cast<ZExtInst>(Offsets))  [in checkGEP()]
     143  Offsets = ZextOffs->getOperand(0);  [in checkGEP()]
     148  if (OffsType != Offsets->getType()) {  [in checkGEP()]
     150  Offsets->getType()->getScalarSizeInBits()) {  [in checkGEP()]
     152  Offsets = Builder.CreateZExt(Offsets, OffsTyp…  [in checkGEP()]
     239  Value *Offsets;  [in tryCreateMaskedGatherOffset(), local]
     [all...]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-subzero/lib/Support/
  Signals.cpp
      53  const char **Modules, intptr_t *Offsets,
     102  std::vector<intptr_t> Offsets(Depth, 0);  [in printSymbolizedStackTrace()]
     103  if (!findModulesAndOffsets(StackTrace, Depth, Modules.data(), Offsets.data(),  [in printSymbolizedStackTrace()]
     117  Input << Modules[i] << " " << (void*)Offsets[i] << "\n";  [in printSymbolizedStackTrace()]
     171  OS << "(" << Modules[i] << '+' << format_hex(Offsets[i], 0) << ")";  [in printSymbolizedStackTrace()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Remarks/
  RemarkParser.cpp
      31  Offsets.push_back(Split.first.data() - Buffer.data());  [in ParsedStringTable()]
      37  if (Index >= Offsets.size())  [in operator []()]
      41  Offsets.size());  [in operator []()]
      43  size_t Offset = Offsets[Index];  [in operator []()]
      47  (Index == Offsets.size() - 1) ? Buffer.size() : Offsets[Index + 1];  [in operator []()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Support/
  Signals.cpp
      92  const char **Modules, intptr_t *Offsets,
     139  std::vector<intptr_t> Offsets(Depth, 0);  [in printSymbolizedStackTrace()]
     140  if (!findModulesAndOffsets(StackTrace, Depth, Modules.data(), Offsets.data(),  [in printSymbolizedStackTrace()]
     154  Input << Modules[i] << " " << (void*)Offsets[i] << "\n";  [in printSymbolizedStackTrace()]
     211  OS << "(" << Modules[i] << '+' << format_hex(Offsets[i], 0) << ")";  [in printSymbolizedStackTrace()]

  SourceMgr.cpp
      76  std::vector<T> *Offsets = nullptr;  [in getLineNumber(), local]
      78  Offsets = new std::vector<T>();  [in getLineNumber()]
      79  OffsetCache = Offsets;  [in getLineNumber()]
      85  Offsets->push_back(static_cast<T>(N));  [in getLineNumber()]
      89  Offsets = OffsetCache.get<std::vector<T> *>();  [in getLineNumber()]
     100  return llvm::lower_bound(*Offsets, PtrOffset) - Offsets->begin() + 1;  [in getLineNumber()]

  IntervalMap.cpp
      18  void Path::replaceRoot(void *Root, unsigned Size, IdxPair Offsets) {  [in replaceRoot(), argument]
      20  path.front() = Entry(Root, Size, Offsets.first);  [in replaceRoot()]
      21  path.insert(path.begin() + 1, Entry(subtree(0), Offsets.second));  [in replaceRoot()]
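SourceMgr::getLineNumber, shown above, lazily builds a cache of newline offsets (shrinking the element type T to fit the buffer size) and then uses llvm::lower_bound over that cache to turn a byte offset into a 1-based line number. A minimal standalone sketch of the same lookup, with the element-type shrinking left out:

```cpp
#include <algorithm>
#include <cstddef>
#include <string_view>
#include <vector>

// Maps a byte offset in Buffer to a 1-based line number by binary-searching a
// cached, sorted list of newline offsets, as in SourceMgr::getLineNumber.
class LineIndex {
  std::string_view Buffer;
  std::vector<size_t> NewlineOffsets;  // offset of every '\n', built once

public:
  explicit LineIndex(std::string_view Buf) : Buffer(Buf) {
    for (size_t I = 0; I < Buffer.size(); ++I)
      if (Buffer[I] == '\n')
        NewlineOffsets.push_back(I);
  }

  // Line number = number of newlines strictly before Offset, plus one.
  size_t getLineNumber(size_t Offset) const {
    auto It = std::lower_bound(NewlineOffsets.begin(), NewlineOffsets.end(),
                               Offset);
    return static_cast<size_t>(It - NewlineOffsets.begin()) + 1;
  }
};
```

The cost is one linear scan to build the cache and a logarithmic search per query afterwards, which is the trade-off the LLVM code makes with its OffsetCache member.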
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/VE/
  VEFrameLowering.h
      51  static const SpillSlot Offsets[] = {  [variable]
      57  NumEntries = array_lengthof(Offsets);
      58  return Offsets;
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/
  Analysis.h
      69  /// If Offsets is non-null, it points to a vector to be filled in
      74  SmallVectorImpl<uint64_t> *Offsets = nullptr,
      81  SmallVectorImpl<uint64_t> *Offsets = nullptr,
      88  /// If Offsets is non-null, it points to a vector to be filled in
      93  SmallVectorImpl<uint64_t> *Offsets = nullptr,
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/
  Analysis.cpp
      80  /// If Offsets is non-null, it points to a vector to be filled in
      86  SmallVectorImpl<uint64_t> *Offsets,
      95  ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
     104  ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
     115  if (Offsets)
     116  Offsets->push_back(StartingOffset);
     121  SmallVectorImpl<uint64_t> *Offsets,  [in ComputeValueVTs()]
     123  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,  [in ComputeValueVTs()]
     129  SmallVectorImpl<uint64_t> *Offsets,  [in computeValueLLTs()]
     135  computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,  [in computeValueLLTs()]
     119  ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl<EVT> &ValueVTs, SmallVectorImpl<uint64_t> *Offsets, uint64_t StartingOffset)  [in ComputeValueVTs(), argument]
     127  computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl<LLT> &ValueTys, SmallVectorImpl<uint64_t> *Offsets, uint64_t StartingOffset)  [in computeValueLLTs(), argument]
     [all...]
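ComputeValueVTs and computeValueLLTs, shown above, recurse through an aggregate type, appending one value type per scalar leaf and, when the caller passes a non-null Offsets vector, the leaf's byte offset from the start of the aggregate. The sketch below shows only that recursion shape over a toy type description rather than llvm::Type; the back-to-back, padding-free layout is a simplifying assumption of the example.

```cpp
#include <cstdint>
#include <memory>
#include <vector>

// Toy type model: either a scalar of a given byte size or a struct of fields.
struct ToyType {
  uint64_t ScalarSize = 0;                       // nonzero => scalar leaf
  std::vector<std::shared_ptr<ToyType>> Fields;  // nonempty => aggregate
};

// Flat size of a type: sum of its scalar leaves (no-padding assumption).
static uint64_t flatSize(const ToyType &Ty) {
  if (Ty.Fields.empty())
    return Ty.ScalarSize;
  uint64_t Size = 0;
  for (const auto &F : Ty.Fields)
    Size += flatSize(*F);
  return Size;
}

// Append one size per scalar leaf; if Offsets is non-null, also record each
// leaf's byte offset, mirroring the ComputeValueVTs interface above.
static void computeValueSizes(const ToyType &Ty, std::vector<uint64_t> &Sizes,
                              std::vector<uint64_t> *Offsets = nullptr,
                              uint64_t StartingOffset = 0) {
  if (!Ty.Fields.empty()) {
    uint64_t Off = StartingOffset;
    for (const auto &F : Ty.Fields) {
      computeValueSizes(*F, Sizes, Offsets, Off);
      Off += flatSize(*F);  // next field starts right after this one
    }
    return;
  }
  // Scalar leaf: one entry, and one offset if the caller asked for offsets.
  Sizes.push_back(Ty.ScalarSize);
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
```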
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/MCTargetDesc/
  R600MCCodeEmitter.cpp
     131  int64_t Offsets[3] = {  [in encodeInstruction(), local]
     140  SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |  [in encodeInstruction()]
     141  Offsets[2] << 10;  [in encodeInstruction()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Remarks/
  RemarkParser.h
      64  std::vector<size_t> Offsets;  [member]
      74  size_t size() const { return Offsets.size(); }  [in size()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Transforms/IPO/
  LowerTypeTests.h
      62  SmallVector<uint64_t, 16> Offsets;  [member]
      74  Offsets.push_back(Offset);  [in addOffset()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/DebugInfo/DWARF/
  DWARFListTable.h
      78  std::vector<uint64_t> Offsets;  [member of llvm::DWARFListTableHeader]
      96  Offsets.clear();  [in clear()]
     120  if (Index < Offsets.size())  [in getOffsetEntry()]
     121  return Offsets[Index];  [in getOffsetEntry()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/
  HexagonFrameLowering.h
      86  static const SpillSlot Offsets[] = {  [variable]
      94  NumEntries = array_lengthof(Offsets);
      95  return Offsets;
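The VE and Hexagon frame-lowering headers above implement getCalleeSavedSpillSlots the same way: return a pointer to a file-static table of spill slots and report its length via array_lengthof. A minimal sketch of that pattern with a stand-in SpillSlot type and made-up register numbers (the real type lives in TargetFrameLowering.h):

```cpp
// Stand-in for llvm::TargetFrameLowering::SpillSlot.
struct SpillSlot {
  unsigned Reg;  // callee-saved register (illustrative values below)
  int Offset;    // fixed offset of its spill slot from the frame pointer
};

// Return a pointer to a static table plus its entry count, the same shape as
// getCalleeSavedSpillSlots(unsigned &NumEntries) in the hits above.
static const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) {
  static const SpillSlot Offsets[] = {
      {17, 0}, {18, 8}, {19, 16},  // hypothetical registers and offsets
  };
  NumEntries = sizeof(Offsets) / sizeof(Offsets[0]);  // array_lengthof equivalent
  return Offsets;
}
```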
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/
  IRTranslator.cpp
     165  auto *Offsets = VMap.getOffsets(Val);  [in allocateVRegs(), local]
     168  Offsets->empty() ? Offsets : nullptr);  [in allocateVRegs()]
     184  auto *Offsets = VMap.getOffsets(Val);  [in getOrCreateVRegs(), local]
     191  Offsets->empty() ? Offsets : nullptr);  [in getOrCreateVRegs()]
     871  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);  [in translateLoad(), local]
     889  MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);  [in translateLoad()]
     891  MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);  [in translateLoad()]
     897  MinAlign(BaseAlign, Offsets[…  [in translateLoad()]
     915  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());  [in translateStore(), local]
     976  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);  [in translateExtractValue(), local]
    2106  valueIsSplit(const Value &V, SmallVectorImpl<uint64_t> *Offsets)  [in valueIsSplit(), argument]
     [all...]

  CallLowering.cpp
     140  SmallVector<uint64_t, 8> Offsets;  [in packRegs(), local]
     141  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);  [in packRegs()]
     148  MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);  [in packRegs()]
     163  SmallVector<uint64_t, 8> Offsets;  [in unpackRegs(), local]
     164  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);  [in unpackRegs()]
     168  MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);  [in unpackRegs()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Object/
  Archive.cpp
     801  const char *Offsets = Buf;  [in getMember(), local]
     803  Offsets += sizeof(uint64_t);  [in getMember()]
     805  Offsets += sizeof(uint32_t);  [in getMember()]
     808  Offset = read32be(Offsets + SymbolIndex * 4);  [in getMember()]
     810  Offset = read64be(Offsets + SymbolIndex * 8);  [in getMember()]
     813  // Offsets (the first uint32_t is the number of bytes of the ranlib  [in getMember()]
     818  Offset = read32le(Offsets + SymbolIndex * 8 + 4);  [in getMember()]
     821  // Offsets (the first uint64_t is the number of bytes of the ranlib_64  [in getMember()]
     826  Offset = read64le(Offsets + SymbolIndex * 16 + 8);  [in getMember()]
     848  Offset = read32le(Offsets…  [in getMember()]
     [all...]
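Archive::Symbol::getMember, shown above, advances an Offsets cursor past the symbol-table header and then reads a 32- or 64-bit entry with the endianness dictated by the archive kind (read32be, read64le, and so on). The helpers below show how such fixed-endianness reads can be written portably; the names readU32BE and readU64LE are mine, not the llvm::support::endian API.

```cpp
#include <cstdint>

// Decode a 32-bit big-endian value starting at P, byte by byte, so the result
// does not depend on the host's endianness.
static uint32_t readU32BE(const unsigned char *P) {
  return (uint32_t(P[0]) << 24) | (uint32_t(P[1]) << 16) |
         (uint32_t(P[2]) << 8) | uint32_t(P[3]);
}

// Decode a 64-bit little-endian value starting at P.
static uint64_t readU64LE(const unsigned char *P) {
  uint64_t V = 0;
  for (int I = 7; I >= 0; --I)
    V = (V << 8) | P[I];  // byte 7 is the most significant in little-endian
  return V;
}

// Usage mirroring the hits above: the entry for symbol SymbolIndex in a
// 32-bit big-endian table would be readU32BE(Offsets + SymbolIndex * 4).
```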
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/DebugInfo/DWARF/
  DWARFListTable.cpp
      85  Offsets.push_back(Data.getRelocatedValue(OffsetByteSize, OffsetPtr));  [in extract()]
     102  for (const auto &Off : Offsets) {  [in dump()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/NVPTX/
  NVPTXISelLowering.cpp
     165  SmallVectorImpl<uint64_t> *Offsets = nullptr,  [in ComputePTXValueVTs()]
     175  if (Offsets) {  [in ComputePTXValueVTs()]
     176  Offsets->push_back(StartingOffset + 0);  [in ComputePTXValueVTs()]
     177  Offsets->push_back(StartingOffset + 8);  [in ComputePTXValueVTs()]
     188  ComputePTXValueVTs(TLI, DL, EI, ValueVTs, Offsets,  [in ComputePTXValueVTs()]
     213  if (Offsets)  [in ComputePTXValueVTs()]
     214  Offsets->push_back(Off + j * EltVT.getStoreSize());  [in ComputePTXValueVTs()]
     218  if (Offsets)  [in ComputePTXValueVTs()]
     219  Offsets->push_back(Off);  [in ComputePTXValueVTs()]
     236  const SmallVectorImpl<uint64_t> &Offsets, unsigne…  [in CanMergeParamLoadStoresStartingAt()]
     234  CanMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs, const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment)  [in CanMergeParamLoadStoresStartingAt(), argument]
     298  VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs, const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment)  [in VectorizePTXValueVTs(), argument]
    1466  SmallVector<uint64_t, 16> Offsets;  [in LowerCall(), local]
    1583  ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);  [in LowerCall(), local]
    1767  SmallVector<uint64_t, 16> Offsets;  [in LowerCall(), local]
    2558  SmallVector<uint64_t, 16> Offsets;  [in LowerFormalArguments(), local]
    2676  SmallVector<uint64_t, 16> Offsets;  [in LowerReturn(), local]
     [all...]
/third_party/skia/src/sfnt/
  SkOTTable_loca.h
      23  union Offsets {  [union]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/
  SROA.cpp
    3718  auto &Offsets = SplitOffsetsMap[I];
    3719  assert(Offsets.Splits.empty() &&
    3721  Offsets.S = &S;
    3722  Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
    3732  auto &Offsets = SplitOffsetsMapI->second;
    3734  assert(Offsets.S == S && "Found a mismatched slice!");
    3735  assert(!Offsets.Splits.empty() &&
    3737  assert(Offsets.Splits.back() ==
    3738  P.beginOffset() - Offsets.S->beginOffset() &&
    3744  Offsets…
     [all...]
/third_party/skia/third_party/externals/abseil-cpp/absl/container/internal/
  layout_test.cc
     240  TEST(Layout, Offsets) {  [in TEST()]
     243  EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));  [in TEST()]
     244  EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0));  [in TEST()]
     245  EXPECT_THAT(L(3).Offsets(), ElementsAre(0));  [in TEST()]
     249  EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));  [in TEST()]
     250  EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12));  [in TEST()]
     251  EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12));  [in TEST()]
     252  EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12));  [in TEST()]
     256  EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));  [in TEST()]
     257  EXPECT_THAT(L::Partial(1).Offsets(), ElementsAr…  [in TEST()]
     [all...]
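The absl layout tests above assert on Offsets(), the starting offset of each element array when several arrays of different types are packed into one allocation. Without reproducing the absl::container_internal::Layout API, the sketch below computes the same kind of running, alignment-respecting offsets from (size, alignment, count) descriptions; the concrete numbers in the final comment are arithmetic for this sketch only.

```cpp
#include <cstddef>
#include <vector>

struct ArraySpec {
  size_t ElemSize;   // sizeof(T)
  size_t ElemAlign;  // alignof(T)
  size_t Count;      // number of elements in this array
};

// Offset of each array when the arrays are packed back to back in one buffer,
// each array starting at the next multiple of its element alignment.
static std::vector<size_t> computeOffsets(const std::vector<ArraySpec> &Arrays) {
  std::vector<size_t> Offsets;
  size_t Current = 0;
  for (const ArraySpec &A : Arrays) {
    // Round Current up to a multiple of A.ElemAlign.
    Current = (Current + A.ElemAlign - 1) / A.ElemAlign * A.ElemAlign;
    Offsets.push_back(Current);
    Current += A.ElemSize * A.Count;
  }
  return Offsets;
}

// E.g. {int32[3], float[5]} -> offsets {0, 12}: the 3 * 4 = 12 bytes of int32
// already end on a multiple of float's 4-byte alignment.
```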
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/
  ScheduleDAGSDNodes.cpp
     224  SmallVector<int64_t, 4> Offsets;  [in ClusterNeighboringLoads(), local]
     249  Offsets.push_back(Offset1);  [in ClusterNeighboringLoads()]
     251  Offsets.push_back(Offset2);  [in ClusterNeighboringLoads()]
     263  llvm::sort(Offsets);  [in ClusterNeighboringLoads()]
     268  int64_t BaseOff = Offsets[0];  [in ClusterNeighboringLoads()]
     271  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {  [in ClusterNeighboringLoads()]
     272  int64_t Offset = Offsets[i];  [in ClusterNeighboringLoads()]
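ClusterNeighboringLoads, seen above, collects the constant offsets of loads that share a base pointer, sorts them with llvm::sort, and then walks the sorted list from Offsets[0] to decide how many loads are close enough to schedule as one cluster. A hedged standalone sketch of that selection step follows; the 4-load and 64-byte limits are illustrative stand-ins for the target hooks the real code consults.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Given the offsets of loads from one base pointer, return how many of them,
// starting from the smallest offset, fall inside a small window and are
// therefore worth clustering together during scheduling.
static size_t countClusterableLoads(std::vector<int64_t> Offsets,
                                    size_t MaxLoads = 4,
                                    int64_t MaxSpread = 64) {
  if (Offsets.empty())
    return 0;
  std::sort(Offsets.begin(), Offsets.end());
  int64_t BaseOff = Offsets[0];
  size_t Num = 1;
  for (size_t I = 1; I < Offsets.size(); ++I) {
    if (Num >= MaxLoads || Offsets[I] - BaseOff > MaxSpread)
      break;  // too many loads already, or too far from the first one
    ++Num;
  }
  return Num;
}
```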
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/
  IceCfg.cpp
     832  CfgVector<int32_t> Offsets;  [in sortAndCombineAllocas(), local]
     845  Offsets.push_back(Target->getFramePointerOffset(CurrentOffset, Size));  [in sortAndCombineAllocas()]
     856  Offsets.push_back(CurrentOffset + OutArgsOffsetOrZero);  [in sortAndCombineAllocas()]
     864  assert(Allocas.size() == Offsets.size());  [in sortAndCombineAllocas()]
     872  Operand *AllocaOffset = Ctx->getConstantInt32(Offsets[i]);  [in sortAndCombineAllocas()]
     893  Dest->setRematerializable(getTarget()->getStackReg(), Offsets[i]);  [in sortAndCombineAllocas()]
     895  Dest->setRematerializable(getTarget()->getFrameReg(), Offsets[i]);  [in sortAndCombineAllocas()]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
  SIISelLowering.cpp
    6310  auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6315  Offsets.first, // voffset  [in LowerINTRINSIC_W_CHAIN()]
    6317  Offsets.second, // offset  [in LowerINTRINSIC_W_CHAIN()]
    6330  auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6335  Offsets.first, // voffset  [in LowerINTRINSIC_W_CHAIN()]
    6337  Offsets.second, // offset  [in LowerINTRINSIC_W_CHAIN()]
    6380  auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6386  Offsets.first, // voffset  [in LowerINTRINSIC_W_CHAIN()]
    6388  Offsets.second, // offset  [in LowerINTRINSIC_W_CHAIN()]
    6404  auto Offsets…  [in LowerINTRINSIC_W_CHAIN(), local]
    6510  auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6584  auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6676  auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6697  auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);  [in LowerINTRINSIC_W_CHAIN(), local]
    6888  auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);  [in LowerINTRINSIC_VOID(), local]
    6913  auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);  [in LowerINTRINSIC_VOID(), local]
    6992  auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);  [in LowerINTRINSIC_VOID(), local]
    7037  auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);  [in LowerINTRINSIC_VOID(), local]
    7185  setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG, SDValue *Offsets, unsigned Align) const  [in setBufferOffsets(), argument]
     [all...]
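Nearly every SIISelLowering.cpp hit calls splitBufferOffsets, which divides a combined buffer offset into the part that fits the instruction's immediate offset field and a remainder that has to travel in the voffset register operand. The arithmetic can be sketched without SelectionDAG; the 12-bit immediate width below is an assumption for the example, not a claim about any particular GPU encoding, and only the constant-offset case is shown.

```cpp
#include <cstdint>
#include <utility>

// Split Offset into {VOffset, ImmOffset}, where ImmOffset fits in an unsigned
// immediate field of ImmBits bits and VOffset carries whatever is left over.
static std::pair<uint64_t, uint64_t> splitOffset(uint64_t Offset,
                                                 unsigned ImmBits = 12) {
  const uint64_t MaxImm = (uint64_t(1) << ImmBits) - 1;
  uint64_t ImmOffset = Offset & MaxImm;   // low bits go into the immediate
  uint64_t VOffset = Offset - ImmOffset;  // the rest must live in a register
  return {VOffset, ImmOffset};
}
```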
H A D | SIISelLowering.cpp | 6310 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); in LowerINTRINSIC_W_CHAIN() local 6315 Offsets.first, // voffset in LowerINTRINSIC_W_CHAIN() 6317 Offsets.second, // offset in LowerINTRINSIC_W_CHAIN() 6330 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); in LowerINTRINSIC_W_CHAIN() local 6335 Offsets.first, // voffset in LowerINTRINSIC_W_CHAIN() 6337 Offsets.second, // offset in LowerINTRINSIC_W_CHAIN() 6380 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); in LowerINTRINSIC_W_CHAIN() local 6386 Offsets.first, // voffset in LowerINTRINSIC_W_CHAIN() 6388 Offsets.second, // offset in LowerINTRINSIC_W_CHAIN() 6404 auto Offsets in LowerINTRINSIC_W_CHAIN() local 6510 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); LowerINTRINSIC_W_CHAIN() local 6584 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); LowerINTRINSIC_W_CHAIN() local 6676 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); LowerINTRINSIC_W_CHAIN() local 6697 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); LowerINTRINSIC_W_CHAIN() local 6888 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); LowerINTRINSIC_VOID() local 6913 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); LowerINTRINSIC_VOID() local 6992 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); LowerINTRINSIC_VOID() local 7037 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); LowerINTRINSIC_VOID() local 7185 setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG, SDValue *Offsets, unsigned Align) const setBufferOffsets() argument [all...] |