/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Support/Unicode.cpp
  345: for (size_t i = 0, e = Text.size(); i < e; i += Length) {   [in columnWidthUTF8()]
  347: if (Length <= 0 || i + Length > Text.size())   [in columnWidthUTF8()]

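The Unicode.cpp hits show the guard LLVM uses when stepping through a UTF-8 string one code point at a time: each iteration decodes the byte length of the current sequence and bails out if that length is non-positive or would run past the end of the text. A minimal sketch of the same pattern, with a hypothetical sequenceLength() helper standing in for LLVM's own UTF-8 decoder:

    #include <cstddef>
    #include <string>

    // Hypothetical helper: byte length of a UTF-8 sequence, from its lead byte.
    static int sequenceLength(unsigned char lead) {
      if (lead < 0x80) return 1;          // 0xxxxxxx: ASCII
      if ((lead >> 5) == 0x06) return 2;  // 110xxxxx
      if ((lead >> 4) == 0x0E) return 3;  // 1110xxxx
      if ((lead >> 3) == 0x1E) return 4;  // 11110xxx
      return -1;                          // continuation or invalid lead byte
    }

    // Walk Text one sequence at a time, mirroring the guard in
    // columnWidthUTF8(): reject a non-positive length or a sequence
    // that would overrun the string.
    static bool walkUTF8(const std::string &Text) {
      for (size_t i = 0, e = Text.size(); i < e;) {
        int Length = sequenceLength(static_cast<unsigned char>(Text[i]));
        if (Length <= 0 || i + Length > Text.size())
          return false;  // malformed input
        i += Length;
      }
      return true;
    }
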
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
  331: if (Reg + StartBit >= RegsUsed.size())
  475: // SGPR tuples must be aligned, so step is size in banks it
  535: for (unsigned I = 0, E = OperandMasks.size(); I + 1 < E; ++I) {

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
  291: if (Args.size() >= 1) {   [in getMips16HelperFunctionStubNumber()]
  301: if (Args.size() >=2) {   [in getMips16HelperFunctionStubNumber()]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
   29: cl::desc("The loop prefetch cache line size"));
  224: for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {   [in mightUseCTR()]
  227: for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)   [in mightUseCTR()]
  636: // Check first if the user specified a custom line size.   [in getCacheLineSize()]
  640: // On P7, P8 or P9 we have a cache line size of 128.   [in getCacheLineSize()]
  642: // Assume that Future CPU has the same cache line size as the others.   [in getCacheLineSize()]

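The getCacheLineSize() hits outline a simple precedence rule: an explicit command-line value (the cl::opt declared near line 29) wins, otherwise the target picks a per-CPU default, 128 bytes for P7/P8/P9 and the same for future CPUs. A hedged sketch of that lookup; the PPCGen enum, the std::optional argument standing in for the cl::opt, and the 64-byte generic fallback are all assumptions for illustration:

    #include <optional>

    enum class PPCGen { P7, P8, P9, Future, Other };

    // Sketch of the precedence in getCacheLineSize(): a user-specified
    // override beats the per-CPU default.
    unsigned cacheLineSize(std::optional<unsigned> UserOverride, PPCGen Gen) {
      if (UserOverride)     // "Check first if the user specified a custom line size."
        return *UserOverride;
      switch (Gen) {
      case PPCGen::P7:
      case PPCGen::P8:
      case PPCGen::P9:      // "On P7, P8 or P9 we have a cache line size of 128."
      case PPCGen::Future:  // "Assume that Future CPU has the same cache line size."
        return 128;
      default:
        return 64;          // assumed generic default, not taken from the hits
      }
    }
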
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
  103: /// Break a copy of size Size to smaller copies.
  495: assert(Size == 0 && "Wrong size division");   [in buildCopies()]
  650: if (BlockingStoresDispSizeMap.size() <= 1)   [in removeRedundantBlockingStores()]
  657: while (DispSizeStack.size()) {   [in removeRedundantBlockingStores()]

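The buildCopies() assertion implies the splitting loop consumes Size exactly: each iteration peels off the largest supported chunk, and whatever remains at the end must be zero. A minimal sketch of that decomposition; the chunk table is illustrative, not the pass's actual copy widths:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Break a copy of Size bytes into smaller copies, largest chunks first.
    // Mirrors the invariant behind assert(Size == 0 && "Wrong size division"):
    // the chunk sizes must tile Size exactly.
    std::vector<uint64_t> splitCopy(uint64_t Size) {
      static const uint64_t Chunks[] = {16, 8, 4, 2, 1};  // illustrative sizes
      std::vector<uint64_t> Copies;
      for (uint64_t C : Chunks) {
        while (Size >= C) {
          Copies.push_back(C);
          Size -= C;
        }
      }
      assert(Size == 0 && "Wrong size division");
      return Copies;
    }

Because the table ends at 1 byte the remainder always reaches zero here; the real pass asserts because its chunk set is fixed and a leftover would mean a miscomputed division.
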
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/X86WinEHState.cpp
  486: Args.push_back(Builder.getInt32(OptionalArgs.size()));   [in rewriteSetJmpCallSite()]
  517: assert(BBColors.size() == 1 && "multi-color BB not removed by preparation");   [in getBaseStateForBB()]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
  287: unsigned getSize() const { return PartitionContainer.size(); }   [in getSize()]
  510: unsigned N = RtPtrCheck->Pointers.size();   [in computePartitionSetForPointers()]

/third_party/spirv-headers/tools/buildHeaders/jsoncpp/dist/json/json.h
   622: * size of this class, so we have nowhere to store the length,
   632: Value(const std::string& value); ///< Copy data() til size(). Embedded zeroes too.
   698: ArrayIndex size() const;
   712: /// Resize the array to size elements.
   717: void resize(ArrayIndex size);
   722: /// in the array so that its size is index+1.
   730: /// in the array so that its size is index+1.
   749: /// Return true if index < size().
   753: /// Equivalent to jsonvalue[jsonvalue.size()] = value;
  1336: * Value offset exceeds the document size
  [all...]

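The json.h hits spell out jsoncpp's documented array-growth contract: resize() sets the element count directly, assigning through operator[] past the end grows the array so that its size becomes index+1, and append() is equivalent to assigning at size(). A short usage sketch of that contract, assuming the standard jsoncpp headers:

    #include <json/json.h>
    #include <cassert>

    int main() {
      Json::Value arr(Json::arrayValue);
      arr.resize(3);               // "Resize the array to size elements."
      assert(arr.size() == 3);

      arr[5] = "x";                // grows the array "so that its size is index+1"
      assert(arr.size() == 6);

      arr.append(42);              // "Equivalent to jsonvalue[jsonvalue.size()] = value;"
      assert(arr.size() == 7);
      assert(arr.isValidIndex(6)); // "Return true if index < size()."
      return 0;
    }
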
/third_party/skia/third_party/externals/icu/source/common/rbbi.cpp
  1198: int32_t i = gLanguageBreakFactories->size();   [in getLanguageBreakEngineFromFactory()]
  1231: int32_t i = fLanguageBreakEngines->size();   [in getLanguageBreakEngine()]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
  700: /// have a class smaller than before and of size less than \p MinNumRegs.
  747: /// or size assigned yet. This is only allowed to be used
  754: unsigned getNumVirtRegs() const { return VRegInfo.size(); }   [in getNumVirtRegs()]
  794: unsigned BestHint = (RegAllocHints[VReg.id()].second.size() ?   [in getRegAllocationHint()]

/third_party/vixl/src/aarch64/registers-aarch64.h
  115: // For scalar registers, the lane size matches the register size, and is
  137: // Registers without a known size report kUnknownSize.
  150: // Take advantage of the size encoding to calculate this efficiently.
  237: // Z registers are valid with or without a lane size, so we don't need to
  245: // P registers are valid with or without a lane size, so we don't need to
  271: // Two registers with unknown size are considered to have the same size if
  273: // size, even though we don't know what that is.
  289: // For core (W, X) and FP/NEON registers, we only consider the code, size
  [all...]

/third_party/skia/third_party/externals/dawn/src/tests/unittests/validation/StorageTextureValidationTests.cpp
   99: descriptor.size = {16, 16, arrayLayerCount};   [in CreateTexture()]
  526: descriptor.size = 1024;   [in TEST_F()]

/third_party/protobuf/ruby/ext/google/protobuf_c/message.c
  70: msg = (void*)ALLOC_N(uint8_t, sizeof(MessageHeader) + desc->layout->size);   [in Message_alloc()]
  73: memcpy(Message_data(msg), desc->layout->empty_template, desc->layout->size);   [in Message_alloc()]

/third_party/python/Lib/test/test_binascii.py
  447: @bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
  448: def test_big_buffer(self, size):

/third_party/protobuf/src/google/protobuf/message.h
   223: // but classes optimized for code size may be happy with keeping them. See
   334: // update the cached size. If you override ByteSize(), you do not need
   340: virtual void SetCachedSize(int size) const;
  1116: RegisterAllTypesInternal(const Metadata* file_level_metadata, int size);

/third_party/protobuf/src/google/protobuf/stubs/map_util.h
  715: // given size (unless it's already at least that big). Because of this,   [in AppendKeysFromMap()]
  716: // the use case that involves appending a lot of small maps (total size   [in AppendKeysFromMap()]
  723: key_container->reserve(map_container.size());   [in AppendKeysFromMap()]
  758: value_container->reserve(map_container.size());   [in AppendValuesFromMap()]

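The map_util.h hits show the reserve-before-append pattern: size the destination once from map_container.size(), then copy every key (or value). A generic sketch of the same helper; the template shape is assumed, and protobuf's real header carries extra overloads and the small-maps caveat quoted above:

    #include <map>
    #include <string>
    #include <vector>

    // Reserve the destination from the map's size, then append each key,
    // as in the hit at line 723. reserve() only ever grows capacity
    // ("unless it's already at least that big"), per the original comment.
    template <class MapContainer, class KeyContainer>
    void AppendKeysFromMap(const MapContainer &map_container,
                           KeyContainer *key_container) {
      key_container->reserve(map_container.size());
      for (const auto &entry : map_container)
        key_container->push_back(entry.first);
    }

    int main() {
      std::map<std::string, int> m = {{"a", 1}, {"b", 2}};
      std::vector<std::string> keys;
      AppendKeysFromMap(m, &keys);  // keys == {"a", "b"}
      return 0;
    }
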
/third_party/skia/third_party/externals/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc
  219: // the following code grows the buffer size from 2x the format string   [in FormatTM()]
  222: std::size_t buf_size = fmt.size() * i;   [in FormatTM()]
  336: result.reserve(format.size()); // A reasonable guess for the result size.   [in format()]

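The FormatTM() hits describe cctz's strategy for std::strftime, whose only failure signal is a zero return: start with a buffer of twice the format string's size and retry with growing multiples until the call succeeds. A sketch of that retry loop, with the growth bound illustrative:

    #include <ctime>
    #include <string>
    #include <vector>

    // Format tm into *result with strftime(), growing the buffer from 2x
    // the format string's size, as in FormatTM(). strftime() returns 0
    // when the buffer is too small, so retry with a larger one.
    bool FormatTM(std::string *result, const std::string &fmt,
                  const std::tm &tm) {
      for (std::size_t i = 2; i != 32; i *= 2) {
        std::size_t buf_size = fmt.size() * i;
        std::vector<char> buf(buf_size);
        std::size_t len = std::strftime(buf.data(), buf_size, fmt.c_str(), &tm);
        if (len != 0) {
          result->append(buf.data(), len);
          return true;
        }
      }
      return false;  // gave up; the bound of 32 is illustrative
    }

One wrinkle the sketch glosses over: a zero return is ambiguous when the expansion is legitimately empty (e.g. "%p" in some locales), so a production implementation needs a workaround for that case.
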
/third_party/rust/crates/rustix/src/io_uring.rs
    63: /// `io_uring_enter(fd, to_submit, min_complete, flags, arg, size)`—Initiate
    83: size: usize,   [in io_uring_enter()]
    91: size,   [in io_uring_enter()]
  1005: // Check that the size and alignment of a type match the `sys` bindings.   [in io_uring_layouts()]
  1050: // Check the size and alignment.   [in io_uring_layouts()]

/third_party/selinux/checkpolicy/checkpolicy.c
  118: #define FGETS(out, size, in) \
  120: if (fgets(out,size,in)==NULL) { \

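The checkpolicy.c hits show the opening of a convenience macro that wraps fgets() with fatal error handling; the matched lines cut off before the body. A plausible reconstruction of the shape such a macro takes, with the diagnostic text purely illustrative (checkpolicy's actual message and exit path may differ):

    #include <cstdio>
    #include <cstdlib>

    /* Wrap fgets() so a read failure aborts with a diagnostic instead of
     * letting a NULL result propagate into the parser. */
    #define FGETS(out, size, in)                          \
        do {                                              \
            if (fgets(out, size, in) == NULL) {           \
                fprintf(stderr, "fgets failed\n");        \
                exit(1);                                  \
            }                                             \
        } while (0)

The do/while(0) wrapper is the standard idiom that lets the macro be used like a single statement, including before an else.
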
/third_party/rust/crates/rustix/src/backend/linux_raw/net/syscalls.rs
  789: "unexpected getsockopt size"
  810: "unexpected getsockopt size"

/third_party/spirv-tools/source/opt/graphics_robust_access_pass.cpp
  129: // - Clamp coordinate to the image size returned by OpImageQuerySize   [in MakeRuntimeArrayLengthInst()]
  139: // query the runtime array size.
  731: for (uint32_t i = 0; i < ops.size() - 1; i++) {   [in MakeRuntimeArrayLengthInst()]
  784: uint32_t(struct_type->element_types().size() - 1);   [in MakeRuntimeArrayLengthInst()]
  861: // image "size", returned by OpImageQuerySize. (Note: OpImageQuerySizeLod   [in ClampCoordinateForImageTexelPointer()]
  877: // - The third component from the size query is the layer count.   [in ClampCoordinateForImageTexelPointer()]
  957: // size is the layer count. In the query, we have to accommodate folding   [in ClampCoordinateForImageTexelPointer()]

/third_party/spirv-tools/source/opt/ir_builder.h
  165: for (size_t i = 0; i < operands.size(); i++) {   [in AddNaryOp()]
  289: assert(incomings.size() % 2 == 0 && "A sequence of pairs is expected");   [in AddPhi()]
  375: // match the size of |ids|.

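The AddPhi() assertion shows how SPIRV-Tools flattens phi incomings into a single id vector: value ids and predecessor-label ids alternate, so the vector's length must be even. A small sketch of building such a pairs-flattened operand list; makePhiIncomings is a hypothetical helper, not part of the ir_builder.h API:

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Flatten (value id, predecessor block id) pairs into the single
    // vector AddPhi() expects; the even-length assert mirrors
    // "A sequence of pairs is expected".
    std::vector<uint32_t> makePhiIncomings(
        const std::vector<std::pair<uint32_t, uint32_t>> &pairs) {
      std::vector<uint32_t> incomings;
      incomings.reserve(pairs.size() * 2);
      for (const auto &p : pairs) {
        incomings.push_back(p.first);   // value defined in the predecessor
        incomings.push_back(p.second);  // predecessor basic-block label id
      }
      assert(incomings.size() % 2 == 0 && "A sequence of pairs is expected");
      return incomings;
    }
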
/third_party/vk-gl-cts/external/vulkancts/modules_no_buildgn/vulkan/ray_tracing/vktRayTracingBuildTests.cpp
  285: result->setInstanceCount(blases.size());
  289: for (size_t instanceNdx = 0; instanceNdx < blases.size(); ++instanceNdx)
  574: // In the case of AABB geometries, implementations may increase their size in   [in validateBuffer()]

/third_party/vk-gl-cts/external/vulkancts/modules_no_buildgn/vulkan/ray_tracing/vktRayTracingMemGuaranteeTests.cpp
  550: result->setInstanceCount(bottomLevelAccelerationStructures.size());   [in initTopAccelerationStructure()]
  552: for (size_t structNdx = 0; structNdx < bottomLevelAccelerationStructures.size(); ++structNdx)   [in initTopAccelerationStructure()]

/third_party/vk-gl-cts/external/vulkancts/modules_no_buildgn/vulkan/renderpass/vktDynamicRenderingRandomTests.cpp
  130: const VkDeviceSize bufferSize = vertices.size() * sizeof(vertices[0]);   [in DynamicRenderingTestInstance()]
  477: // Use a render area with an even size to make the margin around the quad symmetrical.   [in iterate()]
  790: static_cast<deUint32>(cmdBufferHandles.size()), // deUint32 commandBufferCount   [in iterate()]