/third_party/ltp/tools/sparse/sparse-src/
  flow.c
    417  unsigned int a_size = a->size;  [in overlapping_memop()]
    418  unsigned int b_size = b->size;  [in overlapping_memop()]
    429  return a->offset == b->offset && a->size == b->size;  [in same_memop()]
    540  int offset = insn->offset, bit = bytes_to_bits(offset) + insn->size;  [in check_access()]
    813  insn->size = 0;  [in convert_to_jump()]

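The flow.c hits all compare memory operations by their offset and size. As a hedged illustration of that idea (not sparse's actual code; the struct and the overlap rule below are assumptions, though the equality check mirrors line 429), a minimal sketch assuming offsets and sizes share one unit:

```cpp
#include <cstdint>

// Hypothetical access descriptor; sparse's real instruction struct differs.
struct MemOp {
    int64_t offset;  // start of the access
    int64_t size;    // extent of the access, in the same unit as offset
};

// Two accesses overlap if their half-open ranges [offset, offset + size) intersect.
bool overlapping_memop(const MemOp& a, const MemOp& b) {
    return a.offset < b.offset + b.size &&
           b.offset < a.offset + a.size;
}

// Identical accesses: same start and same extent (as on line 429 above).
bool same_memop(const MemOp& a, const MemOp& b) {
    return a.offset == b.offset && a.size == b.size;
}
```
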
/third_party/node/deps/v8/src/compiler/backend/
  register-allocator-verifier.cc
    72   constraints_.reserve(sequence->instructions().size());  [in RegisterAllocatorVerifier()]
    132  CHECK(sequence()->instructions().size() == constraints()->size());  [in VerifyAssignment()]
    395  // The phi size test below is because we can, technically, have phi  [in CreateForBlock()]
    397  } else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {  [in CreateForBlock()]
    458  CHECK(origin->PredecessorCount() > 1 || origin->phis().size() > 0);  [in ValidatePendingAssessment()]
    552  const size_t block_count = sequence()->instruction_blocks().size();  [in VerifyGapMoves()]

/third_party/node/deps/v8/src/compiler/backend/riscv64/
  instruction-scheduler-riscv64.cc
    1056  int ExtractBitsLatency(bool sign_extend, int size) {  [in ExtractBitsLatency(), argument]
    1059  switch (size) {  [in ExtractBitsLatency()]
    1076  int Word32AtomicExchangeLatency(bool sign_extend, int size) {  [in Word32AtomicExchangeLatency(), argument]
    1078  ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +  [in Word32AtomicExchangeLatency()]
    1082  int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {  [in Word32AtomicCompareExchangeLatency(), argument]
    1084  ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +  [in Word32AtomicCompareExchangeLatency()]

/third_party/node/deps/v8/src/heap/
  new-spaces.cc
    451  size_t size = to_space_.CommittedPhysicalMemory();  [in CommittedPhysicalMemory(), local]
    453  size += from_space_.CommittedPhysicalMemory();  [in CommittedPhysicalMemory()]
    455  return size;  [in CommittedPhysicalMemory()]
    496  // Double the semispace size but only up to maximum capacity.  [in Grow()]
    763  int size = object.Size(cage_base);  [in Verify(), local]
    764  object.IterateBody(map, size, &visitor);  [in Verify()]
    773  current += size;  [in Verify()]

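The Grow() hit points at the classic capped-doubling policy for the new-space semispaces. A hedged, standalone sketch of that policy only (not V8's implementation; the function and parameter names below are placeholders):

```cpp
#include <algorithm>
#include <cstddef>

// Double the capacity, but never beyond the configured maximum.
// V8's Grow() applies the same idea to its SemiSpace objects.
size_t GrownCapacity(size_t current_capacity, size_t maximum_capacity) {
    return std::min(2 * current_capacity, maximum_capacity);
}
```

Doubling keeps the number of growth steps logarithmic in the final size, while the cap bounds total memory use.
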
/third_party/node/deps/v8/src/strings/
  unicode.cc
    55   static bool LookupPredicate(const int32_t* table, uint16_t size, uchar chr) {  [in LookupPredicate(), argument]
    59   unsigned int high = size - 1;  [in LookupPredicate()]
    66   (mid + 1 == size ||  [in LookupPredicate()]
    105  static int LookupMapping(const int32_t* table, uint16_t size,  [in LookupMapping(), argument]
    113  unsigned int high = size - 1;  [in LookupMapping()]
    120  (mid + 1 == size ||  [in LookupMapping()]

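Both LookupPredicate() and LookupMapping() search a sorted table with `high = size - 1` as the upper bound, i.e. a binary search over table entries. A minimal, hedged sketch of that lookup pattern follows; it is generic, whereas the real unicode.cc tables pack range and flag information into each int32_t entry:

```cpp
#include <cstdint>

// Return true if `key` appears in a sorted table of `size` entries.
// Plain lower-bound binary search; illustrative only.
static bool LookupSorted(const int32_t* table, uint16_t size, int32_t key) {
    if (size == 0) return false;
    unsigned int low = 0;
    unsigned int high = size - 1;
    while (low < high) {
        const unsigned int mid = low + (high - low) / 2;
        if (table[mid] < key) {
            low = mid + 1;   // key is strictly above mid
        } else {
            high = mid;      // key is at or below mid
        }
    }
    return table[low] == key;
}
```
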
/third_party/mesa3d/src/gallium/drivers/freedreno/
  freedreno_gmem.c
    86   * key, only the size per pixel. And the max_scissor bounds is not
    96   uint8_t gmem_page_align; /* alignment in multiples of 0x1000 to reduce key size */
    143  unsigned size = gmem->cbuf_cpp[i] * gmem->bin_w * gmem->bin_h;  [in dump_gmem_state(), local]
    144  printf(" cbuf[%d]: base=0x%06x, size=0x%x, cpp=%u\n", i,  [in dump_gmem_state()]
    145  gmem->cbuf_base[i], size, gmem->cbuf_cpp[i]);  [in dump_gmem_state()]
    147  total = gmem->cbuf_base[i] + size;  [in dump_gmem_state()]
    154  unsigned size = gmem->zsbuf_cpp[i] * gmem->bin_w * gmem->bin_h;  [in dump_gmem_state(), local]
    155  printf(" zsbuf[%d]: base=0x%06x, size=0x%x, cpp=%u\n", i,  [in dump_gmem_state()]
    156  gmem->zsbuf_base[i], size, gmem->zsbuf_cpp[i]);  [in dump_gmem_state()]
    158  total = gmem->zsbuf_base[i] + size;  [in dump_gmem_state()]
    [all...]

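The dump_gmem_state() hits show how each tile buffer is sized: bytes-per-pixel times the bin dimensions, with each buffer placed at a base offset. A hedged sketch of that arithmetic (the struct below is a simplification; the real fd_gmem_stateobj carries more fields such as alignment and scissor bounds):

```cpp
// Simplified view of one color or depth/stencil buffer in tile memory.
struct GmemBuffer {
    unsigned base;  // offset of the buffer within GMEM
    unsigned cpp;   // bytes per pixel
};

// Bytes needed for one buffer covering a bin of bin_w x bin_h pixels,
// mirroring: size = cpp * bin_w * bin_h.
unsigned GmemBufferSize(const GmemBuffer& buf, unsigned bin_w, unsigned bin_h) {
    return buf.cpp * bin_w * bin_h;
}

// End of the buffer (base + size): the running total used when buffers
// are laid out back to back, as dump_gmem_state() prints.
unsigned GmemBufferEnd(const GmemBuffer& buf, unsigned bin_w, unsigned bin_h) {
    return buf.base + GmemBufferSize(buf, bin_w, bin_h);
}
```
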
/third_party/mesa3d/src/nouveau/codegen/
  nv50_ir_target_nvc0.cpp
    48   TargetNVC0::getBuiltinCode(const uint32_t **code, uint32_t *size) const  [in getBuiltinCode()]
    54   *size = sizeof(gk104_builtin_code);  [in getBuiltinCode()]
    61   *size = sizeof(gk110_builtin_code);  [in getBuiltinCode()]
    65   *size = sizeof(gf100_builtin_code);  [in getBuiltinCode()]
    168  void TargetNVC0::initProps(const struct nvc0_opProperties *props, int size)
    170  for (int i = 0; i < size; ++i) {

/third_party/mesa3d/src/gallium/auxiliary/gallivm/
  lp_bld_format_aos.c
    129  if (desc->channel[chan].size != type.width) {  [in format_matches_type()]
    243  * matches floating point size */  [in lp_build_unpack_arith_rgba_aos()]
    265  unsigned bits = desc->channel[i].size;  [in lp_build_unpack_arith_rgba_aos()]
    319  unsigned bits = desc->channel[i].size;  [in lp_build_unpack_arith_rgba_aos()]
    402  unsigned bits = desc->channel[i].size;  [in lp_build_pack_rgba_aos()]
    563  mask = (1 << format_desc->channel[j].size) - 1;  [in lp_build_fetch_rgba_aos()]
    576  chans[j] = scale_bits_up(gallivm, format_desc->channel[j].size,  [in lp_build_fetch_rgba_aos()]

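The lp_build_fetch_rgba_aos() hits use the standard `(1 << bits) - 1` mask to pull an n-bit channel out of a packed pixel, then widen it. Below is a hedged scalar sketch of those two steps; gallivm itself emits LLVM IR for this rather than running it on the CPU, so the functions here are stand-ins, not the library's API:

```cpp
#include <cstdint>

// Extract an n-bit channel starting at `shift` from a packed pixel
// (assumes bits < 32).
uint32_t extract_channel(uint32_t packed, unsigned shift, unsigned bits) {
    const uint32_t mask = (1u << bits) - 1u;   // e.g. bits = 5 -> 0x1f
    return (packed >> shift) & mask;
}

// Widen an n-bit value to dst_bits by replicating its bit pattern,
// the usual way e.g. 5-bit color channels are expanded to 8 bits.
// A scalar analogue of what the real scale_bits_up() builds as IR.
uint32_t scale_bits_up(uint32_t value, unsigned src_bits, unsigned dst_bits) {
    uint32_t result = value << (dst_bits - src_bits);
    for (unsigned filled = src_bits; filled < dst_bits; filled += src_bits)
        result |= result >> filled;   // copy the pattern into the low bits
    return result;
}
```

Replication (rather than a plain shift) maps the maximum source value to the maximum destination value, which is why it is preferred for color channel expansion.
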
/third_party/mesa3d/src/gallium/drivers/r600/sb/
  sb_gcm.cpp
    375  cnt_ready[sq] = bu_ready[sq].size();  [in bu_sched_bb()]
    408  if (n->is_fetch_inst() && n->src.size() == 12) {  [in bu_sched_bb()]
    431  (bu_ready[SQ_TEX].size() || bu_ready[SQ_VTX].size() || bu_ready[SQ_GDS].size())) {  [in bu_sched_bb()]
    501  if (ucs_level == nuc_stk.size()) {  [in push_uc_stack()]

/third_party/mesa3d/src/gallium/drivers/nouveau/nvc0/
  nvc0_vbo_translate.c
    48   ctx->vertex_size = nvc0->vertex->size;  [in nvc0_push_context_init()]
    225  const unsigned size = count * nvc0->vertex->size;  [in nvc0_push_setup_vertex_array(), local]
    227  void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);  [in nvc0_push_setup_vertex_array()]
    237  PUSH_DATAh(push, va + size - 1);  [in nvc0_push_setup_vertex_array()]
    238  PUSH_DATA (push, va + size - 1);  [in nvc0_push_setup_vertex_array()]

/third_party/libphonenumber/metadata/src/main/java/com/google/i18n/phonenumbers/metadata/
  RangeTree.java
    182   for (int n = 0; n < edges.size(); n++) {  [in Node()]
    634   // Add this digit to a new entry in the input map (and increase the map size).  [in recurse()]
    660   for (idx = 0; idx < out.size(); idx++) {  [in recurse()]
    669   if (idx == out.size()) {  [in recurse()]
    987   * {@code 0 <= index < size()}). Note that this method makes no promises about the specific
    1001  if (index < 0 || index >= size()) {  [in sample()]
    1003  String.format("index (%d) out of bounds [0...%d]", index, size()));  [in sample()]
    1050  public long size() {  [in size(), method of RangeTree]

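The sample() hits show a bounds check against size() with a descriptive error message before the actual lookup. A hedged C++ sketch of that convention (kept in C++ for consistency with the other sketches; the real Java method walks a digit trie, so the vector here is only a stand-in):

```cpp
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Reject any index outside [0, size()) with a message that names both the
// offending index and the valid range, in the spirit of RangeTree.sample().
long checked_sample(const std::vector<long>& values, long index) {
    const long size = static_cast<long>(values.size());
    if (index < 0 || index >= size) {
        throw std::out_of_range("index (" + std::to_string(index) +
                                ") out of bounds [0..." +
                                std::to_string(size) + "]");
    }
    return values[static_cast<std::size_t>(index)];
}
```
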
/third_party/rust/crates/libc/src/unix/linux_like/linux/gnu/b64/mips64/
  mod.rs
    198  size: [
    206  size: [
    214  size: [
    222  size: [
    230  size: [
    238  size: [

/third_party/rust/crates/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/
  mod.rs
    203  size: [
    211  size: [
    219  size: [
    227  size: [
    235  size: [
    243  size: [

/third_party/python/Lib/
  ftplib.py
    343  connection and the expected size of the transfer. The
    344  expected size may be None if it could not be determined.
    351  size = None
    388  size = parse150(resp)
    389  return conn, size
    488  blocksize: The maximum data size to read from fp and send over
    575  of information desired (e.g. ["type", "size", "perm"]).
    627  def size(self, filename):  [member of class FTP]
    628  '''Retrieve the size of a file.'''
    793  conn, size
    [all...]

/third_party/python/Lib/asyncio/
  sslproto.py
    169  """Return the current size of the write buffers."""
    199  """Return the current size of the read buffer."""
    261  max_size = 256 * 1024  # Buffer size passed to read()
    817  size = self._get_write_buffer_size()
    818  if size >= self._outgoing_high_water and not self._app_writing_paused:
    831  elif size <= self._outgoing_low_water and self._app_writing_paused:
    875  size = self._get_read_buffer_size()
    876  if size >= self._incoming_high_water and not self._ssl_reading_paused:
    879  elif size <= self._incoming_low_water and self._ssl_reading_paused:

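The sslproto.py hits show the usual high/low water-mark hysteresis: pause the producer once the buffered size crosses the high mark, and resume it only after it drains below the low mark. A hedged C++ sketch of that pattern alone (asyncio's implementation additionally notifies the protocol and tracks SSL-specific state; the class and method names below are illustrative):

```cpp
#include <cstddef>

// Minimal flow-control state with high/low water-mark hysteresis.
class FlowControl {
public:
    FlowControl(size_t low_water, size_t high_water)
        : low_water_(low_water), high_water_(high_water) {}

    // Call whenever the buffered byte count changes.
    void Update(size_t buffered) {
        if (buffered >= high_water_ && !paused_) {
            paused_ = true;    // tell the producer to stop writing
        } else if (buffered <= low_water_ && paused_) {
            paused_ = false;   // buffer drained enough, resume writing
        }
    }

    bool paused() const { return paused_; }

private:
    size_t low_water_;
    size_t high_water_;
    bool paused_ = false;
};
```

The gap between the two marks prevents rapid pause/resume oscillation when the buffer hovers near a single threshold.
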
/third_party/vk-gl-cts/external/vulkancts/framework/vulkan/
  vkPrograms.cpp
    141  const bool ok = optimizer.Run(binary.data(), binary.size(), &binary, optimizer_options);  [in optimizeCompiledBinary()]
    152  return new ProgramBinary(PROGRAM_FORMAT_SPIRV, binary.size()*sizeof(deUint32), (const deUint8*)&binary[0]);  [in createProgramBinaryFromSpirV()]
    163  if (!validateSpirV(binary.size(), &binary[0], &validationLog, options))  [in validateCompiledBinary()]
    176  if (!validateSpirV(binary.size(), &binary[0], &validationLog, options))  [in validateCompiledBinary()]
    488  if (ok) ok = fread(&temp, 1, 4, file) == 4; // Chunk size (skip)  [in shadercacheLoad()]
    551  if (ok) ok = fread(&temp, 1, 4, file) == 4; // Chunk size (skip)  [in shadercacheSave()]
    704  stripSpirVDebugInfo(nonStrippedBinary.size(), &nonStrippedBinary[0], &binary);  [in buildProgram()]
    792  stripSpirVDebugInfo(nonStrippedBinary.size(), &nonStrippedBinary[0], &binary);  [in buildProgram()]

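The shader-cache hits read a 4-byte chunk size from the cache file and discard it. A hedged sketch of that read-and-skip step, plus a variant that uses the size to seek past the payload; the on-disk layout is not shown in these hits, so the format details and helper names below are assumptions:

```cpp
#include <cstdint>
#include <cstdio>

// Read a 4-byte chunk size from `file` into a native integer.
// Returns false on a short read, mirroring the fread(...) == 4 checks above.
bool readChunkSize(FILE* file, uint32_t* outSize) {
    return fread(outSize, 1, 4, file) == 4;
}

// Skip one size-prefixed chunk: read the length, then seek past the payload.
bool skipChunk(FILE* file) {
    uint32_t size = 0;
    if (!readChunkSize(file, &size))
        return false;
    return fseek(file, static_cast<long>(size), SEEK_CUR) == 0;
}
```
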
/third_party/vk-gl-cts/external/vulkancts/modules/vulkan/geometry/
  vktGeometryBasicGeometryShaderTests.cpp
    117  bufferSize, // VkDeviceSize size;  [in uploadImage()]
    524  const VkDeviceSize indexBufferSize = m_indices.size() * sizeof(deUint16);  [in createIndicesBuffer()]
    530  indexBufferSize, // VkDeviceSize size;  [in createIndicesBuffer()]
    551  vk.cmdDrawIndexed(cmdBuffer, static_cast<deUint32>(m_indices.size()), 1, 0, 0, 0);  [in drawCommand()]
    601  const int max_vertices = m_pattern.size() == 2 ? std::max(m_pattern[0], m_pattern[1]) : m_pattern[0];  [in initPrograms()]
    617  << " const highp float rowHeight = 2.0 / float(" << m_pattern.size() << ");\n"  [in initPrograms()]
    620  if (m_pattern.size() == 2)  [in initPrograms()]
    654  return new GeometryOutputCountTestInstance (context, VK_PRIMITIVE_TOPOLOGY_POINT_LIST, static_cast<int>(m_pattern.size()), getName());  [in createInstance()]

/third_party/vk-gl-cts/external/vulkancts/modules/vulkan/ray_tracing/
  vktRayTracingOpacityMicromapTests.cpp
    350  while (opacityMicromapData.size() < opacityMicromapBytes) {  [in iterate()]
    368  // Fill out VkMicromapUsageEXT with size information  [in iterate()]
    377  deMemset(data, 0, size_t(micromapDataBufferCreateInfo.size));  [in iterate()]
    389  for (size_t i = 0; i < opacityMicromapData.size(); i++) {  [in iterate()]
    398  // Query the size from the build info  [in iterate()]
    441  sizeInfo.micromapSize, // VkDeviceSize size;  [in iterate()]
    707  std::vector<deUint32> outputData(expectedOutputModes.size());  [in iterate()]
    714  for (size_t i = 0; i < outputData.size(); ++i)  [in iterate()]
    764  for (deUint32 bit = 0; bit < testFlagBitNames.size(); bit++)  [in createOpacityMicromapTests()]

/third_party/vk-gl-cts/external/vulkancts/modules/vulkan/shader_object/
  vktShaderObjectCreateTests.cpp
    265  std::vector<vk::VkShaderEXT> shadersSeparate (shaderCreateInfos.size());  [in iterate()]
    266  std::vector<vk::VkShaderEXT> shadersTogether (shaderCreateInfos.size());  [in iterate()]
    267  for (deUint32 i = 0; i < (deUint32)shaderCreateInfos.size(); ++i)  [in iterate()]
    271  vk.createShadersEXT(device, (deUint32)shaderCreateInfos.size(), &shaderCreateInfos[0], DE_NULL, &shadersTogether[0]);  [in iterate()]
    274  for (deUint32 i = 0; i < (deUint32)shaderCreateInfos.size(); ++i)  [in iterate()]
    282  log << tcu::TestLog::Message << "Data size of shader created separately is " << dataSizeSeparate << ", but data size of shader created in the same call with others is " << dataSizeTogether << tcu::TestLog::EndMessage;  [in iterate()]
    522  stage = stages[random.getUint32() % stages.size()];  [in iterate()]
    589  log << tcu::TestLog::Message << "Shader at index " << failIndex << "was created with size 0, but vkCreateShadersEXT returned " << result << tcu::TestLog::EndMessage;  [in iterate()]

/third_party/vk-gl-cts/external/vulkancts/modules_no_buildgn/vulkan/geometry/
  vktGeometryBasicGeometryShaderTests.cpp
    117  bufferSize, // VkDeviceSize size;  [in uploadImage()]
    524  const VkDeviceSize indexBufferSize = m_indices.size() * sizeof(deUint16);  [in createIndicesBuffer()]
    530  indexBufferSize, // VkDeviceSize size;  [in createIndicesBuffer()]
    551  vk.cmdDrawIndexed(cmdBuffer, static_cast<deUint32>(m_indices.size()), 1, 0, 0, 0);  [in drawCommand()]
    602  const int max_vertices = m_pattern.size() == 2 ? std::max(m_pattern[0], m_pattern[1]) : m_pattern[0];  [in initPrograms()]
    618  << " const highp float rowHeight = 2.0 / float(" << m_pattern.size() << ");\n"  [in initPrograms()]
    621  if (m_pattern.size() == 2)  [in initPrograms()]
    655  return new GeometryOutputCountTestInstance (context, VK_PRIMITIVE_TOPOLOGY_POINT_LIST, static_cast<int>(m_pattern.size()), getName());  [in createInstance()]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/
  Analysis.cpp
    181  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {  [in hasInlineAsmMemConstraint()]
    183  for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {  [in hasInlineAsmMemConstraint()]
    272  /// will record the smallest size attained.
    322  if (ValLoc.size() >= InsertLoc.size() &&  [in getNoopInput()]
    327  ValLoc.resize(ValLoc.size() - InsertLoc.size());  [in getNoopInput()]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/
  LegalizeTypes.cpp
    178   for (unsigned i = 0, e = NewNodes.size(); i != e; ++i) {  [in PerformExpensiveChecks()]
    483   // to see if they are new also. The depth of this walk is bounded by the size  [in AnalyzeNewNode()]
    840   /// Convert to an integer of the same size.
    847   /// Convert to a vector of integers of the same size.
    895   if (Results.size() == N->getNumValues() + 1 && LegalizeResult) {  [in CustomLowerNode()]
    905   assert(Results.size() == N->getNumValues() &&  [in CustomLowerNode()]
    907   for (unsigned i = 0, e = Results.size(); i != e; ++i) {  [in CustomLowerNode()]
    929   assert(Results.size() == N->getNumValues() &&  [in CustomWidenLowerNode()]
    931   for (unsigned i = 0, e = Results.size(); i != e; ++i) {  [in CustomWidenLowerNode()]
    1011  /// size o
    [all...]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/IR/
  ConstantsContext.h
    245  GetElementPtrConstantExpr *Result = new (IdxList.size() + 1)  [in Create()]
    393  if (Operands.size() != C->getNumOperands())  [in operator ==()]
    395  for (unsigned I = 0, E = Operands.size(); I != E; ++I)  [in operator ==()]
    408  return new (Operands.size()) ConstantClass(Ty, Operands);  [in create()]
    510  if (Ops.size() != CE->getNumOperands())  [in operator ==()]
    514  for (unsigned I = 0, E = Ops.size(); I != E; ++I)  [in operator ==()]

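The operator==() hits follow the usual two-step key comparison in a uniquing map: compare operand counts first, then the operands element by element. A hedged sketch of that shape (generic template; the real code compares a key's operand list against an existing ConstantExpr):

```cpp
#include <vector>

// Compare a key's operand list against an existing node: sizes first,
// then element-by-element. `Node` and `Operand` are stand-in types.
template <typename Node, typename Operand>
bool OperandsMatch(const std::vector<Operand>& Ops, const Node& N) {
    if (Ops.size() != N.getNumOperands())
        return false;
    for (unsigned I = 0, E = Ops.size(); I != E; ++I)
        if (Ops[I] != N.getOperand(I))
            return false;
    return true;
}
```

The cheap size check short-circuits most mismatches before the per-operand loop runs.
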
/third_party/skia/third_party/externals/spirv-tools/test/opt/loop_optimizations/
  unroll_simple.cpp
    1755  EXPECT_EQ(outer_loop.GetBlocks().size(), 9u);  [in TEST_F()]
    1757  EXPECT_EQ(inner_loop.GetBlocks().size(), 4u);  [in TEST_F()]
    1768  EXPECT_EQ(outer_loop.GetBlocks().size(), 25u);  [in TEST_F()]
    1799  EXPECT_EQ(outer_loop.GetBlocks().size(), 9u);  [in TEST_F()]
    1801  EXPECT_EQ(inner_loop.GetBlocks().size(), 4u);  [in TEST_F()]
    1812  EXPECT_EQ(outer_loop.GetBlocks().size(), 18u);  [in TEST_F()]

/third_party/skia/src/codec/
  SkJpegCodec.cpp
    433   bool SkJpegCodec::onDimensionsSupported(const SkISize& size) {  [in onDimensionsSupported(), argument]
    439   const unsigned int dstWidth = size.width();  [in onDimensionsSupported()]
    440   const unsigned int dstHeight = size.height();  [in onDimensionsSupported()]
    708   // block size. If this is the case, it will decrease the value of  [in onStartScanlineDecode()]
    802   // allocate memory as if the size of the Y plane is always the size of the  [in is_yuv_supported()]
    989   SkISize* size,  [in SkGetJpegInfo()]
    1020  if (size) {  [in SkGetJpegInfo()]
    1021  *size = {SkToS32(dinfo->image_width), SkToS32(dinfo->image_height)};  [in SkGetJpegInfo()]
    988   SkGetJpegInfo(const void* data, size_t len, SkISize* size, SkEncodedInfo::Color* colorType, SkEncodedOrigin* orientation)  [in SkGetJpegInfo(), argument]

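The SkGetJpegInfo() hits show the common optional out-parameter pattern: each result pointer is checked for null before being written, so callers can request only the fields they need. A hedged sketch of that convention (the ParseHeader/Dimensions names and placeholder values below are hypothetical, not Skia's API):

```cpp
#include <cstddef>
#include <cstdint>

struct Dimensions {
    int32_t width = 0;
    int32_t height = 0;
};

// Parse a (hypothetical) header and report only the fields the caller
// asked for: any out-parameter may be null and is then simply skipped.
bool ParseHeader(const void* data, size_t len,
                 Dimensions* size, int* colorType) {
    if (data == nullptr || len == 0)
        return false;             // nothing to parse
    if (size) {
        size->width = 640;        // placeholder values; a real parser
        size->height = 480;       // would decode them from `data`
    }
    if (colorType) {
        *colorType = 0;           // placeholder
    }
    return true;
}
```
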