/third_party/ltp/testcases/kernel/power_management/lib/
pm_sched_mc.py | 57 for i in range(0, cpu_count): 154 for i in range(0, cpu_count): 174 for i in range(0, cpu_count): 192 for i in range(0, cpu_count): 209 for i in range(0, len(siblings_list)): 244 for i in range(0, cpu_count): 465 for i in range(1, len(stats_stop[l])): 469 for i in range(1, len(stats_stop[l])): 474 for i in range(0, len(cpu_labels)): 479 for i in range( [all...]
/third_party/skia/third_party/externals/tint/fuzzers/tint_ast_fuzzer/
probability_context.cc | 36 std::pair<uint32_t, uint32_t> range) { in RandomFromRange() 37 assert(range.first <= range.second && "Range must be non-decreasing"); in RandomFromRange() 39 range.first, range.second + 1); // + 1 need since range is inclusive. in RandomFromRange() 35 RandomFromRange( std::pair<uint32_t, uint32_t> range) RandomFromRange() argument

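Note: the RandomFromRange() hits above treat the pair as an inclusive range, which is why the upper bound gets + 1 before being handed to the RNG (the source comment means "+ 1 needed"). A minimal C sketch of the same inclusive-range convention; random_from_range() is a hypothetical name, and rand() merely keeps the sketch self-contained:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical helper with the same inclusive-range convention: both
     * lo and hi are valid results, so the span is hi - lo + 1. rand() has
     * modulo bias for spans that do not divide RAND_MAX + 1. */
    static uint32_t random_from_range(uint32_t lo, uint32_t hi)
    {
        assert(lo <= hi && "Range must be non-decreasing");
        return lo + (uint32_t)(rand() % ((uint64_t)hi - lo + 1));
    }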
/kernel/linux/linux-5.10/arch/x86/kvm/
mtrr.c | 133 * - range, a range is covered in one memory cache type. 296 static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end) in var_mtrr_range() argument 300 *start = range->base & PAGE_MASK; in var_mtrr_range() 302 mask = range->mask & PAGE_MASK; in var_mtrr_range() 331 /* variable range MTRRs. */ in update_mtrr() 339 static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range) in var_mtrr_range_is_valid() 341 return (range->mask & (1 << 11)) != 0; in var_mtrr_range_is_valid() 467 struct kvm_mtrr_range *range; member 495 struct kvm_mtrr_range *range) in match_var_range() 494 match_var_range(struct mtrr_iter *iter, struct kvm_mtrr_range *range) match_var_range() argument [all...]
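Note: both kernel trees decode a variable-range MTRR the same way: the base MSR gives the start, the inverted page-frame bits of the mask MSR give the size, and bit 11 of the mask is the valid bit. A standalone sketch of that decode under the assumption of 4 KiB pages; in the kernel the + 1 cannot overflow because writing reserved mask bits raises #GP:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_MASK_4K   (~0xfffULL)   /* assuming 4 KiB pages */
    #define MTRR_VALID_BIT (1ULL << 11)  /* the V bit tested by var_mtrr_range_is_valid() */

    /* Decode one variable-range MTRR pair into a [start, end) byte range,
     * mirroring var_mtrr_range(): the mask selects which address bits must
     * match base, so the inverted mask spans the range size. */
    static bool decode_var_mtrr(uint64_t base, uint64_t mask,
                                uint64_t *start, uint64_t *end)
    {
        if (!(mask & MTRR_VALID_BIT))
            return false;                /* range disabled */
        *start = base & PAGE_MASK_4K;
        *end = (*start | ~(mask & PAGE_MASK_4K)) + 1;
        return true;
    }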
/kernel/linux/linux-6.6/arch/x86/kvm/
mtrr.c | 141 * - range, a range is covered in one memory cache type. 304 static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end) in var_mtrr_range() argument 308 *start = range->base & PAGE_MASK; in var_mtrr_range() 310 mask = range->mask & PAGE_MASK; in var_mtrr_range() 337 /* variable range MTRRs. */ in update_mtrr() 344 static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range) in var_mtrr_range_is_valid() 346 return (range->mask & (1 << 11)) != 0; in var_mtrr_range_is_valid() 462 struct kvm_mtrr_range *range; member 490 struct kvm_mtrr_range *range) in match_var_range() 489 match_var_range(struct mtrr_iter *iter, struct kvm_mtrr_range *range) match_var_range() argument [all...]
/third_party/skia/third_party/externals/sfntly/java/src/com/google/typography/font/sfntly/table/core/
OS2Table.java | 477 public static UnicodeRange range(int bit) { in range() method in OS2Table.UnicodeRange 486 long[] range = {range1, range2, range3, range4}; in asSet() 496 if ((range[rangeIndex] & 1 << rangeBit) == 1 << rangeBit) { in asSet() 504 long[] range = new long[4]; in asArray() 510 range[urSegment] |= urFlag; in asArray() 512 return range; in asArray() 675 public static UnicodeRange range(int bit) { in range() method in OS2Table.CodePageRange 684 long[] range = {range1, range2}; in asSet() 694 if ((range[rangeIndex] & 1 << rangeBit) == 1 << rangeBit) { in asSet() 702 long[] range in asArray() 921 setUlUnicodeRange1(long range) setUlUnicodeRange1() argument 929 setUlUnicodeRange2(long range) setUlUnicodeRange2() argument 937 setUlUnicodeRange3(long range) setUlUnicodeRange3() argument 945 setUlUnicodeRange4(long range) setUlUnicodeRange4() argument 1054 setUlCodePageRange1(long range) setUlCodePageRange1() argument 1062 setUlCodePageRange2(long range) setUlCodePageRange2() argument [all...]
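Note: asSet() above tests one flag bit spread across several words (the OS/2 table's ulUnicodeRange1..4 are 32 bits each, 128 bits total). The same membership test in C, with hypothetical names:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Membership test for a flag set split across 32-bit words, the same
     * layout as ulUnicodeRange1..4 above (bits 0..127, 32 per field). */
    static bool range_bit_set(const uint32_t words[4], unsigned bit)
    {
        assert(bit < 128);
        unsigned word  = bit >> 5;   /* bit / 32 picks range1..range4 */
        unsigned shift = bit & 31;   /* bit % 32 picks the flag inside */
        return (words[word] >> shift) & 1u;
    }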
/third_party/skia/third_party/externals/dawn/src/dawn_native/vulkan/
TextureVk.cpp | 170 const SubresourceRange& range) { in BuildMemoryBarrier() 179 barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects); in BuildMemoryBarrier() 180 barrier.subresourceRange.baseMipLevel = range.baseMipLevel; in BuildMemoryBarrier() 181 barrier.subresourceRange.levelCount = range.levelCount; in BuildMemoryBarrier() 182 barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer; in BuildMemoryBarrier() 183 barrier.subresourceRange.layerCount = range.layerCount; in BuildMemoryBarrier() 998 textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) { in TransitionUsageForPass() 999 SubresourceRange updateRange = range; in TransitionUsageForPass() 1032 subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage, in TransitionUsageForPassImpl() 1039 imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range)); in TransitionUsageForPassImpl() 167 BuildMemoryBarrier(const Texture* texture, wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage, const SubresourceRange& range) BuildMemoryBarrier() argument 1056 TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::TextureUsage usage, const SubresourceRange& range) TransitionUsageNow() argument 1078 TransitionUsageAndGetResourceBarrier( wgpu::TextureUsage usage, const SubresourceRange& range, std::vector<VkImageMemoryBarrier>* imageBarriers, VkPipelineStageFlags* srcStages, VkPipelineStageFlags* dstStages) TransitionUsageAndGetResourceBarrier() argument 1101 TransitionUsageAndGetResourceBarrierImpl( wgpu::TextureUsage usage, const SubresourceRange& range, std::vector<VkImageMemoryBarrier>* imageBarriers, VkPipelineStageFlags* srcStages, VkPipelineStageFlags* dstStages) TransitionUsageAndGetResourceBarrierImpl() argument 1127 ClearTexture(CommandRecordingContext* recordingContext, const SubresourceRange& range, TextureBase::ClearValue clearValue) ClearTexture() argument 1270 EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext, const SubresourceRange& range) EnsureSubresourceContentInitialized() argument [all...]
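Note: BuildMemoryBarrier() copies Dawn's subresource window field by field into the Vulkan barrier. A sketch of just that part against the stock Vulkan C API, assuming the SDK headers; VK_IMAGE_ASPECT_COLOR_BIT stands in for Dawn's VulkanAspectMask(range.aspects), and layouts/access masks are left to the caller:

    #include <vulkan/vulkan.h>

    /* Fill the subresource-range half of an image barrier, mirroring
     * BuildMemoryBarrier() above. */
    static VkImageMemoryBarrier make_barrier(VkImage image,
                                             uint32_t base_mip, uint32_t mips,
                                             uint32_t base_layer, uint32_t layers)
    {
        VkImageMemoryBarrier barrier = {0};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
        barrier.subresourceRange.baseMipLevel   = base_mip;
        barrier.subresourceRange.levelCount     = mips;
        barrier.subresourceRange.baseArrayLayer = base_layer;
        barrier.subresourceRange.layerCount     = layers;
        return barrier;
    }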
/third_party/mesa3d/src/gallium/drivers/virgl/
virgl_buffer.c | 41 if (trans->range.end <= trans->range.start) { in virgl_buffer_transfer_unmap() 46 transfer->box.x += trans->range.start; in virgl_buffer_transfer_unmap() 47 transfer->box.width = trans->range.end - trans->range.start; in virgl_buffer_transfer_unmap() 79 util_range_add(transfer->resource, &trans->range, box->x, box->x + box->width); in virgl_buffer_transfer_flush_region()

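Note: virgl accumulates a dirty half-open interval per transfer with util_range_add() and skips the unmap flush when the interval is empty (end <= start). The accumulate-then-test pattern, with a simplified stand-in for Mesa's util_range:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for Mesa's util_range: a half-open [start, end)
     * interval that grows to cover every flushed write. */
    struct dirty_range { uint32_t start, end; };

    static void range_init(struct dirty_range *r)
    {
        r->start = UINT32_MAX;  /* empty: end <= start */
        r->end = 0;
    }

    static void range_add(struct dirty_range *r, uint32_t lo, uint32_t hi)
    {
        if (lo < r->start) r->start = lo;
        if (hi > r->end)   r->end = hi;
    }

    /* The unmap early-out at line 41 above. */
    static bool range_is_empty(const struct dirty_range *r)
    {
        return r->end <= r->start;
    }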
/third_party/python/Tools/scripts/
make_ctype.py | 17 for i in range(128): 39 for i in range(128, 256, 16): 59 for i in range(0, 256, 8): 61 for i in range(i, i+8): 74 for i in range(0, 256, 8): 76 for i in range(i, i+8):

/third_party/python/Doc/includes/
mp_workers.py | 42 TASKS1 = [(mul, (i, 7)) for i in range(20)] 43 TASKS2 = [(plus, (i, 8)) for i in range(10)] 54 for i in range(NUMBER_OF_PROCESSES): 59 for i in range(len(TASKS1)): 67 for i in range(len(TASKS2)): 71 for i in range(NUMBER_OF_PROCESSES):

/kernel/linux/linux-5.10/net/netfilter/
nft_masq.c | 103 struct nf_nat_range2 range; in nft_masq_ipv4_eval() local 105 memset(&range, 0, sizeof(range)); in nft_masq_ipv4_eval() 106 range.flags = priv->flags; in nft_masq_ipv4_eval() 108 range.min_proto.all = (__force __be16)nft_reg_load16( in nft_masq_ipv4_eval() 110 range.max_proto.all = (__force __be16)nft_reg_load16( in nft_masq_ipv4_eval() 114 &range, nft_out(pkt)); in nft_masq_ipv4_eval() 149 struct nf_nat_range2 range; in nft_masq_ipv6_eval() local 151 memset(&range, 0, sizeof(range)); in nft_masq_ipv6_eval() [all...]
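Note: the masquerade evaluators all follow one pattern: zero an nf_nat_range2 on the stack, copy the rule's flags, then fill the 16-bit port bounds only when the rule specified them. A userspace-compilable sketch of that pattern; nat_range is a simplified stand-in for the kernel struct (which holds unions of addresses and per-protocol values), and 0x2 mirrors NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1):

    #include <stdint.h>
    #include <string.h>

    /* Simplified stand-in for struct nf_nat_range2. */
    struct nat_range {
        unsigned int flags;
        uint16_t min_port_be;  /* big-endian, like min_proto.all */
        uint16_t max_port_be;
    };

    #define NAT_RANGE_PROTO_SPECIFIED 0x2  /* mirrors NF_NAT_RANGE_PROTO_SPECIFIED */

    /* The eval pattern above: zero everything, copy the rule flags, and
     * fill the port bounds only when the rule configured them. */
    static void build_range(struct nat_range *range, unsigned int flags,
                            uint16_t min_be, uint16_t max_be)
    {
        memset(range, 0, sizeof(*range));
        range->flags = flags;
        if (flags & NAT_RANGE_PROTO_SPECIFIED) {
            range->min_port_be = min_be;
            range->max_port_be = max_be;
        }
    }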
nf_nat_helper.c | 182 struct nf_nat_range2 range; in nf_nat_follow_master() local 188 range.flags = NF_NAT_RANGE_MAP_IPS; in nf_nat_follow_master() 189 range.min_addr = range.max_addr in nf_nat_follow_master() 191 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); in nf_nat_follow_master() 194 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); in nf_nat_follow_master() 195 range.min_proto = range.max_proto = exp->saved_proto; in nf_nat_follow_master() 196 range.min_addr = range in nf_nat_follow_master() [all...]
xt_REDIRECT.c | 36 const struct nf_nat_range2 *range = par->targinfo; in redirect_tg6_checkentry() local 38 if (range->flags & NF_NAT_RANGE_MAP_IPS) in redirect_tg6_checkentry() 53 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { in redirect_tg4_check() 68 struct nf_nat_range2 range = { in redirect_tg4() local 69 .flags = mr->range[0].flags, in redirect_tg4() 70 .min_proto = mr->range[0].min, in redirect_tg4() 71 .max_proto = mr->range[0].max, in redirect_tg4() 74 return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par)); in redirect_tg4()

nf_nat_redirect.c | 29 nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range, in nf_nat_redirect() argument 40 newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; in nf_nat_redirect() 43 newrange.min_proto = range->min_proto; in nf_nat_redirect() 44 newrange.max_proto = range->max_proto; in nf_nat_redirect() 50 nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range, in nf_nat_redirect_ipv4() argument 77 return nf_nat_redirect(skb, range, &newdst); in nf_nat_redirect_ipv4() 104 nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, in nf_nat_redirect_ipv6() argument 136 return nf_nat_redirect(skb, range, &newdst); in nf_nat_redirect_ipv6()

/kernel/linux/linux-5.10/net/netlink/
policy.c | 291 struct netlink_range_validation range; in __netlink_policy_dump_write_attr() local 310 nla_get_range_unsigned(pt, &range); in __netlink_policy_dump_write_attr() 313 range.min, NL_POLICY_TYPE_ATTR_PAD) || in __netlink_policy_dump_write_attr() 315 range.max, NL_POLICY_TYPE_ATTR_PAD)) in __netlink_policy_dump_write_attr() 323 struct netlink_range_validation_signed range; in __netlink_policy_dump_write_attr() local 334 nla_get_range_signed(pt, &range); in __netlink_policy_dump_write_attr() 337 range.min, NL_POLICY_TYPE_ATTR_PAD) || in __netlink_policy_dump_write_attr() 339 range.max, NL_POLICY_TYPE_ATTR_PAD)) in __netlink_policy_dump_write_attr() 361 struct netlink_range_validation range; in __netlink_policy_dump_write_attr() local 363 nla_get_range_unsigned(pt, &range); in __netlink_policy_dump_write_attr() [all...]
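Note: the policy dump above serializes an inclusive [min, max] pair, choosing the signed or unsigned helper by attribute type. A sketch of the check such a pair describes, with hypothetical struct names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical mirrors of the two shapes the dump serializes: each is
     * an inclusive [min, max], one unsigned and one signed. */
    struct range_u { uint64_t min, max; };
    struct range_s { int64_t  min, max; };

    static bool in_range_u(uint64_t v, const struct range_u *r)
    {
        return v >= r->min && v <= r->max;
    }

    static bool in_range_s(int64_t v, const struct range_s *r)
    {
        return v >= r->min && v <= r->max;
    }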
/kernel/linux/linux-6.6/fs/xfs/
xfs_discard.c | 42 * lock, gathers a range of inode cluster buffers that are allocated, drops the 222 * use daddr format for all range/len calculations as that is in xfs_trim_gather_extents() 223 * the format the range/len variables are supplied in by in xfs_trim_gather_extents() 239 * If the extent is entirely outside of the range we are in xfs_trim_gather_extents() 249 * If any blocks in the range are still busy, skip the in xfs_trim_gather_extents() 355 * trim a range of the filesystem. 360 * is a linear address range. Hence we need to use DADDR based conversions and 371 struct fstrim_range range; in xfs_ioc_trim() local 389 if (copy_from_user(&range, urange, sizeof(range))) in xfs_ioc_trim() [all...]
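Note: FITRIM takes a struct fstrim_range of byte offsets from user space and writes back, in range.len, how many bytes were trimmed; xfs converts to daddr units internally. A minimal userspace caller, assuming linux/fs.h and a mounted filesystem path:

    #include <fcntl.h>
    #include <linux/fs.h>      /* FITRIM, struct fstrim_range */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Ask the filesystem backing `path` to trim all of its free space.
     * Offsets are in bytes; the kernel clamps minlen upward to the
     * device's discard granularity (compare the jfs max_t() below). */
    static int trim_fs(const char *path)
    {
        struct fstrim_range range = { .start = 0, .len = ~0ULL, .minlen = 0 };
        int fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        if (ioctl(fd, FITRIM, &range) < 0) {
            close(fd);
            return -1;
        }
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        return close(fd);
    }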
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/
intel_uncore.c | 76 const struct i915_range *range; in intel_shadow_table_check() local 81 range = range_lists[j].regs; in intel_shadow_table_check() 82 for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) { in intel_shadow_table_check() 83 if (range->end < range->start) { in intel_shadow_table_check() 84 pr_err("%s: range[%d]:(%06x-%06x) has end before start\n", in intel_shadow_table_check() 85 __func__, i, range->start, range->end); in intel_shadow_table_check() 89 if (prev >= (s32)range->start) { in intel_shadow_table_check() 90 pr_err("%s: range[ in intel_shadow_table_check() [all...]
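Note: the selftest walks each shadow-register table rejecting entries whose end precedes their start, or whose start does not lie strictly beyond the previous end (prev >= start also catches touching entries, since the ranges are inclusive). The same two checks as a standalone C function:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct reg_range { uint32_t start, end; };  /* inclusive, like i915_range */

    /* Validate that a table of inclusive ranges is well-formed, sorted,
     * and non-overlapping -- the same checks as the selftest above. */
    static bool check_ranges(const struct reg_range *r, size_t n)
    {
        int64_t prev = -1;  /* same sentinel as the snippet's (s32) cast */
        for (size_t i = 0; i < n; i++) {
            if (r[i].end < r[i].start) {
                fprintf(stderr, "range[%zu]: end before start\n", i);
                return false;
            }
            if (prev >= (int64_t)r[i].start) {
                fprintf(stderr, "range[%zu]: overlaps previous entry\n", i);
                return false;
            }
            prev = r[i].end;
        }
        return true;
    }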
/kernel/linux/linux-6.6/net/netlink/
policy.c | 291 struct netlink_range_validation range; in __netlink_policy_dump_write_attr() local 310 nla_get_range_unsigned(pt, &range); in __netlink_policy_dump_write_attr() 313 range.min, NL_POLICY_TYPE_ATTR_PAD) || in __netlink_policy_dump_write_attr() 315 range.max, NL_POLICY_TYPE_ATTR_PAD)) in __netlink_policy_dump_write_attr() 323 struct netlink_range_validation_signed range; in __netlink_policy_dump_write_attr() local 334 nla_get_range_signed(pt, &range); in __netlink_policy_dump_write_attr() 337 range.min, NL_POLICY_TYPE_ATTR_PAD) || in __netlink_policy_dump_write_attr() 339 range.max, NL_POLICY_TYPE_ATTR_PAD)) in __netlink_policy_dump_write_attr() 361 struct netlink_range_validation range; in __netlink_policy_dump_write_attr() local 363 nla_get_range_unsigned(pt, &range); in __netlink_policy_dump_write_attr() [all...]
/kernel/linux/linux-5.10/fs/jfs/
ioctl.c | 126 struct fstrim_range range; in jfs_ioctl() local 137 if (copy_from_user(&range, (struct fstrim_range __user *)arg, in jfs_ioctl() 138 sizeof(range))) in jfs_ioctl() 141 range.minlen = max_t(unsigned int, range.minlen, in jfs_ioctl() 144 ret = jfs_ioc_trim(inode, &range); in jfs_ioctl() 148 if (copy_to_user((struct fstrim_range __user *)arg, &range, in jfs_ioctl() 149 sizeof(range))) in jfs_ioctl()

/kernel/linux/linux-6.6/fs/jfs/
ioctl.c | 113 struct fstrim_range range; in jfs_ioctl() local 124 if (copy_from_user(&range, (struct fstrim_range __user *)arg, in jfs_ioctl() 125 sizeof(range))) in jfs_ioctl() 128 range.minlen = max_t(unsigned int, range.minlen, in jfs_ioctl() 131 ret = jfs_ioc_trim(inode, &range); in jfs_ioctl() 135 if (copy_to_user((struct fstrim_range __user *)arg, &range, in jfs_ioctl() 136 sizeof(range))) in jfs_ioctl()

/kernel/linux/linux-6.6/net/netfilter/
xt_REDIRECT.c | 36 const struct nf_nat_range2 *range = par->targinfo; in redirect_tg6_checkentry() local 38 if (range->flags & NF_NAT_RANGE_MAP_IPS) in redirect_tg6_checkentry() 53 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { in redirect_tg4_check() 68 struct nf_nat_range2 range = { in redirect_tg4() local 69 .flags = mr->range[0].flags, in redirect_tg4() 70 .min_proto = mr->range[0].min, in redirect_tg4() 71 .max_proto = mr->range[0].max, in redirect_tg4() 74 return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par)); in redirect_tg4()

nf_nat_redirect.c | 29 nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range, in nf_nat_redirect() argument 40 newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; in nf_nat_redirect() 43 newrange.min_proto = range->min_proto; in nf_nat_redirect() 44 newrange.max_proto = range->max_proto; in nf_nat_redirect() 50 nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range, in nf_nat_redirect_ipv4() argument 77 return nf_nat_redirect(skb, range, &newdst); in nf_nat_redirect_ipv4() 104 nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, in nf_nat_redirect_ipv6() argument 136 return nf_nat_redirect(skb, range, &newdst); in nf_nat_redirect_ipv6()

/third_party/f2fs-tools/mkfs/
f2fs_format_utils.c | 55 unsigned long long range[2]; in trim_device() local 73 range[0] = 0; in trim_device() 74 range[1] = bytes; in trim_device() 81 range[0], range[1]) < 0) { in trim_device() 93 if (ioctl(fd, BLKSECDISCARD, &range) < 0) { in trim_device() 102 if (ioctl(fd, BLKDISCARD, &range) < 0) { in trim_device() 105 MSG(0, "Info: Discarded %llu MB\n", range[1] >> 20); in trim_device()

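Note: the block-layer discard ioctls take a two-element u64 array of {byte offset, byte count}; f2fs_format tries BLKSECDISCARD and falls back to BLKDISCARD. A minimal userspace sketch of the same sequence, assuming linux/fs.h; this destroys data, so point it at a scratch device:

    #include <fcntl.h>
    #include <linux/fs.h>      /* BLKDISCARD, BLKSECDISCARD */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Discard `bytes` from the start of the block device at `dev`:
     * range[0] is the byte offset, range[1] the byte count. Secure
     * discard is attempted first, as in trim_device() above. */
    static int discard_device(const char *dev, unsigned long long bytes)
    {
        unsigned long long range[2] = { 0, bytes };
        int fd = open(dev, O_WRONLY);
        if (fd < 0)
            return -1;
        if (ioctl(fd, BLKSECDISCARD, &range) < 0 &&
            ioctl(fd, BLKDISCARD, &range) < 0) {
            close(fd);
            return -1;
        }
        printf("Info: Discarded %llu MB\n", range[1] >> 20);
        return close(fd);
    }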
/third_party/python/Lib/test/
sortperf.py | 27 result = [r() for i in range(n)] 46 for i in range(10): 99 for dummy in range(3): 107 L[-10:] = [random.random() for dummy in range(10)] 111 for dummy in range(n // 100): 134 L = list(range(half - 1, -1, -1)) 135 L.extend(range(half)) 146 Two arguments: tabulate a range (inclusive). 150 # default range (inclusive) 157 # two arguments: specify range [all...]
/third_party/skia/third_party/externals/tint/src/ast/
struct_member_test.cc | 30 EXPECT_EQ(st->source.range.begin.line, 0u); in TEST_F() 31 EXPECT_EQ(st->source.range.begin.column, 0u); in TEST_F() 32 EXPECT_EQ(st->source.range.end.line, 0u); in TEST_F() 33 EXPECT_EQ(st->source.range.end.column, 0u); in TEST_F() 43 EXPECT_EQ(st->source.range.begin.line, 27u); in TEST_F() 44 EXPECT_EQ(st->source.range.begin.column, 4u); in TEST_F() 45 EXPECT_EQ(st->source.range.end.line, 27u); in TEST_F() 46 EXPECT_EQ(st->source.range.end.column, 8u); in TEST_F()

/kernel/linux/linux-5.10/drivers/infiniband/core/
umem_odp.c | 111 * Implicit ODP umems do not have a VA range and do not have any page lists. 335 * Maps the range passed in the argument to DMA addresses. 349 * range. 350 * @fault: is faulting required for the given range 361 struct hmm_range range = {}; variable 384 range.notifier = &umem_odp->notifier; 385 range.start = ALIGN_DOWN(user_virt, 1UL << page_shift); 386 range.end = ALIGN(user_virt + bcnt, 1UL << page_shift); 387 pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT; 388 num_pfns = (range [all...]
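Note: the ODP fault path aligns the requested [user_virt, user_virt + bcnt) window outward to the umem's page size before handing it to hmm_range_fault(), then indexes the first pfn relative to the umem start in system-page units. Line 388 is cut off by the truncation; assuming the elided arithmetic sizes the window the same way, the math is:

    #include <stdint.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define PAGE_SHIFT 12    /* assuming 4 KiB system pages */

    /* Page-align the fault window and locate its first pfn within the
     * umem's page list. Alignment uses the umem's own page_shift; the
     * pfn index uses the system PAGE_SHIFT, as in the snippet above. */
    static void odp_window(uint64_t user_virt, uint64_t bcnt,
                           unsigned page_shift, uint64_t umem_start,
                           uint64_t *start, uint64_t *end,
                           uint64_t *pfn_start_idx, uint64_t *num_pfns)
    {
        uint64_t page = 1ULL << page_shift;
        *start = ALIGN_DOWN(user_virt, page);
        *end   = ALIGN_UP(user_virt + bcnt, page);
        *pfn_start_idx = (*start - umem_start) >> PAGE_SHIFT;
        *num_pfns      = (*end - *start) >> PAGE_SHIFT;
    }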