/third_party/ltp/lib/tst_timer_test.c
  bucket_len():
    57: static float bucket_len(unsigned int bucket, unsigned int max_bucket,
    60:     return 1.00 * bucket * cols / max_bucket;
  frequency_plot():
    99: unsigned int bucket;
   100: bucket = flooru(1.00 * (samples[i] - min_sample)/bucket_size);
   101: buckets[bucket]++;
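
Taken together, the two functions implement a text histogram: each sample is shifted by the minimum sample and divided by the bucket size to pick a bucket, and bucket_len() later scales a bucket's count into a bar width that fits the available columns. A minimal Python sketch of the same idea (plot_histogram and its defaults are illustrative, not LTP code):

    def plot_histogram(samples, nbuckets=64, cols=60):
        """Bucket samples and print a proportionally scaled frequency plot."""
        min_sample = min(samples)
        # Tiny epsilon keeps the largest sample inside the last bucket.
        bucket_size = (max(samples) - min_sample) / nbuckets + 1e-9
        buckets = [0] * nbuckets
        for s in samples:
            buckets[int((s - min_sample) / bucket_size)] += 1  # as in frequency_plot()
        max_bucket = max(buckets)
        for i, count in enumerate(buckets):
            bar = "*" * round(1.00 * count * cols / max_bucket)  # as in bucket_len()
            print(f"{min_sample + i * bucket_size:12.2f} | {bar}")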

/third_party/node/deps/v8/third_party/jinja2/loaders.py
   121: bucket = bcc.get_bucket(environment, name, filename, source)
   122: code = bucket.code
   129: # if the bytecode cache is available and the bucket doesn't
   130: # have a code so far, we give the bucket the new code and put
   132: if bcc is not None and bucket.code is None:
   133:     bucket.code = code
   134:     bcc.set_bucket(bucket)
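
Here the loader asks the bytecode cache (bcc) for a bucket keyed by the template's environment, name, filename, and source; on a miss the bucket's .code is None, so after compiling the loader writes the fresh code back with set_bucket(). Applications enable this simply by attaching a bytecode cache to the Environment; a small usage sketch (the templates/ directory, page.html, and the cache path are assumptions):

    import os
    from jinja2 import Environment, FileSystemLoader, FileSystemBytecodeCache

    os.makedirs("/tmp/jinja_cache", exist_ok=True)

    # Compiled template bytecode lands in per-template cache buckets under
    # /tmp/jinja_cache, so a warm start skips recompiling template source.
    env = Environment(
        loader=FileSystemLoader("templates"),
        bytecode_cache=FileSystemBytecodeCache("/tmp/jinja_cache"),
    )
    print(env.get_template("page.html").render(title="hello"))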

/third_party/node/tools/inspector_protocol/jinja2/loaders.py
   119: bucket = bcc.get_bucket(environment, name, filename, source)
   120: code = bucket.code
   127: # if the bytecode cache is available and the bucket doesn't
   128: # have a code so far, we give the bucket the new code and put
   130: if bcc is not None and bucket.code is None:
   131:     bucket.code = code
   132:     bcc.set_bucket(bucket)

/third_party/node/deps/cares/src/lib/ares__htable.c
   168:  * the hash of the function reduced to the size of the bucket list.
  ares__htable_expand():
   235:   /* Nothing in this bucket */
   242:    * if so, just move the bucket over */
   278:   /* Collision occurred since the bucket wasn't empty */
   285:   /* Abandoned bucket, destroy */
  ares__htable_insert():
   313: ares_bool_t ares__htable_insert(ares__htable_t *htable, void *bucket)
   319:   if (htable == NULL || bucket == NULL) {
   324:   key = htable->bucket_key(bucket);
   327:   /* See if we have a matching bucket already, if so, replace it */
   330:   ares__llist_node_replace(node, bucket);
   … (more matches elided)
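
ares__htable_insert() derives the key from the value through the table's bucket_key callback, looks for an existing node with that key, and replaces it in place instead of appending a duplicate. A minimal Python sketch of that insert-or-replace pattern over chained buckets (the class and its names are illustrative):

    class ChainedHashTable:
        def __init__(self, nbuckets=64, key_func=lambda v: v):
            self.buckets = [[] for _ in range(nbuckets)]
            self.key_func = key_func  # plays the role of htable->bucket_key()

        def insert(self, value):
            key = self.key_func(value)
            chain = self.buckets[hash(key) % len(self.buckets)]
            for i, existing in enumerate(chain):
                if self.key_func(existing) == key:
                    chain[i] = value  # replace, like ares__llist_node_replace()
                    return
            chain.append(value)       # no entry with this key yet: append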

/third_party/skia/third_party/externals/jinja2/loaders.py
   121: bucket = bcc.get_bucket(environment, name, filename, source)
   122: code = bucket.code
   129: # if the bytecode cache is available and the bucket doesn't
   130: # have a code so far, we give the bucket the new code and put
   132: if bcc is not None and bucket.code is None:
   133:     bucket.code = code
   134:     bcc.set_bucket(bucket)

/third_party/rust/crates/unicode-width/scripts/unicode.py
   183: """A bucket contains a group of codepoints and an ordered width list. If one bucket's width…
   187: """Creates an empty bucket."""
   192: """Adds a codepoint/width pair to the bucket, and appends `width` to the width list."""
   197: """If either `self` or `attempt`'s width list starts with the other bucket's width list,…
   210: """Return a list of the codepoint/width pairs in this bucket, sorted by codepoint."""
   216: """If all codepoints in this bucket have the same width, return that width; otherwise,…
   230: same bucket. Returns a list of the buckets in increasing order of those bits."""
   247: indexes into the bucket list (~= indexes into the sub-tables of the next-level table.) The…
   249: same width list, which means that they can be merged into the same bucket…
   … (more matches elided)
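
The docstrings describe a table-compression trick: codepoints are grouped by their ordered width lists, and two buckets may merge whenever one bucket's width list is a prefix of the other's. A hedged sketch of that merge test, with a bucket modeled as a (codepoint set, width list) pair rather than the script's Bucket class:

    def try_merge(a, b):
        """Merge buckets a and b if one width list is a prefix of the other.

        Each bucket is (codepoints: set, widths: list). Returns the merged
        bucket, or None when the width lists conflict.
        """
        shorter, longer = (a, b) if len(a[1]) <= len(b[1]) else (b, a)
        if longer[1][:len(shorter[1])] != shorter[1]:
            return None                           # neither list extends the other
        return (shorter[0] | longer[0], longer[1])  # keep the longer width list

    # try_merge(({0x41}, [1]), ({0x4E00}, [1, 2])) -> ({0x41, 0x4E00}, [1, 2])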

/third_party/icu/icu4c/source/test/intltest/alphaindextst.cpp
  joinLabelsAndAppend():
    36:   const AlphabeticIndex::Bucket *bucket;
    37:   for (int32_t i = 0; (bucket = index.getBucket(i)) != NULL; ++i) {
    41:     dest.append(bucket->getLabel());
  ManyLocalesTest():
   379:   assertEquals("initial bucket index", -1, index.getBucketIndex());
   383:   assertEquals("bucket index", bucketIndex, index.getBucketIndex());
   388:   const AlphabeticIndex::Bucket *bucket = immIndex->getBucket(bucketIndex);
   389:   TEST_ASSERT(bucket != NULL);
   390:   assertEquals("bucket label vs. immutable: locale=" + UnicodeString(localeName) +
   392:     label, bucket->getLabel());
   393:   TEST_ASSERT(&label != &bucket…
  checkHasBuckets():
   753:   const AlphabeticIndex::Bucket *bucket = index->getBucket(1);
   … (more matches elided)

/third_party/skia/experimental/skrive/src/Shape.cpp
  onRevalidate():
    42:   auto& bucket = paint->style() == SkPaint::kFill_Style ? fFills : fStrokes;
    43:   bucket.push_back(paint);

/kernel/linux/linux-5.10/drivers/md/bcache/extents.c
    10:  * bucket priority is increased on cache hit, and periodically all the buckets
  __ptr_invalid():
    54:   size_t bucket = PTR_BUCKET_NR(c, k, i);
    58:   bucket < ca->sb.first_bucket ||
    59:   bucket >= ca->sb.nbuckets)
  bch_ptr_status():
    75:   size_t bucket = PTR_BUCKET_NR(c, k, i);
    80:   if (bucket < ca->sb.first_bucket)
    82:   if (bucket >= ca->sb.nbuckets)
  bch_bkey_dump():
   138:   pr_cont(" bucket %zu", n);
  btree_ptr_bad_expensive():
   177:   struct bucket *g;
   199:   "inconsistent btree pointer %s: bucket…
   … (more matches elided)

/kernel/linux/linux-6.6/drivers/md/bcache/extents.c
    10:  * bucket priority is increased on cache hit, and periodically all the buckets
  __ptr_invalid():
    54:   size_t bucket = PTR_BUCKET_NR(c, k, i);
    58:   bucket < ca->sb.first_bucket ||
    59:   bucket >= ca->sb.nbuckets)
  bch_ptr_status():
    75:   size_t bucket = PTR_BUCKET_NR(c, k, i);
    80:   if (bucket < ca->sb.first_bucket)
    82:   if (bucket >= ca->sb.nbuckets)
  bch_bkey_dump():
   138:   pr_cont(" bucket %zu", n);
  btree_ptr_bad_expensive():
   177:   struct bucket *g;
   199:   "inconsistent btree pointer %s: bucket…
   … (more matches elided)
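
In both kernel versions, __ptr_invalid() and bch_ptr_status() reduce an extent pointer to a bucket number via PTR_BUCKET_NR() and reject it unless first_bucket <= bucket < nbuckets on the target cache device. The same bounds check in a few lines of Python (the status strings are illustrative, not the kernel's exact messages):

    def ptr_status(bucket, first_bucket, nbuckets):
        """Classify a bucket number the way bch_ptr_status() does."""
        if bucket < first_bucket:
            return "bad: bucket precedes first_bucket"
        if bucket >= nbuckets:
            return "bad: bucket past end of device"
        return "ok"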

/kernel/linux/linux-5.10/arch/hexagon/kernel/ptrace.c
  genregs_set():
    78:   unsigned long bucket;
   108:   INEXT(&bucket, cause);
   109:   INEXT(&bucket, badva);

/kernel/linux/linux-6.6/arch/hexagon/kernel/ptrace.c
  genregs_set():
    78:   unsigned long bucket;
   108:   INEXT(&bucket, cause);
   109:   INEXT(&bucket, badva);

/third_party/node/deps/v8/src/objects/ordered-hash-table.h
    38: // [kPrefixSize + 2]: bucket count
    42: // item in this bucket is stored.
    49: // entry in this hash bucket.
    60: // [kPrefixSize + 2]: bucket count
   140: // The extra +1 is for linking the bucket chains together.
  HashToEntryRaw():
   220:   int bucket = HashToBucket(hash);
   221:   Object entry = this->get(HashTableStartIndex() + bucket);
   358: // Each bucket and chain value is a byte long. The padding exists so
   359: // that the DataTable entries start aligned. A bucket or chain value
   391: // [40] : First chain-link for bucket
   … (more matches elided)
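
The header comments sketch V8's ordered hash tables: one flat backing store holds the bucket heads, the data entries in insertion order, and a chain link per entry; HashToEntryRaw() hashes to a bucket, reads the head entry index, and the caller walks the chain. A compact Python model of that layout (deliberately simplified; the real table also handles deletions and rehashing):

    class OrderedHashTable:
        """Bucket heads point at entry indexes; each entry links to the
        next entry in its bucket; iteration follows insertion order."""
        def __init__(self, nbuckets=8):
            self.buckets = [-1] * nbuckets  # head entry per bucket, -1 = empty
            self.entries = []               # (key, value, next_entry_index)

        def insert(self, key, value):
            b = hash(key) % len(self.buckets)
            self.entries.append((key, value, self.buckets[b]))
            self.buckets[b] = len(self.entries) - 1  # new entry becomes head

        def lookup(self, key):
            e = self.buckets[hash(key) % len(self.buckets)]
            while e != -1:                  # walk this bucket's chain
                k, v, nxt = self.entries[e]
                if k == key:
                    return v
                e = nxt
            return None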

/kernel/linux/linux-6.6/fs/fscache/volume.c
  fscache_hash_volume():
   165:   unsigned int bucket, collidee_debug_id = 0;
   167:   bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
   168:   h = &fscache_volume_hash[bucket];
  fscache_unhash_volume():
   363:   unsigned int bucket;
   365:   bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
   366:   h = &fscache_volume_hash[bucket];

/third_party/node/deps/v8/tools/generate-runtime-call-stats.py
   404:     bucket = Bucket(bucket_name, repeats)
   405:     buckets[bucket_name] = bucket
   407:     bucket = buckets[bucket_name]
   409:   bucket.add_data_point(name, i, value["count"], value["duration"] / 1000.0)
   415: for bucket in buckets.values():
   416:   table += bucket.as_list(
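
The script keeps one Bucket per category: it creates the bucket the first time a name appears, then funnels every (name, count, duration) data point into it, and finally renders each bucket as table rows. The get-or-create step collapses to a defaultdict in plain Python (a toy reduction, not the script's Bucket class):

    from collections import defaultdict

    buckets = defaultdict(list)  # bucket_name -> list of data points

    def add_data_point(bucket_name, name, run, count, duration_ms):
        buckets[bucket_name].append((name, run, count, duration_ms))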

/third_party/mesa3d/src/panfrost/lib/pan_bo.c
    48:  * Cached BOs are sorted into a bucket based on rounding their size down to the
    49:  * nearest power-of-two. Each bucket contains a linked list of free panfrost_bo
    51:  * corresponding bucket. Getting a BO from the cache consists of finding the
    52:  * appropriate bucket and sorting. A cache eviction is a kernel-level free of a
    53:  * BO and removing it from the bucket. We special case evicting all BOs from
   156: /* Helper to calculate the bucket index of a BO */
  pan_bucket_index():
   161:   /* Round down to POT to compute a bucket index */
   165:   /* Clamp the bucket index; all huge allocations will be
   166:    * sorted into the largest bucket */
  panfrost_bo_cache_fetch():
   192:   struct list_head *bucket…
  panfrost_bo_cache_put():
   271:   struct list_head *bucket = pan_bucket(dev, MAX2(bo->size, 4096));
  panfrost_bo_cache_evict_all():
   314:   struct list_head *bucket = &dev->bo_cache.buckets[i];
   … (more matches elided)
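
The comment block lays out the BO cache policy: a freed buffer's size is rounded down to a power of two to select a bucket, huge allocations are clamped into the largest bucket, and fetching searches only the matching bucket. A sketch of that index computation (the bucket range constants are assumptions, not Mesa's exact values):

    MIN_BUCKET_BITS = 12  # assume the smallest bucket holds 4 KiB BOs
    MAX_BUCKET_BITS = 24  # assume the largest bucket holds >= 16 MiB BOs

    def bucket_index(size):
        """Round size down to a power of two, clamped into the bucket range."""
        bits = max(size, 1).bit_length() - 1            # floor(log2(size))
        bits = min(max(bits, MIN_BUCKET_BITS), MAX_BUCKET_BITS)
        return bits - MIN_BUCKET_BITS                   # 0-based bucket index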

/third_party/skia/third_party/externals/freetype/src/cache/ftccache.c
    86: /* get a top bucket for specified hash from cache,
  ftc_cache_resize():
   128:    * the bucket lists
   142:   /* split a single bucket */
  FT_LOCAL_DEF():
   486:   FTC_Node* bucket;
   499:   bucket = pnode = FTC_NODE_TOP_FOR_HASH( cache, hash );
   518:   /* Update bucket by modified linked list */
   519:   bucket = pnode = FTC_NODE_TOP_FOR_HASH( cache, hash );
   535:   if ( node != *bucket )
   538:     node->link = *bucket;
   539:     *bucket…
   … (more matches elided)
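
The lookup path ends with a classic move-to-front step: on a hit, a node that is not already the top of its bucket is unlinked and relinked as the bucket head (node->link = *bucket; *bucket = node), so hot entries are found first on the next search. The same idea with a Python list standing in for the linked bucket:

    def lookup_mru(bucket, key):
        """Find key in a bucket of (key, value) pairs; move a hit to the front."""
        for i, (k, v) in enumerate(bucket):
            if k == key:
                if i != 0:                        # not already the bucket head
                    bucket.insert(0, bucket.pop(i))
                return v
        return None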

/third_party/mesa3d/src/util/format_srgb.py
   100: for bucket in range(0, nbuckets):
   101:     start = ((127 - numexp) << 23) + bucket*(bucketsize << stepshift)

/kernel/linux/linux-5.10/net/ipv4/tcp_ipv4.c
  tcp_twsk_unique():
   147:    held not per host, but per port pair and TW bucket is used as state
   150:    If TW bucket has been already destroyed we fall back to VJ's scheme
   157:    * and releasing the bucket lock.
  tcp_v4_destroy_sock():
  2265:   /* Clean up a referenced TCP bind bucket. */
  2285:  * starting from bucket given in st->bucket; when st->bucket is zero the
  listening_get_next():
  2304:   ilb = &tcp_hashinfo.listening_hash[st->bucket];
  2310:   ilb = &tcp_hashinfo.listening_hash[st->bucket];
  2325:   if (++st->bucket < INET_LHTABLE_SIZ…
  tcp_seek_last_pos():
  2457:   int bucket = st->bucket;
   … (more matches elided)
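
The /proc iterator walks the listening-socket hash table incrementally: the seq_file state records the current bucket in st->bucket so a later read can resume the scan from that bucket rather than restarting, and tcp_seek_last_pos() uses the saved bucket to jump back to the previous position. A toy resumable cursor over a list-of-buckets table (names invented for illustration):

    class HashtableCursor:
        """Resumable scan, like st->bucket in tcp_ipv4.c's seq_file code."""
        def __init__(self, table):
            self.table = table  # list of buckets, each a list of items
            self.bucket = 0     # bucket to resume from
            self.offset = 0     # position inside that bucket

        def next(self):
            while self.bucket < len(self.table):
                chain = self.table[self.bucket]
                if self.offset < len(chain):
                    self.offset += 1
                    return chain[self.offset - 1]
                self.bucket += 1  # bucket exhausted: advance and reset
                self.offset = 0
            return None           # end of table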

/kernel/linux/linux-5.10/net/netfilter/nf_conntrack_core.c
  ____nf_conntrack_find():
   740:   unsigned int bucket, hsize;
   744:   bucket = reciprocal_scale(hash, hsize);
   746:   hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
   763:   if (get_nulls_value(n) != bucket) {
  early_drop():
  1307:   unsigned int i, bucket;
  1316:   bucket = reciprocal_scale(hash, hsize);
  1318:   bucket = (bucket + 1) % hsize;
  1320:   drops = early_drop_list(net, &ct_hash[bucket]);
  get_next_corpse():
  2175: get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
  2176:                 void *data, unsigned int *bucket)
  nf_ct_iterate_cleanup():
  2226:   unsigned int bucket = 0;
  nf_conntrack_hash_resize():
  2456:   int i, bucket;
   … (more matches elided)
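
reciprocal_scale(hash, hsize) maps a 32-bit hash onto [0, hsize) with a multiply and a shift, which works for arbitrary table sizes (contrast fscache above, which can simply mask because its table size is a power of two). The kernel defines it as (u32)(((u64)hash * hsize) >> 32); the same computation in Python:

    def reciprocal_scale(hash32, hsize):
        """Map a 32-bit hash uniformly onto range(hsize) without a modulo."""
        return ((hash32 & 0xFFFFFFFF) * hsize) >> 32

    bucket = reciprocal_scale(0xDEADBEEF, 16384)  # always < 16384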

/kernel/linux/linux-6.6/fs/fuse/inode.c
  fuse_sync_bucket_alloc():
   631:   struct fuse_sync_bucket *bucket;
   633:   bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
   634:   if (bucket) {
   635:     init_waitqueue_head(&bucket->waitq);
   637:     atomic_set(&bucket->count, 1);
   639:   return bucket;
  fuse_sync_fs_writes():
   644:   struct fuse_sync_bucket *bucket, *new_bucket;
   649:   bucket = rcu_dereference_protected(fc->curr_bucket, 1);
   650:   count = atomic_read(&bucket…
  fuse_conn_put():
   943:   struct fuse_sync_bucket *bucket;
   … (more matches elided)

/kernel/linux/linux-5.10/net/openvswitch/meter.c
  dp_meter_create():
   389:   /* Figure out max delta_t that is enough to fill any bucket.
   390:    * Keep max_delta_t size to the bucket units:
   393:    * Start with a full bucket.
   395:   band->bucket = (band->burst_size + band->rate) * 1000ULL;
   396:   band_max_delta_t = div_u64(band->bucket, band->rate);
  ovs_meter_execute():
   625:   /* Make sure delta_ms will not be too large, so that bucket will not
   638:    * second. We maintain the bucket in the units of either bits or
   641:    * bucket units:
   645:    * 'cost' is the number of bucket units in this packet.
   656:   band->bucket…
   … (more matches elided)

/kernel/linux/linux-5.10/net/netfilter/ipvs/ip_vs_lblcr.c
   273:   struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
  ip_vs_lblcr_hash():
   335:   hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
  ip_vs_lblcr_get():
   348:   hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
  ip_vs_lblcr_flush():
   405:   hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
  ip_vs_lblcr_full_check():
   433:   hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
  ip_vs_lblcr_check_expire():
   488:   hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
  ip_vs_lblcr_init_svc():
   526:   INIT_HLIST_HEAD(&tbl->bucket[i]);

/kernel/linux/linux-6.6/net/openvswitch/meter.c
  dp_meter_create():
   386:   /* Figure out max delta_t that is enough to fill any bucket.
   387:    * Keep max_delta_t size to the bucket units:
   390:    * Start with a full bucket.
   392:   band->bucket = band->burst_size * 1000ULL;
   393:   band_max_delta_t = div_u64(band->bucket, band->rate);
  ovs_meter_execute():
   622:   /* Make sure delta_ms will not be too large, so that bucket will not
   635:    * second. We maintain the bucket in the units of either bits or
   638:    * bucket units:
   642:    * 'cost' is the number of bucket units in this packet.
   653:   band->bucket…
   … (more matches elided)
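
Both meter.c versions implement a classic token bucket: band->bucket holds tokens in milli-units, is refilled in proportion to the elapsed time at the band's rate, is capped at the burst size, and a packet passes only if its cost can be deducted (note the initial fill differs above: (burst_size + rate) * 1000 in 5.10 versus burst_size * 1000 in 6.6). A self-contained Python sketch of that policing logic, with units simplified to tokens per second:

    import time

    class TokenBucketBand:
        """Token-bucket policer in the spirit of ovs_meter_execute()."""
        def __init__(self, rate, burst):
            self.rate = rate              # tokens added per second
            self.burst = burst            # bucket capacity (max tokens)
            self.bucket = burst           # start with a full bucket
            self.last = time.monotonic()

        def admit(self, cost=1):
            now = time.monotonic()
            # Refill proportionally to elapsed time, capped at burst size.
            self.bucket = min(self.burst,
                              self.bucket + (now - self.last) * self.rate)
            self.last = now
            if self.bucket >= cost:
                self.bucket -= cost       # packet conforms: spend its cost
                return True
            return False                  # over rate: drop or mark the packet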

/kernel/linux/linux-6.6/net/netfilter/ipvs/ip_vs_lblcr.c
   273:   struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
  ip_vs_lblcr_hash():
   335:   hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
  ip_vs_lblcr_get():
   348:   hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
  ip_vs_lblcr_flush():
   405:   hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
  ip_vs_lblcr_full_check():
   433:   hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
  ip_vs_lblcr_check_expire():
   488:   hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
  ip_vs_lblcr_init_svc():
   526:   INIT_HLIST_HEAD(&tbl->bucket[i]);