Searched refs:buckets (Results 1 - 25 of 99) sorted by relevance


/third_party/node/deps/cares/src/lib/
ares__htable.c
45 /* NOTE: if we converted buckets into ares__slist_t we could guarantee on
51 ares__llist_t **buckets; member
68 static void ares__htable_buckets_destroy(ares__llist_t **buckets, in ares__htable_buckets_destroy() argument
74 if (buckets == NULL) { in ares__htable_buckets_destroy()
79 if (buckets[i] == NULL) { in ares__htable_buckets_destroy()
84 ares__llist_replace_destructor(buckets[i], NULL); in ares__htable_buckets_destroy()
87 ares__llist_destroy(buckets[i]); in ares__htable_buckets_destroy()
90 ares_free(buckets); in ares__htable_buckets_destroy()
98 ares__htable_buckets_destroy(htable->buckets, htable->size, ARES_TRUE); in ares__htable_destroy()
125 htable->buckets in ares__htable_create()
191 ares__llist_t **buckets = NULL; ares__htable_expand() local
[all...]
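
The lines above show c-ares keeping an array of ares__llist_t pointers, one linked list per bucket, and tearing the table down by destroying each list before freeing the array itself. A minimal sketch of that teardown pattern, using an invented node type rather than the real ares__llist_t API:

    #include <stdlib.h>

    struct node { struct node *next; void *val; };   /* stand-in for a list entry */

    /* Destroy every chain, then the bucket array itself. */
    static void buckets_destroy(struct node **buckets, size_t nbuckets, int free_values)
    {
        if (buckets == NULL)
            return;
        for (size_t i = 0; i < nbuckets; i++) {
            struct node *cur = buckets[i];
            while (cur != NULL) {
                struct node *next = cur->next;
                if (free_values)   /* when false, this plays the role of swapping in a NULL destructor */
                    free(cur->val);
                free(cur);
                cur = next;
            }
        }
        free(buckets);
    }
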
ares__htable_asvp.c
118 const void **buckets = NULL; in ares__htable_asvp_keys() local
129 buckets = ares__htable_all_buckets(htable->hash, &cnt); in ares__htable_asvp_keys()
130 if (buckets == NULL || cnt == 0) { in ares__htable_asvp_keys()
136 ares_free(buckets); in ares__htable_asvp_keys()
141 out[i] = ((const ares__htable_asvp_bucket_t *)buckets[i])->key; in ares__htable_asvp_keys()
144 ares_free(buckets); in ares__htable_asvp_keys()
/third_party/rust/crates/aho-corasick/src/packed/teddy/
compile.rs
19 /// slim Teddy is used (8 buckets) and `true` means fat Teddy is used
20 /// (16 buckets). Fat Teddy requires AVX2, so if that CPU feature isn't
54 /// 16 buckets where as Slim Teddy uses 8 buckets. More buckets are useful
115 let Compiler { buckets, masks, .. } = compiler; in build_imp()
122 buckets, in build_imp()
131 buckets, in build_imp()
140 buckets, in build_imp()
149 buckets, in build_imp()
300 let mut buckets = vec![vec![]; self.buckets.len()]; fmt() variables
[all...]
/third_party/libdrm/
xf86drmHash.c
126 for (bucket = table->buckets[i]; bucket;) { in drmHashDestroy()
148 for (bucket = table->buckets[hash]; bucket; bucket = bucket->next) { in HashFind()
153 bucket->next = table->buckets[hash]; in HashFind()
154 table->buckets[hash] = bucket; in HashFind()
194 bucket->next = table->buckets[hash]; in drmHashInsert()
195 table->buckets[hash] = bucket; in drmHashInsert()
211 table->buckets[hash] = bucket->next; in drmHashDelete()
227 table->p1 = table->buckets[table->p0]; in drmHashNext()
240 table->p1 = table->buckets[0]; in drmHashFirst()
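
In the libdrm hash above, HashFind walks the chain for a slot and, on a hit, relinks the found bucket to the head of table->buckets[hash] so that hot keys are found faster next time. A self-contained sketch of that move-to-front lookup; the types, table size, and hash function below are simplified stand-ins, not libdrm's:

    #include <stddef.h>

    #define NBUCKETS 128

    struct entry { unsigned long key; void *value; struct entry *next; };
    struct table { struct entry *buckets[NBUCKETS]; };

    static unsigned hash_key(unsigned long key) { return (unsigned)(key % NBUCKETS); }

    /* Search the chain; on a hit, splice the entry to the front of its bucket. */
    static void *table_find(struct table *t, unsigned long key)
    {
        unsigned h = hash_key(key);
        struct entry *prev = NULL;

        for (struct entry *e = t->buckets[h]; e; prev = e, e = e->next) {
            if (e->key != key)
                continue;
            if (prev) {                       /* move-to-front */
                prev->next = e->next;
                e->next = t->buckets[h];
                t->buckets[h] = e;
            }
            return e->value;
        }
        return NULL;
    }
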
/third_party/python/Python/
hashtable.c
58 ((_Py_hashtable_entry_t *)_Py_SLIST_HEAD(&(HT)->buckets[BUCKET]))
105 /* makes sure the real size of the buckets array is a power of 2 */
123 /* buckets */ in _Py_hashtable_size()
193 _Py_slist_remove(&ht->buckets[index], (_Py_slist_item_t *)previous, in _Py_hashtable_steal()
242 _Py_slist_prepend(&ht->buckets[index], (_Py_slist_item_t*)entry); in _Py_hashtable_set()
287 size_t buckets_size = new_size * sizeof(ht->buckets[0]); in hashtable_rehash()
296 _Py_hashtable_entry_t *entry = BUCKETS_HEAD(ht->buckets[bucket]); in hashtable_rehash()
308 ht->alloc.free(ht->buckets); in hashtable_rehash()
310 ht->buckets = new_buckets; in hashtable_rehash()
339 size_t buckets_size = ht->nbuckets * sizeof(ht->buckets[ in _Py_hashtable_new_full()
[all...]
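
CPython's hashtable.c comments that it "makes sure the real size of the buckets array is a power of 2"; with a power-of-two bucket count, reducing a hash to a bucket index is a mask rather than a modulo. A small illustrative helper for that rounding (not the CPython code itself):

    #include <stddef.h>

    /* Round n up to the next power of two (assumes n > 0 and no overflow),
     * so that a bucket index can be computed as hash & (nbuckets - 1). */
    static size_t round_up_pow2(size_t n)
    {
        size_t p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }
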
/third_party/node/deps/v8/src/heap/
slot-set.h
26 // Possibly empty buckets (buckets that do not contain any slots) are discovered
28 // or in another thread, so all those buckets need to be revisited.
29 // Track possibly empty buckets within a SlotSet in this data structure. The
58 void Insert(size_t bucket_index, size_t buckets) { in Insert() argument
64 Allocate(buckets); in Insert()
92 void Allocate(size_t buckets) { in Allocate() argument
94 size_t words = WordsForBuckets(buckets); in Allocate()
113 static size_t WordsForBuckets(size_t buckets) { in WordsForBuckets() argument
114 return (buckets in WordsForBuckets()
142 Allocate(size_t buckets) Allocate() argument
171 Delete(SlotSet* slot_set, size_t buckets) Delete() argument
254 RemoveRange(size_t start_offset, size_t end_offset, size_t buckets, EmptyBucketMode mode) RemoveRange() argument
369 FreeEmptyBuckets(size_t buckets) FreeEmptyBuckets() argument
382 CheckPossiblyEmptyBuckets(size_t buckets, PossiblyEmptyBuckets* possibly_empty_buckets) CheckPossiblyEmptyBuckets() argument
591 Bucket** buckets() { return reinterpret_cast<Bucket**>(this); } buckets() function
[all...]
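
The slot-set comments describe discovering "possibly empty" buckets during scanning, possibly from another thread, and recording them so they can be revisited and freed later. A simplified, single-threaded sketch of such a side structure, one bit per bucket (the real PossiblyEmptyBuckets is more elaborate and concurrency-aware):

    #include <stdint.h>
    #include <stdlib.h>

    struct possibly_empty_buckets {
        uint64_t *words;                       /* one bit per bucket */
        size_t nbuckets;
    };

    static int pe_allocate(struct possibly_empty_buckets *pe, size_t buckets)
    {
        size_t words = (buckets + 63) / 64;    /* analogous to WordsForBuckets() */
        pe->words = calloc(words, sizeof(uint64_t));
        pe->nbuckets = buckets;
        return pe->words ? 0 : -1;
    }

    /* Mark a bucket as possibly empty; a later sweep revisits only marked buckets. */
    static void pe_insert(struct possibly_empty_buckets *pe, size_t bucket_index)
    {
        pe->words[bucket_index / 64] |= (uint64_t)1 << (bucket_index % 64);
    }

    static int pe_contains(const struct possibly_empty_buckets *pe, size_t bucket_index)
    {
        return (pe->words[bucket_index / 64] >> (bucket_index % 64)) & 1;
    }
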
/third_party/icu/icu4j/main/classes/collate/src/com/ibm/icu/text/
AlphabeticIndex.java
48 * The class also supports having buckets for strings before the first (underflow),
70 * // Show index at top. We could skip or gray out empty buckets
78 * // Show the buckets with their contents, skipping empty buckets
90 * if its bucket is empty. Small buckets could also be combined based on size, such as:
102 * they "lazily" build the index buckets.
105 * buckets and their labels and label types.
128 * Prefix string for Chinese index buckets.
156 private BucketList<V> buckets; field in AlphabeticIndex
165 * and random access to buckets an
173 private final BucketList<V> buckets; global() field in AlphabeticIndex.ImmutableIndex
[all...]
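
The AlphabeticIndex javadoc above describes ordinary label buckets plus special buckets such as underflow for strings that sort before the first label (and, presumably, an overflow counterpart after the last). A deliberately crude C sketch of that classification over plain ASCII labels; the real class is locale- and collator-aware, and nothing here reflects its API:

    #include <ctype.h>

    /* Bucket 0 is underflow, 1..26 map to labels 'A'..'Z', 27 is overflow. */
    static int bucket_for(const char *name)
    {
        unsigned char c = (unsigned char)name[0];
        if (!isalpha(c))
            return c < 'A' ? 0 : 27;   /* digits/punctuation before 'A' -> underflow */
        return toupper(c) - 'A' + 1;
    }
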
/third_party/icu/ohos_icu4j/src/main/java/ohos/global/icu/text/
AlphabeticIndex.java
49 * The class also supports having buckets for strings before the first (underflow),
71 * // Show index at top. We could skip or gray out empty buckets
79 * // Show the buckets with their contents, skipping empty buckets
91 * if its bucket is empty. Small buckets could also be combined based on size, such as:
103 * they "lazily" build the index buckets.
106 * buckets and their labels and label types.
128 * Prefix string for Chinese index buckets.
156 private BucketList<V> buckets; field in AlphabeticIndex
165 * and random access to buckets an
172 private final BucketList<V> buckets; global() field in AlphabeticIndex.ImmutableIndex
[all...]
/third_party/mesa3d/src/glx/
glxhash.c
121 __glxHashBucketPtr buckets[HASH_SIZE]; member
171 table->buckets[i] = NULL; in __glxHashCreate()
187 for (bucket = table->buckets[i]; bucket;) { in __glxHashDestroy()
210 for (bucket = table->buckets[hash]; bucket; bucket = bucket->next) { in HashFind()
215 bucket->next = table->buckets[hash]; in HashFind()
216 table->buckets[hash] = bucket; in HashFind()
264 bucket->next = table->buckets[hash]; in __glxHashInsert()
265 table->buckets[hash] = bucket; in __glxHashInsert()
287 table->buckets[hash] = bucket->next; in __glxHashDelete()
304 table->p1 = table->buckets[tabl in __glxHashNext()
[all...]
/third_party/mesa3d/src/virtio/vulkan/
vn_renderer_internal.c
17 static_assert(ARRAY_SIZE(cache->buckets) <= 32, ""); in vn_renderer_shmem_cache_init()
24 for (uint32_t i = 0; i < ARRAY_SIZE(cache->buckets); i++) { in vn_renderer_shmem_cache_init()
25 struct vn_renderer_shmem_bucket *bucket = &cache->buckets[i]; in vn_renderer_shmem_cache_init()
40 struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx]; in vn_renderer_shmem_cache_fini()
60 if (unlikely(idx >= ARRAY_SIZE(cache->buckets))) in choose_bucket()
64 return &cache->buckets[idx]; in choose_bucket()
74 struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx]; in vn_renderer_shmem_cache_remove_expired_locked()
170 const struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx]; in vn_renderer_shmem_cache_debug_dump()
176 vn_log(NULL, " buckets[%d]: %d shmems", idx, count); in vn_renderer_shmem_cache_debug_dump()
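
venus keeps a small fixed array of shmem buckets (asserted above to hold at most 32) and choose_bucket() appears to map a requested size to one of them. A plausible sketch of that kind of power-of-two size-class selection; the minimum order, bucket count, and naming are invented for illustration:

    #include <stddef.h>

    #define MIN_ORDER 12                 /* hypothetical: smallest class is 4 KiB */
    #define NUM_SIZE_BUCKETS 32

    /* Map a size (> 0) to a bucket index by the position of its highest set bit. */
    static int choose_bucket_index(size_t size)
    {
        int order = 0;
        size_t s = (size - 1) >> MIN_ORDER;

        while (s) {
            s >>= 1;
            order++;
        }
        return order < NUM_SIZE_BUCKETS ? order : -1;   /* -1: too large, skip cache */
    }
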
/third_party/mesa3d/src/gallium/auxiliary/pipebuffer/
pb_bufmgr_slab.c
157 struct pb_manager **buckets; member
492 return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc); in pb_slab_range_manager_create_buffer()
521 mgr->buckets[i]->destroy(mgr->buckets[i]); in pb_slab_range_manager_destroy()
522 FREE(mgr->buckets); in pb_slab_range_manager_destroy()
561 mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets)); in pb_slab_range_manager_create()
562 if (!mgr->buckets) in pb_slab_range_manager_create()
567 mgr->buckets[ in pb_slab_range_manager_create()
[all...]
pb_cache.c
86 struct list_head *cache = &mgr->buckets[entry->bucket_index]; in pb_cache_add_buffer()
96 release_expired_buffers_locked(&mgr->buckets[i], current_time); in pb_cache_add_buffer()
158 struct list_head *cache = &mgr->buckets[bucket_index]; in pb_cache_reclaim_buffer()
235 struct list_head *cache = &mgr->buckets[i]; in pb_cache_release_all_buffers()
265 * @param num_heaps Number of separate caches/buckets indexed by bucket_index
289 mgr->buckets = CALLOC(num_heaps, sizeof(struct list_head)); in pb_cache_init()
290 if (!mgr->buckets) in pb_cache_init()
294 list_inithead(&mgr->buckets[i]); in pb_cache_init()
317 FREE(mgr->buckets); in pb_cache_deinit()
318 mgr->buckets in pb_cache_deinit()
[all...]
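
pb_cache keeps one list head per bucket and, while adding a buffer, releases entries in that bucket whose time has expired. A simplified sketch of such an expiry scan over a singly linked bucket; Mesa's real code uses intrusive doubly linked lists, a mutex, and a driver-supplied destroy callback:

    #include <stdint.h>
    #include <stdlib.h>

    struct cache_entry {
        struct cache_entry *next;
        int64_t end_of_life;             /* timestamp after which the entry may go */
        void *buffer;
    };

    /* Free every entry in the bucket whose lifetime has expired. */
    static void release_expired(struct cache_entry **bucket, int64_t now,
                                void (*destroy_buffer)(void *))
    {
        struct cache_entry **link = bucket;

        while (*link) {
            struct cache_entry *e = *link;
            if (e->end_of_life <= now) {
                *link = e->next;
                destroy_buffer(e->buffer);
                free(e);
            } else {
                link = &e->next;
            }
        }
    }
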
/third_party/libbpf/src/
hashmap.h
77 struct hashmap_entry **buckets; member
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
182 for (cur = map->buckets[bkt]; \
193 for (cur = map->buckets \
194 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
201 for (cur = map->buckets \
202 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
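
The iteration helpers above walk the chain hanging off map->buckets[bkt], either for every bucket index or only for the bucket a key hashes to. Written as a plain function instead of macros, the full traversal looks roughly like this (the types are simplified stand-ins for libbpf's):

    #include <stddef.h>

    struct hm_entry { const void *key; void *value; struct hm_entry *next; };
    struct hm { struct hm_entry **buckets; size_t cap; };

    /* Visit every entry; the callback must not mutate the map while iterating. */
    static void hm_for_each(const struct hm *map,
                            void (*cb)(const void *key, void *value, void *ctx),
                            void *ctx)
    {
        if (!map->buckets)
            return;
        for (size_t bkt = 0; bkt < map->cap; bkt++)
            for (struct hm_entry *cur = map->buckets[bkt]; cur; cur = cur->next)
                cb(cur->key, cur->value, ctx);
    }
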
hashmap.c
21 /* start with 4 buckets */
45 map->buckets = NULL; in hashmap__init()
71 free(map->buckets); in hashmap__clear()
72 map->buckets = NULL; in hashmap__clear()
124 free(map->buckets); in hashmap_grow()
125 map->buckets = new_buckets; in hashmap_grow()
137 if (!map->buckets) in hashmap_find_entry()
140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
200 hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
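
hashmap_grow() in the snippet allocates a larger bucket array, moves the existing entries into it, then frees the old array and adopts the new one; the "start with 4 buckets" comment sets the initial capacity. A compact sketch of that rehash step with invented types and a stored per-entry hash (libbpf instead recomputes the slot from hash_fn and cap_bits):

    #include <stdlib.h>
    #include <stddef.h>

    struct rehash_entry { size_t hash; struct rehash_entry *next; };
    struct rehash_map { struct rehash_entry **buckets; size_t nbuckets; };

    /* Double the bucket count and redistribute every chained entry. */
    static int map_grow(struct rehash_map *m)
    {
        size_t new_n = m->nbuckets ? m->nbuckets * 2 : 4;   /* start with 4 buckets */
        struct rehash_entry **new_buckets = calloc(new_n, sizeof(*new_buckets));

        if (!new_buckets)
            return -1;
        for (size_t i = 0; i < m->nbuckets; i++) {
            struct rehash_entry *e = m->buckets[i];
            while (e) {
                struct rehash_entry *next = e->next;
                size_t slot = e->hash & (new_n - 1);        /* new_n is a power of two */
                e->next = new_buckets[slot];
                new_buckets[slot] = e;
                e = next;
            }
        }
        free(m->buckets);
        m->buckets = new_buckets;
        m->nbuckets = new_n;
        return 0;
    }
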
/third_party/rust/crates/aho-corasick/src/packed/
rabinkarp.rs
9 /// The number of buckets to store our patterns in. We don't want this to be
13 /// The number of buckets MUST be a power of two. Otherwise, determining the
29 /// several buckets to hopefully make the confirmation step faster.
45 buckets: Vec<Vec<(Hash, PatternID)>>,
80 buckets: vec![vec![]; NUM_BUCKETS], in new()
88 rk.buckets[bucket].push((hash, id)); in new()
101 assert_eq!(NUM_BUCKETS, self.buckets.len()); in find_at()
113 let bucket = &self.buckets[hash % NUM_BUCKETS]; in find_at()
137 self.buckets.len() * mem::size_of::<Vec<(Hash, PatternID)>>() in heap_bytes()
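
The Rabin-Karp comments note that the bucket count must be a power of two, so picking a bucket from a hash is a cheap mask, and that each bucket only holds a handful of (hash, pattern) candidates to confirm. A rough C rendering of that confirmation lookup; the bucket layout, hash type, and sizes are invented for illustration:

    #include <stddef.h>
    #include <string.h>

    #define NUM_BUCKETS 64                     /* must stay a power of two */

    struct candidate { unsigned hash; const char *pattern; };
    struct pat_bucket { const struct candidate *items; size_t len; };

    /* Return the first candidate in the hash's bucket that truly matches the text. */
    static const char *confirm(const struct pat_bucket buckets[NUM_BUCKETS],
                               unsigned hash, const char *text, size_t text_len)
    {
        const struct pat_bucket *b = &buckets[hash & (NUM_BUCKETS - 1)];

        for (size_t i = 0; i < b->len; i++) {
            const struct candidate *c = &b->items[i];
            size_t plen = strlen(c->pattern);
            if (c->hash == hash && plen <= text_len &&
                memcmp(c->pattern, text, plen) == 0)
                return c->pattern;
        }
        return NULL;
    }
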
/third_party/mesa3d/src/gallium/auxiliary/cso_cache/
cso_hash.c
110 struct cso_node **oldBuckets = hash->buckets; in cso_data_rehash()
116 hash->buckets = MALLOC(sizeof(struct cso_node*) * hash->numBuckets); in cso_data_rehash()
118 hash->buckets[i] = e; in cso_data_rehash()
132 beforeFirstNode = &hash->buckets[h % hash->numBuckets]; in cso_data_rehash()
162 struct cso_node **bucket = hash->buckets; in cso_data_first_node()
192 hash->buckets = NULL; in cso_hash_init()
203 struct cso_node **bucket = hash->buckets; in cso_hash_deinit()
214 FREE(hash->buckets); in cso_hash_deinit()
244 bucket = a.d->buckets + start; in cso_hash_data_next()
291 node_ptr = &hash->buckets[nod in cso_hash_erase()
[all...]
/third_party/ltp/lib/
tst_timer_test.c
82 unsigned int i, buckets[rows]; in frequency_plot() local
89 memset(buckets, 0, sizeof(buckets)); in frequency_plot()
92 * We work with discrete data buckets smaller than 1 does not make in frequency_plot()
93 * sense as well as it's a good idea to keep buckets integer sized in frequency_plot()
101 buckets[bucket]++; in frequency_plot()
104 unsigned int max_bucket = buckets[0]; in frequency_plot()
106 max_bucket = MAX(max_bucket, buckets[i]); in frequency_plot()
116 if (buckets[l]) in frequency_plot()
121 if (buckets[ in frequency_plot()
[all...]
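
frequency_plot() above zeroes an array of integer-sized buckets, counts each sample into one of them, and tracks the fullest bucket so the printed bars can be scaled. A small self-contained version of that bucketing step; how LTP derives the bucket width and range is not shown in the snippet and is not reproduced here:

    /* Fill `rows` buckets from samples starting at `min` with the given bucket
     * width, and return the count of the fullest bucket (used to scale bars). */
    static unsigned int fill_buckets(const long *samples, unsigned int nsamples,
                                     long min, long width,
                                     unsigned int *buckets, unsigned int rows)
    {
        unsigned int max_bucket = 0;

        for (unsigned int i = 0; i < rows; i++)
            buckets[i] = 0;

        for (unsigned int i = 0; i < nsamples; i++) {
            long off = samples[i] - min;
            unsigned int b = off < 0 ? 0 : (unsigned int)(off / width);

            if (b >= rows)
                b = rows - 1;                  /* clamp outliers into the last bucket */
            buckets[b]++;
            if (buckets[b] > max_bucket)
                max_bucket = buckets[b];
        }
        return max_bucket;
    }
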
/third_party/node/deps/brotli/c/enc/
hash_longest_match_quickly_inc.h
58 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in Prepare() local
66 buckets[key] = 0; in Prepare()
70 buckets[(key + (j << 3)) & BUCKET_MASK] = 0; in Prepare()
79 memset(buckets, 0, sizeof(uint32_t) * BUCKET_SIZE); in Prepare()
155 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in FindLongestMatch() local
179 buckets[key] = (uint32_t)cur_ix; in FindLongestMatch()
194 prev_ix = buckets[key]; in FindLongestMatch()
195 buckets[key] = (uint32_t)cur_ix; in FindLongestMatch()
226 prev_ix = buckets[keys[i]]; in FindLongestMatch()
257 buckets[key_ou in FindLongestMatch()
[all...]
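
Brotli's "quickly" hasher keeps a flat buckets array of positions: FindLongestMatch derives a key from the bytes at the current position, reads buckets[key] to get the previous position that produced the same key, and immediately overwrites the slot with the current position. A stripped-down sketch of that single-probe update; the hash function and sizes here are invented, and the real code probes several nearby keys:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    #define BUCKET_BITS 16
    #define BUCKET_SIZE (1u << BUCKET_BITS)

    /* Toy hash of the 4 bytes at data[pos]; caller guarantees pos + 4 <= length. */
    static uint32_t hash4(const uint8_t *data, size_t pos)
    {
        uint32_t v;
        memcpy(&v, data + pos, sizeof(v));
        return (v * 2654435761u) >> (32 - BUCKET_BITS);
    }

    /* Return the previous position with the same key, then record the current one. */
    static uint32_t probe_and_store(uint32_t buckets[BUCKET_SIZE],
                                    const uint8_t *data, size_t pos)
    {
        uint32_t key = hash4(data, pos);
        uint32_t prev = buckets[key];

        buckets[key] = (uint32_t)pos;
        return prev;       /* caller compares data[prev..] against data[pos..] */
    }
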
/third_party/node/deps/npm/node_modules/cacache/lib/
verify.js
185 const buckets = {}
193 if (buckets[hashed] && !excluded) {
194 buckets[hashed].push(entry)
195 } else if (buckets[hashed] && excluded) {
198 buckets[hashed] = []
199 buckets[hashed]._path = index.bucketPath(cache, k)
201 buckets[hashed] = [entry]
202 buckets[hashed]._path = index.bucketPath(cache, k)
207 Object.keys(buckets),
209 return rebuildBucket(cache, buckets[ke
[all...]
/third_party/skia/third_party/externals/brotli/c/enc/
hash_longest_match_quickly_inc.h
58 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in Prepare() local
66 buckets[key] = 0; in Prepare()
70 buckets[(key + (j << 3)) & BUCKET_MASK] = 0; in Prepare()
79 memset(buckets, 0, sizeof(uint32_t) * BUCKET_SIZE); in Prepare()
155 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in FindLongestMatch() local
179 buckets[key] = (uint32_t)cur_ix; in FindLongestMatch()
194 prev_ix = buckets[key]; in FindLongestMatch()
195 buckets[key] = (uint32_t)cur_ix; in FindLongestMatch()
226 prev_ix = buckets[keys[i]]; in FindLongestMatch()
257 buckets[key_ou in FindLongestMatch()
[all...]
/third_party/skia/third_party/externals/freetype/src/cache/
ftccache.c
100 return cache->buckets + idx; in ftc_get_top_node_for_hash()
107 * buckets array appropriately, we simply degrade the hash table's
118 FT_UFast count = mask + p + 1; /* number of buckets */ in ftc_cache_resize()
121 /* do we need to expand the buckets array? */ in ftc_cache_resize()
127 /* try to expand the buckets array _before_ splitting in ftc_cache_resize()
137 if ( FT_RENEW_ARRAY( cache->buckets, in ftc_cache_resize()
143 pnode = cache->buckets + p; in ftc_cache_resize()
161 cache->buckets[p + mask + 1] = new_list; in ftc_cache_resize()
174 /* do we need to shrink the buckets array? */ in ftc_cache_resize()
191 if ( FT_QRENEW_ARRAY( cache->buckets, in ftc_cache_resize()
[all...]
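
The FreeType cache comments describe expanding the buckets array before splitting, and degrading gracefully when the allocation fails. The split itself follows the usual linear-hashing scheme visible at line 161: entries of bucket p whose hash has the next bit set move to the new bucket at p + mask + 1. A sketch of just that split with a simplified node type:

    #include <stddef.h>

    struct hnode { unsigned long hash; struct hnode *link; };

    /* Split bucket p: keep entries whose new hash bit is clear, move the rest
     * to the freshly added bucket at index p + mask + 1. */
    static void split_bucket(struct hnode **buckets, unsigned long p, unsigned long mask)
    {
        struct hnode *keep = NULL, *moved = NULL, *node = buckets[p];

        while (node) {
            struct hnode *next = node->link;
            if (node->hash & (mask + 1)) {
                node->link = moved;
                moved = node;
            } else {
                node->link = keep;
                keep = node;
            }
            node = next;
        }
        buckets[p] = keep;
        buckets[p + mask + 1] = moved;
    }
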
/third_party/libunwind/libunwind/src/ia64/
Gscript.c
84 cache->buckets[i].lru_chain = (i - 1); in flush_script_cache()
85 cache->buckets[i].coll_chain = -1; in flush_script_cache()
86 cache->buckets[i].ip = 0; in flush_script_cache()
130 struct ia64_script *script = cache->buckets + c->hint; in script_lookup()
144 script = cache->buckets + index; in script_lookup()
150 c->hint = cache->buckets[c->prev_script].hint = in script_lookup()
151 (script - cache->buckets); in script_lookup()
156 script = cache->buckets + script->coll_chain; in script_lookup()
177 script = cache->buckets + head; in script_new()
181 cache->buckets[cach in script_new()
[all...]
/third_party/rust/crates/unicode-width/scripts/
unicode.py
184 list overlaps with another's width list, those buckets can be merged via `try_extend`."""
230 same bucket. Returns a list of the buckets in increasing order of those bits."""
233 buckets = [Bucket() for _ in range(0, 2 ** num_bits)]
236 buckets[(codepoint >> low_bit) & mask].append(codepoint, width)
237 return buckets
245 Typically, tables contain a list of buckets of codepoints. Bucket `i`'s codepoints should
248 key to compression is that two different buckets in two different sub-tables may have the
252 discard the buckets and convert the entries into `EffectiveWidth` values."""
267 buckets = []
269 buckets
290 def buckets(self): global() member in Table
[all...]
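
unicode.py splits codepoints into buckets keyed by a bit slice of the codepoint ((codepoint >> low_bit) & mask) and compresses the multi-level table by letting different sub-tables share buckets with identical contents. The sharing step, sketched here in C over fixed-size width buckets; the script's Bucket/try_extend machinery is richer than this and nothing below mirrors its actual layout:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define BUCKET_LEN 128   /* hypothetical: widths of 128 consecutive codepoints */

    /* Return the index of an existing identical bucket, or append a new one.
     * The caller must ensure `uniq` has room for one more bucket. */
    static size_t intern_bucket(uint8_t (*uniq)[BUCKET_LEN], size_t *nuniq,
                                const uint8_t bucket[BUCKET_LEN])
    {
        for (size_t i = 0; i < *nuniq; i++)
            if (memcmp(uniq[i], bucket, BUCKET_LEN) == 0)
                return i;                       /* share the existing bucket */
        memcpy(uniq[*nuniq], bucket, BUCKET_LEN);
        return (*nuniq)++;
    }
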
/third_party/skia/third_party/externals/abseil-cpp/absl/random/
log_uniform_int_distribution_test.cc
160 std::vector<int64_t> buckets(max_bucket + 1); in ChiSquaredTestImpl()
166 // Convert the output of the generator to one of num_bucket buckets. in ChiSquaredTestImpl()
169 ++buckets[bucket]; in ChiSquaredTestImpl()
174 const int dof = buckets.size() - 1; in ChiSquaredTestImpl()
175 const double expected = trials / static_cast<double>(buckets.size()); in ChiSquaredTestImpl()
180 std::begin(buckets), std::end(buckets), expected); in ChiSquaredTestImpl()
186 for (size_t i = 0; i < buckets.size(); i++) { in ChiSquaredTestImpl()
187 ABSL_INTERNAL_LOG(INFO, absl::StrCat(i, ": ", buckets[i])); in ChiSquaredTestImpl()
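
The test above tallies generator output into buckets, compares the counts against a flat expectation of trials / buckets.size(), and uses buckets.size() - 1 degrees of freedom. The statistic being computed is the usual sum over buckets of (observed - expected)^2 / expected; a compact version:

    #include <stddef.h>

    /* Chi-squared statistic for observed bucket counts against a uniform expectation. */
    static double chi_square_uniform(const long *buckets, size_t nbuckets, long trials)
    {
        double expected = (double)trials / (double)nbuckets;
        double chi2 = 0.0;

        for (size_t i = 0; i < nbuckets; i++) {
            double d = (double)buckets[i] - expected;
            chi2 += d * d / expected;
        }
        return chi2;   /* compare with the critical value for nbuckets - 1 dof */
    }
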
/third_party/node/deps/v8/tools/
generate-runtime-call-stats.py
36 help="group common stats together into buckets")
124 help="useable with --group to only show buckets specified by filter")
374 buckets = {}
403 if bucket_name not in buckets:
405 buckets[bucket_name] = bucket
407 bucket = buckets[bucket_name]
410 return buckets
413 def create_table(buckets, record_bucket_names=True, filter=None):
415 for bucket in buckets.values():
447 buckets
[all...]
