
Searched refs:buckets (Results 76 - 100 of 242) sorted by relevance


/kernel/linux/linux-6.6/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
298 struct brcmf_gscan_bucket_config **buckets, in brcmf_pno_prep_fwconfig()
323 *buckets = NULL; in brcmf_pno_prep_fwconfig()
355 *buckets = fw_buckets; in brcmf_pno_prep_fwconfig()
396 struct brcmf_gscan_bucket_config *buckets; in brcmf_pno_config_sched_scans() local
403 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets, in brcmf_pno_config_sched_scans()
437 memcpy(gscan_cfg->bucket, buckets, in brcmf_pno_config_sched_scans()
438 array_size(n_buckets, sizeof(*buckets))); in brcmf_pno_config_sched_scans()
463 kfree(buckets); in brcmf_pno_config_sched_scans()
296 brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi, struct brcmf_pno_config_le *pno_cfg, struct brcmf_gscan_bucket_config **buckets, u32 *scan_freq) brcmf_pno_prep_fwconfig() argument
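The pno.c flow above builds the firmware bucket array in brcmf_pno_prep_fwconfig(), copies it into the gscan config with the overflow-safe array_size() helper, and frees it afterwards. A minimal sketch of that overflow-checked sizing step, with hypothetical names standing in for the brcmfmac types:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Overflow-safe n * size, mirroring the kernel's array_size():
     * returns SIZE_MAX on overflow so a subsequent allocation/copy fails. */
    static size_t xarray_size(size_t n, size_t size)
    {
        size_t bytes;
        if (__builtin_mul_overflow(n, size, &bytes))
            return SIZE_MAX;
        return bytes;
    }

    struct bucket_cfg { uint32_t period; uint32_t flags; }; /* hypothetical */

    /* Copy a caller-built bucket array into its final home, following the
     * prep/copy/kfree flow in brcmf_pno_config_sched_scans(). */
    static int install_buckets(struct bucket_cfg *dst, const struct bucket_cfg *src,
                               size_t n_buckets)
    {
        size_t bytes = xarray_size(n_buckets, sizeof(*src));
        if (bytes == SIZE_MAX)
            return -1;
        memcpy(dst, src, bytes);
        return 0;
    }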
/third_party/libunwind/libunwind/src/dwarf/
Gparser.c
554 cache->buckets = cache->default_buckets; in dwarf_flush_rs_cache()
561 if (cache->buckets && cache->buckets != cache->default_buckets) in dwarf_flush_rs_cache()
562 munmap(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->prev_log_size) in dwarf_flush_rs_cache()
563 * sizeof (cache->buckets[0])); in dwarf_flush_rs_cache()
569 GET_MEMORY(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->log_size) in dwarf_flush_rs_cache()
570 * sizeof (cache->buckets[0])); in dwarf_flush_rs_cache()
573 if (!cache->hash || !cache->buckets || !cache->links) in dwarf_flush_rs_cache()
669 return &cache->buckets[index]; in rs_lookup()
677 return &cache->buckets[index]; in rs_lookup()
[all...]
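The Gparser.c excerpt shows the remember-state cache switching between a small built-in bucket array and a larger allocated one sized by log_size, releasing the old storage on flush. A rough sketch of the same default-vs-grown bucket storage, with malloc/free standing in for libunwind's mmap-based GET_MEMORY/munmap:

    #include <stdlib.h>

    #define DEFAULT_LOG_SIZE 7
    #define CACHE_SIZE(log_size) (1u << (log_size))

    struct rs_entry { unsigned ip; unsigned rs; }; /* illustrative payload */

    struct rs_cache {
        unsigned log_size, prev_log_size;
        struct rs_entry *buckets;
        struct rs_entry default_buckets[CACHE_SIZE(DEFAULT_LOG_SIZE)];
    };

    /* Flush: drop any grown bucket array, then fall back to the built-in
     * storage or allocate storage sized for the current log_size. */
    static int cache_flush(struct rs_cache *c)
    {
        if (c->buckets && c->buckets != c->default_buckets)
            free(c->buckets);

        if (c->log_size == DEFAULT_LOG_SIZE) {
            c->buckets = c->default_buckets;
        } else {
            c->buckets = malloc(CACHE_SIZE(c->log_size) * sizeof(c->buckets[0]));
            if (!c->buckets)
                return -1;
        }
        c->prev_log_size = c->log_size;
        return 0;
    }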
/kernel/linux/linux-5.10/block/
kyber-iosched.c
88 * Request latencies are recorded in a histogram with buckets defined relative
102 * The width of the latency histogram buckets is
107 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
111 /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
134 atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS]; member
213 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in flush_latency_buckets() local
214 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type]; in flush_latency_buckets()
218 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0); in flush_latency_buckets()
229 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in calculate_percentile() local
233 samples += buckets[bucket]; in calculate_percentile()
[all...]
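flush_latency_buckets() drains each CPU's histogram into the global one with atomic_xchg, so counts recorded concurrently on that CPU are neither lost nor double-counted. A minimal C11 sketch of the same drain:

    #include <stdatomic.h>

    #define NBUCKETS 8  /* stand-in for KYBER_LATENCY_BUCKETS */

    /* Drain one CPU's latency histogram into the global one.  atomic_exchange
     * zeroes each per-cpu counter and returns what was there, the same trick
     * flush_latency_buckets() plays with atomic_xchg(). */
    static void flush_cpu_buckets(unsigned int *global, _Atomic unsigned int *percpu)
    {
        for (int b = 0; b < NBUCKETS; b++)
            global[b] += atomic_exchange(&percpu[b], 0);
    }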
/kernel/linux/linux-6.6/block/
kyber-iosched.c
88 * Request latencies are recorded in a histogram with buckets defined relative
102 * The width of the latency histogram buckets is
107 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
111 /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
134 atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS]; member
214 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in flush_latency_buckets() local
215 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type]; in flush_latency_buckets()
219 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0); in flush_latency_buckets()
230 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in calculate_percentile() local
234 samples += buckets[bucket]; in calculate_percentile()
[all...]
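calculate_percentile() then walks the merged histogram, accumulating bucket counts until the running total crosses the requested percentile. A sketch of that walk, assuming nothing kyber-specific:

    /* Find the smallest bucket index holding the p-th percentile: accumulate
     * counts until the running total reaches p% of all samples. */
    static int histogram_percentile(const unsigned int *buckets, int nbuckets,
                                    unsigned int percentile)
    {
        unsigned long total = 0, acc = 0;

        for (int b = 0; b < nbuckets; b++)
            total += buckets[b];
        if (!total)
            return -1;                     /* no samples recorded yet */

        for (int b = 0; b < nbuckets; b++) {
            acc += buckets[b];
            if (acc * 100 >= total * percentile)
                return b;
        }
        return nbuckets - 1;
    }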
blk-stat.h
16 * buckets by @bucket_fn and added to a per-cpu buffer, @cpu_stat. When the
31 * @cpu_stat: Per-cpu statistics buckets.
43 * @buckets: Number of statistics buckets.
45 unsigned int buckets; member
48 * @stat: Array of statistics buckets.
79 * @buckets: Number of statistics buckets.
89 unsigned int buckets, void *data);
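blk-stat's scheme is callback-driven: @bucket_fn maps each sample to a bucket index (or a negative value to drop it), and the collector only owns the counters. A minimal sketch of that shape, with illustrative names:

    struct sample { unsigned long long value; };

    struct stats_callback {
        int (*bucket_fn)(const struct sample *s); /* negative = discard */
        unsigned int buckets;        /* number of statistics buckets */
        unsigned long *counts;       /* one counter per bucket */
    };

    static void stats_account(struct stats_callback *cb, const struct sample *s)
    {
        int b = cb->bucket_fn(s);
        if (b >= 0 && (unsigned int)b < cb->buckets)
            cb->counts[b]++;
    }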
/third_party/mesa3d/src/gallium/auxiliary/cso_cache/
cso_hash.h
67 struct cso_node **buckets; member
146 node = &hash->buckets[akey % hash->numBuckets]; in cso_hash_find_node()
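cso_hash_find_node() is the textbook chained lookup: key % numBuckets selects a chain, which is then walked for an exact match. Sketched generically:

    #include <stddef.h>

    struct cso_node_s { unsigned key; void *value; struct cso_node_s *next; };

    /* Pick the chain with key % num_buckets, then walk it for the key. */
    static struct cso_node_s *hash_find(struct cso_node_s **buckets,
                                        size_t num_buckets, unsigned key)
    {
        for (struct cso_node_s *n = buckets[key % num_buckets]; n; n = n->next)
            if (n->key == key)
                return n;
        return NULL;
    }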
/kernel/linux/linux-5.10/net/sched/
sch_hhf.c
19 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
20 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
22 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
103 #define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member
147 struct list_head new_buckets; /* list of new buckets */
148 struct list_head old_buckets; /* list of old buckets */
244 /* Assigns packets to WDRR buckets. Implements a multi-stage filter to
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
[all...]
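The two WDRR buckets are served by deficit round robin: each bucket's deficit is topped up by its quantum when its turn comes, and it may transmit head packets while the deficit covers their length. A simplified sketch, modelling a queue as an array of packet lengths:

    /* Two-bucket Weighted DRR in the shape hhf uses; the non-heavy-hitter
     * bucket gets the larger quantum so it keeps the larger share. */
    struct wdrr_bucket_s {
        int deficit;
        int quantum;            /* bytes credited per round */
        const int *pkt_len;     /* lengths of queued packets */
        int head, count;
    };

    static void wdrr_service(struct wdrr_bucket_s *b)
    {
        b->deficit += b->quantum;
        while (b->count > 0 && b->deficit >= b->pkt_len[b->head]) {
            b->deficit -= b->pkt_len[b->head];   /* "transmit" head packet */
            b->head++;
            b->count--;
        }
    }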
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/lag/
port_sel.c
51 ft_attr.max_fte = ldev->ports * ldev->buckets; in mlx5_lag_create_port_sel_table()
78 for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_create_port_sel_table()
81 idx = i * ldev->buckets + j; in mlx5_lag_create_port_sel_table()
346 for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_destroy_definer()
347 idx = i * ldev->buckets + j; in mlx5_lag_destroy_definer()
575 for (j = 0; j < ldev->buckets; j++) { in __mlx5_lag_modify_definers_destinations()
576 idx = i * ldev->buckets + j; in __mlx5_lag_modify_definers_destinations()
lag.c
207 for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_print_mapping()
208 idx = i * ldev->buckets + j; in mlx5_lag_print_mapping()
279 ldev->buckets = 1; in mlx5_lag_dev_alloc()
307 * As we have ldev->buckets slots per port first assume the native
314 u8 buckets, in mlx5_infer_tx_affinity_mapping()
334 /* Use native mapping by default where each port's buckets in mlx5_infer_tx_affinity_mapping()
338 for (j = 0; j < buckets; j++) { in mlx5_infer_tx_affinity_mapping()
339 idx = i * buckets + j; in mlx5_infer_tx_affinity_mapping()
350 for (j = 0; j < buckets; j++) { in mlx5_infer_tx_affinity_mapping()
352 ports[disabled[i] * buckets + j] in mlx5_infer_tx_affinity_mapping()
312 mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, u8 num_ports, u8 buckets, u8 *ports) mlx5_infer_tx_affinity_mapping() argument
[all...]
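Both lag.c and port_sel.c address a flat array with ldev->buckets slots per port, so entry (port i, bucket j) lives at i * buckets + j. A sketch of the native mapping the comment describes, assuming 1-based egress port numbering as in the mlx5 enums:

    /* Native mapping: each port's buckets all point at that same port,
     * i.e. 1 1 .. 1 2 2 .. 2 3 3 .. 3 for three ports. */
    static void native_mapping(unsigned char *ports, int num_ports, int buckets)
    {
        for (int i = 0; i < num_ports; i++)
            for (int j = 0; j < buckets; j++)
                ports[i * buckets + j] = (unsigned char)(i + 1);
    }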
/kernel/linux/linux-6.6/net/sched/
sch_hhf.c
19 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
20 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
22 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
103 #define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member
147 struct list_head new_buckets; /* list of new buckets */
148 struct list_head old_buckets; /* list of old buckets */
244 /* Assigns packets to WDRR buckets. Implements a multi-stage filter to
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
[all...]
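The "multi-stage filter" the comment refers to keeps one counter array per stage, each indexed by an independent hash of the flow; a flow is promoted to the heavy-hitter bucket only when all stages agree, which makes collision-driven false positives unlikely. An illustrative sketch (stage count, array size, and threshold are made-up values):

    #include <stdbool.h>
    #include <stdint.h>

    #define HH_STAGES    4
    #define HH_ARRAY_SZ  1024
    #define HH_THRESHOLD 4096   /* bytes; illustrative */

    static uint32_t stage_counters[HH_STAGES][HH_ARRAY_SZ];

    /* Stand-in for per-stage independent hashing. */
    static uint32_t stage_hash(uint32_t flow, int stage)
    {
        uint32_t h = flow * 2654435761u + (uint32_t)stage * 0x9e3779b9u;
        return (h >> 16) % HH_ARRAY_SZ;
    }

    /* A flow counts as a heavy hitter only once its counter exceeds the
     * threshold in *every* stage. */
    static bool account_and_classify(uint32_t flow, uint32_t pkt_len)
    {
        bool heavy = true;

        for (int s = 0; s < HH_STAGES; s++) {
            uint32_t *c = &stage_counters[s][stage_hash(flow, s)];
            *c += pkt_len;
            if (*c < HH_THRESHOLD)
                heavy = false;
        }
        return heavy;
    }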
/third_party/node/deps/brotli/c/enc/
hash_longest_match64_inc.h
33 /* Number of hash buckets. */
113 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in Store() local
119 buckets[offset] = (uint32_t)ix; in Store()
171 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in FindLongestMatch() local
222 uint32_t* BROTLI_RESTRICT bucket = &buckets[key << self->block_bits_]; in FindLongestMatch()
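In this hasher, buckets_ is one flat array holding a block of (1 << block_bits_) candidate positions per hash key, so a key's block starts at key << block_bits_ and is filled as a small ring buffer. A sketch of that layout under those assumptions:

    #include <stddef.h>
    #include <stdint.h>

    /* The block of candidate positions for a key starts at key << block_bits. */
    static uint32_t *bucket_for_key(uint32_t *buckets, uint32_t key,
                                    unsigned block_bits)
    {
        return &buckets[(size_t)key << block_bits];
    }

    /* Ring-buffer insert: once the block is full, overwrite the oldest slot.
     * num[] is a per-key insertion counter. */
    static void store_position(uint32_t *buckets, uint16_t *num, uint32_t key,
                               unsigned block_bits, uint32_t ix)
    {
        const uint32_t block_mask = (1u << block_bits) - 1;
        bucket_for_key(buckets, key, block_bits)[num[key] & block_mask] = ix;
        num[key]++;
    }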
/third_party/skia/third_party/externals/brotli/c/enc/
hash_longest_match64_inc.h
33 /* Number of hash buckets. */
113 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in Store() local
119 buckets[offset] = (uint32_t)ix; in Store()
171 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in FindLongestMatch() local
222 uint32_t* BROTLI_RESTRICT bucket = &buckets[key << self->block_bits_]; in FindLongestMatch()
/third_party/ffmpeg/libavcodec/
dnxhdenc.c
1150 int buckets[RADIX_PASSES][NBUCKETS]) in radix_count()
1153 memset(buckets, 0, sizeof(buckets[0][0]) * RADIX_PASSES * NBUCKETS); in radix_count()
1157 buckets[j][get_bucket(v, 0)]++; in radix_count()
1165 buckets[j][i] = offset -= buckets[j][i]; in radix_count()
1166 av_assert1(!buckets[j][0]); in radix_count()
1171 int size, int buckets[NBUCKETS], int pass) in radix_sort_pass()
1177 int pos = buckets[v]++; in radix_sort_pass()
1184 int buckets[RADIX_PASSES][NBUCKETS]; in radix_sort() local
1149 radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS]) radix_count() argument
1170 radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass) radix_sort_pass() argument
[all...]
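dnxhdenc sorts rate-control entries with a radix sort: radix_count() histograms the keys into buckets and turns the counts into starting offsets, then radix_sort_pass() scatters each element to its slot. A compact ascending variant over plain 16-bit keys (the encoder counts all passes up front and fills offsets in its own order):

    #include <stdint.h>

    #define RADIX_BITS 8
    #define NBUCKETS   (1 << RADIX_BITS)

    /* One pass: count keys per bucket, prefix-sum the counts into starting
     * offsets, then scatter each element to its slot (stable). */
    static void radix_pass(uint16_t *dst, const uint16_t *src, int n, int shift)
    {
        int buckets[NBUCKETS] = { 0 };
        int offset = 0;

        for (int i = 0; i < n; i++)
            buckets[(src[i] >> shift) & (NBUCKETS - 1)]++;
        for (int i = 0; i < NBUCKETS; i++) {
            int count = buckets[i];
            buckets[i] = offset;        /* bucket i starts here */
            offset += count;
        }
        for (int i = 0; i < n; i++)
            dst[buckets[(src[i] >> shift) & (NBUCKETS - 1)]++] = src[i];
    }

    /* Full sort of 16-bit keys = two passes, ping-ponging between buffers. */
    static void radix_sort16(uint16_t *data, uint16_t *tmp, int n)
    {
        radix_pass(tmp, data, n, 0);
        radix_pass(data, tmp, n, RADIX_BITS);
    }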
/kernel/linux/linux-5.10/drivers/md/persistent-data/
dm-transaction-manager.c
96 struct hlist_head buckets[DM_HASH_SIZE]; member
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist) in is_shadow()
134 hlist_add_head(&si->hlist, tm->buckets + bucket); in insert_shadow()
148 bucket = tm->buckets + i; in wipe_shadow_table()
177 INIT_HLIST_HEAD(tm->buckets + i); in dm_tm_create()
/kernel/linux/linux-6.6/drivers/md/persistent-data/
dm-transaction-manager.c
98 struct hlist_head buckets[DM_HASH_SIZE]; member
112 hlist_for_each_entry(si, tm->buckets + bucket, hlist) in is_shadow()
136 hlist_add_head(&si->hlist, tm->buckets + bucket); in insert_shadow()
150 bucket = tm->buckets + i; in wipe_shadow_table()
179 INIT_HLIST_HEAD(tm->buckets + i); in dm_tm_create()
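The transaction manager's shadow table is a fixed-size array of hlist heads: hash the block to pick a bucket, walk the chain for membership (is_shadow), push new entries at the head (insert_shadow). The same pattern with plain pointers standing in for the kernel's hlist:

    #include <stddef.h>

    #define DM_HASH_SIZE 256
    #define DM_HASH_MASK (DM_HASH_SIZE - 1)

    struct shadow { unsigned long long block; struct shadow *next; };

    static struct shadow *shadow_buckets[DM_HASH_SIZE];

    static struct shadow *find_shadow(unsigned long long block)
    {
        unsigned bucket = (unsigned)(block & DM_HASH_MASK); /* stand-in hash */
        for (struct shadow *s = shadow_buckets[bucket]; s; s = s->next)
            if (s->block == block)
                return s;
        return NULL;
    }

    static void insert_shadow(struct shadow *s)
    {
        unsigned bucket = (unsigned)(s->block & DM_HASH_MASK);
        s->next = shadow_buckets[bucket];   /* push at head of the chain */
        shadow_buckets[bucket] = s;
    }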
/third_party/node/deps/v8/src/heap/
remembered-set.h
42 slots += slot_set->Iterate(chunk->address(), 0, chunk->buckets(), in Iterate()
62 static_cast<int>(end_offset), chunk->buckets(), in RemoveRange()
187 chunk->buckets(), callback, in IterateAndTrackEmptyBuckets()
197 if (slot_set != nullptr && slot_set->FreeEmptyBuckets(chunk->buckets())) { in FreeEmptyBuckets()
206 slot_set->CheckPossiblyEmptyBuckets(chunk->buckets(), in CheckPossiblyEmptyBuckets()
/third_party/skia/third_party/externals/abseil-cpp/absl/random/
zipf_distribution_test.cc
229 // Find the split-points for the buckets. in TEST_P()
267 std::vector<int64_t> buckets(points.size(), 0); in TEST_P()
278 buckets[std::distance(std::begin(points), it)]++; in TEST_P()
298 std::begin(buckets), std::end(buckets), std::begin(expected), in TEST_P()
308 ABSL_INTERNAL_LOG(INFO, absl::StrCat(points[i], ": ", buckets[i], in TEST_P()
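The zipf test bins samples by locating each value among precomputed split points and incrementing the matching bucket. A C stand-in for that lookup, using a lower-bound binary search over ascending bounds in place of the test's std:: algorithm:

    #include <stddef.h>
    #include <stdint.h>

    /* Return the index of the first split point not below the sample;
     * n means "past the last split point". */
    static size_t bucket_index(const int64_t *points, size_t n, int64_t sample)
    {
        size_t lo = 0, hi = n;
        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            if (points[mid] < sample)
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;   /* caller does buckets[bucket_index(...)]++ */
    }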
poisson_distribution_test.cc
206 // range buckets. However there is no closed form solution for the inverse cdf
272 absl::flat_hash_map<int32_t, int> buckets; in SingleZTest() local
277 buckets[x]++; in SingleZTest()
362 void InitChiSquaredTest(const double buckets);
374 const double buckets) { in InitChiSquaredTest()
381 // buckets to the extent that it is possible. However for poisson in InitChiSquaredTest()
385 const double inc = 1.0 / buckets; in InitChiSquaredTest()
442 ABSL_INTERNAL_LOG(INFO, absl::StrCat("VALUES buckets=", counts.size(), in ChiSquaredTestImpl()
373 InitChiSquaredTest(const double buckets) InitChiSquaredTest() argument
/kernel/linux/linux-5.10/net/netfilter/
nft_set_hash.c
435 /* Number of buckets is stored in u32, so cap our result to 1U<<31 */
460 u32 buckets; member
478 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup()
498 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_get()
518 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup_fast()
542 hash = reciprocal_scale(hash, priv->buckets); in nft_jhash()
624 for (i = 0; i < priv->buckets; i++) { in nft_hash_walk()
655 priv->buckets = nft_hash_buckets(desc->size); in nft_hash_init()
669 for (i = 0; i < priv->buckets; i++) { in nft_hash_destroy()
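All of these lookup paths map the 32-bit hash onto [0, buckets) with reciprocal_scale() rather than a modulo: a multiply and shift, which is cheap and consistent with the bucket-count cap noted in the comment. The kernel helper reduces to:

    #include <stdint.h>

    /* Map a 32-bit hash onto [0, buckets) without a division:
     * (hash * buckets) / 2^32, computed in 64 bits. */
    static uint32_t reciprocal_scale32(uint32_t hash, uint32_t buckets)
    {
        return (uint32_t)(((uint64_t)hash * buckets) >> 32);
    }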
/kernel/linux/linux-5.10/kernel/bpf/
stackmap.c
32 struct stack_map_bucket *buckets[]; member
420 bucket = READ_ONCE(smap->buckets[id]); in __bpf_get_stackid()
464 old_bucket = xchg(&smap->buckets[id], new_bucket); in __bpf_get_stackid()
776 bucket = xchg(&smap->buckets[id], NULL); in bpf_stackmap_copy()
784 old_bucket = xchg(&smap->buckets[id], bucket); in bpf_stackmap_copy()
803 if (id >= smap->n_buckets || !smap->buckets[id]) in stack_map_get_next_key()
809 while (id < smap->n_buckets && !smap->buckets[id]) in stack_map_get_next_key()
835 old_bucket = xchg(&smap->buckets[id], NULL); in stack_map_delete_elem()
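stackmap publishes and retires buckets with xchg, so a concurrent reader sees either the old or the new bucket, and the previous one can be recycled exactly once. The same idea in C11 atomics:

    #include <stdatomic.h>

    struct stack_bucket { int dummy; };  /* illustrative payload */

    /* Swap the new bucket into the slot atomically and hand back whatever
     * was there before, to be freed or reused by exactly one caller. */
    static struct stack_bucket *publish_bucket(struct stack_bucket *_Atomic *slot,
                                               struct stack_bucket *new_bucket)
    {
        return atomic_exchange(slot, new_bucket);
    }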
/kernel/linux/linux-6.6/net/netfilter/
nft_set_hash.c
449 /* Number of buckets is stored in u32, so cap our result to 1U<<31 */
474 u32 buckets; member
493 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup()
513 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_get()
534 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup_fast()
558 hash = reciprocal_scale(hash, priv->buckets); in nft_jhash()
640 for (i = 0; i < priv->buckets; i++) { in nft_hash_walk()
671 priv->buckets = nft_hash_buckets(desc->size); in nft_hash_init()
685 for (i = 0; i < priv->buckets; i++) { in nft_hash_destroy()
/foundation/filemanagement/dfs_service/test/unittests/clouddisk_database/mock/
meta_file_clouddisk_mock.cpp
256 uint64_t buckets = (1ULL << (level + 1)) - 1; in GetOverallBucket() local
257 return static_cast<uint32_t>(buckets); in GetOverallBucket()
262 size_t buckets = GetOverallBucket(level); in GetDcacheFileSize() local
263 return buckets * DENTRYGROUP_SIZE * BUCKET_BLOCKS + DENTRYGROUP_HEADER; in GetDcacheFileSize()
287 uint64_t buckets = (1ULL << level); in GetBucketByLevel() local
288 return static_cast<uint32_t>(buckets); in GetBucketByLevel()
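GetBucketByLevel() and GetOverallBucket() encode simple geometry: level l holds 2^l buckets, so levels 0..l together hold the geometric sum 2^(l+1) - 1. For level 2 that is 1 + 2 + 4 = 7 = (1 << 3) - 1. Restated as plain C:

    #include <stdint.h>

    /* Buckets on a single level: 2^level. */
    static uint32_t buckets_at_level(uint32_t level)
    {
        return (uint32_t)(1ULL << level);
    }

    /* Buckets across levels 0..level: 2^(level+1) - 1 (geometric series). */
    static uint32_t overall_buckets(uint32_t level)
    {
        return (uint32_t)((1ULL << (level + 1)) - 1);
    }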
/kernel/linux/linux-5.10/drivers/md/bcache/
alloc.c
7 * Allocation in bcache is done in terms of buckets:
17 * of buckets on disk, with a pointer to them in the journal header.
25 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
26 * call prio_write(), and when prio_write() finishes we pull buckets off the
31 * smaller freelist, and buckets on that list are always ready to be used.
36 * There is another freelist, because sometimes we have buckets that we know
38 * priorities to be rewritten. These come from freed btree nodes and buckets
40 * them (because they were overwritten). That's the unused list - buckets on the
57 * buckets are ready.
59 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
[all...]
/kernel/linux/linux-6.6/drivers/md/bcache/
alloc.c
7 * Allocation in bcache is done in terms of buckets:
17 * of buckets on disk, with a pointer to them in the journal header.
25 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
26 * call prio_write(), and when prio_write() finishes we pull buckets off the
31 * smaller freelist, and buckets on that list are always ready to be used.
36 * There is another freelist, because sometimes we have buckets that we know
38 * priorities to be rewritten. These come from freed btree nodes and buckets
40 * them (because they were overwritten). That's the unused list - buckets on the
57 * buckets are ready.
59 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
[all...]
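The alloc.c comment describes buckets flowing through freelists (free_inc feeding free, plus the unused list): producers push freshly invalidated buckets, and allocation pops them once their priorities and gens have been persisted. A toy fixed-size FIFO of bucket indices in that spirit, not bcache's actual fifo type:

    #include <stdbool.h>

    #define FIFO_SIZE 64   /* must divide 2^32 for the free-running counters */

    struct bucket_fifo {
        unsigned front, back;               /* back - front = occupancy */
        unsigned slots[FIFO_SIZE];
    };

    static bool fifo_push(struct bucket_fifo *f, unsigned bucket)
    {
        if (f->back - f->front == FIFO_SIZE)
            return false;                   /* full: caller must wait/flush */
        f->slots[f->back++ % FIFO_SIZE] = bucket;
        return true;
    }

    static bool fifo_pop(struct bucket_fifo *f, unsigned *bucket)
    {
        if (f->back == f->front)
            return false;                   /* empty: nothing ready yet */
        *bucket = f->slots[f->front++ % FIFO_SIZE];
        return true;
    }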
/kernel/linux/linux-6.6/include/net/netns/
unix.h
12 struct hlist_head *buckets; member

Completed in 22 milliseconds
