Searched refs:buckets (Results 51 - 75 of 242) sorted by relevance

/kernel/linux/linux-5.10/net/netfilter/ipvs/
ip_vs_dh.c
64 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; member
90 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get()
95 * Assign all the hash buckets of the specified table with the service.
106 b = &s->buckets[0]; in ip_vs_dh_reassign()
132 * Flush all the hash buckets of the specified table.
140 b = &s->buckets[0]; in ip_vs_dh_flush()
166 /* assign the hash buckets with current dests */ in ip_vs_dh_init_svc()
177 /* got to clean up hash buckets here */ in ip_vs_dh_done_svc()
192 /* assign the hash buckets with the updated service */ in ip_vs_dh_dest_changed()
ip_vs_sh.c
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; member
108 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get()
130 dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback()
145 dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback()
160 * Assign all the hash buckets of the specified table with the service.
172 b = &s->buckets[0]; in ip_vs_sh_reassign()
208 * Flush all the hash buckets of the specified table.
216 b = &s->buckets[0]; in ip_vs_sh_flush()
242 /* assign the hash buckets with current dests */ in ip_vs_sh_init_svc()
253 /* got to clean up hash buckets her in ip_vs_sh_done_svc()
[all...]
/kernel/linux/linux-6.6/net/netfilter/ipvs/
ip_vs_dh.c
64 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; member
90 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get()
95 * Assign all the hash buckets of the specified table with the service.
106 b = &s->buckets[0]; in ip_vs_dh_reassign()
132 * Flush all the hash buckets of the specified table.
140 b = &s->buckets[0]; in ip_vs_dh_flush()
166 /* assign the hash buckets with current dests */ in ip_vs_dh_init_svc()
177 /* got to clean up hash buckets here */ in ip_vs_dh_done_svc()
192 /* assign the hash buckets with the updated service */ in ip_vs_dh_dest_changed()
ip_vs_sh.c
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; member
108 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get()
130 dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback()
145 dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback()
160 * Assign all the hash buckets of the specified table with the service.
172 b = &s->buckets[0]; in ip_vs_sh_reassign()
208 * Flush all the hash buckets of the specified table.
216 b = &s->buckets[0]; in ip_vs_sh_flush()
242 /* assign the hash buckets with current dests */ in ip_vs_sh_init_svc()
253 /* got to clean up hash buckets her in ip_vs_sh_done_svc()
[all...]
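
All of the ip_vs_dh.c / ip_vs_sh.c hits above share one shape: a fixed-size array of buckets, each slot holding a destination pointer, indexed by hashing the address. A minimal userspace sketch of that pattern follows; it uses plain pointers instead of the kernel's RCU-protected ones, and the table size, hash and names are illustrative, not the IPVS code itself.

    #include <stdint.h>
    #include <stdio.h>

    #define DH_TAB_BITS 8
    #define DH_TAB_SIZE (1 << DH_TAB_BITS)      /* 256 slots, in the spirit of IP_VS_DH_TAB_SIZE */

    struct dest { const char *name; };          /* stand-in for struct ip_vs_dest */

    struct dh_bucket { struct dest *dest; };    /* one destination per bucket */

    static struct dh_bucket buckets[DH_TAB_SIZE];

    /* hash an IPv4 address into a bucket index (illustrative multiplicative hash) */
    static unsigned int dh_hashkey(uint32_t addr)
    {
        return (addr * 2654435761u) >> (32 - DH_TAB_BITS);
    }

    /* "assign all the hash buckets with the service": spread the dests over the table */
    static void dh_reassign(struct dest *dests, int ndests)
    {
        for (int i = 0; i < DH_TAB_SIZE; i++)
            buckets[i].dest = &dests[i % ndests];
    }

    /* look up the destination a given address maps to */
    static struct dest *dh_get(uint32_t addr)
    {
        return buckets[dh_hashkey(addr)].dest;
    }

    int main(void)
    {
        struct dest servers[] = { { "rs1" }, { "rs2" }, { "rs3" } };
        dh_reassign(servers, 3);
        printf("10.0.0.7 -> %s\n", dh_get(0x0a000007)->name);
        return 0;
    }
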
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib_vlan.c
45 struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; member
71 static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, in mlx5i_find_qpn_to_netdev_node() argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; in mlx5i_find_qpn_to_netdev_node()
99 hlist_add_head(&new_node->hlist, &ht->buckets[key]); in mlx5i_pkey_add_qpn()
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); in mlx5i_pkey_get_netdev()
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib_vlan.c
45 struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; member
71 static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, in mlx5i_find_qpn_to_netdev_node() argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; in mlx5i_find_qpn_to_netdev_node()
99 hlist_add_head(&new_node->hlist, &ht->buckets[key]); in mlx5i_pkey_add_qpn()
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); in mlx5i_pkey_get_netdev()
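
The ipoib_vlan.c hits show a chained hash table: 1 << MLX5I_MAX_LOG_PKEY_SUP hlist heads, with hash_32() picking the chain for a QP number. A rough userspace equivalent with singly linked chains; the node layout and bucket count are assumptions for illustration, not the mlx5 structures.

    #include <stdint.h>
    #include <stdlib.h>

    #define LOG_BUCKETS 7
    #define NUM_BUCKETS (1u << LOG_BUCKETS)

    struct qpn_node {
        uint32_t qpn;
        void *netdev;
        struct qpn_node *next;
    };

    static struct qpn_node *buckets[NUM_BUCKETS];

    /* same idea as the kernel's hash_32(): multiply by a 32-bit golden ratio, keep the top bits */
    static unsigned int hash32(uint32_t val, unsigned int bits)
    {
        return (val * 0x61C88647u) >> (32 - bits);
    }

    static int qpn_add(uint32_t qpn, void *netdev)
    {
        unsigned int idx = hash32(qpn, LOG_BUCKETS);
        struct qpn_node *n = malloc(sizeof(*n));
        if (!n)
            return -1;
        n->qpn = qpn;
        n->netdev = netdev;
        n->next = buckets[idx];          /* push onto the head of the chain */
        buckets[idx] = n;
        return 0;
    }

    static void *qpn_lookup(uint32_t qpn)
    {
        for (struct qpn_node *n = buckets[hash32(qpn, LOG_BUCKETS)]; n; n = n->next)
            if (n->qpn == qpn)
                return n->netdev;
        return NULL;
    }
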
/kernel/linux/linux-5.10/drivers/md/
dm-region-hash.c
70 struct list_head *buckets; member
176 * Calculate a suitable number of buckets for our hash in dm_region_hash_create()
206 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets))); in dm_region_hash_create()
207 if (!rh->buckets) { in dm_region_hash_create()
214 INIT_LIST_HEAD(rh->buckets + i); in dm_region_hash_create()
228 vfree(rh->buckets); in dm_region_hash_create()
244 list_for_each_entry_safe(reg, nreg, rh->buckets + h, in dm_region_hash_destroy()
255 vfree(rh->buckets); in dm_region_hash_destroy()
274 struct list_head *bucket = rh->buckets in __rh_lookup()
[all...]
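
dm-region-hash picks a bucket count at create time, vmalloc()s an array of list heads, and frees everything chain by chain on destroy. A userspace sketch of that create/lookup/destroy shape, with calloc in place of vmalloc and a toy region struct:

    #include <stdlib.h>

    struct region { unsigned long key; struct region *next; };

    struct region_hash {
        unsigned int nr_buckets;   /* power of two chosen from the expected region count */
        unsigned int mask;
        struct region **buckets;   /* one chain head per bucket */
    };

    static int rh_create(struct region_hash *rh, unsigned int nr_buckets)
    {
        rh->buckets = calloc(nr_buckets, sizeof(*rh->buckets));
        if (!rh->buckets)
            return -1;
        rh->nr_buckets = nr_buckets;
        rh->mask = nr_buckets - 1;
        return 0;
    }

    static struct region *rh_lookup(struct region_hash *rh, unsigned long key)
    {
        for (struct region *reg = rh->buckets[key & rh->mask]; reg; reg = reg->next)
            if (reg->key == key)
                return reg;
        return NULL;
    }

    static void rh_destroy(struct region_hash *rh)
    {
        for (unsigned int h = 0; h < rh->nr_buckets; h++) {
            struct region *reg = rh->buckets[h], *next;
            for (; reg; reg = next) {    /* free whatever is still hanging on the chain */
                next = reg->next;
                free(reg);
            }
        }
        free(rh->buckets);
    }
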
/kernel/linux/linux-6.6/kernel/bpf/
bpf_local_storage.c
24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; in select_bucket()
777 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log); in bpf_local_storage_map_mem_usage()
807 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ in bpf_local_storage_map_alloc()
811 smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets), in bpf_local_storage_map_alloc()
813 if (!smap->buckets) { in bpf_local_storage_map_alloc()
819 INIT_HLIST_HEAD(&smap->buckets[i].list); in bpf_local_storage_map_alloc()
820 raw_spin_lock_init(&smap->buckets[i].lock); in bpf_local_storage_map_alloc()
843 kvfree(smap->buckets); in bpf_local_storage_map_alloc()
876 b = &smap->buckets[ in bpf_local_storage_map_free()
[all...]
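
bpf_local_storage.c allocates its bucket array with bpf_map_kvcalloc(), gives every bucket its own raw spinlock, and hashes a pointer into the array (insisting on at least 2 buckets). A hedged userspace sketch of that layout, with a pthread mutex standing in for the raw spinlock and an invented pointer hash:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct bucket {
        void *list;               /* chain head; element handling is elided */
        pthread_mutex_t lock;     /* per-bucket lock, like the kernel's raw_spinlock_t */
    };

    struct local_storage_map {
        unsigned int bucket_log;  /* log2 of the bucket count */
        struct bucket *buckets;
    };

    static struct bucket *select_bucket(struct local_storage_map *map, const void *selem)
    {
        /* illustrative pointer hash; the kernel uses hash_ptr(selem, bucket_log) */
        uint64_t h = (uint64_t)(uintptr_t)selem * 0x9E3779B97F4A7C15ull;
        return &map->buckets[h >> (64 - map->bucket_log)];
    }

    static int map_alloc(struct local_storage_map *map, unsigned int bucket_log)
    {
        if (bucket_log < 1)
            bucket_log = 1;       /* use at least 2 buckets, as the kernel comment demands */
        map->bucket_log = bucket_log;
        map->buckets = calloc(1u << bucket_log, sizeof(*map->buckets));
        if (!map->buckets)
            return -1;
        for (unsigned int i = 0; i < (1u << bucket_log); i++)
            pthread_mutex_init(&map->buckets[i].lock, NULL);
        return 0;
    }
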
stackmap.c
31 struct stack_map_bucket *buckets[]; member
233 bucket = READ_ONCE(smap->buckets[id]); in __bpf_get_stackid()
277 old_bucket = xchg(&smap->buckets[id], new_bucket); in __bpf_get_stackid()
587 bucket = xchg(&smap->buckets[id], NULL); in bpf_stackmap_copy()
595 old_bucket = xchg(&smap->buckets[id], bucket); in bpf_stackmap_copy()
614 if (id >= smap->n_buckets || !smap->buckets[id]) in stack_map_get_next_key()
620 while (id < smap->n_buckets && !smap->buckets[id]) in stack_map_get_next_key()
646 old_bucket = xchg(&smap->buckets[id], NULL); in stack_map_delete_elem()
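
stackmap.c keeps a flexible array of bucket pointers and swaps entries with xchg(), so delete and copy take ownership of a bucket atomically. A small sketch with C11 atomics in place of the kernel primitive; the bucket contents are reduced to a stub:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct stack_bucket { uint32_t nr; uint64_t ips[]; };    /* captured stack trace (stub) */

    struct stack_map {
        uint32_t n_buckets;
        _Atomic(struct stack_bucket *) buckets[];            /* flexible array of bucket pointers */
    };

    /* delete an element: atomically detach the bucket, then free it */
    static int stack_map_delete(struct stack_map *smap, uint32_t id)
    {
        if (id >= smap->n_buckets)
            return -1;
        struct stack_bucket *old = atomic_exchange(&smap->buckets[id], (struct stack_bucket *)NULL);
        if (!old)
            return -1;
        free(old);
        return 0;
    }
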
/kernel/linux/linux-6.6/drivers/md/
dm-region-hash.c
73 struct list_head *buckets; member
179 * Calculate a suitable number of buckets for our hash in dm_region_hash_create()
209 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets))); in dm_region_hash_create()
210 if (!rh->buckets) { in dm_region_hash_create()
217 INIT_LIST_HEAD(rh->buckets + i); in dm_region_hash_create()
231 vfree(rh->buckets); in dm_region_hash_create()
247 list_for_each_entry_safe(reg, nreg, rh->buckets + h, in dm_region_hash_destroy()
258 vfree(rh->buckets); in dm_region_hash_destroy()
277 struct list_head *bucket = rh->buckets in __rh_lookup()
[all...]
/third_party/skia/third_party/externals/freetype/src/base/
ftdbgmem.c
116 FT_MemNode* buckets; member
282 node = table->buckets[i]; in ft_mem_table_resize()
296 if ( table->buckets ) in ft_mem_table_resize()
297 ft_mem_table_free( table, table->buckets ); in ft_mem_table_resize()
299 table->buckets = new_buckets; in ft_mem_table_resize()
316 FT_MemNode *pnode = table->buckets + i, next, node = *pnode; in ft_mem_table_destroy()
345 table->buckets[i] = NULL; in ft_mem_table_destroy()
348 ft_mem_table_free( table, table->buckets ); in ft_mem_table_destroy()
349 table->buckets = NULL; in ft_mem_table_destroy()
392 pnode = table->buckets in ft_mem_table_get_nodep()
[all...]
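
ft_mem_table_resize() in ftdbgmem.c walks every old bucket and relinks each node into a newly allocated, larger bucket array. A sketch of that rehash step under assumed node and hash definitions (not FreeType's actual types):

    #include <stdlib.h>

    struct mem_node { size_t address; struct mem_node *link; };

    /* move every node from the old table into a freshly allocated, larger one */
    static struct mem_node **table_resize(struct mem_node **old, size_t old_size, size_t new_size)
    {
        struct mem_node **new_buckets = calloc(new_size, sizeof(*new_buckets));
        if (!new_buckets)
            return old;                               /* keep the old table if allocation fails */
        for (size_t i = 0; i < old_size; i++) {
            struct mem_node *node = old[i], *next;
            for (; node; node = next) {
                next = node->link;
                size_t hash = node->address % new_size;   /* illustrative rehash */
                node->link = new_buckets[hash];
                new_buckets[hash] = node;
            }
        }
        free(old);
        return new_buckets;
    }
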
/kernel/linux/linux-5.10/fs/nfs/
pnfs_nfs.c
102 p = kmalloc(struct_size(p, buckets, n), gfp_flags); in pnfs_alloc_commit_array()
109 for (b = &p->buckets[0]; n != 0; b++, n--) { in pnfs_alloc_commit_array()
259 struct pnfs_commit_bucket *buckets, in pnfs_bucket_scan_array()
267 cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); in pnfs_bucket_scan_array()
288 cnt = pnfs_bucket_scan_array(cinfo, array->buckets, in pnfs_generic_scan_commit_lists()
304 struct pnfs_commit_bucket *buckets, in pnfs_bucket_recover_commit_reqs()
314 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_recover_commit_reqs()
343 array->buckets, in pnfs_generic_recover_commit_reqs()
355 pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets, in pnfs_bucket_search_commit_reqs() argument
364 for (i = 0, b = buckets; in pnfs_bucket_search_commit_reqs()
258 pnfs_bucket_scan_array(struct nfs_commit_info *cinfo, struct pnfs_commit_bucket *buckets, unsigned int nbuckets, int max) pnfs_bucket_scan_array() argument
303 pnfs_bucket_recover_commit_reqs(struct list_head *dst, struct pnfs_commit_bucket *buckets, unsigned int nbuckets, struct nfs_commit_info *cinfo) pnfs_bucket_recover_commit_reqs() argument
430 pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets, unsigned int nbuckets, struct nfs_commit_info *cinfo, unsigned int idx) pnfs_generic_retry_commit() argument
451 pnfs_bucket_alloc_ds_commits(struct list_head *list, struct pnfs_commit_bucket *buckets, unsigned int nbuckets, struct nfs_commit_info *cinfo) pnfs_bucket_alloc_ds_commits() argument
[all...]
nfs42xattr.c
49 * 64 buckets is a good default. There is likely no reasonable
70 struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; member
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist); in nfs4_xattr_hash_init()
112 spin_lock_init(&cache->buckets[i].lock); in nfs4_xattr_hash_init()
113 cache->buckets[i].cache = cache; in nfs4_xattr_hash_init()
114 cache->buckets[i].draining = false; in nfs4_xattr_hash_init()
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) in nfs4_xattr_free_cache_cb()
278 cache->buckets[i].draining = false; in nfs4_xattr_free_cache_cb()
370 * Mark all buckets as draining, so that no new entries are added. This
395 bucket = &cache->buckets[ in nfs4_xattr_discard_cache()
[all...]
/kernel/linux/linux-6.6/fs/nfs/
pnfs_nfs.c
102 p = kmalloc(struct_size(p, buckets, n), gfp_flags); in pnfs_alloc_commit_array()
109 for (b = &p->buckets[0]; n != 0; b++, n--) { in pnfs_alloc_commit_array()
259 struct pnfs_commit_bucket *buckets, in pnfs_bucket_scan_array()
267 cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); in pnfs_bucket_scan_array()
288 cnt = pnfs_bucket_scan_array(cinfo, array->buckets, in pnfs_generic_scan_commit_lists()
304 struct pnfs_commit_bucket *buckets, in pnfs_bucket_recover_commit_reqs()
314 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_recover_commit_reqs()
343 array->buckets, in pnfs_generic_recover_commit_reqs()
355 pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets, in pnfs_bucket_search_commit_reqs() argument
364 for (i = 0, b = buckets; in pnfs_bucket_search_commit_reqs()
258 pnfs_bucket_scan_array(struct nfs_commit_info *cinfo, struct pnfs_commit_bucket *buckets, unsigned int nbuckets, int max) pnfs_bucket_scan_array() argument
303 pnfs_bucket_recover_commit_reqs(struct list_head *dst, struct pnfs_commit_bucket *buckets, unsigned int nbuckets, struct nfs_commit_info *cinfo) pnfs_bucket_recover_commit_reqs() argument
430 pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets, unsigned int nbuckets, struct nfs_commit_info *cinfo, unsigned int idx) pnfs_generic_retry_commit() argument
451 pnfs_bucket_alloc_ds_commits(struct list_head *list, struct pnfs_commit_bucket *buckets, unsigned int nbuckets, struct nfs_commit_info *cinfo) pnfs_bucket_alloc_ds_commits() argument
[all...]
nfs42xattr.c
49 * 64 buckets is a good default. There is likely no reasonable
70 struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; member
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist); in nfs4_xattr_hash_init()
112 spin_lock_init(&cache->buckets[i].lock); in nfs4_xattr_hash_init()
113 cache->buckets[i].cache = cache; in nfs4_xattr_hash_init()
114 cache->buckets[i].draining = false; in nfs4_xattr_hash_init()
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) in nfs4_xattr_free_cache_cb()
278 cache->buckets[i].draining = false; in nfs4_xattr_free_cache_cb()
369 * Mark all buckets as draining, so that no new entries are added. This
394 bucket = &cache->buckets[ in nfs4_xattr_discard_cache()
[all...]
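
Two bucket users show up in fs/nfs: pnfs_nfs.c sizes its commit array with struct_size(p, buckets, n) and scans it bucket by bucket, while nfs42xattr.c fixes the cache at 64 buckets and initializes each one with a list head, lock, back-pointer and draining flag. A compact userspace sketch of the latter initialization, with assumed type names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define XATTR_HASH_SIZE 64           /* "64 buckets is a good default" */

    struct xattr_cache;

    struct xattr_bucket {
        pthread_mutex_t lock;
        void *hlist;                     /* chain head; entry handling elided */
        struct xattr_cache *cache;       /* back-pointer to the owning cache */
        bool draining;                   /* set while the cache is being discarded */
    };

    struct xattr_cache {
        struct xattr_bucket buckets[XATTR_HASH_SIZE];
    };

    static void xattr_hash_init(struct xattr_cache *cache)
    {
        for (int i = 0; i < XATTR_HASH_SIZE; i++) {
            cache->buckets[i].hlist = NULL;
            pthread_mutex_init(&cache->buckets[i].lock, NULL);
            cache->buckets[i].cache = cache;
            cache->buckets[i].draining = false;
        }
    }
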
/third_party/libdrm/intel/
uthash.h
104 /* initial number of buckets */
105 #define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */
106 #define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
126 HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
182 (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
184 if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
185 memset((head)->hh.tbl->buckets, 0, \
265 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
301 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
341 uthash_free((head)->hh.tbl->buckets, \
1031 UT_hash_bucket *buckets; global() member
[all...]
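
uthash.h hides its bucket array behind macros: the table starts with 32 buckets (HASH_INITIAL_NUM_BUCKETS) and HASH_ADD/HASH_FIND pick the chain internally, so callers never touch hh.tbl->buckets directly. A typical caller looks like this; the struct is hypothetical, the macros are the standard uthash ones:

    #include <stdlib.h>
    #include "uthash.h"

    struct bo_entry {
        int handle;              /* key */
        void *bo;                /* payload */
        UT_hash_handle hh;       /* makes the struct hashable; carries the bucket linkage */
    };

    static struct bo_entry *table = NULL;

    static void add_entry(int handle, void *bo)
    {
        struct bo_entry *e = malloc(sizeof(*e));
        if (!e)
            return;
        e->handle = handle;
        e->bo = bo;
        HASH_ADD_INT(table, handle, e);    /* the macro hashes the key and picks a bucket */
    }

    static struct bo_entry *find_entry(int handle)
    {
        struct bo_entry *e;
        HASH_FIND_INT(table, &handle, e);
        return e;
    }
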
/kernel/linux/linux-5.10/net/ceph/crush/
mapper.c
492 /* choose through intervening buckets */ in crush_choose_firstn()
527 itemtype = map->buckets[-1-item]->type; in crush_choose_firstn()
540 in = map->buckets[-1-item]; in crush_choose_firstn()
564 map->buckets[-1-item], in crush_choose_firstn()
697 /* choose through intervening buckets */ in crush_choose_indep()
741 itemtype = map->buckets[-1-item]->type; in crush_choose_indep()
758 in = map->buckets[-1-item]; in crush_choose_indep()
778 map->buckets[-1-item], in crush_choose_indep()
865 if (!map->buckets[b]) in crush_init_workspace()
869 switch (map->buckets[ in crush_init_workspace()
[all...]
/kernel/linux/linux-6.6/net/ceph/crush/
mapper.c
492 /* choose through intervening buckets */ in crush_choose_firstn()
527 itemtype = map->buckets[-1-item]->type; in crush_choose_firstn()
540 in = map->buckets[-1-item]; in crush_choose_firstn()
564 map->buckets[-1-item], in crush_choose_firstn()
697 /* choose through intervening buckets */ in crush_choose_indep()
741 itemtype = map->buckets[-1-item]->type; in crush_choose_indep()
758 in = map->buckets[-1-item]; in crush_choose_indep()
778 map->buckets[-1-item], in crush_choose_indep()
865 if (!map->buckets[b]) in crush_init_workspace()
869 switch (map->buckets[ in crush_init_workspace()
[all...]
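
In the CRUSH mapper, a negative item id names a bucket rather than a device, and the code indexes map->buckets[] with -1-item (so id -1 is slot 0, id -2 is slot 1, and so on). A tiny sketch of that decoding convention with toy types:

    #include <stddef.h>

    struct crush_bucket { int id; int type; };

    struct crush_map {
        struct crush_bucket **buckets;   /* bucket id -1 lives at index 0, -2 at index 1, ... */
        int max_buckets;
    };

    /* return the bucket behind a negative item id, or NULL for a device id */
    static struct crush_bucket *crush_get_bucket(const struct crush_map *map, int item)
    {
        if (item >= 0)
            return NULL;                 /* non-negative ids are devices, not buckets */
        int idx = -1 - item;
        if (idx >= map->max_buckets)
            return NULL;
        return map->buckets[idx];
    }
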
/kernel/linux/linux-6.6/tools/perf/util/
ftrace.h
43 int buckets[]);
68 int buckets[] __maybe_unused) in perf_ftrace__latency_read_bpf()
bpf_ftrace.c
122 int buckets[]) in perf_ftrace__latency_read_bpf()
138 buckets[idx] = 0; in perf_ftrace__latency_read_bpf()
143 buckets[idx] += hist[i]; in perf_ftrace__latency_read_bpf()
121 perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused, int buckets[]) perf_ftrace__latency_read_bpf() argument
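
perf's ftrace latency mode hands the BPF reader an int buckets[] histogram and sums the per-cpu counts into it, one slot per latency range. A simplified sketch of just that aggregation loop; the bucket count and the flat per-cpu layout are assumptions, not the perf/BPF interface:

    #include <string.h>

    #define NUM_BUCKETS 22      /* assumed histogram size */

    /* fold a flat per-cpu histogram (ncpus * NUM_BUCKETS ints) into buckets[] */
    static void latency_read(int buckets[], const int *percpu_hist, int ncpus)
    {
        memset(buckets, 0, NUM_BUCKETS * sizeof(int));
        for (int idx = 0; idx < NUM_BUCKETS; idx++)
            for (int cpu = 0; cpu < ncpus; cpu++)
                buckets[idx] += percpu_hist[cpu * NUM_BUCKETS + idx];
    }
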
/kernel/linux/linux-5.10/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
298 struct brcmf_gscan_bucket_config **buckets, in brcmf_pno_prep_fwconfig()
323 *buckets = NULL; in brcmf_pno_prep_fwconfig()
355 *buckets = fw_buckets; in brcmf_pno_prep_fwconfig()
396 struct brcmf_gscan_bucket_config *buckets; in brcmf_pno_config_sched_scans() local
403 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets, in brcmf_pno_config_sched_scans()
408 gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets); in brcmf_pno_config_sched_scans()
437 memcpy(&gscan_cfg->bucket[0], buckets, in brcmf_pno_config_sched_scans()
438 n_buckets * sizeof(*buckets)); in brcmf_pno_config_sched_scans()
463 kfree(buckets); in brcmf_pno_config_sched_scans()
296 brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi, struct brcmf_pno_config_le *pno_cfg, struct brcmf_gscan_bucket_config **buckets, u32 *scan_freq) brcmf_pno_prep_fwconfig() argument
/third_party/node/deps/brotli/c/enc/
hash_to_binary_tree_inc.h
72 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in Prepare() local
77 buckets[i] = invalid_pos; in Prepare()
124 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in StoreAndFindMatches() local
126 size_t prev_ix = buckets[key]; in StoreAndFindMatches()
141 buckets[key] = (uint32_t)cur_ix; in StoreAndFindMatches()
/third_party/skia/third_party/externals/brotli/c/enc/
hash_to_binary_tree_inc.h
72 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in Prepare() local
77 buckets[i] = invalid_pos; in Prepare()
124 uint32_t* BROTLI_RESTRICT buckets = self->buckets_; in StoreAndFindMatches() local
126 size_t prev_ix = buckets[key]; in StoreAndFindMatches()
141 buckets[key] = (uint32_t)cur_ix; in StoreAndFindMatches()
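
In Brotli's binary-tree hasher the buckets array maps a hash key to the last position that produced it: Prepare() fills every slot with an invalid position, and StoreAndFindMatches() reads the previous position for a key before overwriting the slot with the current one. A stripped-down sketch of that update; the bucket size is illustrative:

    #include <stdint.h>

    #define BUCKET_BITS 17
    #define BUCKET_SIZE (1u << BUCKET_BITS)
    #define INVALID_POS 0xFFFFFFFFu

    static uint32_t buckets[BUCKET_SIZE];

    static void prepare(void)
    {
        for (uint32_t i = 0; i < BUCKET_SIZE; i++)
            buckets[i] = INVALID_POS;        /* no position recorded for any key yet */
    }

    /* remember cur_ix for this key and hand back the previous position, if any */
    static uint32_t store_and_get_prev(uint32_t key, uint32_t cur_ix)
    {
        uint32_t prev_ix = buckets[key];
        buckets[key] = cur_ix;
        return prev_ix;                      /* INVALID_POS means "no earlier candidate" */
    }
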
/kernel/linux/linux-5.10/block/
blk-stat.h
16 * buckets by @bucket_fn and added to a per-cpu buffer, @cpu_stat. When the
31 * @cpu_stat: Per-cpu statistics buckets.
43 * @buckets: Number of statistics buckets.
45 unsigned int buckets; member
48 * @stat: Array of statistics buckets.
77 * @buckets: Number of statistics buckets.
87 unsigned int buckets, void *data);
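
blk-stat.h describes a callback that sorts request samples into buckets chosen by a caller-supplied bucket_fn and accumulates them per bucket. A rough userspace sketch of that callback shape; the names and the sample struct are illustrative, not the block-layer API:

    #include <stdlib.h>

    struct sample { unsigned long long value; };

    struct stat_callback {
        int (*bucket_fn)(const struct sample *);   /* maps a sample to a bucket index, <0 drops it */
        unsigned int buckets;                      /* number of statistics buckets */
        unsigned long long *stat;                  /* accumulated per-bucket totals */
    };

    static struct stat_callback *stat_alloc(int (*fn)(const struct sample *), unsigned int buckets)
    {
        struct stat_callback *cb = malloc(sizeof(*cb));
        if (!cb)
            return NULL;
        cb->bucket_fn = fn;
        cb->buckets = buckets;
        cb->stat = calloc(buckets, sizeof(*cb->stat));
        if (!cb->stat) {
            free(cb);
            return NULL;
        }
        return cb;
    }

    static void stat_add(struct stat_callback *cb, const struct sample *s)
    {
        int bucket = cb->bucket_fn(s);
        if (bucket < 0 || (unsigned int)bucket >= cb->buckets)
            return;
        cb->stat[bucket] += s->value;
    }
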
/foundation/distributeddatamgr/datamgr_service/services/distributeddataservice/rust/extension/
extension_util.cpp
21 std::pair<OhCloudExtVector *, size_t> ExtensionUtil::Convert(DBVBuckets &&buckets) in Convert() argument
28 for (auto &bucket : buckets) { in Convert()
179 DBVBuckets buckets; in ConvertBuckets() local
183 return buckets; in ConvertBuckets()
185 buckets.reserve(len); in ConvertBuckets()
191 return buckets; in ConvertBuckets()
197 buckets.emplace_back(ConvertBucket(pValues.get())); in ConvertBuckets()
199 return buckets; in ConvertBuckets()
