
Searched refs:bucket (Results 76 - 100 of 304) sorted by relevance


/kernel/linux/linux-5.10/net/netfilter/ipvs/
ip_vs_lblc.c
103 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ member
172 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblc_hash()
185 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblc_get()
239 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { in ip_vs_lblc_flush()
268 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblc_full_check()
324 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblc_check_expire()
363 INIT_HLIST_HEAD(&tbl->bucket[i]); in ip_vs_lblc_init_svc()
ip_vs_lblcr.c
273 struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ member
335 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblcr_hash()
348 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblcr_get()
405 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { in ip_vs_lblcr_flush()
433 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblcr_full_check()
488 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblcr_check_expire()
526 INIT_HLIST_HEAD(&tbl->bucket[i]); in ip_vs_lblcr_init_svc()
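The ip_vs_lblc.c and ip_vs_lblcr.c hits above all follow the same pattern: a fixed-size array of hlist heads indexed by a hash, new entries added at the head of the selected bucket, and per-bucket walks for lookup and expiry. Below is a minimal userspace sketch of that pattern; the plain singly linked list and the names TAB_SIZE, table_add() and table_get() are stand-ins for the kernel's hlist/RCU primitives, not the IPVS code itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TAB_SIZE 16                        /* stand-in for IP_VS_LBLC_TAB_SIZE */

struct entry {
    unsigned int key;
    struct entry *next;                    /* stand-in for the hlist node */
};

static struct entry *bucket[TAB_SIZE];     /* one list head per hash bucket */

static unsigned int hash_key(unsigned int key)
{
    return (key * 2654435761u) % TAB_SIZE; /* any cheap hash will do */
}

/* add at the head of the selected bucket, like hlist_add_head_rcu() */
static void table_add(struct entry *en)
{
    unsigned int h = hash_key(en->key);

    en->next = bucket[h];
    bucket[h] = en;
}

/* walk only the matching bucket, like hlist_for_each_entry_rcu() */
static struct entry *table_get(unsigned int key)
{
    struct entry *en;

    for (en = bucket[hash_key(key)]; en; en = en->next)
        if (en->key == key)
            return en;
    return NULL;
}

int main(void)
{
    struct entry *e = calloc(1, sizeof(*e));

    if (!e)
        return 1;
    memset(bucket, 0, sizeof(bucket));     /* like the INIT_HLIST_HEAD() loop */
    e->key = 42;
    table_add(e);
    printf("lookup 42 -> %s\n", table_get(42) ? "found" : "missing");
    free(e);
    return 0;
}
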
/kernel/linux/linux-6.6/drivers/misc/vmw_vmci/
vmci_doorbell.c
120 u32 bucket = VMCI_DOORBELL_HASH(idx); in dbell_index_table_find() local
123 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], in dbell_index_table_find()
139 u32 bucket; in dbell_index_table_add() local
187 bucket = VMCI_DOORBELL_HASH(entry->idx); in dbell_index_table_add()
188 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); in dbell_index_table_add()
355 u32 bucket = VMCI_DOORBELL_HASH(notify_idx); in dbell_fire_entries() local
360 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) { in dbell_fire_entries()
/kernel/linux/linux-6.6/net/netfilter/ipvs/
ip_vs_lblc.c
103 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ member
172 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblc_hash()
185 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblc_get()
239 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { in ip_vs_lblc_flush()
268 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblc_full_check()
324 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblc_check_expire()
363 INIT_HLIST_HEAD(&tbl->bucket[i]); in ip_vs_lblc_init_svc()
ip_vs_lblcr.c
273 struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ member
335 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblcr_hash()
348 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblcr_get()
405 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { in ip_vs_lblcr_flush()
433 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblcr_full_check()
488 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblcr_check_expire()
526 INIT_HLIST_HEAD(&tbl->bucket[i]); in ip_vs_lblcr_init_svc()
/kernel/linux/linux-5.10/arch/hexagon/kernel/
ptrace.c
78 unsigned long bucket; in genregs_set() local
108 INEXT(&bucket, cause); in genregs_set()
109 INEXT(&bucket, badva); in genregs_set()
/kernel/linux/linux-6.6/arch/hexagon/kernel/
ptrace.c
78 unsigned long bucket; in genregs_set() local
108 INEXT(&bucket, cause); in genregs_set()
109 INEXT(&bucket, badva); in genregs_set()
/kernel/linux/linux-5.10/drivers/md/bcache/
extents.c
10 * bucket priority is increased on cache hit, and periodically all the buckets
54 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid() local
58 bucket < ca->sb.first_bucket || in __ptr_invalid()
59 bucket >= ca->sb.nbuckets) in __ptr_invalid()
75 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status() local
80 if (bucket < ca->sb.first_bucket) in bch_ptr_status()
82 if (bucket >= ca->sb.nbuckets) in bch_ptr_status()
138 pr_cont(" bucket %zu", n); in bch_bkey_dump()
177 struct bucket *g; in btree_ptr_bad_expensive()
199 "inconsistent btree pointer %s: bucket in btree_ptr_bad_expensive()
[all...]
movinggc.c
185 static bool bucket_cmp(struct bucket *l, struct bucket *r) in bucket_cmp()
192 struct bucket *b; in bucket_heap_top()
200 struct bucket *b; in bch_moving_gc()
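The __ptr_invalid() and bch_ptr_status() hits show bcache rejecting a key pointer whose bucket number falls outside the device's usable range, i.e. below first_bucket or at/after nbuckets from the superblock. A small standalone sketch of just that range check; struct sb_info here is a hypothetical stand-in for the relevant struct cache_sb fields.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sb_info {
    size_t first_bucket;   /* first usable bucket (after the sb/journal area) */
    size_t nbuckets;       /* total buckets on the device */
};

/* mirrors the __ptr_invalid() test: a pointer's bucket must lie inside
 * [first_bucket, nbuckets) to be usable at all */
static bool bucket_in_range(const struct sb_info *sb, size_t bucket)
{
    return bucket >= sb->first_bucket && bucket < sb->nbuckets;
}

int main(void)
{
    struct sb_info sb = { .first_bucket = 8, .nbuckets = 1024 };

    printf("%d %d %d\n",
           bucket_in_range(&sb, 7),      /* 0: before the data area */
           bucket_in_range(&sb, 500),    /* 1: valid */
           bucket_in_range(&sb, 1024));  /* 0: one past the end */
    return 0;
}
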
/kernel/linux/linux-6.6/drivers/md/bcache/
extents.c
10 * bucket priority is increased on cache hit, and periodically all the buckets
54 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid() local
58 bucket < ca->sb.first_bucket || in __ptr_invalid()
59 bucket >= ca->sb.nbuckets) in __ptr_invalid()
75 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status() local
80 if (bucket < ca->sb.first_bucket) in bch_ptr_status()
82 if (bucket >= ca->sb.nbuckets) in bch_ptr_status()
138 pr_cont(" bucket %zu", n); in bch_bkey_dump()
177 struct bucket *g; in btree_ptr_bad_expensive()
199 "inconsistent btree pointer %s: bucket in btree_ptr_bad_expensive()
[all...]
movinggc.c
185 static bool bucket_cmp(struct bucket *l, struct bucket *r) in bucket_cmp()
192 struct bucket *b; in bucket_heap_top()
200 struct bucket *b; in bch_moving_gc()
/kernel/linux/linux-6.6/fs/fscache/
volume.c
165 unsigned int bucket, collidee_debug_id = 0; in fscache_hash_volume() local
167 bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1); in fscache_hash_volume()
168 h = &fscache_volume_hash[bucket]; in fscache_hash_volume()
363 unsigned int bucket; in fscache_unhash_volume() local
365 bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1); in fscache_unhash_volume()
366 h = &fscache_volume_hash[bucket]; in fscache_unhash_volume()
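fscache_hash_volume() and fscache_unhash_volume() pick a bucket by masking the key hash with ARRAY_SIZE(fscache_volume_hash) - 1, which is only equivalent to a modulo because the table has a power-of-two number of entries. A tiny illustration of that idiom; VOLUME_HASH_SIZE is an invented stand-in for the real array size.

#include <stdio.h>

/* the table size must stay a power of two for the mask below to be a
 * correct replacement for "% size" */
#define VOLUME_HASH_SIZE 64u

static unsigned int hash_to_bucket(unsigned int key_hash)
{
    return key_hash & (VOLUME_HASH_SIZE - 1);
}

int main(void)
{
    unsigned int h = 0x9e3779b9u;   /* any hash value */

    printf("hash %#x -> bucket %u of %u\n",
           h, hash_to_bucket(h), VOLUME_HASH_SIZE);
    return 0;
}
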
/kernel/linux/linux-5.10/net/ipv4/
tcp_ipv4.c
147 held not per host, but per port pair and TW bucket is used as state in tcp_twsk_unique()
150 If TW bucket has been already destroyed we fall back to VJ's scheme in tcp_twsk_unique()
157 * and releasing the bucket lock. in tcp_twsk_unique()
2265 /* Clean up a referenced TCP bind bucket. */ in tcp_v4_destroy_sock()
2285 * starting from bucket given in st->bucket; when st->bucket is zero the
2304 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
2310 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
2325 if (++st->bucket < INET_LHTABLE_SIZ in listening_get_next()
2457 int bucket = st->bucket; tcp_seek_last_pos() local
[all...]
/kernel/linux/linux-5.10/net/openvswitch/
meter.c
389 /* Figure out max delta_t that is enough to fill any bucket. in dp_meter_create()
390 * Keep max_delta_t size to the bucket units: in dp_meter_create()
393 * Start with a full bucket. in dp_meter_create()
395 band->bucket = (band->burst_size + band->rate) * 1000ULL; in dp_meter_create()
396 band_max_delta_t = div_u64(band->bucket, band->rate); in dp_meter_create()
625 /* Make sure delta_ms will not be too large, so that bucket will not in ovs_meter_execute()
638 * second. We maintain the bucket in the units of either bits or in ovs_meter_execute()
641 * bucket units: in ovs_meter_execute()
645 * 'cost' is the number of bucket units in this packet. in ovs_meter_execute()
656 band->bucket in ovs_meter_execute()
[all...]
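The meter.c comments describe a token-bucket meter: each band starts with a full bucket sized from its burst and rate, credit is refilled by rate * delta_ms on each packet and capped so an idle period cannot overfill it, and the packet's cost in bucket units is subtracted if enough credit remains. Here is a toy single-band version of that scheme; the names and the exact sizing formula are illustrative (note that 5.10 sizes the bucket as (burst_size + rate) * 1000 while 6.6 uses burst_size * 1000), not the real dp_meter_create()/ovs_meter_execute().

#include <stdint.h>
#include <stdio.h>

/* toy single-band meter: the bucket is kept in (unit * ms) so that refill is
 * rate * delta_ms and a packet's cost is units * 1000, with no extra scaling */
struct band {
    uint64_t rate;        /* units (bits or packets) per second */
    uint64_t burst_size;  /* extra units allowed on top of the rate */
    uint64_t bucket;      /* current credit, in unit-milliseconds */
    uint64_t max_bucket;  /* cap so idle periods cannot overfill it */
};

static void band_init(struct band *b, uint64_t rate, uint64_t burst)
{
    b->rate = rate;
    b->burst_size = burst;
    b->max_bucket = (burst + rate) * 1000ULL;  /* 5.10-style sizing */
    b->bucket = b->max_bucket;                 /* start with a full bucket */
}

/* returns 1 if the packet exceeds the band (would be dropped or remarked) */
static int band_consume(struct band *b, uint64_t delta_ms, uint64_t units)
{
    uint64_t cost = units * 1000ULL;   /* same scale as the bucket */

    b->bucket += b->rate * delta_ms;   /* refill for the elapsed time */
    if (b->bucket > b->max_bucket)
        b->bucket = b->max_bucket;

    if (cost > b->bucket)
        return 1;                      /* not enough credit */
    b->bucket -= cost;
    return 0;
}

int main(void)
{
    struct band b;

    band_init(&b, 1000, 500);          /* 1000 units/s, 500 units of burst */
    printf("first packet dropped? %d\n", band_consume(&b, 0, 1200));
    printf("huge packet dropped?  %d\n", band_consume(&b, 0, 10000));
    return 0;
}
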
/kernel/linux/linux-6.6/net/openvswitch/
meter.c
386 /* Figure out max delta_t that is enough to fill any bucket. in dp_meter_create()
387 * Keep max_delta_t size to the bucket units: in dp_meter_create()
390 * Start with a full bucket. in dp_meter_create()
392 band->bucket = band->burst_size * 1000ULL; in dp_meter_create()
393 band_max_delta_t = div_u64(band->bucket, band->rate); in dp_meter_create()
622 /* Make sure delta_ms will not be too large, so that bucket will not in ovs_meter_execute()
635 * second. We maintain the bucket in the units of either bits or in ovs_meter_execute()
638 * bucket units: in ovs_meter_execute()
642 * 'cost' is the number of bucket units in this packet. in ovs_meter_execute()
653 band->bucket in ovs_meter_execute()
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_bo_list.c
188 /* This is based on the bucket sort with O(n) time complexity. in amdgpu_bo_list_get_list()
189 * An item with priority "i" is added to bucket[i]. The lists are then in amdgpu_bo_list_get_list()
192 struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; in amdgpu_bo_list_get_list() local
197 INIT_LIST_HEAD(&bucket[i]); in amdgpu_bo_list_get_list()
209 list_add_tail(&e->tv.head, &bucket[priority]); in amdgpu_bo_list_get_list()
216 list_splice(&bucket[i], validated); in amdgpu_bo_list_get_list()
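The amdgpu_bo_list_get_list() comment spells the technique out: an O(n) bucket sort in which every buffer object is appended to bucket[priority] and the buckets are then spliced together in ascending order. A self-contained sketch of that idea on a toy item type; NUM_BUCKETS, struct item and bucket_sort() are illustrative names, not the amdgpu code.

#include <stdio.h>

#define NUM_BUCKETS 4   /* stand-in for AMDGPU_BO_LIST_NUM_BUCKETS */

struct item {
    int priority;       /* 0 .. NUM_BUCKETS - 1 */
    int id;
    struct item *next;
};

/* distribute each item to bucket[priority], then concatenate the buckets in
 * order: the result is sorted by priority and stable, in O(n) */
static void bucket_sort(struct item *items, int n, struct item **out)
{
    struct item *bucket[NUM_BUCKETS] = { 0 };
    struct item **tail[NUM_BUCKETS];
    struct item **otail = out;
    int i;

    for (i = 0; i < NUM_BUCKETS; i++)
        tail[i] = &bucket[i];

    for (i = 0; i < n; i++) {            /* distribute: O(n) */
        int p = items[i].priority;

        items[i].next = NULL;
        *tail[p] = &items[i];
        tail[p] = &items[i].next;
    }

    *out = NULL;
    for (i = 0; i < NUM_BUCKETS; i++) {  /* splice in priority order */
        if (!bucket[i])
            continue;
        *otail = bucket[i];
        otail = tail[i];
    }
}

int main(void)
{
    struct item items[] = { {2, 0, NULL}, {0, 1, NULL}, {3, 2, NULL}, {0, 3, NULL} };
    struct item *sorted, *it;

    bucket_sort(items, 4, &sorted);
    for (it = sorted; it; it = it->next)
        printf("id %d (prio %d)\n", it->id, it->priority);
    return 0;
}
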
/kernel/linux/linux-5.10/security/safesetid/
securityfs.c
74 int bucket; in __release_ruleset() local
78 hash_for_each_safe(pol->rules, bucket, tmp, rule, next) in __release_ruleset()
100 int bucket; in verify_ruleset() local
104 hash_for_each(pol->rules, bucket, rule, next) { in verify_ruleset()
/kernel/linux/linux-5.10/net/core/
net-procfs.c
35 unsigned int bucket; in dev_from_bucket() local
42 bucket = get_bucket(*pos) + 1; in dev_from_bucket()
43 *pos = set_bucket_offset(bucket, 1); in dev_from_bucket()
44 } while (bucket < NETDEV_HASHENTRIES); in dev_from_bucket()
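dev_from_bucket() resumes /proc/net/dev iteration by decoding a hash-bucket index and an in-bucket offset out of the seq_file position, bumping the bucket when the current one is exhausted, and re-encoding the position. A rough userspace approximation of that encoding; the bit widths and the get_bucket()/set_bucket_offset() helpers below are guesses at the shape of the real macros, not copies of them.

#include <stdio.h>

#define BUCKET_BITS   8
#define NUM_BUCKETS  (1u << BUCKET_BITS)
#define OFFSET_BITS  16

/* pack a bucket index into the high bits and an in-bucket offset into the
 * low bits of a single position value */
static unsigned long set_bucket_offset(unsigned int bucket, unsigned int off)
{
    return ((unsigned long)bucket << OFFSET_BITS) | off;
}

static unsigned int get_bucket(unsigned long pos)
{
    return (unsigned int)(pos >> OFFSET_BITS);
}

static unsigned int get_offset(unsigned long pos)
{
    return (unsigned int)(pos & ((1u << OFFSET_BITS) - 1));
}

int main(void)
{
    /* "the current bucket is exhausted, move to the next one at offset 1" */
    unsigned long pos = set_bucket_offset(3, 17);
    unsigned int bucket = get_bucket(pos) + 1;

    pos = set_bucket_offset(bucket, 1);
    printf("resumed at bucket %u, offset %u (of %u buckets)\n",
           get_bucket(pos), get_offset(pos), NUM_BUCKETS);
    return 0;
}
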
/kernel/linux/linux-6.6/security/safesetid/
securityfs.c
74 int bucket; in __release_ruleset() local
78 hash_for_each_safe(pol->rules, bucket, tmp, rule, next) in __release_ruleset()
100 int bucket; in verify_ruleset() local
104 hash_for_each(pol->rules, bucket, rule, next) { in verify_ruleset()
/kernel/linux/linux-5.10/net/netfilter/
nf_conntrack_core.c
740 unsigned int bucket, hsize; in ____nf_conntrack_find() local
744 bucket = reciprocal_scale(hash, hsize); in ____nf_conntrack_find()
746 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { in ____nf_conntrack_find()
763 if (get_nulls_value(n) != bucket) { in ____nf_conntrack_find()
1307 unsigned int i, bucket; in early_drop() local
1316 bucket = reciprocal_scale(hash, hsize); in early_drop()
1318 bucket = (bucket + 1) % hsize; in early_drop()
1320 drops = early_drop_list(net, &ct_hash[bucket]); in early_drop()
2176 void *data, unsigned int *bucket) in get_next_corpse()
2175 get_next_corpse(int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) get_next_corpse() argument
2226 unsigned int bucket = 0; nf_ct_iterate_cleanup() local
2456 int i, bucket; nf_conntrack_hash_resize() local
[all...]
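____nf_conntrack_find() and early_drop() map a 32-bit hash onto whatever the current table size is with reciprocal_scale(), a multiply-and-shift that avoids a modulo and never returns a value greater than or equal to the limit. A userspace copy of that helper with a small demonstration; the hash values and table size are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* maps a full 32-bit hash onto [0, ep_ro) with a multiply and a shift; the
 * distribution stays uniform as long as the hash itself is */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
    return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
    uint32_t hsize = 16384;   /* pretend conntrack hash size */
    uint32_t hashes[] = { 0x00000000u, 0x7fffffffu, 0xffffffffu };

    for (unsigned int i = 0; i < 3; i++)
        printf("hash %#010x -> bucket %u (of %u)\n",
               hashes[i], reciprocal_scale(hashes[i], hsize), hsize);
    return 0;
}
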
/kernel/linux/linux-6.6/fs/fuse/
inode.c
631 struct fuse_sync_bucket *bucket; in fuse_sync_bucket_alloc() local
633 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL); in fuse_sync_bucket_alloc()
634 if (bucket) { in fuse_sync_bucket_alloc()
635 init_waitqueue_head(&bucket->waitq); in fuse_sync_bucket_alloc()
637 atomic_set(&bucket->count, 1); in fuse_sync_bucket_alloc()
639 return bucket; in fuse_sync_bucket_alloc()
644 struct fuse_sync_bucket *bucket, *new_bucket; in fuse_sync_fs_writes() local
649 bucket = rcu_dereference_protected(fc->curr_bucket, 1); in fuse_sync_fs_writes()
650 count = atomic_read(&bucket in fuse_sync_fs_writes()
943 struct fuse_sync_bucket *bucket; fuse_conn_put() local
[all...]
/kernel/linux/linux-6.6/net/ipv4/
tcp_ipv4.c
148 held not per host, but per port pair and TW bucket is used as state in tcp_twsk_unique()
151 If TW bucket has been already destroyed we fall back to VJ's scheme in tcp_twsk_unique()
2328 /* Clean up a referenced TCP bind bucket. */ in tcp_v4_destroy_sock()
2357 /* Find a non empty bucket (starting from st->bucket)
2366 for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) { in listening_get_first()
2371 ilb2 = &hinfo->lhash2[st->bucket]; in listening_get_first()
2386 /* Find the next sk of "cur" within the same bucket (i.e. st->bucket)
2526 int bucket = st->bucket; tcp_seek_last_pos() local
[all...]
/kernel/linux/linux-5.10/tools/testing/selftests/vDSO/
parse_vdso.c
50 ELF(Word) *bucket, *chain;
155 vdso_info.bucket = &hash[2]; in vdso_init_from_sysinfo_ehdr()
207 ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket]; in vdso_sym()
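parse_vdso.c resolves symbols through the vDSO's SysV hash section: word 0 holds the bucket count, word 1 the chain count, followed by the bucket heads and the per-symbol chain links, and a lookup starts at bucket[elf_hash(name) % nbucket] and follows the chain until index 0. A compact standalone sketch of that layout and lookup; the two symbol names and the hand-built table are invented for the demo.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* classic SysV ELF hash, as used by parse_vdso.c's elf_hash() */
static unsigned long elf_hash(const char *name)
{
    unsigned long h = 0, g;

    while (*name) {
        h = (h << 4) + (unsigned char)*name++;
        g = h & 0xf0000000ul;
        if (g)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}

/* toy lookup over a DT_HASH-style table: word 0 is nbucket, word 1 is nchain,
 * then nbucket bucket heads, then nchain chain links indexed by symbol */
static int hash_lookup(const uint32_t *hash, const char *const *symnames,
                       const char *name)
{
    uint32_t nbucket = hash[0];
    const uint32_t *bucket = &hash[2];
    const uint32_t *chain = &bucket[nbucket];
    uint32_t i;

    for (i = bucket[elf_hash(name) % nbucket]; i; i = chain[i])
        if (strcmp(symnames[i], name) == 0)
            return (int)i;
    return -1;
}

int main(void)
{
    /* hand-built table with 2 buckets and 3 symbols; index 0 is unused
     * because STN_UNDEF terminates every chain */
    const char *symnames[] = { "", "gettimeofday", "clock_gettime" };
    uint32_t hash[] = { 2, 3, /* buckets: */ 0, 0, /* chains: */ 0, 0, 0 };

    /* place each symbol on its bucket's chain, newest at the head */
    for (uint32_t i = 1; i <= 2; i++) {
        uint32_t b = elf_hash(symnames[i]) % hash[0];

        hash[4 + i] = hash[2 + b];   /* old head becomes our successor */
        hash[2 + b] = i;             /* we become the bucket head */
    }

    printf("clock_gettime -> sym %d\n",
           hash_lookup(hash, symnames, "clock_gettime"));
    printf("missing       -> sym %d\n",
           hash_lookup(hash, symnames, "write"));
    return 0;
}
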
/kernel/linux/linux-5.10/net/llc/
llc_proc.c
67 static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket) in laddr_hash_next() argument
72 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES) in laddr_hash_next()
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket]) in laddr_hash_next()
/kernel/linux/linux-6.6/tools/testing/selftests/vDSO/
parse_vdso.c
50 ELF(Word) *bucket, *chain;
155 vdso_info.bucket = &hash[2]; in vdso_init_from_sysinfo_ehdr()
207 ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket]; in vdso_sym()

