/kernel/linux/linux-5.10/kernel/bpf/stackmap.c
    404: struct stack_map_bucket *bucket, *new_bucket, *old_bucket;    [__bpf_get_stackid(), local]
    420: bucket = READ_ONCE(smap->buckets[id]);    [__bpf_get_stackid()]
    422: hash_matches = bucket && bucket->hash == hash;    [__bpf_get_stackid()]
    428: /* for build_id+offset, pop a bucket before slow cmp */    [__bpf_get_stackid()]
    438: if (hash_matches && bucket->nr == trace_nr &&    [__bpf_get_stackid()]
    439: memcmp(bucket->data, new_bucket->data, trace_len) == 0) {    [__bpf_get_stackid()]
    443: if (bucket && !(flags & BPF_F_REUSE_STACKID)) {    [__bpf_get_stackid()]
    448: if (hash_matches && bucket->nr == trace_nr &&    [__bpf_get_stackid()]
    449: memcmp(bucket…    [__bpf_get_stackid()]
    770: struct stack_map_bucket *bucket, *old_bucket;    [bpf_stackmap_copy(), local]
    … (more matches not shown)

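The __bpf_get_stackid() hits above show the dedup pattern these lines implement: compare a cached hash and the entry count first, and pay for the full memcmp() only when those cheap checks pass. A minimal userspace sketch of the same idea, with invented names rather than the kernel's structures:

```c
/* Hash-then-memcmp dedup, as in the __bpf_get_stackid() hits above.
 * Toy sketch: names are invented, not the kernel's structures. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_TRACE 64

struct stack_bucket {
	uint32_t hash;             /* cheap first-stage comparison */
	uint32_t nr;               /* number of entries in data[] */
	uint64_t data[MAX_TRACE];  /* the stored stack trace */
};

static bool bucket_matches(const struct stack_bucket *b, uint32_t hash,
			   const uint64_t *ips, uint32_t nr)
{
	/* Reject on the cheap fields before paying for the full compare. */
	return b && b->hash == hash && b->nr == nr &&
	       memcmp(b->data, ips, nr * sizeof(*ips)) == 0;
}
```

The hash makes the common "no match" case O(1); the memcmp only arbitrates rare hash collisions.
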
/kernel/linux/linux-5.10/drivers/md/bcache/bcache.h
    42:  * To do this, we first divide the cache device up into buckets. A bucket is the
    46:  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
    51:  * The priority is used to implement an LRU. We reset a bucket's priority when
    53:  * of each bucket. It could be used to implement something more sophisticated,
    58:  * must match the gen of the bucket it points into. Thus, to reuse a bucket all
    62:  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
    100: * accomplished either by invalidating pointers (by incrementing a bucket's
    110: * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
    111: * free smaller than a bucket
    197: struct bucket {    [struct]
    … (more matches not shown)

/kernel/linux/linux-6.6/drivers/md/bcache/bcache.h
    42:  * To do this, we first divide the cache device up into buckets. A bucket is the
    46:  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
    51:  * The priority is used to implement an LRU. We reset a bucket's priority when
    53:  * of each bucket. It could be used to implement something more sophisticated,
    58:  * must match the gen of the bucket it points into. Thus, to reuse a bucket all
    62:  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
    100: * accomplished either by invalidating pointers (by incrementing a bucket's
    110: * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
    111: * free smaller than a bucket
    197: struct bucket {    [struct]
    … (more matches not shown)

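Both copies of the bcache.h header comment describe the generation trick: every pointer carries the gen of the bucket it points into, so bumping a bucket's gen invalidates all outstanding pointers at once, while the priority field drives the LRU. A toy sketch of that mechanism, assuming invented structures rather than bcache's real ones:

```c
/* Sketch of the generation scheme described above: a pointer is valid only
 * while its gen matches its bucket's gen, so one increment invalidates every
 * outstanding pointer into that bucket. Structures invented, not bcache's. */
#include <stdbool.h>
#include <stdint.h>

struct toy_bucket {
	uint16_t prio;  /* LRU priority, reset when the cache is hit */
	uint8_t  gen;   /* bumped to invalidate pointers into this bucket */
};

struct toy_ptr {
	uint32_t bucket_idx;
	uint8_t  gen;   /* snapshot of the bucket's gen at allocation time */
};

static bool ptr_stale(const struct toy_bucket *buckets, struct toy_ptr p)
{
	return buckets[p.bucket_idx].gen != p.gen;
}

static void bucket_invalidate(struct toy_bucket *b)
{
	b->gen++;       /* every existing pointer into b is now stale */
	b->prio = 0;    /* make it the next LRU victim */
}
```

Reusing a bucket is then O(1): no pointer needs rewriting, one increment retires them all.
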
/kernel/linux/linux-6.6/tools/tracing/rtla/src/timerlat_hist.c
    175: int bucket;    [timerlat_hist_update(), local]
    181: bucket = latency / data->bucket_size;    [timerlat_hist_update()]
    203: if (bucket < entries)    [timerlat_hist_update()]
    204: hist[bucket]++;    [timerlat_hist_update()]
    411: int bucket, cpu;    [timerlat_print_stats(), local]
    416: for (bucket = 0; bucket < data->entries; bucket++) {    [timerlat_print_stats()]
    421: bucket * data->bucket_size);    [timerlat_print_stats()]
    431: total += data->hist[cpu].irq[bucket];    [timerlat_print_stats()]
    … (more matches not shown)

/kernel/linux/linux-6.6/tools/tracing/rtla/src/osnoise_hist.c
    132: int bucket;    [osnoise_hist_update_multiple(), local]
    138: bucket = duration / data->bucket_size;    [osnoise_hist_update_multiple()]
    148: if (bucket < entries)    [osnoise_hist_update_multiple()]
    149: hist[bucket] += count;    [osnoise_hist_update_multiple()]
    177: * Set the size of the bucket.    [osnoise_init_trace_hist()]
    376: int bucket, cpu;    [osnoise_print_stats(), local]
    381: for (bucket = 0; bucket < data->entries; bucket++) {    [osnoise_print_stats()]
    386: bucket * data->bucket_size);    [osnoise_print_stats()]
    … (more matches not shown)

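Both rtla tools index a histogram by integer division of the sample against a fixed bucket width, counting only in-range buckets (the hits at 181/203 and 138/148 above). A self-contained sketch of that fixed-width histogram, with an explicit overflow slot and invented names:

```c
/* Fixed-width histogram as in the rtla hits above: index by integer
 * division, count out-of-range samples in a catch-all slot. Toy sketch. */
#include <stdio.h>

#define ENTRIES 16

struct hist {
	unsigned long slot[ENTRIES];
	unsigned long overflow;      /* samples >= ENTRIES * bucket_size */
	unsigned long bucket_size;
};

static void hist_update(struct hist *h, unsigned long sample)
{
	unsigned long bucket = sample / h->bucket_size;

	if (bucket < ENTRIES)
		h->slot[bucket]++;
	else
		h->overflow++;
}

static void hist_print(const struct hist *h)
{
	for (unsigned long b = 0; b < ENTRIES; b++)
		printf("%lu-%lu: %lu\n", b * h->bucket_size,
		       (b + 1) * h->bucket_size - 1, h->slot[b]);
	printf("over: %lu\n", h->overflow);
}
```
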
/kernel/linux/linux-5.10/drivers/md/persistent-data/dm-transaction-manager.c
    106: unsigned bucket = dm_hash_block(b, DM_HASH_MASK);    [is_shadow(), local]
    110: hlist_for_each_entry(si, tm->buckets + bucket, hlist)    [is_shadow()]
    126: unsigned bucket;    [insert_shadow(), local]
    132: bucket = dm_hash_block(b, DM_HASH_MASK);    [insert_shadow()]
    134: hlist_add_head(&si->hlist, tm->buckets + bucket);    [insert_shadow()]
    143: struct hlist_head *bucket;    [wipe_shadow_table(), local]
    148: bucket = tm->buckets + i;    [wipe_shadow_table()]
    149: hlist_for_each_entry_safe(si, tmp, bucket, hlist)    [wipe_shadow_table()]
    152: INIT_HLIST_HEAD(bucket);    [wipe_shadow_table()]

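dm-transaction-manager.c uses the classic kernel chained hash table: hash the block number to a bucket, walk that bucket's hlist on lookup, push-front on insert, and wipe by walking every chain with the _safe iterator. A userspace analogue with a plain singly linked list instead of hlist (names and the hash function are invented):

```c
/* Userspace analogue of the chained hash table in the hits above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define NR_BUCKETS 256
#define HASH_MASK  (NR_BUCKETS - 1)

struct shadow {
	uint64_t block;
	struct shadow *next;
};

static struct shadow *buckets[NR_BUCKETS];

static unsigned hash_block(uint64_t b)
{
	return (unsigned)((b * 2654435761u) & HASH_MASK);
}

static bool is_shadow(uint64_t b)
{
	for (struct shadow *si = buckets[hash_block(b)]; si; si = si->next)
		if (si->block == b)
			return true;
	return false;
}

static void insert_shadow(uint64_t b)
{
	struct shadow *si = malloc(sizeof(*si));
	if (!si)
		return;
	si->block = b;
	unsigned bucket = hash_block(b);
	si->next = buckets[bucket];     /* push-front, like hlist_add_head() */
	buckets[bucket] = si;
}

static void wipe_shadow_table(void)
{
	for (unsigned i = 0; i < NR_BUCKETS; i++) {
		for (struct shadow *si = buckets[i], *tmp; si; si = tmp) {
			tmp = si->next; /* save next before freeing, like the _safe iterator */
			free(si);
		}
		buckets[i] = NULL;      /* like INIT_HLIST_HEAD() */
	}
}
```
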
/kernel/linux/linux-5.10/net/atm/proc.c
    69:  int bucket;    [member]
    78:  static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)    [argument]
    83:  for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {    [__vcc_walk()]
    84:  struct hlist_head *head = &vcc_hash[*bucket];    [__vcc_walk()]
    98:  if (!sk && ++*bucket < VCC_HTABLE_SIZE) {    [__vcc_walk()]
    99:  sk = sk_head(&vcc_hash[*bucket]);    [__vcc_walk()]
    113: return __vcc_walk(&state->sk, family, &state->bucket, l) ?    [vcc_walk()]

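__vcc_walk() keeps its cursor (bucket index plus socket pointer) in caller-owned state, so a seq_file reader can resume mid-table on the next read() call. A userspace sketch of that resumable bucket walk, with invented names:

```c
/* Resumable walk over a bucketed table, as in __vcc_walk() above: the cursor
 * lives in caller state, so the next read resumes where the last stopped. */
#include <stddef.h>

#define HTABLE_SIZE 32

struct node { struct node *next; };

static struct node *htable[HTABLE_SIZE];

struct walk_state {
	int bucket;        /* initialize to -1 */
	struct node *n;    /* initialize to NULL */
};

/* Advance one element; returns nonzero while elements remain. */
static int walk_next(struct walk_state *st)
{
	if (st->n)
		st->n = st->n->next;            /* continue the current chain */
	while (!st->n && ++st->bucket < HTABLE_SIZE)
		st->n = htable[st->bucket];     /* hop to next non-empty bucket */
	return st->n != NULL;
}
```
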
/kernel/linux/linux-6.6/drivers/md/persistent-data/dm-transaction-manager.c
    108: unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);    [is_shadow(), local]
    112: hlist_for_each_entry(si, tm->buckets + bucket, hlist)    [is_shadow()]
    128: unsigned int bucket;    [insert_shadow(), local]
    134: bucket = dm_hash_block(b, DM_HASH_MASK);    [insert_shadow()]
    136: hlist_add_head(&si->hlist, tm->buckets + bucket);    [insert_shadow()]
    145: struct hlist_head *bucket;    [wipe_shadow_table(), local]
    150: bucket = tm->buckets + i;    [wipe_shadow_table()]
    151: hlist_for_each_entry_safe(si, tmp, bucket, hlist)    [wipe_shadow_table()]
    154: INIT_HLIST_HEAD(bucket);    [wipe_shadow_table()]

/kernel/linux/linux-6.6/net/atm/proc.c
    69:  int bucket;    [member]
    78:  static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)    [argument]
    83:  for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {    [__vcc_walk()]
    84:  struct hlist_head *head = &vcc_hash[*bucket];    [__vcc_walk()]
    98:  if (!sk && ++*bucket < VCC_HTABLE_SIZE) {    [__vcc_walk()]
    99:  sk = sk_head(&vcc_hash[*bucket]);    [__vcc_walk()]
    113: return __vcc_walk(&state->sk, family, &state->bucket, l) ?    [vcc_walk()]

/kernel/linux/linux-5.10/drivers/net/wireguard/ratelimiter.c
    92:  struct hlist_head *bucket;    [wg_ratelimiter_allow(), local]
    97:  bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &    [wg_ratelimiter_allow()]
    104: bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &    [wg_ratelimiter_allow()]
    111: hlist_for_each_entry_rcu(entry, bucket, hash) {    [wg_ratelimiter_allow()]
    149: hlist_add_head_rcu(&entry->hash, bucket);    [wg_ratelimiter_allow()]

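wg_ratelimiter_allow() selects a bucket with a keyed hash (hsiphash) of the source address, then runs a token-bucket test on the matching entry. A hedged sketch of the token-bucket half, keeping credit in nanoseconds as the driver does; the constants and names here are assumptions, not WireGuard's:

```c
/* Token-bucket test of the kind wg_ratelimiter_allow() applies per source
 * address. Sketch only: constants, names, and the clock call are assumed. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define PACKETS_PER_SECOND 20ull
#define PACKETS_BURSTABLE   5ull
#define NSEC_PER_SEC        1000000000ull
#define PACKET_COST         (NSEC_PER_SEC / PACKETS_PER_SECOND)
#define TOKEN_MAX           (PACKET_COST * PACKETS_BURSTABLE)

struct rl_entry {
	uint64_t last_time_ns;
	uint64_t tokens;        /* nanoseconds of accumulated credit */
};

static bool rl_allow(struct rl_entry *e)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	uint64_t now = (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;

	e->tokens += now - e->last_time_ns;  /* refill by elapsed time */
	if (e->tokens > TOKEN_MAX)
		e->tokens = TOKEN_MAX;       /* cap the burst */
	e->last_time_ns = now;

	if (e->tokens < PACKET_COST)
		return false;                /* not enough credit: drop */
	e->tokens -= PACKET_COST;            /* spend one packet's worth */
	return true;
}
```

Counting credit in nanoseconds avoids a separate refill timer: elapsed wall time is the refill.
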
/kernel/linux/linux-6.6/drivers/net/wireguard/ratelimiter.c
    92:  struct hlist_head *bucket;    [wg_ratelimiter_allow(), local]
    97:  bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &    [wg_ratelimiter_allow()]
    104: bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &    [wg_ratelimiter_allow()]
    111: hlist_for_each_entry_rcu(entry, bucket, hash) {    [wg_ratelimiter_allow()]
    149: hlist_add_head_rcu(&entry->hash, bucket);    [wg_ratelimiter_allow()]

/kernel/linux/linux-5.10/drivers/net/ethernet/freescale/fman/fman_dtsec.c
    533:  static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,    [argument]
    536:  int reg_idx = (bucket >> 5) & 0xf;    [set_bucket()]
    537:  int bit_idx = bucket & 0x1f;    [set_bucket()]
    1056: s32 bucket;    [dtsec_add_hash_mac_address(), local]
    1070: pr_err("Could not compute hash bucket\n");    [dtsec_add_hash_mac_address()]
    1082: * In bucket index output the low 5 bits identify the hash register    [dtsec_add_hash_mac_address()]
    1087: bucket = (s32)((crc >> 23) & 0x1ff);    [dtsec_add_hash_mac_address()]
    1089: bucket = (s32)((crc >> 24) & 0xff);    [dtsec_add_hash_mac_address()]
    1094: bucket += 0x100;    [dtsec_add_hash_mac_address()]
    1097: set_bucket(dtsec->regs, bucket, true);    [dtsec_add_hash_mac_address()]
    1167: s32 bucket;    [dtsec_del_hash_mac_address(), local]
    … (more matches not shown)

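set_bucket() maps a bucket number onto a bank of 32-bit hash registers: bits 5..8 select the register and bits 0..4 select the bit (lines 536/537 above). The same indexing as a sketch, with the MMIO register bank replaced by a plain array:

```c
/* Bucket-to-(register, bit) mapping as in set_bucket() above, over a plain
 * array instead of the dTSEC MMIO bank. */
#include <stdbool.h>
#include <stdint.h>

#define NR_HASH_REGS 16

static uint32_t hash_regs[NR_HASH_REGS];   /* stand-in for the MMIO bank */

static void set_bucket(int bucket, bool enable)
{
	int reg_idx = (bucket >> 5) & 0xf;  /* which 32-bit register */
	int bit_idx = bucket & 0x1f;        /* which bit inside it */

	if (enable)
		hash_regs[reg_idx] |= 1u << bit_idx;
	else
		hash_regs[reg_idx] &= ~(1u << bit_idx);
}
```
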
/kernel/linux/linux-5.10/net/netfilter/xt_hashlimit.c
    50:   MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
    1056: unsigned int *bucket;    [variable]
    1062: bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
    1063: if (!bucket)
    1066: *bucket = *pos;
    1067: return bucket;
    1073: unsigned int *bucket = v;    [dl_seq_next(), local]
    1075: *pos = ++(*bucket);    [dl_seq_next()]
    1080: return bucket;    [dl_seq_next()]
    1087: unsigned int *bucket…    [variable]
    1090: kfree(bucket);    [variable]
    1173: unsigned int *bucket = (unsigned int *)v;    [dl_seq_show_v2(), local]
    1187: unsigned int *bucket = v;    [dl_seq_show_v1(), local]
    1201: unsigned int *bucket = v;    [dl_seq_show(), local]
    … (more matches not shown)

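These dl_seq_* hits are the standard seq_file cursor idiom: ->start allocates a small heap cursor holding the bucket index, ->next increments it, and ->stop frees it, so iteration survives across read() calls. A module-context sketch of the shape only; NR_BUCKETS is an assumed stand-in for the real per-table size, and the show callbacks are omitted:

```c
/* seq_file start/next/stop with a heap-allocated bucket cursor, the shape
 * of the dl_seq_* hits above. Sketch, not the real xt_hashlimit code. */
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#define NR_BUCKETS 1024	/* assumed; xt_hashlimit uses the configured size */

static void *dl_seq_start(struct seq_file *s, loff_t *pos)
{
	unsigned int *bucket;

	if (*pos >= NR_BUCKETS)
		return NULL;
	bucket = kmalloc(sizeof(*bucket), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);
	*bucket = *pos;			/* cursor = current bucket index */
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	unsigned int *bucket = v;

	*pos = ++(*bucket);
	if (*pos >= NR_BUCKETS) {
		kfree(bucket);		/* walk finished */
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
{
	if (!IS_ERR_OR_NULL(v))
		kfree(v);
}
```
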
/kernel/linux/linux-6.6/drivers/net/ethernet/freescale/fman/fman_dtsec.c
    458:  static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,    [argument]
    461:  int reg_idx = (bucket >> 5) & 0xf;    [set_bucket()]
    462:  int bit_idx = bucket & 0x1f;    [set_bucket()]
    1017: s32 bucket;    [dtsec_add_hash_mac_address(), local]
    1028: pr_err("Could not compute hash bucket\n");    [dtsec_add_hash_mac_address()]
    1040: * In bucket index output the low 5 bits identify the hash register    [dtsec_add_hash_mac_address()]
    1045: bucket = (s32)((crc >> 23) & 0x1ff);    [dtsec_add_hash_mac_address()]
    1047: bucket = (s32)((crc >> 24) & 0xff);    [dtsec_add_hash_mac_address()]
    1052: bucket += 0x100;    [dtsec_add_hash_mac_address()]
    1055: set_bucket(dtsec->regs, bucket, true);    [dtsec_add_hash_mac_address()]
    1120: s32 bucket;    [dtsec_del_hash_mac_address(), local]
    … (more matches not shown)

/kernel/linux/linux-6.6/net/netfilter/xt_hashlimit.c
    50:   MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
    1056: unsigned int *bucket;    [variable]
    1062: bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
    1063: if (!bucket)
    1066: *bucket = *pos;
    1067: return bucket;
    1073: unsigned int *bucket = v;    [dl_seq_next(), local]
    1075: *pos = ++(*bucket);    [dl_seq_next()]
    1080: return bucket;    [dl_seq_next()]
    1087: unsigned int *bucket…    [variable]
    1090: kfree(bucket);    [variable]
    1173: unsigned int *bucket = (unsigned int *)v;    [dl_seq_show_v2(), local]
    1187: unsigned int *bucket = v;    [dl_seq_show_v1(), local]
    1201: unsigned int *bucket = v;    [dl_seq_show(), local]
    … (more matches not shown)

/kernel/linux/linux-5.10/net/core/sock_map.c
    906:  struct bpf_shtab_bucket *bucket;    [__sock_hash_lookup_elem(), local]
    912:  bucket = sock_hash_select_bucket(htab, hash);    [__sock_hash_lookup_elem()]
    913:  elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);    [__sock_hash_lookup_elem()]
    930:  struct bpf_shtab_bucket *bucket;    [sock_hash_delete_from_link(), local]
    933:  bucket = sock_hash_select_bucket(htab, elem->hash);    [sock_hash_delete_from_link()]
    939:  raw_spin_lock_bh(&bucket->lock);    [sock_hash_delete_from_link()]
    940:  elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,    [sock_hash_delete_from_link()]
    947:  raw_spin_unlock_bh(&bucket->lock);    [sock_hash_delete_from_link()]
    954:  struct bpf_shtab_bucket *bucket;    [sock_hash_delete_elem(), local]
    959:  bucket…    [sock_hash_delete_elem()]
    1005: struct bpf_shtab_bucket *bucket;    [sock_hash_update_common(), local]
    1176: struct bpf_shtab_bucket *bucket;    [sock_hash_free(), local]
    1344: struct bpf_shtab_bucket *bucket;    [sock_hash_seq_find_next(), local]
    … (more matches not shown)

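sock_map's hash table locks per bucket: sock_hash_select_bucket() derives the bucket from the hash, and only that bucket's spinlock is taken for the update, so writers on different buckets never contend. A userspace sketch of the same layout with pthread mutexes and invented names:

```c
/* Per-bucket locking, as in the sock_map hits above: lock one bucket for a
 * mutation instead of the whole table. Userspace sketch. */
#include <pthread.h>
#include <stdint.h>

#define NR_BUCKETS 64

struct elem {
	uint32_t hash;
	struct elem *next;
};

struct bucket {
	struct elem *head;
	pthread_mutex_t lock;
};

static struct bucket table[NR_BUCKETS];

static void table_init(void)
{
	for (int i = 0; i < NR_BUCKETS; i++)
		pthread_mutex_init(&table[i].lock, NULL);
}

static struct bucket *select_bucket(uint32_t hash)
{
	return &table[hash & (NR_BUCKETS - 1)];
}

static void delete_elem(struct elem *e)
{
	struct bucket *b = select_bucket(e->hash);

	pthread_mutex_lock(&b->lock);	/* contend only within this bucket */
	for (struct elem **pp = &b->head; *pp; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;	/* unlink */
			break;
		}
	}
	pthread_mutex_unlock(&b->lock);
}
```

The trade-off against one global lock is memory (a lock per bucket) for write scalability across buckets.
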
/kernel/linux/linux-6.6/net/core/sock_map.c
    884:  struct bpf_shtab_bucket *bucket;    [__sock_hash_lookup_elem(), local]
    890:  bucket = sock_hash_select_bucket(htab, hash);    [__sock_hash_lookup_elem()]
    891:  elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);    [__sock_hash_lookup_elem()]
    908:  struct bpf_shtab_bucket *bucket;    [sock_hash_delete_from_link(), local]
    911:  bucket = sock_hash_select_bucket(htab, elem->hash);    [sock_hash_delete_from_link()]
    917:  spin_lock_bh(&bucket->lock);    [sock_hash_delete_from_link()]
    918:  elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,    [sock_hash_delete_from_link()]
    925:  spin_unlock_bh(&bucket->lock);    [sock_hash_delete_from_link()]
    932:  struct bpf_shtab_bucket *bucket;    [sock_hash_delete_elem(), local]
    937:  bucket…    [sock_hash_delete_elem()]
    984:  struct bpf_shtab_bucket *bucket;    [sock_hash_update_common(), local]
    1134: struct bpf_shtab_bucket *bucket;    [sock_hash_free(), local]
    1302: struct bpf_shtab_bucket *bucket;    [sock_hash_seq_find_next(), local]
    … (more matches not shown)

/kernel/linux/linux-5.10/fs/xfs/libxfs/xfs_ag.c
    296: int bucket;    [xfs_agflblock_init(), local]
    305: for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)    [xfs_agflblock_init()]
    306: agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);    [xfs_agflblock_init()]
    316: int bucket;    [xfs_agiblock_init(), local]
    334: for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)    [xfs_agiblock_init()]
    335: agi->agi_unlinked[bucket]…    [xfs_agiblock_init()]
    … (more matches not shown)

/kernel/linux/linux-5.10/fs/btrfs/compression.c
    827:  struct bucket_item *bucket;    [member]
    842:  kfree(workspace->bucket);    [free_heuristic_ws()]
    859:  ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);    [alloc_heuristic_ws()]
    860:  if (!ws->bucket)    [alloc_heuristic_ws()]
    1369: for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {    [shannon_entropy()]
    1370: p = ws->bucket[i].count;    [shannon_entropy()]
    1502: struct bucket_item *bucket = ws->bucket;    [byte_core_set_size(), local]
    1505: radix_sort(ws->bucket, w…    [byte_core_set_size()]
    … (more matches not shown)

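The heuristic workspace keeps one counter bucket per byte value and feeds the counts into a Shannon-entropy estimate (the shannon_entropy() hits above) to decide whether data looks compressible. A userspace sketch of that calculation; the real code avoids floating point with an integer log2 approximation, so libm's log2() here is a simplification (link with -lm):

```c
/* Byte-frequency buckets feeding a Shannon-entropy estimate, as in the
 * compression.c heuristic above. Floating-point sketch of the math only. */
#include <math.h>
#include <stddef.h>
#include <stdint.h>

#define BUCKET_SIZE 256

struct bucket_item { uint32_t count; };

/* Returns entropy scaled to 0..100 (percent of the 8-bit maximum). */
static int shannon_entropy(const uint8_t *sample, size_t len)
{
	struct bucket_item bucket[BUCKET_SIZE] = { 0 };
	double entropy = 0.0;

	for (size_t i = 0; i < len; i++)
		bucket[sample[i]].count++;   /* one bucket per byte value */

	for (int i = 0; i < BUCKET_SIZE; i++) {
		if (!bucket[i].count)
			continue;
		double p = (double)bucket[i].count / (double)len;
		entropy -= p * log2(p);
	}
	return (int)(entropy * 100 / 8);     /* 8 bits = maximal entropy */
}
```

Low entropy (heavily skewed buckets) predicts good compression; near-maximal entropy lets btrfs skip compressing the extent.
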
/kernel/linux/linux-6.6/fs/btrfs/compression.c
    585:  struct bucket_item *bucket;    [member]
    600:  kfree(workspace->bucket);    [free_heuristic_ws()]
    617:  ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);    [alloc_heuristic_ws()]
    618:  if (!ws->bucket)    [alloc_heuristic_ws()]
    1101: for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {    [shannon_entropy()]
    1102: p = ws->bucket[i].count;    [shannon_entropy()]
    1234: struct bucket_item *bucket = ws->bucket;    [byte_core_set_size(), local]
    1237: radix_sort(ws->bucket, w…    [byte_core_set_size()]
    … (more matches not shown)

/kernel/linux/linux-6.6/io_uring/poll.c
    158: * contrast to per bucket spinlocks. Likely, tctx_task_work()    [io_poll_tw_hash_eject()]
    858: struct io_hash_bucket *bucket;    [__io_poll_cancel(), local]
    863: req = io_poll_file_find(ctx, cd, table, &bucket);    [__io_poll_cancel()]
    865: req = io_poll_find(ctx, false, cd, table, &bucket);    [__io_poll_cancel()]
    869: if (bucket)    [__io_poll_cancel()]
    870: spin_unlock(&bucket->lock);    [__io_poll_cancel()]
    981: struct io_hash_bucket *bucket;    [io_poll_remove(), local]
    986: preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);    [io_poll_remove()]
    988: if (bucket)    [io_poll_remove()]
    989: spin_unlock(&bucket…    [io_poll_remove()]
    … (more matches not shown)

/kernel/linux/linux-5.10/net/ipv6/route.c
    1444: static void rt6_remove_exception(struct rt6_exception_bucket *bucket,    [argument]
    1450: if (!bucket || !rt6_ex)    [rt6_remove_exception()]
    1466: WARN_ON_ONCE(!bucket->depth);    [rt6_remove_exception()]
    1467: bucket->depth--;    [rt6_remove_exception()]
    1470: /* Remove oldest rt6_ex in bucket and free the memory
    1473: static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)    [argument]
    1477: if (!bucket)    [rt6_exception_remove_oldest()]
    1480: hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {    [rt6_exception_remove_oldest()]
    1484: rt6_remove_exception(bucket, oldest);    [rt6_exception_remove_oldest()]
    1511: * and update bucket pointer
    1516: __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket, const struct in6_addr *daddr, const struct in6_addr *saddr)    [argument]
    1549: __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket, const struct in6_addr *daddr, const struct in6_addr *saddr)    [argument]
    1611: struct rt6_exception_bucket *bucket;    [fib6_nh_get_excptn_bucket(), local]
    1630: fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)    [argument]
    1641: struct rt6_exception_bucket *bucket;    [fib6_nh_excptn_bucket_set_flushed(), local]
    1657: struct rt6_exception_bucket *bucket;    [rt6_insert_exception(), local]
    1738: struct rt6_exception_bucket *bucket;    [fib6_nh_flush_exceptions(), local]
    1792: struct rt6_exception_bucket *bucket;    [rt6_find_cached_rt(), local]
    1834: struct rt6_exception_bucket *bucket;    [fib6_nh_remove_exception(), local]
    1918: struct rt6_exception_bucket *bucket;    [fib6_nh_update_exception(), local]
    2015: struct rt6_exception_bucket *bucket;    [rt6_exceptions_update_pmtu(), local]
    2044: struct rt6_exception_bucket *bucket;    [fib6_nh_exceptions_clean_tohost(), local]
    2074: rt6_age_examine_exception(struct rt6_exception_bucket *bucket, struct rt6_exception *rt6_ex, struct fib6_gc_args *gc_args, unsigned long now)    [argument]
    2122: struct rt6_exception_bucket *bucket;    [fib6_nh_age_exceptions(), local]
    3540: struct rt6_exception_bucket *bucket;    [fib6_nh_release(), local]
    5728: struct rt6_exception_bucket *bucket;    [rt6_nh_dump_exceptions(), local]
    … (more matches not shown)

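The rt6 exception bucket is a bounded chain: rt6_remove_exception() maintains a depth counter, and rt6_exception_remove_oldest() scans for the stalest entry and drops it, so an insert never grows a bucket past its cap. A userspace sketch of that evict-oldest-on-overflow bucket; MAX_DEPTH and all names are invented for illustration:

```c
/* Bounded bucket with evict-oldest, the pattern of the rt6 exception hits
 * above. Plain singly linked list instead of hlist; names invented. */
#include <stdlib.h>

#define MAX_DEPTH 5

struct exception {
	unsigned long stamp;        /* last-used time, smaller = older */
	struct exception *next;
};

struct exc_bucket {
	struct exception *chain;
	unsigned int depth;
};

static void remove_exception(struct exc_bucket *b, struct exception *ex)
{
	for (struct exception **pp = &b->chain; *pp; pp = &(*pp)->next) {
		if (*pp == ex) {
			*pp = ex->next;
			b->depth--;
			free(ex);
			return;
		}
	}
}

static void remove_oldest(struct exc_bucket *b)
{
	struct exception *oldest = b->chain;

	for (struct exception *ex = b->chain; ex; ex = ex->next)
		if (ex->stamp < oldest->stamp)
			oldest = ex;
	if (oldest)
		remove_exception(b, oldest);
}

static void insert_exception(struct exc_bucket *b, struct exception *ex)
{
	if (b->depth >= MAX_DEPTH)
		remove_oldest(b);       /* keep the bucket bounded */
	ex->next = b->chain;
	b->chain = ex;
	b->depth++;
}
```

Capping depth bounds both memory use and the worst-case chain walk, at the cost of occasionally re-learning an evicted route exception.
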
/kernel/linux/linux-6.6/net/ipv6/route.c
    1443: static void rt6_remove_exception(struct rt6_exception_bucket *bucket,    [argument]
    1449: if (!bucket || !rt6_ex)    [rt6_remove_exception()]
    1465: WARN_ON_ONCE(!bucket->depth);    [rt6_remove_exception()]
    1466: bucket->depth--;    [rt6_remove_exception()]
    1469: /* Remove oldest rt6_ex in bucket and free the memory
    1472: static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)    [argument]
    1476: if (!bucket)    [rt6_exception_remove_oldest()]
    1479: hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {    [rt6_exception_remove_oldest()]
    1483: rt6_remove_exception(bucket, oldest);    [rt6_exception_remove_oldest()]
    1510: * and update bucket pointer
    1515: __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket, const struct in6_addr *daddr, const struct in6_addr *saddr)    [argument]
    1548: __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket, const struct in6_addr *daddr, const struct in6_addr *saddr)    [argument]
    1610: struct rt6_exception_bucket *bucket;    [fib6_nh_get_excptn_bucket(), local]
    1629: fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)    [argument]
    1640: struct rt6_exception_bucket *bucket;    [fib6_nh_excptn_bucket_set_flushed(), local]
    1656: struct rt6_exception_bucket *bucket;    [rt6_insert_exception(), local]
    1737: struct rt6_exception_bucket *bucket;    [fib6_nh_flush_exceptions(), local]
    1791: struct rt6_exception_bucket *bucket;    [rt6_find_cached_rt(), local]
    1833: struct rt6_exception_bucket *bucket;    [fib6_nh_remove_exception(), local]
    1917: struct rt6_exception_bucket *bucket;    [fib6_nh_update_exception(), local]
    2014: struct rt6_exception_bucket *bucket;    [rt6_exceptions_update_pmtu(), local]
    2043: struct rt6_exception_bucket *bucket;    [fib6_nh_exceptions_clean_tohost(), local]
    2073: rt6_age_examine_exception(struct rt6_exception_bucket *bucket, struct rt6_exception *rt6_ex, struct fib6_gc_args *gc_args, unsigned long now)    [argument]
    2118: struct rt6_exception_bucket *bucket;    [fib6_nh_age_exceptions(), local]
    3640: struct rt6_exception_bucket *bucket;    [fib6_nh_release(), local]
    5827: struct rt6_exception_bucket *bucket;    [rt6_nh_dump_exceptions(), local]
    … (more matches not shown)

/kernel/linux/linux-5.10/net/openvswitch/vport.c
    97:  struct hlist_head *bucket = hash_bucket(net, name);    [ovs_vport_locate(), local]
    100: hlist_for_each_entry_rcu(vport, bucket, hash_node,    [ovs_vport_locate()]
    196: struct hlist_head *bucket;    [ovs_vport_add(), local]
    207: bucket = hash_bucket(ovs_dp_get_net(vport->dp),    [ovs_vport_add()]
    209: hlist_add_head_rcu(&vport->hash_node, bucket);    [ovs_vport_add()]

/kernel/linux/linux-5.10/drivers/misc/vmw_vmci/vmci_doorbell.c
    120: u32 bucket = VMCI_DOORBELL_HASH(idx);    [dbell_index_table_find(), local]
    123: hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],    [dbell_index_table_find()]
    139: u32 bucket;    [dbell_index_table_add(), local]
    187: bucket = VMCI_DOORBELL_HASH(entry->idx);    [dbell_index_table_add()]
    188: hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);    [dbell_index_table_add()]
    355: u32 bucket = VMCI_DOORBELL_HASH(notify_idx);    [dbell_fire_entries(), local]
    360: hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {    [dbell_fire_entries()]