/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
H A D | dr_icm_pool.c | 38 /* protect the ICM bucket */ 181 struct mlx5dr_icm_bucket *bucket = chunk->bucket; in dr_icm_chunk_ste_init() local 183 chunk->ste_arr = kvzalloc(bucket->num_of_entries * in dr_icm_chunk_ste_init() 188 chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries * in dr_icm_chunk_ste_init() 193 chunk->miss_list = kvmalloc(bucket->num_of_entries * in dr_icm_chunk_ste_init() 207 static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket) in dr_icm_chunks_create() argument 210 struct mlx5dr_icm_pool *pool = bucket->pool; in dr_icm_chunks_create() 215 mr_req_size = bucket->num_of_entries * bucket->entry_size; in dr_icm_chunks_create() 282 struct mlx5dr_icm_bucket *bucket = chunk->bucket; dr_icm_chunk_destroy() local 293 dr_icm_bucket_init(struct mlx5dr_icm_pool *pool, struct mlx5dr_icm_bucket *bucket, enum mlx5dr_icm_chunk_size chunk_size) dr_icm_bucket_init() argument 311 dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket) dr_icm_bucket_cleanup() argument 341 dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool, struct mlx5dr_icm_bucket *bucket) dr_icm_reuse_hot_entries() argument 353 dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket) dr_icm_chill_bucket_start() argument 360 dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket) dr_icm_chill_bucket_end() argument 367 dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket) dr_icm_chill_bucket_abort() argument 378 struct mlx5dr_icm_bucket *bucket; dr_icm_chill_buckets_start() local 402 struct mlx5dr_icm_bucket *bucket; dr_icm_chill_buckets_end() local 424 struct mlx5dr_icm_bucket *bucket; dr_icm_chill_buckets_abort() local 451 struct mlx5dr_icm_bucket *bucket; mlx5dr_icm_alloc_chunk() local 496 struct mlx5dr_icm_bucket *bucket = chunk->bucket; mlx5dr_icm_free_chunk() local [all...] |
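The dr_icm_pool hits above show chunks being carved out of a per-size ICM bucket: dr_icm_chunk_ste_init() sizes three per-chunk arrays from the owning bucket's num_of_entries and unwinds the earlier allocations when a later one fails. A minimal userspace sketch of that allocate-or-unwind pattern, with illustrative struct layouts standing in for the driver's real ones:

    /* Sketch of the dr_icm_chunk_ste_init() pattern: per-chunk arrays are
     * sized from the owning bucket, and a partial failure frees exactly
     * what was allocated before it. Layouts are illustrative. */
    #include <stdlib.h>

    struct icm_bucket { size_t num_of_entries; size_t entry_size; };

    struct icm_chunk {
        struct icm_bucket *bucket;
        void *ste_arr, *hw_ste_arr, *miss_list;
    };

    static int chunk_ste_init(struct icm_chunk *chunk)
    {
        struct icm_bucket *b = chunk->bucket;

        chunk->ste_arr = calloc(b->num_of_entries, sizeof(void *));
        if (!chunk->ste_arr)
            return -1;
        chunk->hw_ste_arr = calloc(b->num_of_entries, b->entry_size);
        if (!chunk->hw_ste_arr)
            goto out_free_ste;
        chunk->miss_list = malloc(b->num_of_entries * sizeof(void *));
        if (!chunk->miss_list)
            goto out_free_hw_ste;
        return 0;

    out_free_hw_ste:
        free(chunk->hw_ste_arr);
    out_free_ste:
        free(chunk->ste_arr);
        return -1;
    }

The goto-based unwind mirrors the kernel convention: each failure label releases only what the earlier steps acquired.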
/kernel/linux/linux-5.10/net/mptcp/ |
H A D | token.c | 54 /* called with bucket lock held */ 67 /* called with bucket lock held */ 112 struct token_bucket *bucket; in mptcp_token_new_request() local 123 bucket = token_bucket(token); in mptcp_token_new_request() 124 spin_lock_bh(&bucket->lock); in mptcp_token_new_request() 125 if (__token_bucket_busy(bucket, token)) { in mptcp_token_new_request() 126 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request() 130 hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); in mptcp_token_new_request() 131 bucket->chain_len++; in mptcp_token_new_request() 132 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request() 157 struct token_bucket *bucket; mptcp_token_new_connect() local 194 struct token_bucket *bucket; mptcp_token_accept() local 210 struct token_bucket *bucket; mptcp_token_exists() local 246 struct token_bucket *bucket; mptcp_token_get_sock() local 302 struct token_bucket *bucket = &token_hash[slot]; mptcp_token_iter_next() local 351 struct token_bucket *bucket; mptcp_token_destroy_request() local 374 struct token_bucket *bucket; mptcp_token_destroy() local [all...] |
/kernel/linux/linux-6.6/net/mptcp/ |
H A D | token.c | 53 /* called with bucket lock held */ 66 /* called with bucket lock held */ 111 struct token_bucket *bucket; in mptcp_token_new_request() local 122 bucket = token_bucket(token); in mptcp_token_new_request() 123 spin_lock_bh(&bucket->lock); in mptcp_token_new_request() 124 if (__token_bucket_busy(bucket, token)) { in mptcp_token_new_request() 125 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request() 129 hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); in mptcp_token_new_request() 130 bucket->chain_len++; in mptcp_token_new_request() 131 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request() 157 struct token_bucket *bucket; mptcp_token_new_connect() local 196 struct token_bucket *bucket; mptcp_token_accept() local 213 struct token_bucket *bucket; mptcp_token_exists() local 249 struct token_bucket *bucket; mptcp_token_get_sock() local 305 struct token_bucket *bucket = &token_hash[slot]; mptcp_token_iter_next() local 354 struct token_bucket *bucket; mptcp_token_destroy_request() local 378 struct token_bucket *bucket; mptcp_token_destroy() local [all...] |
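Both trees above hit the same pattern in mptcp_token_new_request(): the token selects one bucket, only that bucket's lock is taken, __token_bucket_busy() rejects duplicates, and otherwise the request is linked and chain_len incremented. A userspace sketch of that per-bucket locking, with a pthread mutex standing in for spin_lock_bh() and illustrative names and sizes:

    #include <pthread.h>
    #include <stddef.h>

    #define TOKEN_BUCKETS 1024

    struct entry { unsigned int token; struct entry *next; };

    struct token_bucket {
        pthread_mutex_t lock;   /* init with pthread_mutex_init() at startup */
        struct entry *chain;
        unsigned int chain_len;
    } token_hash[TOKEN_BUCKETS];

    static struct token_bucket *token_bucket_of(unsigned int token)
    {
        return &token_hash[token % TOKEN_BUCKETS];
    }

    /* Returns 0 on success, -1 if the token is already in use. */
    static int token_new_request(struct entry *e)
    {
        struct token_bucket *bucket = token_bucket_of(e->token);
        struct entry *c;
        int busy = 0;

        pthread_mutex_lock(&bucket->lock);
        for (c = bucket->chain; c; c = c->next)   /* __token_bucket_busy() */
            if (c->token == e->token)
                busy = 1;
        if (!busy) {
            e->next = bucket->chain;              /* hlist_add_head analogue */
            bucket->chain = e;
            bucket->chain_len++;
        }
        pthread_mutex_unlock(&bucket->lock);
        return busy ? -1 : 0;
    }

Sharding the lock per bucket keeps token creation on different buckets fully concurrent; only collisions on the same hash slot serialize.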
/kernel/linux/linux-5.10/net/ceph/crush/ |
H A D | mapper.c | 58 * bucket choose methods 60 * For each bucket algorithm, we have a "choose" method that, given a 62 * will produce an item in the bucket. 66 * Choose based on a random permutation of the bucket. 70 * calculate an actual random permutation of the bucket members. 74 static int bucket_perm_choose(const struct crush_bucket *bucket, in bucket_perm_choose() argument 78 unsigned int pr = r % bucket->size; in bucket_perm_choose() 83 dprintk("bucket %d new x=%d\n", bucket->id, x); in bucket_perm_choose() 88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % bucket->size; in bucket_perm_choose() 135 bucket_uniform_choose(const struct crush_bucket_uniform *bucket, struct crush_work_bucket *work, int x, int r) bucket_uniform_choose() argument 142 bucket_list_choose(const struct crush_bucket_list *bucket, int x, int r) bucket_list_choose() argument 196 bucket_tree_choose(const struct crush_bucket_tree *bucket, int x, int r) bucket_tree_choose() argument 228 bucket_straw_choose(const struct crush_bucket_straw *bucket, int x, int r) bucket_straw_choose() argument 305 get_choose_arg_weights(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg, int position) get_choose_arg_weights() argument 317 get_choose_arg_ids(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg) get_choose_arg_ids() argument 326 bucket_straw2_choose(const struct crush_bucket_straw2 *bucket, int x, int r, const struct crush_choose_arg *arg, int position) bucket_straw2_choose() argument 449 crush_choose_firstn(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, int out_size, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, unsigned int stable, int *out2, int parent_r, const struct crush_choose_arg *choose_args) crush_choose_firstn() argument 643 crush_choose_indep(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r, const struct crush_choose_arg *choose_args) crush_choose_indep() argument [all...] |
/kernel/linux/linux-6.6/net/ceph/crush/ |
H A D | mapper.c | 58 * bucket choose methods 60 * For each bucket algorithm, we have a "choose" method that, given a 62 * will produce an item in the bucket. 66 * Choose based on a random permutation of the bucket. 70 * calculate an actual random permutation of the bucket members. 74 static int bucket_perm_choose(const struct crush_bucket *bucket, in bucket_perm_choose() argument 78 unsigned int pr = r % bucket->size; in bucket_perm_choose() 83 dprintk("bucket %d new x=%d\n", bucket->id, x); in bucket_perm_choose() 88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % bucket->size; in bucket_perm_choose() 135 bucket_uniform_choose(const struct crush_bucket_uniform *bucket, struct crush_work_bucket *work, int x, int r) bucket_uniform_choose() argument 142 bucket_list_choose(const struct crush_bucket_list *bucket, int x, int r) bucket_list_choose() argument 196 bucket_tree_choose(const struct crush_bucket_tree *bucket, int x, int r) bucket_tree_choose() argument 228 bucket_straw_choose(const struct crush_bucket_straw *bucket, int x, int r) bucket_straw_choose() argument 305 get_choose_arg_weights(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg, int position) get_choose_arg_weights() argument 317 get_choose_arg_ids(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg) get_choose_arg_ids() argument 326 bucket_straw2_choose(const struct crush_bucket_straw2 *bucket, int x, int r, const struct crush_choose_arg *arg, int position) bucket_straw2_choose() argument 449 crush_choose_firstn(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, int out_size, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, unsigned int stable, int *out2, int parent_r, const struct crush_choose_arg *choose_args) crush_choose_firstn() argument 643 crush_choose_indep(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r, const struct crush_choose_arg *choose_args) crush_choose_indep() argument [all...] |
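mapper.c carries one "choose" method per CRUSH bucket type; bucket_straw2_choose() is the one modern maps use: each item draws a pseudo-random straw from hash(x, item, r), scales it by its weight through a log transform, and the best straw wins. A simplified floating-point model, with a toy mixing hash standing in for crush_hash32_3() and double math standing in for the kernel's 64-bit fixed-point crush_ln():

    #include <math.h>
    #include <stdint.h>

    static uint32_t hash3(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = a * 2654435761u ^ b * 2246822519u ^ c * 3266489917u;
        h ^= h >> 15;
        return h ? h : 1;            /* keep u strictly positive for log() */
    }

    static int straw2_choose(const int *ids, const uint32_t *weights,
                             int size, int x, int r)
    {
        double best = -INFINITY;
        int best_item = -1;

        for (int i = 0; i < size; i++) {
            if (!weights[i])
                continue;            /* zero-weight items never win */
            /* u in (0,1], deterministic for a given (x, item, r) triple */
            double u = (double)hash3((uint32_t)x, (uint32_t)ids[i],
                                     (uint32_t)r) / (double)UINT32_MAX;
            double straw = log(u) / (double)weights[i];
            if (straw > best) {      /* log(u) <= 0: a higher weight pulls
                                      * the straw toward 0, i.e. better */
                best = straw;
                best_item = ids[i];
            }
        }
        return best_item;
    }

Each item wins with probability proportional to its weight, and changing one item's weight only remaps the inputs whose winning draw involved that item, which is what gives CRUSH its minimal-data-movement property.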
/kernel/linux/linux-5.10/block/ |
H A D | blk-stat.c | 56 int bucket, cpu; in blk_stat_add() local 69 bucket = cb->bucket_fn(rq); in blk_stat_add() 70 if (bucket < 0) in blk_stat_add() 73 stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket]; in blk_stat_add() 83 unsigned int bucket; in blk_stat_timer_fn() local 86 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn() 87 blk_rq_stat_init(&cb->stat[bucket]); in blk_stat_timer_fn() 93 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn() 139 unsigned int bucket; blk_stat_add_callback() local [all...] |
/kernel/linux/linux-6.6/block/ |
H A D | blk-stat.c | 55 int bucket, cpu; in blk_stat_add() local 69 bucket = cb->bucket_fn(rq); in blk_stat_add() 70 if (bucket < 0) in blk_stat_add() 73 stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket]; in blk_stat_add() 83 unsigned int bucket; in blk_stat_timer_fn() local 86 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn() 87 blk_rq_stat_init(&cb->stat[bucket]); in blk_stat_timer_fn() 93 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn() 139 unsigned int bucket; blk_stat_add_callback() local [all...] |
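blk_stat_add() above files each completed request into a bucket chosen by cb->bucket_fn(), but into a per-CPU copy of the stats so the completion path never shares cache lines; blk_stat_timer_fn() later folds every CPU's copy into one result per bucket and resets the per-CPU state. A compact sketch with fixed CPU and bucket counts in place of the kernel's dynamic ones:

    #include <string.h>

    #define NR_CPUS    4
    #define NR_BUCKETS 8

    struct rq_stat { unsigned long long sum; unsigned long nr; };

    static struct rq_stat cpu_stat[NR_CPUS][NR_BUCKETS];

    /* Hot path: touch only this CPU's bucket row. */
    static void stat_add(int cpu, int bucket, unsigned long long value)
    {
        cpu_stat[cpu][bucket].sum += value;
        cpu_stat[cpu][bucket].nr++;
    }

    /* Timer: fold all CPUs into out[] and reset the per-CPU copies. */
    static void stat_timer_fn(struct rq_stat out[NR_BUCKETS])
    {
        memset(out, 0, sizeof(struct rq_stat) * NR_BUCKETS);
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            for (int bucket = 0; bucket < NR_BUCKETS; bucket++) {
                out[bucket].sum += cpu_stat[cpu][bucket].sum;
                out[bucket].nr  += cpu_stat[cpu][bucket].nr;
                memset(&cpu_stat[cpu][bucket], 0, sizeof(struct rq_stat));
            }
        }
    }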
/kernel/linux/linux-5.10/drivers/interconnect/qcom/ |
H A D | bcm-voter.c | 71 size_t i, bucket; in bcm_aggregate() local 76 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate() 79 temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, in bcm_aggregate() 81 agg_avg[bucket] = max(agg_avg[bucket], temp); in bcm_aggregate() 83 temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width, in bcm_aggregate() 85 agg_peak[bucket] = max(agg_peak[bucket], temp); in bcm_aggregate() 138 tcs_list_gen(struct bcm_voter *voter, int bucket, struct tcs_cmd tcs_list[MAX_VCD], int n[MAX_VCD + 1]) tcs_list_gen() argument [all...] |
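bcm_aggregate() reduces every member node of a BCM to a single (avg, peak) vote per bucket by taking the maximum of each node's bandwidth scaled by the BCM's width over its unit. A sketch with plain integer division standing in for the fixed-point bcm_div() and an illustrative bucket count:

    #define NUM_BUCKETS 3    /* e.g. AMC / WAKE / SLEEP vote sets */

    struct node_votes {
        unsigned long long sum_avg[NUM_BUCKETS];
        unsigned long long max_peak[NUM_BUCKETS];
    };

    static void aggregate(const struct node_votes *nodes, int num_nodes,
                          unsigned long long width, unsigned long long unit,
                          unsigned long long agg_avg[NUM_BUCKETS],
                          unsigned long long agg_peak[NUM_BUCKETS])
    {
        for (int bucket = 0; bucket < NUM_BUCKETS; bucket++) {
            agg_avg[bucket] = agg_peak[bucket] = 0;
            for (int i = 0; i < num_nodes; i++) {
                unsigned long long temp;

                temp = nodes[i].sum_avg[bucket] * width / unit; /* bcm_div() */
                if (temp > agg_avg[bucket])
                    agg_avg[bucket] = temp;

                temp = nodes[i].max_peak[bucket] * width / unit;
                if (temp > agg_peak[bucket])
                    agg_peak[bucket] = temp;
            }
        }
    }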
/kernel/linux/linux-5.10/net/sched/ |
H A D | sch_hhf.c | 21 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket. 23 * in which the heavy-hitter bucket is served with less weight. 61 * dispatched to the heavy-hitter bucket accordingly. 68 * bucket. 71 * to the non-heavy-hitter bucket. 74 * send p to the heavy-hitter bucket. 105 WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */ 106 WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */ 328 /* Removes one skb from head of bucket. */ 329 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) in dequeue_head() argument 339 bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) bucket_add() argument 352 struct wdrr_bucket *bucket; hhf_drop() local 376 struct wdrr_bucket *bucket; hhf_enqueue() local 422 struct wdrr_bucket *bucket; hhf_dequeue() local 645 struct wdrr_bucket *bucket = q->buckets + i; hhf_init() local [all...] |
/kernel/linux/linux-6.6/net/sched/ |
H A D | sch_hhf.c | 21 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket. 23 * in which the heavy-hitter bucket is served with less weight. 61 * dispatched to the heavy-hitter bucket accordingly. 68 * bucket. 71 * to the non-heavy-hitter bucket. 74 * send p to the heavy-hitter bucket. 105 WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */ 106 WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */ 328 /* Removes one skb from head of bucket. */ 329 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) in dequeue_head() argument 339 bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) bucket_add() argument 352 struct wdrr_bucket *bucket; hhf_drop() local 376 struct wdrr_bucket *bucket; hhf_enqueue() local 422 struct wdrr_bucket *bucket; hhf_dequeue() local 642 struct wdrr_bucket *bucket = q->buckets + i; hhf_init() local [all...] |
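sch_hhf keeps exactly two WDRR buckets, WDRR_BUCKET_FOR_HH and WDRR_BUCKET_FOR_NON_HH, and serves the heavy-hitter bucket with the smaller weight so bulk flows cannot starve everyone else. A sketch of the dequeue side; the heavy-hitter classification is out of scope here and the quantum values are illustrative:

    #include <stddef.h>

    struct pkt { struct pkt *next; int len; };
    struct wdrr_bucket { struct pkt *head; int deficit; };

    enum { BUCKET_HH = 0, BUCKET_NON_HH = 1 };
    /* non-heavy-hitters get the larger share, as with hhf_non_hh_weight */
    static const int quantum[2] = { 1514, 3 * 1514 };

    static struct pkt *dequeue_head(struct wdrr_bucket *bucket)
    {
        struct pkt *skb = bucket->head;

        if (skb)
            bucket->head = skb->next;
        return skb;
    }

    static struct pkt *wdrr_dequeue(struct wdrr_bucket b[2])
    {
        for (int round = 0; round < 4; round++) {
            for (int i = 0; i < 2; i++) {
                struct pkt *skb;

                if (b[i].deficit <= 0 || !b[i].head)
                    continue;
                skb = dequeue_head(&b[i]);
                b[i].deficit -= skb->len;   /* spend this bucket's credit */
                return skb;
            }
            for (int i = 0; i < 2; i++)     /* out of credit: refill */
                b[i].deficit += quantum[i];
        }
        return NULL;                        /* both buckets empty */
    }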
/kernel/linux/linux-6.6/drivers/interconnect/qcom/ |
H A D | bcm-voter.c | 65 int bucket, i; in bcm_aggregate_mask() local 67 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate_mask() 68 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask() 69 bcm->vote_y[bucket] = 0; in bcm_aggregate_mask() 74 /* If any vote in this bucket exists, keep the BCM enabled */ in bcm_aggregate_mask() 75 if (node->sum_avg[bucket] || node->max_peak[bucket]) { in bcm_aggregate_mask() 76 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask() 94 size_t i, bucket; bcm_aggregate() local 156 tcs_list_gen(struct bcm_voter *voter, int bucket, struct tcs_cmd tcs_list[MAX_VCD], int n[MAX_VCD + 1]) tcs_list_gen() argument [all...] |
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/ |
H A D | trace_qp.h | 60 TP_PROTO(struct rvt_qp *qp, u32 bucket), 61 TP_ARGS(qp, bucket), 65 __field(u32, bucket) 70 __entry->bucket = bucket; 73 "[%s] qpn 0x%x bucket %u", 76 __entry->bucket 81 TP_PROTO(struct rvt_qp *qp, u32 bucket), 82 TP_ARGS(qp, bucket)); 85 TP_PROTO(struct rvt_qp *qp, u32 bucket), [all...] |
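trace_qp.h is the standard tracepoint idiom: one DECLARE_EVENT_CLASS carries the shared prototype, fields, and print format, and each DEFINE_EVENT stamps out a named event from it. Reassembled below from the matched lines; this is kernel-only code (it only builds inside the kernel's tracing machinery), and the class and event names are reconstructions, so check the header for the exact ones:

    DECLARE_EVENT_CLASS(rvt_qphash_template,
        TP_PROTO(struct rvt_qp *qp, u32 bucket),
        TP_ARGS(qp, bucket),
        TP_STRUCT__entry(
            RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
            __field(u32, qpn)
            __field(u32, bucket)
        ),
        TP_fast_assign(
            RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
            __entry->qpn = qp->ibqp.qp_num;
            __entry->bucket = bucket;
        ),
        TP_printk("[%s] qpn 0x%x bucket %u",
                  __get_str(dev), __entry->qpn, __entry->bucket)
    );

    /* One event for insertion into the QP hash, one for removal. */
    DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
        TP_PROTO(struct rvt_qp *qp, u32 bucket),
        TP_ARGS(qp, bucket));

    DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
        TP_PROTO(struct rvt_qp *qp, u32 bucket),
        TP_ARGS(qp, bucket));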
/kernel/linux/linux-5.10/net/9p/ |
H A D | error.c | 181 int bucket; in p9_error_init() local 184 for (bucket = 0; bucket < ERRHASHSZ; bucket++) in p9_error_init() 185 INIT_HLIST_HEAD(&hash_errmap[bucket]); in p9_error_init() 190 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; in p9_error_init() 192 hlist_add_head(&c->list, &hash_errmap[bucket]); in p9_error_init() 210 int bucket; in p9_errstr2errno() local 214 bucket = jhash(errstr, len, 0) % ERRHASHSZ; in p9_errstr2errno() 215 hlist_for_each_entry(c, &hash_errmap[bucket], list) { in p9_errstr2errno() [all...] |
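p9_error_init() hashes every known error string into one of ERRHASHSZ chained buckets once at init, so p9_errstr2errno() can hash an incoming string and walk a single short chain. A self-contained sketch with an FNV-1a toy hash in place of jhash() and a plain singly linked list in place of hlist:

    #include <string.h>

    #define ERRHASHSZ 32

    struct errmap { const char *name; int val; struct errmap *next; };
    static struct errmap *hash_errmap[ERRHASHSZ];

    static unsigned int errhash(const char *s, size_t len)
    {
        unsigned int h = 2166136261u;    /* FNV-1a, a stand-in for jhash() */
        while (len--)
            h = (h ^ (unsigned char)*s++) * 16777619u;
        return h % ERRHASHSZ;
    }

    static void error_add(struct errmap *c)   /* p9_error_init() loop body */
    {
        unsigned int bucket = errhash(c->name, strlen(c->name));

        c->next = hash_errmap[bucket];
        hash_errmap[bucket] = c;
    }

    static int errstr2errno(const char *errstr, size_t len)
    {
        unsigned int bucket = errhash(errstr, len);

        for (struct errmap *c = hash_errmap[bucket]; c; c = c->next)
            if (strlen(c->name) == len && !memcmp(c->name, errstr, len))
                return c->val;
        return 0;    /* unknown string; the caller picks a default errno */
    }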
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/ |
H A D | trace_qp.h | 18 TP_PROTO(struct rvt_qp *qp, u32 bucket), 19 TP_ARGS(qp, bucket), 23 __field(u32, bucket) 28 __entry->bucket = bucket; 31 "[%s] qpn 0x%x bucket %u", 34 __entry->bucket 39 TP_PROTO(struct rvt_qp *qp, u32 bucket), 40 TP_ARGS(qp, bucket)); 43 TP_PROTO(struct rvt_qp *qp, u32 bucket), [all...] |
/kernel/linux/linux-6.6/net/9p/ |
H A D | error.c | 179 int bucket; in p9_error_init() local 182 for (bucket = 0; bucket < ERRHASHSZ; bucket++) in p9_error_init() 183 INIT_HLIST_HEAD(&hash_errmap[bucket]); in p9_error_init() 188 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; in p9_error_init() 190 hlist_add_head(&c->list, &hash_errmap[bucket]); in p9_error_init() 208 int bucket; in p9_errstr2errno() local 212 bucket = jhash(errstr, len, 0) % ERRHASHSZ; in p9_errstr2errno() 213 hlist_for_each_entry(c, &hash_errmap[bucket], list) { in p9_errstr2errno() [all...] |
/kernel/linux/linux-5.10/fs/nfs/ |
H A D | nfs42xattr.c | 87 struct nfs4_xattr_bucket *bucket; member 120 * 1. inode i_lock or bucket lock 238 entry->bucket = NULL; in nfs4_xattr_alloc_entry() 389 struct nfs4_xattr_bucket *bucket; in nfs4_xattr_discard_cache() local 395 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache() 397 spin_lock(&bucket->lock); in nfs4_xattr_discard_cache() 398 bucket->draining = true; in nfs4_xattr_discard_cache() 399 hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { in nfs4_xattr_discard_cache() 404 spin_unlock(&bucket->lock); in nfs4_xattr_discard_cache() 512 nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) nfs4_xattr_get_entry() argument 530 struct nfs4_xattr_bucket *bucket; nfs4_xattr_hash_add() local 567 struct nfs4_xattr_bucket *bucket; nfs4_xattr_hash_remove() local 590 struct nfs4_xattr_bucket *bucket; nfs4_xattr_hash_find() local 893 struct nfs4_xattr_bucket *bucket; entry_lru_isolate() local [all...] |
/kernel/linux/linux-6.6/fs/nfs/ |
H A D | nfs42xattr.c | 87 struct nfs4_xattr_bucket *bucket; member 120 * 1. inode i_lock or bucket lock 238 entry->bucket = NULL; in nfs4_xattr_alloc_entry() 388 struct nfs4_xattr_bucket *bucket; in nfs4_xattr_discard_cache() local 394 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache() 396 spin_lock(&bucket->lock); in nfs4_xattr_discard_cache() 397 bucket->draining = true; in nfs4_xattr_discard_cache() 398 hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { in nfs4_xattr_discard_cache() 403 spin_unlock(&bucket->lock); in nfs4_xattr_discard_cache() 511 nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) nfs4_xattr_get_entry() argument 529 struct nfs4_xattr_bucket *bucket; nfs4_xattr_hash_add() local 566 struct nfs4_xattr_bucket *bucket; nfs4_xattr_hash_remove() local 589 struct nfs4_xattr_bucket *bucket; nfs4_xattr_hash_find() local 892 struct nfs4_xattr_bucket *bucket; entry_lru_isolate() local [all...] |
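In both trees, nfs4_xattr_discard_cache() marks each bucket as draining under that bucket's lock, and inserts re-check the flag under the same lock, so an add can never race with cache teardown. A pthread-based model of that discipline, with illustrative structs:

    #include <pthread.h>
    #include <stddef.h>

    struct xattr_entry { const char *name; struct xattr_entry *next; };

    struct xattr_bucket {
        pthread_mutex_t lock;    /* init with pthread_mutex_init() */
        struct xattr_entry *chain;
        int draining;
    };

    /* Insert unless the cache is being torn down; 0 on success. */
    static int xattr_hash_add(struct xattr_bucket *bucket,
                              struct xattr_entry *e)
    {
        int ret = 0;

        pthread_mutex_lock(&bucket->lock);
        if (bucket->draining) {
            ret = -1;            /* discard in progress: refuse */
        } else {
            e->next = bucket->chain;
            bucket->chain = e;
        }
        pthread_mutex_unlock(&bucket->lock);
        return ret;
    }

    static void xattr_discard_bucket(struct xattr_bucket *bucket)
    {
        pthread_mutex_lock(&bucket->lock);
        bucket->draining = 1;    /* all later adds now fail */
        bucket->chain = NULL;    /* the real code frees each entry here */
        pthread_mutex_unlock(&bucket->lock);
    }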
/kernel/linux/linux-5.10/net/vmw_vsock/ |
H A D | diag.c | 52 unsigned int bucket; in vsock_diag_dump() local 63 bucket = cb->args[1]; in vsock_diag_dump() 72 while (bucket < ARRAY_SIZE(vsock_bind_table)) { in vsock_diag_dump() 73 struct list_head *head = &vsock_bind_table[bucket]; in vsock_diag_dump() 94 bucket++; in vsock_diag_dump() 98 bucket = 0; in vsock_diag_dump() 102 while (bucket < ARRAY_SIZE(vsock_connected_table)) { in vsock_diag_dump() 103 struct list_head *head = &vsock_connected_table[bucket]; in vsock_diag_dump() 128 bucket++; in vsock_diag_dump() 135 cb->args[1] = bucket; in vsock_diag_dump() [all...] |
/kernel/linux/linux-6.6/net/vmw_vsock/ |
H A D | diag.c | 52 unsigned int bucket; in vsock_diag_dump() local 63 bucket = cb->args[1]; in vsock_diag_dump() 72 while (bucket < ARRAY_SIZE(vsock_bind_table)) { in vsock_diag_dump() 73 struct list_head *head = &vsock_bind_table[bucket]; in vsock_diag_dump() 94 bucket++; in vsock_diag_dump() 98 bucket = 0; in vsock_diag_dump() 102 while (bucket < ARRAY_SIZE(vsock_connected_table)) { in vsock_diag_dump() 103 struct list_head *head = &vsock_connected_table[bucket]; in vsock_diag_dump() 128 bucket++; in vsock_diag_dump() 135 cb->args[1] = bucket; in vsock_diag_dump() [all...] |
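vsock_diag_dump() stores its progress in cb->args as a (table, bucket, index-within-bucket) cursor, so a dump that fills the netlink buffer resumes mid-bucket on the next invocation. A sketch of that resumable cursor over one chained table, with an output array standing in for the netlink skb:

    #define NBUCKETS 16

    struct sock_node { struct sock_node *next; int id; };
    static struct sock_node *bind_table[NBUCKETS];

    /* Emit up to 'room' ids; returns the count and updates the cursor
     * (*bucketp, *posp) so the next call resumes where this one stopped. */
    static int dump_some(unsigned int *bucketp, unsigned int *posp,
                         int *out, int room)
    {
        unsigned int bucket = *bucketp, pos = *posp;
        int emitted = 0;

        while (bucket < NBUCKETS && emitted < room) {
            unsigned int i = 0;

            for (struct sock_node *n = bind_table[bucket]; n;
                 n = n->next, i++) {
                if (i < pos)
                    continue;          /* already dumped last time */
                if (emitted == room)
                    goto full;         /* buffer full: resume at i */
                out[emitted++] = n->id;
                pos = i + 1;
            }
            bucket++;                  /* bucket finished: move on */
            pos = 0;
        }
    full:
        *bucketp = bucket;
        *posp = pos;
        return emitted;
    }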
/kernel/linux/linux-5.10/fs/dlm/ |
H A D | debug_fs.c | 368 unsigned bucket; member 427 unsigned bucket, entry; in table_seq_start() local 430 bucket = n >> 32; in table_seq_start() 433 if (bucket >= ls->ls_rsbtbl_size) in table_seq_start() 450 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; in table_seq_start() 452 spin_lock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start() 459 ri->bucket = bucket; in table_seq_start() 460 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start() 507 unsigned bucket; table_seq_next() local [all...] |
/kernel/linux/linux-6.6/fs/dlm/ |
H A D | debug_fs.c | 417 unsigned bucket; member 484 unsigned bucket, entry; in table_seq_start() local 487 bucket = n >> 32; in table_seq_start() 490 if (bucket >= ls->ls_rsbtbl_size) in table_seq_start() 509 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; in table_seq_start() 511 spin_lock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start() 518 ri->bucket = bucket; in table_seq_start() 519 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start() 566 unsigned bucket; table_seq_next() local [all...] |
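table_seq_start() above decodes its position with bucket = n >> 32: dlm packs the rsbtbl bucket into the high 32 bits of the 64-bit seq_file offset and the entry index within that bucket into the low 32, so iteration can restart from an opaque loff_t. The encoding written out as helpers (a sketch; the kernel open-codes the shifts):

    #include <stdint.h>

    static inline uint64_t pos_pack(uint32_t bucket, uint32_t entry)
    {
        return ((uint64_t)bucket << 32) | entry;
    }

    static inline void pos_unpack(uint64_t n, uint32_t *bucket,
                                  uint32_t *entry)
    {
        *bucket = (uint32_t)(n >> 32);
        *entry = (uint32_t)n;
    }

    /* What table_seq_next() does when a bucket is exhausted: jump to
     * entry 0 of the following bucket. */
    static inline uint64_t pos_next_bucket(uint64_t n)
    {
        return ((n >> 32) + 1) << 32;
    }

The scheme caps each bucket at 2^32 entries, which is safe here since a hash bucket holds far fewer resources than that.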
/kernel/linux/linux-5.10/net/rxrpc/ |
H A D | proc.c | 256 unsigned int bucket, n; in __acquires() local 266 bucket = *_pos >> shift; in __acquires() 268 if (bucket >= HASH_SIZE(rxnet->peer_hash)) { in __acquires() 273 if (bucket == 0) in __acquires() 279 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); in __acquires() 282 bucket++; in __acquires() 284 *_pos = (bucket << shift) | n; in __acquires() 291 unsigned int bucket, n; in rxrpc_peer_seq_next() local 298 bucket = *_pos >> shift; in rxrpc_peer_seq_next() 300 p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos); in rxrpc_peer_seq_next() [all...] |
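rxrpc's peer /proc iterator uses the same packed-position idea, *_pos = (bucket << shift) | n, but reserves entry 0 of bucket 0 for the table header (SEQ_START_TOKEN) and skips slot 0 in every later bucket. A userspace model with an assumed 8-bit entry field; advancing to the next bucket when a chain ends is elided:

    #include <stdint.h>
    #include <stddef.h>

    #define ENTRY_BITS 8                 /* low bits: entry within bucket */
    #define SEQ_HDR    ((void *)1)       /* models SEQ_START_TOKEN */

    struct peer { struct peer *next; };

    static void *peer_seq_start(struct peer **hash, unsigned int nbuckets,
                                uint64_t *_pos)
    {
        unsigned int bucket = (unsigned int)(*_pos >> ENTRY_BITS);
        unsigned int n = (unsigned int)(*_pos & ((1u << ENTRY_BITS) - 1));

        if (bucket >= nbuckets)
            return NULL;
        if (n == 0) {
            if (bucket == 0)
                return SEQ_HDR;          /* header row comes first */
            (*_pos)++;                   /* slot 0 of later buckets unused */
            n = 1;
        }

        /* return the (n-1)-th entry of this bucket's chain */
        struct peer *p = hash[bucket];
        for (unsigned int i = 1; p && i < n; i++)
            p = p->next;
        return p;                        /* NULL: caller tries next bucket */
    }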
/kernel/linux/linux-6.6/drivers/cpuidle/governors/ |
H A D | menu.c | 116 unsigned int bucket; member 124 int bucket = 0; in which_bucket() local 133 bucket = BUCKETS/2; in which_bucket() 136 return bucket; in which_bucket() 138 return bucket + 1; in which_bucket() 140 return bucket + 2; in which_bucket() 142 return bucket + 3; in which_bucket() 144 return bucket + 4; in which_bucket() 145 return bucket + 5; 293 data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); in menu_select() [all...] |
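which_bucket() bins the predicted sleep length into decade-wide buckets and, when I/O waiters are pending, shifts the bin into the upper half of the BUCKETS=12 array so iowait-bound idle keeps its own correction factors; menu_select() then stores the choice in data->bucket. Reconstructed below as standalone C from the matched lines, with the decade thresholds as in the kernel source:

    #include <stdint.h>

    #define BUCKETS 12
    #define NSEC_PER_USEC 1000ULL

    static int which_bucket(uint64_t duration_ns, unsigned int nr_iowaiters)
    {
        int bucket = 0;

        /* iowait-heavy idle gets the upper half of the buckets */
        if (nr_iowaiters)
            bucket = BUCKETS / 2;

        if (duration_ns < 10ULL * NSEC_PER_USEC)
            return bucket;
        if (duration_ns < 100ULL * NSEC_PER_USEC)
            return bucket + 1;
        if (duration_ns < 1000ULL * NSEC_PER_USEC)
            return bucket + 2;
        if (duration_ns < 10000ULL * NSEC_PER_USEC)
            return bucket + 3;
        if (duration_ns < 100000ULL * NSEC_PER_USEC)
            return bucket + 4;
        return bucket + 5;
    }

The governor keeps one running correction factor per bucket, so an expected 50us sleep with I/O pending learns independently from an expected 50us sleep without it.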
/kernel/linux/linux-5.10/kernel/dma/ |
H A D | debug.c | 246 * Request exclusive access to a hash bucket for a given dma_debug_entry. 261 * Give up exclusive access to the hash bucket 263 static void put_hash_bucket(struct hash_bucket *bucket, 265 __releases(&bucket->lock) 267 spin_unlock_irqrestore(&bucket->lock, flags); 290 * Search a given entry in the hash bucket list 292 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, in __hash_bucket_find() argument 299 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find() 342 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, in bucket_find_exact() argument 345 return __hash_bucket_find(bucket, ref, exact_match); in bucket_find_exact() 348 bucket_find_contain(struct hash_bucket **bucket, struct dma_debug_entry *ref, unsigned long *flags) bucket_find_contain() argument 378 hash_bucket_add(struct hash_bucket *bucket, struct dma_debug_entry *entry) hash_bucket_add() argument 408 struct hash_bucket *bucket = &dma_entry_hash[idx]; debug_dma_dump_mappings() local 557 struct hash_bucket *bucket; add_dma_entry() local 785 struct hash_bucket *bucket = &dma_entry_hash[idx]; dump_show() local 959 struct hash_bucket *bucket; check_unmap() local 1102 struct hash_bucket *bucket; check_sync() local 1253 struct hash_bucket *bucket; debug_dma_mapping_error() local 1344 struct hash_bucket *bucket; get_nr_mapped_entries() local [all...] |
/kernel/linux/linux-6.6/kernel/dma/ |
H A D | debug.c | 247 * Request exclusive access to a hash bucket for a given dma_debug_entry. 262 * Give up exclusive access to the hash bucket 264 static void put_hash_bucket(struct hash_bucket *bucket, 266 __releases(&bucket->lock) 268 spin_unlock_irqrestore(&bucket->lock, flags); 291 * Search a given entry in the hash bucket list 293 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, in __hash_bucket_find() argument 300 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find() 343 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, in bucket_find_exact() argument 346 return __hash_bucket_find(bucket, ref, exact_match); in bucket_find_exact() 349 bucket_find_contain(struct hash_bucket **bucket, struct dma_debug_entry *ref, unsigned long *flags) bucket_find_contain() argument 377 hash_bucket_add(struct hash_bucket *bucket, struct dma_debug_entry *entry) hash_bucket_add() argument 528 struct hash_bucket *bucket = &dma_entry_hash[idx]; debug_dma_dump_mappings() local 560 struct hash_bucket *bucket = &dma_entry_hash[idx]; dump_show() local 589 struct hash_bucket *bucket; add_dma_entry() local 963 struct hash_bucket *bucket; check_unmap() local 1096 struct hash_bucket *bucket; check_sync() local 1248 struct hash_bucket *bucket; debug_dma_mapping_error() local 1340 struct hash_bucket *bucket; get_nr_mapped_entries() local [all...] |
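dma-debug hashes every active mapping into a fixed array of buckets, each with its own IRQ-safe spinlock: get_hash_bucket() returns the bucket already locked, put_hash_bucket() releases it, and lookups such as bucket_find_exact() therefore walk one short chain under a fine-grained lock. A userspace model with a pthread mutex in place of the spinlock-plus-flags pair and an illustrative hash:

    #include <pthread.h>
    #include <stdint.h>
    #include <stddef.h>

    #define HASH_SIZE 1024

    struct dma_entry { uintptr_t dev, addr; struct dma_entry *next; };

    static struct hash_bucket {
        pthread_mutex_t lock;    /* init with pthread_mutex_init() */
        struct dma_entry *list;
    } dma_entry_hash[HASH_SIZE];

    /* Returned with the lock held, like the kernel helper. */
    static struct hash_bucket *get_hash_bucket(uintptr_t addr)
    {
        struct hash_bucket *bucket =
            &dma_entry_hash[(addr >> 12) % HASH_SIZE];  /* page-granular */

        pthread_mutex_lock(&bucket->lock);
        return bucket;
    }

    static void put_hash_bucket(struct hash_bucket *bucket)
    {
        pthread_mutex_unlock(&bucket->lock);
    }

    /* Caller must hold the bucket lock. */
    static struct dma_entry *bucket_find_exact(struct hash_bucket *bucket,
                                               uintptr_t dev, uintptr_t addr)
    {
        for (struct dma_entry *e = bucket->list; e; e = e->next)
            if (e->dev == dev && e->addr == addr)
                return e;
        return NULL;
    }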