/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
     38  /* protect the ICM bucket */
    181  struct mlx5dr_icm_bucket *bucket = chunk->bucket;  (dr_icm_chunk_ste_init)
    183  chunk->ste_arr = kvzalloc(bucket->num_of_entries * ...  (dr_icm_chunk_ste_init)
    188  chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries * ...  (dr_icm_chunk_ste_init)
    193  chunk->miss_list = kvmalloc(bucket->num_of_entries * ...  (dr_icm_chunk_ste_init)
    207  static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)  (dr_icm_chunks_create)
    210  struct mlx5dr_icm_pool *pool = bucket->pool;  (dr_icm_chunks_create)
    215  mr_req_size = bucket->num_of_entries * bucket ...  (dr_icm_chunks_create)
    282  struct mlx5dr_icm_bucket *bucket = chunk->bucket;  (dr_icm_chunk_destroy)
    293  dr_icm_bucket_init(struct mlx5dr_icm_pool *pool, struct mlx5dr_icm_bucket *bucket, enum mlx5dr_icm_chunk_size chunk_size)
    311  dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
    341  dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool, struct mlx5dr_icm_bucket *bucket)
    353  dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
    360  dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
    367  dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
    378  struct mlx5dr_icm_bucket *bucket;  (dr_icm_chill_buckets_start)
    402  struct mlx5dr_icm_bucket *bucket;  (dr_icm_chill_buckets_end)
    424  struct mlx5dr_icm_bucket *bucket;  (dr_icm_chill_buckets_abort)
    451  struct mlx5dr_icm_bucket *bucket;  (mlx5dr_icm_alloc_chunk)
    496  struct mlx5dr_icm_bucket *bucket = chunk->bucket;  (mlx5dr_icm_free_chunk)
    ...  (remaining matches truncated)
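The dr_icm_chunk_ste_init() hits size three per-chunk arrays from the owning bucket's entry count. Below is a minimal userspace sketch of that sizing pattern, assuming calloc as a stand-in for kvzalloc/kvmalloc and struct fields reduced to what the snippets show; all names are illustrative, not the mlx5 driver's API.

    #include <stdlib.h>

    struct icm_bucket {
        unsigned int num_of_entries;     /* fixed number of entries per chunk in this bucket */
    };

    struct icm_chunk {
        struct icm_bucket *bucket;
        void *ste_arr;                   /* one software STE per entry */
        void *hw_ste_arr;                /* one hardware STE image per entry */
        void **miss_list;                /* one miss-list head per entry */
    };

    /* Allocate the per-entry arrays of a chunk, sized by its owning bucket. */
    int chunk_ste_init(struct icm_chunk *chunk, size_t ste_size, size_t hw_ste_size)
    {
        struct icm_bucket *bucket = chunk->bucket;

        chunk->ste_arr = calloc(bucket->num_of_entries, ste_size);
        chunk->hw_ste_arr = calloc(bucket->num_of_entries, hw_ste_size);
        chunk->miss_list = calloc(bucket->num_of_entries, sizeof(*chunk->miss_list));
        if (!chunk->ste_arr || !chunk->hw_ste_arr || !chunk->miss_list) {
            free(chunk->ste_arr);
            free(chunk->hw_ste_arr);
            free(chunk->miss_list);
            return -1;
        }
        return 0;
    }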
/third_party/libdrm/xf86drmHash.c
    119  HashBucketPtr bucket;  (drmHashDestroy)
    126  for (bucket = table->buckets[i]; bucket;) {  (drmHashDestroy)
    127  next = bucket->next;  (drmHashDestroy)
    128  drmFree(bucket);  (drmHashDestroy)
    129  bucket = next;  (drmHashDestroy)
    136  /* Find the bucket and organize the list so that this bucket is at the ...
    144  HashBucketPtr bucket;  (HashFind)
    148  for (bucket ...  (HashFind)
    170  HashBucketPtr bucket;  (drmHashLookup)
    183  HashBucketPtr bucket;  (drmHashInsert)
    203  HashBucketPtr bucket;  (drmHashDelete)
    ...  (remaining matches truncated)
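The comment at line 136 describes the classic move-to-front optimization: once a key's bucket is found, it is relinked at the head of its chain so a repeated lookup terminates immediately. A small self-contained C sketch of a chained hash table with that move-to-front step follows; the table size, hash function, and type names are assumptions for illustration, not libdrm's implementation.

    #include <stdlib.h>

    #define HASH_SIZE 512

    typedef struct HashBucket {
        unsigned long key;
        void *value;
        struct HashBucket *next;
    } HashBucket;

    typedef struct HashTable {
        HashBucket *buckets[HASH_SIZE];
    } HashTable;

    static unsigned hash_key(unsigned long key)
    {
        return (unsigned)((key * 2654435761UL) % HASH_SIZE);   /* multiplicative hash */
    }

    /* Find the bucket for key and move it to the head of its chain so that
     * a repeated lookup of the same key terminates on the first comparison. */
    HashBucket *hash_find(HashTable *table, unsigned long key)
    {
        unsigned idx = hash_key(key);
        HashBucket *prev = NULL;

        for (HashBucket *bucket = table->buckets[idx]; bucket; bucket = bucket->next) {
            if (bucket->key == key) {
                if (prev) {                              /* unlink, then relink at the head */
                    prev->next = bucket->next;
                    bucket->next = table->buckets[idx];
                    table->buckets[idx] = bucket;
                }
                return bucket;
            }
            prev = bucket;
        }
        return NULL;                                     /* not present */
    }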
/kernel/linux/linux-5.10/net/mptcp/token.c
     54  /* called with bucket lock held */
     67  /* called with bucket lock held */
    112  struct token_bucket *bucket;  (mptcp_token_new_request)
    123  bucket = token_bucket(token);  (mptcp_token_new_request)
    124  spin_lock_bh(&bucket->lock);  (mptcp_token_new_request)
    125  if (__token_bucket_busy(bucket, token)) {  (mptcp_token_new_request)
    126  spin_unlock_bh(&bucket->lock);  (mptcp_token_new_request)
    130  hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);  (mptcp_token_new_request)
    131  bucket->chain_len++;  (mptcp_token_new_request)
    132  spin_unlock_bh(&bucket ...  (mptcp_token_new_request)
    157  struct token_bucket *bucket;  (mptcp_token_new_connect)
    194  struct token_bucket *bucket;  (mptcp_token_accept)
    210  struct token_bucket *bucket;  (mptcp_token_exists)
    246  struct token_bucket *bucket;  (mptcp_token_get_sock)
    302  struct token_bucket *bucket = &token_hash[slot];  (mptcp_token_iter_next)
    351  struct token_bucket *bucket;  (mptcp_token_destroy_request)
    374  struct token_bucket *bucket;  (mptcp_token_destroy)
    ...  (remaining matches truncated)
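The mptcp_token_new_request() hits show the per-bucket locking pattern: the token hashes to one bucket, only that bucket's lock is taken, the chain is scanned for a duplicate, and the new entry is linked at the head. The sketch below is a userspace analogue under stated assumptions: a pthread mutex and a plain singly linked list stand in for the kernel's spin_lock_bh() and RCU hlist, and all names are illustrative.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define HASH_BUCKETS 1024

    struct entry {
        unsigned int token;
        struct entry *next;
    };

    /* Each bucket carries its own lock, so inserts that hash to different
     * buckets never contend. */
    struct token_bucket {
        pthread_mutex_t lock;
        struct entry *chain;
        unsigned int chain_len;
    };

    static struct token_bucket token_hash[HASH_BUCKETS];

    void token_hash_init(void)
    {
        for (size_t i = 0; i < HASH_BUCKETS; i++)
            pthread_mutex_init(&token_hash[i].lock, NULL);
    }

    static struct token_bucket *token_bucket_of(unsigned int token)
    {
        return &token_hash[token % HASH_BUCKETS];
    }

    /* Returns false if the token is already present in its bucket. */
    bool token_insert(struct entry *e)
    {
        struct token_bucket *bucket = token_bucket_of(e->token);

        pthread_mutex_lock(&bucket->lock);
        for (struct entry *it = bucket->chain; it; it = it->next) {
            if (it->token == e->token) {         /* bucket already holds this token */
                pthread_mutex_unlock(&bucket->lock);
                return false;
            }
        }
        e->next = bucket->chain;                 /* link at the head of the chain */
        bucket->chain = e;
        bucket->chain_len++;
        pthread_mutex_unlock(&bucket->lock);
        return true;
    }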
/kernel/linux/linux-6.6/net/mptcp/token.c
     53  /* called with bucket lock held */
     66  /* called with bucket lock held */
    111  struct token_bucket *bucket;  (mptcp_token_new_request)
    122  bucket = token_bucket(token);  (mptcp_token_new_request)
    123  spin_lock_bh(&bucket->lock);  (mptcp_token_new_request)
    124  if (__token_bucket_busy(bucket, token)) {  (mptcp_token_new_request)
    125  spin_unlock_bh(&bucket->lock);  (mptcp_token_new_request)
    129  hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);  (mptcp_token_new_request)
    130  bucket->chain_len++;  (mptcp_token_new_request)
    131  spin_unlock_bh(&bucket ...  (mptcp_token_new_request)
    157  struct token_bucket *bucket;  (mptcp_token_new_connect)
    196  struct token_bucket *bucket;  (mptcp_token_accept)
    213  struct token_bucket *bucket;  (mptcp_token_exists)
    249  struct token_bucket *bucket;  (mptcp_token_get_sock)
    305  struct token_bucket *bucket = &token_hash[slot];  (mptcp_token_iter_next)
    354  struct token_bucket *bucket;  (mptcp_token_destroy_request)
    378  struct token_bucket *bucket;  (mptcp_token_destroy)
    ...  (remaining matches truncated)
/third_party/mesa3d/src/glx/glxhash.c
    179  __glxHashBucketPtr bucket;  (__glxHashDestroy)
    187  for (bucket = table->buckets[i]; bucket;) {  (__glxHashDestroy)
    188  next = bucket->next;  (__glxHashDestroy)
    189  HASH_FREE(bucket);  (__glxHashDestroy)
    190  bucket = next;  (__glxHashDestroy)
    197  /* Find the bucket and organize the list so that this bucket is at the ...
    205  __glxHashBucketPtr bucket;  (HashFind)
    210  for (bucket ...  (HashFind)
    234  __glxHashBucketPtr bucket;  (__glxHashLookup)
    250  __glxHashBucketPtr bucket;  (__glxHashInsert)
    277  __glxHashBucketPtr bucket;  (__glxHashDelete)
    337  count_entries(__glxHashBucketPtr bucket)  (count_entries)
    359  __glxHashBucketPtr bucket;  (compute_dist)
    ...  (remaining matches truncated)
/third_party/node/deps/v8/src/heap/slot-set.h
    132  // Each bucket is a bitmap with a bit corresponding to a single slot offset.
    136  FREE_EMPTY_BUCKETS,  // An empty bucket will be deallocated immediately.
    137  KEEP_EMPTY_BUCKETS   // An empty bucket will be kept.
    166  *slot_set->bucket(i) = nullptr;  (Allocate)
    182  DCHECK_NULL(*slot_set->bucket(i));  (Delete)
    194  // Converts the slot offset into bucket index.
    208  Bucket* bucket = LoadBucket<access_mode>(bucket_index);  (Insert)
    209  if (bucket == nullptr) {  (Insert)
    210  bucket = new Bucket;  (Insert)
    211  if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {  (Insert)
    232  Bucket* bucket = LoadBucket(bucket_index);  (Contains)
    242  Bucket* bucket = LoadBucket(bucket_index);  (Remove)
    266  Bucket* bucket;  (RemoveRange)
    324  Bucket* bucket = LoadBucket(bucket_index);  (Lookup)
    386  Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);  (CheckPossiblyEmptyBuckets)
    476  Bucket* bucket = LoadBucket(bucket_index);  (Iterate)
    512  Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);  (FreeBucketIfEmpty)
    524  ClearBucket(Bucket* bucket, int start_cell, int end_cell)  (ClearBucket)
    536  Bucket* bucket = LoadBucket<access_mode>(bucket_index);  (ReleaseBucket)
    542  LoadBucket(Bucket** bucket)  (LoadBucket)
    554  StoreBucket(Bucket** bucket, Bucket* value)  (StoreBucket)
    592  Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }  (bucket)
    ...  (remaining matches truncated)
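The Insert() hits show V8's lazy bucket allocation: a bucket pointer starts out null, a fresh Bucket is allocated on first use, and SwapInNewBucket() installs it with a compare-and-swap so a racing inserter's allocation is discarded rather than leaked. Below is a compact C11 sketch of that idiom, with a single uint64_t bitmap standing in for V8's Bucket of cells; it illustrates the pattern, not V8's actual code.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define SLOTS_PER_BUCKET 64             /* one uint64_t bitmap per bucket in this sketch */
    #define NUM_BUCKETS      128

    typedef _Atomic uint64_t Bucket;        /* bit i set => slot i is recorded */

    static _Atomic(Bucket *) buckets[NUM_BUCKETS];

    /* Record a slot: lazily allocate its bucket on first use and install it
     * with a compare-and-swap, so a concurrent inserter's allocation is freed
     * instead of overwriting ours. */
    void slot_set_insert(size_t slot_index)
    {
        size_t bucket_index = slot_index / SLOTS_PER_BUCKET;
        size_t bit = slot_index % SLOTS_PER_BUCKET;

        Bucket *bucket = atomic_load(&buckets[bucket_index]);
        if (bucket == NULL) {
            Bucket *fresh = calloc(1, sizeof(*fresh));
            if (fresh == NULL)
                return;                     /* allocation failure: drop the record in this sketch */
            Bucket *expected = NULL;
            if (atomic_compare_exchange_strong(&buckets[bucket_index], &expected, fresh)) {
                bucket = fresh;             /* we won the race */
            } else {
                free(fresh);                /* someone else installed a bucket first */
                bucket = expected;
            }
        }
        atomic_fetch_or(bucket, UINT64_C(1) << bit);
    }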
/base/telephony/call_manager/services/call/call_state_observer/src/call_records_handler.cpp
     46  DataShare::DataShareValuesBucket bucket;  (AddCallLogInfo)
     48  MakeCallLogInsertBucket(bucket, info, displayName, numberLocation);  (AddCallLogInfo)
     49  bool ret = callDataPtr_->Insert(bucket);  (AddCallLogInfo)
     71  void CallRecordsHandler::MakeCallLogInsertBucket(DataShare::DataShareValuesBucket &bucket, ...  (MakeCallLogInsertBucket)
     74  bucket.Put(CALL_PHONE_NUMBER, std::string(info.phoneNumber));  (MakeCallLogInsertBucket)
     75  bucket.Put(CALL_DISPLAY_NAME, displayName);  (MakeCallLogInsertBucket)
     76  bucket.Put(CALL_DIRECTION, static_cast<int32_t>(info.directionType));  (MakeCallLogInsertBucket)
     77  bucket.Put(CALL_VOICEMAIL_URI, std::string(""));  (MakeCallLogInsertBucket)
     78  bucket.Put(CALL_SIM_TYPE, 0);  (MakeCallLogInsertBucket)
     79  bucket ...  (MakeCallLogInsertBucket)
    170  DataShare::DataShareValuesBucket bucket;  (RemoveMissedIncomingCallNotification)
    ...  (remaining matches truncated)
/third_party/node/deps/cares/src/lib/ares__htable_strvp.c
     60  static const void *bucket_key(const void *bucket)  (bucket_key)
     62  const ares__htable_strvp_bucket_t *arg = bucket;  (bucket_key)
     66  static void bucket_free(void *bucket)  (bucket_free)
     68  ares__htable_strvp_bucket_t *arg = bucket;  (bucket_free)
    118  ares__htable_strvp_bucket_t *bucket = NULL;  (ares__htable_strvp_insert)
    124  bucket = ares_malloc(sizeof(*bucket));  (ares__htable_strvp_insert)
    125  if (bucket == NULL) {  (ares__htable_strvp_insert)
    129  bucket->parent = htable;  (ares__htable_strvp_insert)
    130  bucket ...  (ares__htable_strvp_insert)
    153  ares__htable_strvp_bucket_t *bucket = NULL;  (ares__htable_strvp_get)
    ...  (remaining matches truncated)
/third_party/node/deps/cares/src/lib/ares__htable_szvp.c
     60  static const void *bucket_key(const void *bucket)  (bucket_key)
     62  const ares__htable_szvp_bucket_t *arg = bucket;  (bucket_key)
     66  static void bucket_free(void *bucket)  (bucket_free)
     68  ares__htable_szvp_bucket_t *arg = bucket;  (bucket_free)
    118  ares__htable_szvp_bucket_t *bucket = NULL;  (ares__htable_szvp_insert)
    124  bucket = ares_malloc(sizeof(*bucket));  (ares__htable_szvp_insert)
    125  if (bucket == NULL) {  (ares__htable_szvp_insert)
    129  bucket->parent = htable;  (ares__htable_szvp_insert)
    130  bucket ...  (ares__htable_szvp_insert)
    149  ares__htable_szvp_bucket_t *bucket = NULL;  (ares__htable_szvp_get)
    ...  (remaining matches truncated)
/third_party/node/deps/cares/src/lib/ares__htable_asvp.c
     60  static const void *bucket_key(const void *bucket)  (bucket_key)
     62  const ares__htable_asvp_bucket_t *arg = bucket;  (bucket_key)
     66  static void bucket_free(void *bucket)  (bucket_free)
     68  ares__htable_asvp_bucket_t *arg = bucket;  (bucket_free)
    152  ares__htable_asvp_bucket_t *bucket = NULL;  (ares__htable_asvp_insert)
    158  bucket = ares_malloc(sizeof(*bucket));  (ares__htable_asvp_insert)
    159  if (bucket == NULL) {  (ares__htable_asvp_insert)
    163  bucket->parent = htable;  (ares__htable_asvp_insert)
    164  bucket ...  (ares__htable_asvp_insert)
    183  ares__htable_asvp_bucket_t *bucket = NULL;  (ares__htable_asvp_get)
    ...  (remaining matches truncated)
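All three c-ares wrappers follow one pattern: each typed table allocates a small bucket struct per entry and hands the generic hash table a pair of callbacks, bucket_key() to read the key out of a bucket and bucket_free() to destroy one. The sketch below reduces that arrangement to its skeleton; the struct layout and helper names are assumptions for illustration, not c-ares' internal API.

    #include <stdlib.h>

    /* The generic table only ever sees opaque bucket pointers... */
    typedef const void *(*bucket_key_cb)(const void *bucket);
    typedef void        (*bucket_free_cb)(void *bucket);

    struct htable {
        bucket_key_cb  key;
        bucket_free_cb free;
        /* chains, hashing and resize state elided */
    };

    /* ...while each typed wrapper supplies a concrete bucket layout. */
    typedef struct {
        struct htable *parent;
        char          *key;                 /* owned copy of the string key */
        void          *val;
    } strvp_bucket_t;

    static const void *strvp_bucket_key(const void *bucket)
    {
        const strvp_bucket_t *arg = bucket;
        return arg->key;
    }

    static void strvp_bucket_free(void *bucket)
    {
        strvp_bucket_t *arg = bucket;
        free(arg->key);
        free(arg);
    }

    struct htable *strvp_table_create(void)
    {
        struct htable *h = calloc(1, sizeof(*h));
        if (h != NULL) {
            h->key  = strvp_bucket_key;
            h->free = strvp_bucket_free;
        }
        return h;
    }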
/third_party/mesa3d/src/gallium/drivers/nouveau/nouveau_mm.c
     38  struct mm_bucket bucket[MM_NUM_BUCKETS];  (member)
     99  return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];  (mm_bucket_by_order)
    123  mm_slab_new(struct nouveau_mman *cache, struct mm_bucket *bucket, int chunk_order)  (mm_slab_new)
    129  simple_mtx_assert_locked(&bucket->lock);  (mm_slab_new)
    155  assert(bucket == mm_bucket_by_order(cache, chunk_order));  (mm_slab_new)
    156  list_add(&slab->head, &bucket->free);  (mm_slab_new)
    172  struct mm_bucket *bucket;  (nouveau_mm_allocate)
    177  bucket = mm_bucket_by_size(cache, size);  (nouveau_mm_allocate)
    178  if (!bucket) {  (nouveau_mm_allocate)
    193  simple_mtx_lock(&bucket ...  (nouveau_mm_allocate)
    226  struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);  (nouveau_mm_free)
    ...  (remaining matches truncated)
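mm_bucket_by_order() indexes the bucket array by allocation order (the base-2 logarithm of the size), clamped to a minimum order so every request smaller than the smallest chunk shares bucket 0. A short C sketch of that size-to-bucket mapping follows; the order limits and helper names are assumptions, not nouveau's actual constants.

    #include <stddef.h>

    #define MM_MIN_ORDER   7                 /* smallest chunk: 128 bytes */
    #define MM_MAX_ORDER  20                 /* largest chunk: 1 MiB */
    #define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

    struct mm_bucket {
        int num_free;                        /* slab lists, lock, ... elided */
    };

    static struct mm_bucket buckets[MM_NUM_BUCKETS];

    /* Smallest power-of-two exponent whose size covers `size`. */
    static int order_of(size_t size)
    {
        int order = 0;

        while (((size_t)1 << order) < size)
            order++;
        return order;
    }

    /* Clamp small requests into the first bucket; refuse oversized ones. */
    struct mm_bucket *bucket_by_size(size_t size)
    {
        int order = order_of(size);

        if (order > MM_MAX_ORDER)
            return NULL;                     /* too big for the sub-allocator */
        if (order < MM_MIN_ORDER)
            order = MM_MIN_ORDER;
        return &buckets[order - MM_MIN_ORDER];
    }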
/third_party/mesa3d/src/etnaviv/drm/etnaviv_bo_cache.c
     75  struct etna_bo_bucket *bucket = &cache->cache_bucket[i];  (etna_bo_cache_cleanup)
     78  while (!list_is_empty(&bucket->list)) {  (etna_bo_cache_cleanup)
     79  bo = list_entry(bucket->list.next, struct etna_bo, list);  (etna_bo_cache_cleanup)
     99  * way to the correct bucket size rather than looping..  (get_bucket)
    102  struct etna_bo_bucket *bucket = &cache->cache_bucket[i];  (get_bucket)
    103  if (bucket->size >= size) {  (get_bucket)
    104  return bucket;  (get_bucket)
    111  static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)  (find_in_bucket)
    117  if (list_is_empty(&bucket->list))  (find_in_bucket)
    120  LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket ...  (find_in_bucket)
    152  struct etna_bo_bucket *bucket;  (etna_bo_cache_alloc)
    174  struct etna_bo_bucket *bucket;  (etna_bo_cache_free)
    ...  (remaining matches truncated)
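get_bucket() walks an array of fixed bucket sizes and returns the first bucket whose size covers the request, so a freed BO parked in that bucket can be reused for any request that rounds up to the same size. Below is a hedged C sketch of the lookup and the reuse path, with a plain singly linked free list instead of the driver's list macros; the bucket sizes and names are illustrative, not the etnaviv code.

    #include <stdint.h>
    #include <stddef.h>

    struct cached_bo {
        uint32_t size;
        struct cached_bo *next;
    };

    struct bo_bucket {
        uint32_t size;                       /* every BO parked here is exactly this size */
        struct cached_bo *free_list;
    };

    #define NUM_BUCKETS 14

    static struct bo_bucket cache_bucket[NUM_BUCKETS] = {
        { 4096 },    { 8192 },    { 16384 },    { 32768 },    { 65536 },
        { 131072 },  { 262144 },  { 524288 },   { 1048576 },  { 2097152 },
        { 4194304 }, { 8388608 }, { 16777216 }, { 33554432 },
    };

    /* First bucket whose size covers the request, or NULL if the request is
     * too large to be worth caching. */
    static struct bo_bucket *get_bucket(uint32_t size)
    {
        for (int i = 0; i < NUM_BUCKETS; i++) {
            struct bo_bucket *bucket = &cache_bucket[i];
            if (bucket->size >= size)
                return bucket;
        }
        return NULL;
    }

    /* Try to reuse a cached BO; the caller falls back to a fresh allocation of
     * bucket->size (not the original request) when this returns NULL. */
    struct cached_bo *cache_alloc(uint32_t *size)
    {
        struct bo_bucket *bucket = get_bucket(*size);
        struct cached_bo *bo;

        if (!bucket)
            return NULL;
        *size = bucket->size;                /* round the request up to the bucket size */
        bo = bucket->free_list;
        if (bo)
            bucket->free_list = bo->next;
        return bo;
    }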
/third_party/libdrm/etnaviv/etnaviv_bo_cache.c
     78  struct etna_bo_bucket *bucket = &cache->cache_bucket[i];  (etna_bo_cache_cleanup)
     81  while (!LIST_IS_EMPTY(&bucket->list)) {  (etna_bo_cache_cleanup)
     82  bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);  (etna_bo_cache_cleanup)
    101  * way to the correct bucket size rather than looping..  (get_bucket)
    104  struct etna_bo_bucket *bucket = &cache->cache_bucket[i];  (get_bucket)
    105  if (bucket->size >= size) {  (get_bucket)
    106  return bucket;  (get_bucket)
    121  static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)  (find_in_bucket)
    127  if (LIST_IS_EMPTY(&bucket->list))  (find_in_bucket)
    130  LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket ...  (find_in_bucket)
    162  struct etna_bo_bucket *bucket;  (etna_bo_cache_alloc)
    183  struct etna_bo_bucket *bucket = get_bucket(cache, bo->size);  (etna_bo_cache_free)
    ...  (remaining matches truncated)
/third_party/mesa3d/src/freedreno/drm/freedreno_bo_cache.c
     46  * @coarse: if true, only power-of-two bucket sizes, otherwise ...
     90  struct fd_bo_bucket *bucket = &cache->cache_bucket[i];  (fd_bo_cache_cleanup)
     93  while (!list_is_empty(&bucket->list)) {  (fd_bo_cache_cleanup)
     94  bo = list_entry(bucket->list.next, struct fd_bo, list);  (fd_bo_cache_cleanup)
    115  * way to the correct bucket size rather than looping..  (get_bucket)
    118  struct fd_bo_bucket *bucket = &cache->cache_bucket[i];  (get_bucket)
    119  if (bucket->size >= size) {  (get_bucket)
    120  return bucket;  (get_bucket)
    128  find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)  (find_in_bucket)
    140  list_for_each_entry (struct fd_bo, entry, &bucket ...  (find_in_bucket)
    159  struct fd_bo_bucket *bucket;  (fd_bo_cache_alloc)
    195  struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);  (fd_bo_cache_free)
    ...  (remaining matches truncated)
/third_party/node/tools/inspector_protocol/jinja2/bccache.py
     76  """Resets the bucket (unloads the bytecode)."""
    101  raise TypeError('can\'t write empty bucket')
    131  def load_bytecode(self, bucket):
    132  filename = path.join(self.directory, bucket.key)
    135  bucket.load_bytecode(f)
    137  def dump_bytecode(self, bucket):
    138  filename = path.join(self.directory, bucket.key)
    140  bucket.write_bytecode(f)
    146  def load_bytecode(self, bucket):
    148  bucket ...
    ...  (remaining matches truncated)
/third_party/mesa3d/src/virtio/vulkan/vn_renderer_internal.c
     25  struct vn_renderer_shmem_bucket *bucket = &cache->buckets[i];  (vn_renderer_shmem_cache_init)
     26  list_inithead(&bucket->shmems);  (vn_renderer_shmem_cache_init)
     40  struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];  (vn_renderer_shmem_cache_fini)
     43  &bucket->shmems, cache_head)  (vn_renderer_shmem_cache_fini)
     74  struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];  (vn_renderer_shmem_cache_remove_expired_locked)
     76  assert(!list_is_empty(&bucket->shmems));  (vn_renderer_shmem_cache_remove_expired_locked)
     78  &bucket->shmems, struct vn_renderer_shmem, cache_head);  (vn_renderer_shmem_cache_remove_expired_locked)
     82  &bucket->shmems, cache_head) {  (vn_renderer_shmem_cache_remove_expired_locked)
    100  struct vn_renderer_shmem_bucket *bucket = ...  (vn_renderer_shmem_cache_add)
    102  if (!bucket)  (vn_renderer_shmem_cache_add)
    125  struct vn_renderer_shmem_bucket *bucket = choose_bucket(cache, size, &idx);  (vn_renderer_shmem_cache_get)
    170  const struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];  (vn_renderer_shmem_cache_debug_dump)
    ...  (remaining matches truncated)
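vn_renderer_shmem_cache_remove_expired_locked() walks a bucket's list and releases entries that have sat in the cache beyond some age. A minimal C sketch of that age-based trimming follows, assuming each cached entry records when it was parked and the list is kept oldest-first; the TTL and field names are invented for illustration.

    #include <stdlib.h>
    #include <time.h>

    #define CACHE_TTL_SECONDS 2

    struct cached_shmem {
        struct cached_shmem *next;
        time_t cached_at;                    /* when the entry was parked in the bucket */
    };

    struct shmem_bucket {
        struct cached_shmem *head;           /* oldest first; new entries are appended at the tail */
        struct cached_shmem *tail;
    };

    /* Drop entries that have been cached longer than the TTL.  Because the
     * list is ordered oldest-first, we can stop at the first young entry. */
    void bucket_remove_expired(struct shmem_bucket *bucket, time_t now)
    {
        while (bucket->head && now - bucket->head->cached_at > CACHE_TTL_SECONDS) {
            struct cached_shmem *victim = bucket->head;

            bucket->head = victim->next;
            if (!bucket->head)
                bucket->tail = NULL;
            free(victim);                    /* stands in for releasing the shmem */
        }
    }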
/kernel/linux/linux-5.10/net/ceph/crush/mapper.c
     58  * bucket choose methods
     60  * For each bucket algorithm, we have a "choose" method that, given a ...
     62  * will produce an item in the bucket.
     66  * Choose based on a random permutation of the bucket.
     70  * calculate an actual random permutation of the bucket members.
     74  static int bucket_perm_choose(const struct crush_bucket *bucket, ...  (bucket_perm_choose)
     78  unsigned int pr = r % bucket->size;  (bucket_perm_choose)
     83  dprintk("bucket %d new x=%d\n", bucket->id, x);  (bucket_perm_choose)
     88  s = crush_hash32_3(bucket ...  (bucket_perm_choose)
    135  bucket_uniform_choose(const struct crush_bucket_uniform *bucket, struct crush_work_bucket *work, int x, int r)
    142  bucket_list_choose(const struct crush_bucket_list *bucket, int x, int r)
    196  bucket_tree_choose(const struct crush_bucket_tree *bucket, int x, int r)
    228  bucket_straw_choose(const struct crush_bucket_straw *bucket, int x, int r)
    305  get_choose_arg_weights(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg, int position)
    317  get_choose_arg_ids(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg)
    326  bucket_straw2_choose(const struct crush_bucket_straw2 *bucket, int x, int r, const struct crush_choose_arg *arg, int position)
    449  crush_choose_firstn(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, int out_size, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, unsigned int stable, int *out2, int parent_r, const struct crush_choose_arg *choose_args)
    643  crush_choose_indep(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r, const struct crush_choose_arg *choose_args)
    ...  (remaining matches truncated)
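Every *_choose() variant answers the same question: given an input x and replica rank r, deterministically pick one item from the bucket. The straw2 flavour does this by drawing a pseudo-random value per item from (x, item, r), scoring it as ln(u)/weight, and keeping the maximum, which selects each item with probability proportional to its weight while staying stable across calls. Below is a simplified floating-point sketch of that idea; CRUSH itself uses crush_hash32_3() and fixed-point arithmetic, and the mixing hash here is only an illustrative stand-in.

    #include <stdint.h>
    #include <math.h>

    /* Small deterministic mixer standing in for crush_hash32_3(). */
    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = a * 0x9e3779b1u;

        h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
        h ^= c + 0xc2b2ae35u + (h << 6) + (h >> 2);
        h *= 0x27d4eb2fu;
        return h ^ (h >> 15);
    }

    /* Straw2-style choose: each item draws u in (0, 1] from (x, item, r),
     * scores it as ln(u) / weight, and the highest score wins.  The result
     * is deterministic in (x, r) and, over many inputs, each item is chosen
     * with probability proportional to its (positive) weight. */
    int straw2_choose(const int *items, const double *weights,
                      int size, int x, int r)
    {
        int best = -1;
        double best_score = -INFINITY;

        for (int i = 0; i < size; i++) {
            double u = (mix3((uint32_t)x, (uint32_t)items[i], (uint32_t)r) + 1.0)
                       / 4294967296.0;
            double score = log(u) / weights[i];
            if (score > best_score) {
                best_score = score;
                best = i;
            }
        }
        return best;                         /* chosen index, or -1 for an empty bucket */
    }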
/kernel/linux/linux-6.6/net/ceph/crush/mapper.c
     58  * bucket choose methods
     60  * For each bucket algorithm, we have a "choose" method that, given a ...
     62  * will produce an item in the bucket.
     66  * Choose based on a random permutation of the bucket.
     70  * calculate an actual random permutation of the bucket members.
     74  static int bucket_perm_choose(const struct crush_bucket *bucket, ...  (bucket_perm_choose)
     78  unsigned int pr = r % bucket->size;  (bucket_perm_choose)
     83  dprintk("bucket %d new x=%d\n", bucket->id, x);  (bucket_perm_choose)
     88  s = crush_hash32_3(bucket ...  (bucket_perm_choose)
    135  bucket_uniform_choose(const struct crush_bucket_uniform *bucket, struct crush_work_bucket *work, int x, int r)
    142  bucket_list_choose(const struct crush_bucket_list *bucket, int x, int r)
    196  bucket_tree_choose(const struct crush_bucket_tree *bucket, int x, int r)
    228  bucket_straw_choose(const struct crush_bucket_straw *bucket, int x, int r)
    305  get_choose_arg_weights(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg, int position)
    317  get_choose_arg_ids(const struct crush_bucket_straw2 *bucket, const struct crush_choose_arg *arg)
    326  bucket_straw2_choose(const struct crush_bucket_straw2 *bucket, int x, int r, const struct crush_choose_arg *arg, int position)
    449  crush_choose_firstn(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, int out_size, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, unsigned int stable, int *out2, int parent_r, const struct crush_choose_arg *choose_args)
    643  crush_choose_indep(const struct crush_map *map, struct crush_work *work, const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r, const struct crush_choose_arg *choose_args)
    ...  (remaining matches truncated)
/third_party/node/deps/v8/third_party/jinja2/bccache.py
     53  """Resets the bucket (unloads the bytecode)."""
     78  raise TypeError("can't write empty bucket")
    108  def load_bytecode(self, bucket):
    109  filename = path.join(self.directory, bucket.key)
    112  bucket.load_bytecode(f)
    114  def dump_bytecode(self, bucket):
    115  filename = path.join(self.directory, bucket.key)
    117  bucket.write_bytecode(f)
    123  def load_bytecode(self, bucket):
    125  bucket ...
    ...  (remaining matches truncated)
/third_party/skia/third_party/externals/jinja2/bccache.py
     53  """Resets the bucket (unloads the bytecode)."""
     78  raise TypeError("can't write empty bucket")
    108  def load_bytecode(self, bucket):
    109  filename = path.join(self.directory, bucket.key)
    112  bucket.load_bytecode(f)
    114  def dump_bytecode(self, bucket):
    115  filename = path.join(self.directory, bucket.key)
    117  bucket.write_bytecode(f)
    123  def load_bytecode(self, bucket):
    125  bucket ...
    ...  (remaining matches truncated)
/third_party/libdrm/freedreno/freedreno_bo_cache.c
     48  * @coarse: if true, only power-of-two bucket sizes, otherwise ...
     90  struct fd_bo_bucket *bucket = &cache->cache_bucket[i];  (fd_bo_cache_cleanup)
     93  while (!LIST_IS_EMPTY(&bucket->list)) {  (fd_bo_cache_cleanup)
     94  bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);  (fd_bo_cache_cleanup)
    114  * way to the correct bucket size rather than looping..  (get_bucket)
    117  struct fd_bo_bucket *bucket = &cache->cache_bucket[i];  (get_bucket)
    118  if (bucket->size >= size) {  (get_bucket)
    119  return bucket;  (get_bucket)
    134  static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)  (find_in_bucket)
    146  if (!LIST_IS_EMPTY(&bucket ...  (find_in_bucket)
    165  struct fd_bo_bucket *bucket;  (fd_bo_cache_alloc)
    196  struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);  (fd_bo_cache_free)
    ...  (remaining matches truncated)
/kernel/linux/linux-5.10/block/blk-stat.c
     56  int bucket, cpu;  (blk_stat_add)
     69  bucket = cb->bucket_fn(rq);  (blk_stat_add)
     70  if (bucket < 0)  (blk_stat_add)
     73  stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];  (blk_stat_add)
     83  unsigned int bucket;  (blk_stat_timer_fn)
     86  for (bucket = 0; bucket < cb->buckets; bucket++)  (blk_stat_timer_fn)
     87  blk_rq_stat_init(&cb->stat[bucket]);  (blk_stat_timer_fn)
     93  for (bucket ...  (blk_stat_timer_fn)
    139  unsigned int bucket;  (blk_stat_add_callback)
    ...  (remaining matches truncated)
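blk_stat_add() lets the callback owner classify each request through bucket_fn() (a negative return means "do not account this request") and then folds the sample into that bucket's accumulator. The sketch below shows the classify-then-accumulate pattern single-threaded for clarity; the kernel keeps per-CPU accumulators and merges them from a timer callback, and the struct and function names here are illustrative.

    #include <stdint.h>

    struct rq_stat {
        uint64_t nr_samples;
        uint64_t sum;                        /* for the mean */
        uint64_t min, max;
    };

    struct stat_callback {
        int (*bucket_fn)(uint64_t value);    /* < 0 means: do not account this sample */
        unsigned int buckets;
        struct rq_stat *stat;                /* one accumulator per bucket */
    };

    void rq_stat_init(struct rq_stat *s)
    {
        s->nr_samples = 0;
        s->sum = 0;
        s->min = UINT64_MAX;
        s->max = 0;
    }

    /* Classify the sample, then fold it into its bucket's accumulator.
     * Like the kernel code, this trusts bucket_fn() to return an index
     * below cb->buckets. */
    void stat_add(struct stat_callback *cb, uint64_t value)
    {
        int bucket = cb->bucket_fn(value);
        struct rq_stat *stat;

        if (bucket < 0)
            return;

        stat = &cb->stat[bucket];
        stat->nr_samples++;
        stat->sum += value;
        if (value < stat->min)
            stat->min = value;
        if (value > stat->max)
            stat->max = value;
    }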
/kernel/linux/linux-6.6/block/blk-stat.c
     55  int bucket, cpu;  (blk_stat_add)
     69  bucket = cb->bucket_fn(rq);  (blk_stat_add)
     70  if (bucket < 0)  (blk_stat_add)
     73  stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];  (blk_stat_add)
     83  unsigned int bucket;  (blk_stat_timer_fn)
     86  for (bucket = 0; bucket < cb->buckets; bucket++)  (blk_stat_timer_fn)
     87  blk_rq_stat_init(&cb->stat[bucket]);  (blk_stat_timer_fn)
     93  for (bucket ...  (blk_stat_timer_fn)
    139  unsigned int bucket;  (blk_stat_add_callback)
    ...  (remaining matches truncated)
/base/hiviewdfx/hiappevent/frameworks/native/libhiappevent/cache/app_event_dao.cpp
     81  NativeRdb::ValuesBucket bucket;  (Insert)
     82  bucket.PutString(Events::FIELD_DOMAIN, event->GetDomain());  (Insert)
     83  bucket.PutString(Events::FIELD_NAME, event->GetName());  (Insert)
     84  bucket.PutInt(Events::FIELD_TYPE, event->GetType());  (Insert)
     85  bucket.PutLong(Events::FIELD_TIME, event->GetTime());  (Insert)
     86  bucket.PutString(Events::FIELD_TZ, event->GetTimeZone());  (Insert)
     87  bucket.PutInt(Events::FIELD_PID, event->GetPid());  (Insert)
     88  bucket.PutInt(Events::FIELD_TID, event->GetTid());  (Insert)
     89  bucket.PutLong(Events::FIELD_TRACE_ID, event->GetTraceId());  (Insert)
     90  bucket ...  (Insert)
    ...  (remaining matches truncated)
/kernel/linux/linux-5.10/drivers/interconnect/qcom/bcm-voter.c
     71  size_t i, bucket;  (bcm_aggregate)
     76  for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {  (bcm_aggregate)
     79  temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,  (bcm_aggregate)
     81  agg_avg[bucket] = max(agg_avg[bucket], temp);  (bcm_aggregate)
     83  temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,  (bcm_aggregate)
     85  agg_peak[bucket] = max(agg_peak[bucket], tem ...  (bcm_aggregate)
    138  tcs_list_gen(struct bcm_voter *voter, int bucket, struct tcs_cmd tcs_list[MAX_VCD], int n[MAX_VCD + 1])  (tcs_list_gen)
    ...  (remaining matches truncated)
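bcm_aggregate() folds every node's per-bucket average and peak votes into one value per bucket by scaling each contribution and keeping the per-bucket maximum. A small C sketch of that aggregation follows; the bucket count, the rounded-up division helper, and the width/unit parameters are assumptions standing in for the driver's bcm_div() and aux_data fields.

    #include <stdint.h>
    #include <stddef.h>

    #define NUM_BUCKETS 3                    /* e.g. separate vote buckets such as active vs. sleep */

    struct node_votes {
        uint64_t sum_avg[NUM_BUCKETS];
        uint64_t max_peak[NUM_BUCKETS];
    };

    /* Rounded-up division, standing in for the driver's bcm_div() helper. */
    static uint64_t div_round_up(uint64_t num, uint32_t base)
    {
        return (num + base - 1) / base;      /* base must be nonzero */
    }

    /* Scale each node's per-bucket votes by width/unit and keep the maximum
     * per bucket across all nodes. */
    void aggregate(const struct node_votes *nodes, size_t num_nodes,
                   uint32_t width, uint32_t unit,
                   uint64_t agg_avg[NUM_BUCKETS], uint64_t agg_peak[NUM_BUCKETS])
    {
        for (size_t bucket = 0; bucket < NUM_BUCKETS; bucket++) {
            agg_avg[bucket] = 0;
            agg_peak[bucket] = 0;

            for (size_t i = 0; i < num_nodes; i++) {
                uint64_t temp;

                temp = div_round_up(nodes[i].sum_avg[bucket] * width, unit);
                if (temp > agg_avg[bucket])
                    agg_avg[bucket] = temp;

                temp = div_round_up(nodes[i].max_peak[bucket] * width, unit);
                if (temp > agg_peak[bucket])
                    agg_peak[bucket] = temp;
            }
        }
    }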