Lines Matching defs:htab (kernel/bpf/hashtab.c)

104 /* each htab element is struct htab_elem + key + value */
111 struct bpf_htab *htab;
125 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
127 return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
130 static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
132 return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
135 static void htab_init_buckets(struct bpf_htab *htab)
139 for (i = 0; i < htab->n_buckets; i++) {
140 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
141 if (htab_use_raw_lock(htab))
142 raw_spin_lock_init(&htab->buckets[i].raw_lock);
144 spin_lock_init(&htab->buckets[i].lock);
148 static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
153 if (htab_use_raw_lock(htab))
160 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
164 if (htab_use_raw_lock(htab))
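On PREEMPT_RT, spinlock_t is a sleeping lock, so only tables that never allocate under the bucket lock (preallocated ones) can keep using a raw spinlock; everything else falls back to spinlock_t, which is why struct bucket carries both locks (lines 141-144). A sketch of how the elided lock/unlock bodies at lines 148-167 plausibly pair up, reconstructed from the branches shown above rather than quoted verbatim:

/* sketch: the branch choice mirrors htab_use_raw_lock() at line 130 */
static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
					     struct bucket *b)
{
	unsigned long flags;

	if (htab_use_raw_lock(htab))
		raw_spin_lock_irqsave(&b->raw_lock, flags);
	else
		spin_lock_irqsave(&b->lock, flags);
	return flags;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b, unsigned long flags)
{
	if (htab_use_raw_lock(htab))
		raw_spin_unlock_irqrestore(&b->raw_lock, flags);
	else
		spin_unlock_irqrestore(&b->lock, flags);
}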
172 static bool htab_is_lru(const struct bpf_htab *htab)
174 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
175 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
178 static bool htab_is_percpu(const struct bpf_htab *htab)
180 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
181 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
200 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
202 return (struct htab_elem *) (htab->elems + i * htab->elem_size);
205 static void htab_free_elems(struct bpf_htab *htab)
209 if (!htab_is_percpu(htab))
212 for (i = 0; i < htab->map.max_entries; i++) {
215 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
216 htab->map.key_size);
221 bpf_map_area_free(htab->elems);
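get_htab_elem() works because prealloc_init() places all max_entries elements in one flat bpf_map_area_alloc() region, element i starting i * elem_size bytes in. A minimal user-space model of that indexing (names and sizes here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct model_elem {
	unsigned int hash;
	char key_and_value[];	/* key (rounded to 8) then value */
};

int main(void)
{
	size_t elem_size = 64, max_entries = 4;	/* assumed sizes */
	char *elems = calloc(max_entries, elem_size);

	for (size_t i = 0; i < max_entries; i++) {
		struct model_elem *l =
			(struct model_elem *)(elems + i * elem_size);
		printf("elem %zu at offset %td\n", i, (char *)l - elems);
	}
	free(elems);
	return 0;
}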
224 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
235 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
238 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
243 memcpy(l->key, key, htab->map.key_size);
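bpf_lru_pop_free() may evict an in-use node to satisfy the request, which is what lets LRU maps keep accepting inserts when full. The elided middle of prealloc_lru_pop() (lines 235-243) plausibly just converts the node back into its containing element and stamps the key, roughly:

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}
	return NULL;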
250 static int prealloc_init(struct bpf_htab *htab)
252 u32 num_entries = htab->map.max_entries;
255 if (!htab_is_percpu(htab) && !htab_is_lru(htab))
258 htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
259 htab->map.numa_node);
260 if (!htab->elems)
263 if (!htab_is_percpu(htab))
267 u32 size = round_up(htab->map.value_size, 8);
273 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
279 if (htab_is_lru(htab))
280 err = bpf_lru_init(&htab->lru,
281 htab->map.map_flags & BPF_F_NO_COMMON_LRU,
285 htab);
287 err = pcpu_freelist_init(&htab->freelist);
292 if (htab_is_lru(htab))
293 bpf_lru_populate(&htab->lru, htab->elems,
295 htab->elem_size, num_entries);
297 pcpu_freelist_populate(&htab->freelist,
298 htab->elems + offsetof(struct htab_elem, fnode),
299 htab->elem_size, num_entries);
304 htab_free_elems(htab);
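Once the flat element array exists, prealloc_init() hands every element either to the LRU subsystem or to the per-CPU freelist; lines 292-299 show that split with the trailing arguments truncated by the match. Filled in from context (a reconstruction, not a verbatim quote), the shape is:

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems +
				       offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

Both callees only need the stride and where their embedded node lives inside struct htab_elem.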
308 static void prealloc_destroy(struct bpf_htab *htab)
310 htab_free_elems(htab);
312 if (htab_is_lru(htab))
313 bpf_lru_destroy(&htab->lru);
315 pcpu_freelist_destroy(&htab->freelist);
318 static int alloc_extra_elems(struct bpf_htab *htab)
330 l = pcpu_freelist_pop(&htab->freelist);
337 htab->extra_elems = pptr;
358 BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
424 struct bpf_htab *htab;
428 htab = kzalloc(sizeof(*htab), GFP_USER);
429 if (!htab)
432 bpf_map_init_from_attr(&htab->map, attr);
439 htab->map.max_entries = roundup(attr->max_entries,
441 if (htab->map.max_entries < attr->max_entries)
442 htab->map.max_entries = rounddown(attr->max_entries,
450 if (htab->map.max_entries > 1UL << 31)
453 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
455 htab->elem_size = sizeof(struct htab_elem) +
456 round_up(htab->map.key_size, 8);
458 htab->elem_size += sizeof(void *);
460 htab->elem_size += round_up(htab->map.value_size, 8);
463 if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
466 cost = (u64) htab->n_buckets * sizeof(struct bucket) +
467 (u64) htab->elem_size * htab->map.max_entries;
470 cost += (u64) round_up(htab->map.value_size, 8) *
471 num_possible_cpus() * htab->map.max_entries;
473 cost += (u64) htab->elem_size * num_possible_cpus();
476 err = bpf_map_charge_init(&htab->map.memory, cost);
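Lines 455-473 size one element and then the total charge: elem_size is struct htab_elem plus the key rounded up to 8, plus either a percpu pointer or the rounded value; cost adds the bucket array, all elements, per-CPU value storage for percpu maps, and the per-CPU extra_elems. A worked example with assumed sizes (the 48-byte header, 16-byte bucket, and 8 CPUs are made up for illustration):

#include <stdio.h>

static unsigned long long rup8(unsigned long long x)
{
	return (x + 7) & ~7ULL;		/* round_up(x, 8) */
}

int main(void)
{
	unsigned long long hdr = 48, bucket_sz = 16, ncpus = 8; /* assumed */
	unsigned long long key_size = 4, value_size = 8;
	unsigned long long max_entries = 1024, n_buckets = 1024;
	int percpu = 1;

	unsigned long long elem_size = hdr + rup8(key_size) +
		(percpu ? sizeof(void *) : rup8(value_size));
	unsigned long long cost = n_buckets * bucket_sz +
				  elem_size * max_entries;

	if (percpu)	/* per-CPU copies of every value */
		cost += rup8(value_size) * ncpus * max_entries;
	cost += elem_size * ncpus;	/* extra_elems, one per CPU */

	printf("elem_size=%llu cost=%llu\n", elem_size, cost);
	return 0;
}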
481 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
483 htab->map.numa_node);
484 if (!htab->buckets)
487 if (htab->map.map_flags & BPF_F_ZERO_SEED)
488 htab->hashrnd = 0;
490 htab->hashrnd = get_random_int();
492 htab_init_buckets(htab);
495 err = prealloc_init(htab);
503 err = alloc_extra_elems(htab);
509 return &htab->map;
512 prealloc_destroy(htab);
514 bpf_map_area_free(htab->buckets);
516 bpf_map_charge_finish(&htab->map.memory);
518 kfree(htab);
527 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
529 return &htab->buckets[hash & (htab->n_buckets - 1)];
532 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
534 return &__select_bucket(htab, hash)->head;
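Both helpers rely on n_buckets being a power of two (line 453): masking with n_buckets - 1 is then an exact modulo, one AND instead of a division. A quick user-space check (roundup_pow_of_two here is a stand-in for the kernel macro):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two(uint32_t n)	/* stand-in */
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t n_buckets = roundup_pow_of_two(1000);	/* -> 1024 */
	uint32_t hash;

	for (hash = 0; hash < 100000; hash += 7)
		assert((hash & (n_buckets - 1)) == hash % n_buckets);
	printf("n_buckets=%u, mask == mod\n", n_buckets);
	return 0;
}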
580 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
589 hash = htab_map_hash(key, key_size, htab->hashrnd);
591 head = select_bucket(htab, hash);
593 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
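Lookups run locklessly under RCU, so an element can be freed and reinserted into a different bucket while a reader is mid-walk. That is why each bucket list was initialized with its own index as the nulls value (line 140): if the terminating nulls value names a different bucket, the walk migrated and must restart. A sketch of that retry inside lookup_nulls_elem_raw() (reconstructed, not verbatim):

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	/* ended on a list whose nulls value isn't our bucket index:
	 * we were moved mid-walk, retry from the correct head
	 */
	if (get_nulls_value(n) != (hash & (n_buckets - 1)))
		goto again;
	return NULL;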
684 * older elements from the htab.
688 struct bpf_htab *htab = (struct bpf_htab *)arg;
696 b = __select_bucket(htab, tgt_l->hash);
699 flags = htab_lock_bucket(htab, b);
707 htab_unlock_bucket(htab, b, flags);
715 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
728 hash = htab_map_hash(key, key_size, htab->hashrnd);
730 head = select_bucket(htab, hash);
733 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
749 i = hash & (htab->n_buckets - 1);
754 for (; i < htab->n_buckets; i++) {
755 head = select_bucket(htab, i);
771 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
773 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
774 free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
781 struct bpf_htab *htab = l->htab;
783 htab_elem_free(htab, l);
786 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
788 struct bpf_map *map = &htab->map;
797 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
799 htab_put_fd_value(htab, l);
801 if (htab_is_prealloc(htab)) {
802 __pcpu_freelist_push(&htab->freelist, &l->fnode);
804 atomic_dec(&htab->count);
805 l->htab = htab;
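Freeing is asymmetric with allocation: preallocated elements go back on the per-CPU freelist immediately, while kmalloc'd ones must wait out an RCU grace period, which is what the l->htab assignment at line 805 and the callback at lines 781-783 are for. The elided tail of free_htab_elem() plausibly reads:

	if (htab_is_prealloc(htab)) {
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;		/* stashed for the RCU callback */
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}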
810 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
815 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
817 u32 size = round_up(htab->map.value_size, 8);
828 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
837 if (htab_is_prealloc(htab) && !onallcpus) {
838 u32 size = round_up(htab->map.value_size, 8);
850 pcpu_copy_value(htab, pptr, value, onallcpus);
854 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
856 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
860 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
865 u32 size = htab->map.value_size;
866 bool prealloc = htab_is_prealloc(htab);
875 pl_new = this_cpu_ptr(htab->extra_elems);
877 htab_put_fd_value(htab, old_elem);
882 l = __pcpu_freelist_pop(&htab->freelist);
888 if (atomic_inc_return(&htab->count) > htab->map.max_entries)
898 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
899 htab->map.numa_node);
904 check_and_init_map_lock(&htab->map,
924 pcpu_init_value(htab, pptr, value, onallcpus);
928 } else if (fd_htab_map_needs_adjust(htab)) {
932 copy_map_value(&htab->map,
940 atomic_dec(&htab->count);
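alloc_htab_elem() draws from three sources depending on map flavor: replacing a key in a preallocated map swaps with this CPU's spare from extra_elems (so an in-place update can never fail for lack of memory), a fresh insert pops the freelist, and non-preallocated maps kmalloc under the count limit checked at line 888. A condensed sketch of that decision, with error unwinding and percpu value setup omitted:

	if (prealloc) {
		if (old_elem) {
			/* in-place update: reuse this CPU's spare element
			 * instead of a freelist pop/push round trip
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			fnode = __pcpu_freelist_pop(&htab->freelist);
			if (!fnode)
				return ERR_PTR(-E2BIG);
			l_new = container_of(fnode, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries &&
		    !old_elem)
			return ERR_PTR(-E2BIG);	/* count undone on this path */
		l_new = kmalloc_node(htab->elem_size,
				     GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
	}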
944 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
962 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
978 hash = htab_map_hash(key, key_size, htab->hashrnd);
980 b = __select_bucket(htab, hash);
988 htab->n_buckets);
989 ret = check_flags(htab, l_old, map_flags);
1005 flags = htab_lock_bucket(htab, b);
1009 ret = check_flags(htab, l_old, map_flags);
1027 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1041 if (!htab_is_prealloc(htab))
1042 free_htab_elem(htab, l_old);
1046 htab_unlock_bucket(htab, b, flags);
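From user space, these paths are driven through the bpf(2) syscall; a minimal libbpf round trip against a plain BPF_MAP_TYPE_HASH looks like this (assumes a recent libbpf with bpf_map_create() and the privileges to create maps):

#include <bpf/bpf.h>
#include <stdio.h>

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo",
				sizeof(__u32), sizeof(__u64), 128, &opts);
	__u32 key = 1;
	__u64 val = 42, out = 0;

	if (fd < 0)
		return 1;
	bpf_map_update_elem(fd, &key, &val, BPF_ANY); /* update, line 962 */
	bpf_map_lookup_elem(fd, &key, &out);	      /* lookup, line 580 */
	printf("key %u -> %llu\n", key, (unsigned long long)out);
	bpf_map_delete_elem(fd, &key);		      /* delete, line 1245 */
	return 0;
}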
1053 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1069 hash = htab_map_hash(key, key_size, htab->hashrnd);
1071 b = __select_bucket(htab, hash);
1076 * to remove older elements from htab and this removal
1079 l_new = prealloc_lru_pop(htab, key, hash);
1084 flags = htab_lock_bucket(htab, b);
1088 ret = check_flags(htab, l_old, map_flags);
1103 htab_unlock_bucket(htab, b, flags);
1106 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1108 bpf_lru_push_free(&htab->lru, &l_old->lru_node);
1117 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1133 hash = htab_map_hash(key, key_size, htab->hashrnd);
1135 b = __select_bucket(htab, hash);
1138 flags = htab_lock_bucket(htab, b);
1142 ret = check_flags(htab, l_old, map_flags);
1148 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1151 l_new = alloc_htab_elem(htab, key, value, key_size,
1161 htab_unlock_bucket(htab, b, flags);
1169 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1185 hash = htab_map_hash(key, key_size, htab->hashrnd);
1187 b = __select_bucket(htab, hash);
1192 * to remove older elem from htab and this removal
1196 l_new = prealloc_lru_pop(htab, key, hash);
1201 flags = htab_lock_bucket(htab, b);
1205 ret = check_flags(htab, l_old, map_flags);
1213 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1216 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1223 htab_unlock_bucket(htab, b, flags);
1225 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1245 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1257 hash = htab_map_hash(key, key_size, htab->hashrnd);
1258 b = __select_bucket(htab, hash);
1261 flags = htab_lock_bucket(htab, b);
1267 free_htab_elem(htab, l);
1271 htab_unlock_bucket(htab, b, flags);
1277 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1289 hash = htab_map_hash(key, key_size, htab->hashrnd);
1290 b = __select_bucket(htab, hash);
1293 flags = htab_lock_bucket(htab, b);
1302 htab_unlock_bucket(htab, b, flags);
1304 bpf_lru_push_free(&htab->lru, &l->lru_node);
1308 static void delete_all_elements(struct bpf_htab *htab)
1312 for (i = 0; i < htab->n_buckets; i++) {
1313 struct hlist_nulls_head *head = select_bucket(htab, i);
1319 htab_elem_free(htab, l);
1327 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1338 if (!htab_is_prealloc(htab))
1339 delete_all_elements(htab);
1341 prealloc_destroy(htab);
1343 free_percpu(htab->extra_elems);
1344 bpf_map_area_free(htab->buckets);
1345 kfree(htab);
1376 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1413 if (batch >= htab->n_buckets)
1416 key_size = htab->map.key_size;
1417 roundup_key_size = round_up(htab->map.key_size, 8);
1418 value_size = htab->map.value_size;
1445 b = &htab->buckets[batch];
1449 flags = htab_lock_bucket(htab, b);
1466 htab_unlock_bucket(htab, b, flags);
1477 htab_unlock_bucket(htab, b, flags);
1523 free_htab_elem(htab, l);
1530 htab_unlock_bucket(htab, b, flags);
1536 bpf_lru_push_free(&htab->lru, &l->lru_node);
1543 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1560 if (batch >= htab->n_buckets) {
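The walker above grabs one bucket lock at a time, copies out up to a caller-sized batch, and returns an opaque cursor (the next bucket index) so user space can resume; running past the last bucket ends the walk (line 1560). A hedged user-space sketch via libbpf's batch API (buffer sizes and the __u32 cursor are assumptions for a 4-byte-key, 8-byte-value map):

#include <bpf/bpf.h>
#include <errno.h>

int dump_all(int map_fd)
{
	__u32 keys[64];
	__u64 vals[64];
	__u32 count, next;
	void *in = NULL;	/* NULL cursor: start from bucket 0 */
	LIBBPF_OPTS(bpf_map_batch_opts, opts);
	int err;

	do {
		count = 64;	/* in: capacity, out: entries copied */
		err = bpf_map_lookup_batch(map_fd, in, &next,
					   keys, vals, &count, &opts);
		if (err && errno != ENOENT)
			return err;
		/* consume keys[0..count-1] / vals[0..count-1] here */
		in = &next;	/* resume from the returned cursor */
	} while (!err);		/* ENOENT: walked past the last bucket */
	return 0;
}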
1653 struct bpf_htab *htab;
1663 const struct bpf_htab *htab = info->htab;
1672 if (bucket_id >= htab->n_buckets)
1686 b = &htab->buckets[bucket_id++];
1691 for (i = bucket_id; i < htab->n_buckets; i++) {
1692 b = &htab->buckets[i];
1810 seq_info->htab = container_of(map, struct bpf_htab, map);
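The seq_info plumbing above is what backs bpf_iter for hash maps. The BPF-side consumer of that iterator might look like this (a sketch; assumes a bpftool-generated vmlinux.h and an iterator link created against the target map):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;		/* assumes 4-byte keys */
	__u64 *val = ctx->value;	/* assumes 8-byte values */

	if (!key || !val)	/* NULL on the post-iteration call */
		return 0;
	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

char _license[] SEC("license") = "GPL";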
1848 BATCH_OPS(htab),
1931 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1935 if (htab_is_lru(htab))
2018 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2024 for (i = 0; i < htab->n_buckets; i++) {
2025 head = select_bucket(htab, i);