Lines Matching refs:map

35 #define IS_FD_ARRAY(map)                                                                                               \
36 ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
37 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
38 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
39 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
40 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || IS_FD_HASH(map))
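The IS_FD_* classification above marks map types whose syscall-side value is a file descriptor rather than raw bytes. As a hedged, illustrative userspace sketch (not kernel code; set_inner_map and the fd names are made up), updating an ARRAY_OF_MAPS slot passes the inner map's fd as the value:

/* For the IS_FD_* map types, the "value" handed to BPF_MAP_UPDATE_ELEM
 * is a 32-bit file descriptor that the kernel resolves to the backing
 * object, e.g. an inner map fd for BPF_MAP_TYPE_ARRAY_OF_MAPS.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int set_inner_map(int outer_fd, __u32 slot, int inner_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = outer_fd;
        attr.key    = (__u64)(unsigned long)&slot;
        attr.value  = (__u64)(unsigned long)&inner_fd;  /* an fd, not map bytes */
        attr.flags  = BPF_ANY;

        return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}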
104 struct bpf_map *map;
125 map = ops->map_alloc(attr);
126 if (IS_ERR(map)) {
127 return map;
129 map->ops = ops;
130 map->map_type = type;
131 return map;
134 static u32 bpf_map_value_size(struct bpf_map *map)
136 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
137 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
138 return round_up(map->value_size, 8) * num_possible_cpus();
139 } else if (IS_FD_MAP(map)) {
142 return map->value_size;
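bpf_map_value_size() shows that per-CPU maps expose round_up(value_size, 8) bytes per possible CPU through the syscall interface. A minimal userspace sketch of the matching buffer allocation, assuming libbpf's libbpf_num_possible_cpus() is available (alloc_percpu_value is a made-up helper name):

/* A lookup buffer for a per-CPU map must cover one 8-byte-aligned value
 * slot per possible CPU, mirroring bpf_map_value_size() above.
 */
#include <stdlib.h>
#include <bpf/libbpf.h>

static void *alloc_percpu_value(size_t value_size)
{
        int ncpus = libbpf_num_possible_cpus();
        size_t slot = (value_size + 7) & ~(size_t)7;    /* round_up(value_size, 8) */

        return ncpus > 0 ? calloc(ncpus, slot) : NULL;
}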
146 static void maybe_wait_bpf_programs(struct bpf_map *map)
150 * that could be running use the new map value.
152 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
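The body of maybe_wait_bpf_programs() is elided by the match filter; judging from the comment at 150, it presumably only issues an RCU grace-period wait for the map-in-map types. A hedged reconstruction, not the verified source:

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
        /* Assumed body: wait so that programs which may still be using
         * the old inner map have finished before the syscall returns.
         */
        if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
            map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
                synchronize_rcu();
}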
157 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, void *value, __u64 flags)
162 if (bpf_map_is_dev_bound(map)) {
163 return bpf_map_offload_update_elem(map, key, value, flags);
164 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
165 return map->ops->map_update_elem(map, key, value, flags);
166 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || map->map_type == BPF_MAP_TYPE_SOCKMAP) {
167 return sock_map_update_elem_sys(map, key, value, flags);
168 } else if (IS_FD_PROG_ARRAY(map)) {
169 return bpf_fd_array_map_update_elem(map, f.file, key, value, flags);
173 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
174 err = bpf_percpu_hash_update(map, key, value, flags);
175 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
176 err = bpf_percpu_array_update(map, key, value, flags);
177 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
178 err = bpf_percpu_cgroup_storage_update(map, key, value, flags);
179 } else if (IS_FD_ARRAY(map)) {
181 err = bpf_fd_array_map_update_elem(map, f.file, key, value, flags);
183 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
185 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, flags);
187 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
189 err = bpf_fd_reuseport_array_update_elem(map, key, value, flags);
190 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || map->map_type == BPF_MAP_TYPE_STACK) {
191 err = map->ops->map_push_elem(map, value, flags);
194 err = map->ops->map_update_elem(map, key, value, flags);
198 maybe_wait_bpf_programs(map);
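bpf_map_update_value() is the kernel end of the BPF_MAP_UPDATE_ELEM command. A hedged userspace sketch using libbpf's bpf_map_update_elem() wrapper (upsert_counter and the key/value types are illustrative): BPF_NOEXIST only creates, BPF_EXIST only replaces, BPF_ANY does either.

/* Update an element through the syscall path dispatched above.
 * Failure is reported via a negative return / errno set by the kernel.
 */
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int upsert_counter(int map_fd, __u32 key, __u64 val)
{
        if (bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST) == 0)
                return 0;                               /* freshly created */
        return bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);
}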
203 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, __u64 flags)
208 if (bpf_map_is_dev_bound(map)) {
209 return bpf_map_offload_lookup_elem(map, key, value);
213 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
214 err = bpf_percpu_hash_copy(map, key, value);
215 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
216 err = bpf_percpu_array_copy(map, key, value);
217 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
218 err = bpf_percpu_cgroup_storage_copy(map, key, value);
219 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
220 err = bpf_stackmap_copy(map, key, value);
221 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
222 err = bpf_fd_array_map_lookup_elem(map, key, value);
223 } else if (IS_FD_HASH(map)) {
224 err = bpf_fd_htab_map_lookup_elem(map, key, value);
225 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
226 err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
227 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || map->map_type == BPF_MAP_TYPE_STACK) {
228 err = map->ops->map_peek_elem(map, value);
229 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
230 /* struct_ops map requires directly updating "value" */
231 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
234 if (map->ops->map_lookup_elem_sys_only) {
235 ptr = map->ops->map_lookup_elem_sys_only(map, key);
237 ptr = map->ops->map_lookup_elem(map, key);
247 copy_map_value_locked(map, value, ptr, true);
249 copy_map_value(map, value, ptr);
252 check_and_init_map_lock(map, value);
258 maybe_wait_bpf_programs(map);
317 /* Some map creation flags are not tied to the map object but
318 * rather to the map fd instead, so they have no meaning upon
319 * map object inspection since multiple file descriptors with
321 * this has zero meaning for the map itself, let's clear these
327 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
329 map->map_type = attr->map_type;
330 map->key_size = attr->key_size;
331 map->value_size = attr->value_size;
332 map->max_entries = attr->max_entries;
333 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
334 map->numa_node = bpf_map_attr_numa_node(attr);
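Per the comment at 317-321, bpf_map_flags_retain_permanent() strips the flags that only describe a particular fd before they are stored on the map object. Its body is not part of this listing; a hedged sketch of the implied masking:

/* Assumed masking: BPF_F_RDONLY / BPF_F_WRONLY describe one fd's access
 * mode, not the map itself, so they are dropped before being stored in
 * map->map_flags.
 */
static u32 bpf_map_flags_retain_permanent(u32 flags)
{
        return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}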
391 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
395 ret = bpf_charge_memlock(map->memory.user, pages);
399 map->memory.pages += pages;
403 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
405 bpf_uncharge_memlock(map->memory.user, pages);
406 map->memory.pages -= pages;
409 static int bpf_map_alloc_id(struct bpf_map *map)
415 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
417 map->id = id;
429 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
438 if (!map->id) {
448 idr_remove(&map_idr, map->id);
449 map->id = 0;
461 struct bpf_map *map = container_of(work, struct bpf_map, work);
464 bpf_map_charge_move(&mem, &map->memory);
465 security_bpf_map_free(map);
467 map->ops->map_free(map);
471 static void bpf_map_put_uref(struct bpf_map *map)
473 if (atomic64_dec_and_test(&map->usercnt)) {
474 if (map->ops->map_release_uref) {
475 map->ops->map_release_uref(map);
480 /* decrement map refcnt and schedule it for freeing via workqueue
481 * (underlying map implementation ops->map_free() might sleep)
483 static void _bpf_map_put(struct bpf_map *map, bool do_idr_lock)
485 if (atomic64_dec_and_test(&map->refcnt)) {
487 bpf_map_free_id(map, do_idr_lock);
488 btf_put(map->btf);
489 INIT_WORK(&map->work, bpf_map_free_deferred);
490 schedule_work(&map->work);
494 void bpf_map_put(struct bpf_map *map)
496 _bpf_map_put(map, true);
500 void bpf_map_put_with_uref(struct bpf_map *map)
502 bpf_map_put_uref(map);
503 bpf_map_put(map);
508 struct bpf_map *map = filp->private_data;
510 if (map->ops->map_release) {
511 map->ops->map_release(map, filp);
514 bpf_map_put_with_uref(map);
518 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
523 * map permissions facing syscall side.
525 if (READ_ONCE(map->frozen)) {
534 const struct bpf_map *map = filp->private_data;
538 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
539 array = container_of(map, struct bpf_array, map);
555 map->map_type, map->key_size, map->value_size, map->max_entries, map->map_flags,
556 map->memory.pages * 1ULL << PAGE_SHIFT, map->id, READ_ONCE(map->frozen));
583 struct bpf_map *map = vma->vm_file->private_data;
586 mutex_lock(&map->freeze_mutex);
587 map->writecnt++;
588 mutex_unlock(&map->freeze_mutex);
595 struct bpf_map *map = vma->vm_file->private_data;
598 mutex_lock(&map->freeze_mutex);
599 map->writecnt--;
600 mutex_unlock(&map->freeze_mutex);
611 struct bpf_map *map = filp->private_data;
614 if (!map->ops->map_mmap || map_value_has_spin_lock(map)) {
622 mutex_lock(&map->freeze_mutex);
625 if (map->frozen) {
629 /* map is meant to be read-only, so do not allow mapping as
634 if (map->map_flags & BPF_F_RDONLY_PROG) {
642 vma->vm_private_data = map;
649 err = map->ops->map_mmap(map, vma);
655 map->writecnt++;
658 mutex_unlock(&map->freeze_mutex);
664 struct bpf_map *map = filp->private_data;
666 if (map->ops->map_poll) {
667 return map->ops->map_poll(map, filp, pts);
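bpf_map_mmap() only admits maps whose ops provide map_mmap and whose values carry no spin lock, and it counts writable mappings in map->writecnt (which later blocks BPF_MAP_FREEZE). A hedged userspace sketch of mapping the value area of an array map created with BPF_F_MMAPABLE (map_array_values is a made-up helper; the length calculation is illustrative):

/* mmap() the values of a BPF_F_MMAPABLE array map at offset 0.
 * The length must be page-aligned; writable mappings require the map
 * not to be frozen.
 */
#include <sys/mman.h>
#include <unistd.h>

static void *map_array_values(int map_fd, size_t value_size, size_t max_entries)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t len  = ((value_size * max_entries) + page - 1) & ~(page - 1);
        void *base  = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);

        return base == MAP_FAILED ? NULL : base;
}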
684 int bpf_map_new_fd(struct bpf_map *map, int flags)
688 ret = security_bpf_map(map, OPEN_FMODE(flags));
693 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, flags | O_CLOEXEC);
740 int map_check_no_btf(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type,
746 static int map_check_btf(struct bpf_map *map, const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
755 if (!key_type || key_size != map->key_size) {
760 if (!map->ops->map_check_btf) {
766 if (!value_type || value_size != map->value_size) {
770 map->spin_lock_off = btf_find_spin_lock(btf, value_type);
772 if (map_value_has_spin_lock(map)) {
773 if (map->map_flags & BPF_F_RDONLY_PROG) {
776 if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_ARRAY &&
777 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
778 map->map_type != BPF_MAP_TYPE_INODE_STORAGE) {
781 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > map->value_size) {
782 WARN_ONCE(1, "verifier bug spin_lock_off %d value_size %d\n", map->spin_lock_off, map->value_size);
787 if (map->ops->map_check_btf) {
788 ret = map->ops->map_check_btf(map, btf, key_type, value_type);
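map_check_btf() locates a struct bpf_spin_lock inside the value type via BTF (btf_find_spin_lock at 770) and then enforces that it only appears in the listed map types, never together with BPF_F_RDONLY_PROG, and fits within value_size. An illustrative value layout such a map might use (counter_val is a made-up name; struct bpf_spin_lock comes from the UAPI linux/bpf.h):

/* The lock member's offset ends up in map->spin_lock_off; programs use
 * bpf_spin_lock()/bpf_spin_unlock() and syscall users BPF_F_LOCK to
 * read or write the remaining fields consistently.
 */
struct counter_val {
        struct bpf_spin_lock lock;      /* located via BTF */
        __u64 packets;
        __u64 bytes;
};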
800 struct bpf_map *map;
826 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
827 map = find_and_alloc_map(attr);
828 if (IS_ERR(map)) {
829 return PTR_ERR(map);
832 err = bpf_obj_name_cpy(map->name, attr->map_name, sizeof(attr->map_name));
837 atomic64_set(&map->refcnt, 1);
838 atomic64_set(&map->usercnt, 1);
839 mutex_init(&map->freeze_mutex);
841 map->spin_lock_off = -EINVAL;
843 /* Even though the map's value is a kernel struct,
857 map->btf = btf;
860 err = map_check_btf(map, btf, attr->btf_key_type_id, attr->btf_value_type_id);
866 map->btf_key_type_id = attr->btf_key_type_id;
867 map->btf_value_type_id = attr->btf_value_type_id;
868 map->btf_vmlinux_value_type_id = attr->btf_vmlinux_value_type_id;
871 err = security_bpf_map_alloc(map);
876 err = bpf_map_alloc_id(map);
881 err = bpf_map_new_fd(map, f_flags);
885 * bpf_map_alloc_id() has published the map
889 bpf_map_put_with_uref(map);
896 security_bpf_map_free(map);
898 btf_put(map->btf);
899 bpf_map_charge_move(&mem, &map->memory);
900 map->ops->map_free(map);
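map_create() is driven by the BPF_MAP_CREATE command; everything it copies into bpf_map_init_from_attr() comes from union bpf_attr. A minimal hedged sketch of the userspace side (create_hash_map and the sizes are illustrative):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_hash_map(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));         /* unused attr fields must be zero */
        attr.map_type    = BPF_MAP_TYPE_HASH;
        attr.key_size    = sizeof(__u32);
        attr.value_size  = sizeof(__u64);
        attr.max_entries = 1024;
        strncpy(attr.map_name, "demo_hash", sizeof(attr.map_name) - 1);

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));  /* new map fd or -1 */
}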
921 void bpf_map_inc(struct bpf_map *map)
923 atomic64_inc(&map->refcnt);
927 void bpf_map_inc_with_uref(struct bpf_map *map)
929 atomic64_inc(&map->refcnt);
930 atomic64_inc(&map->usercnt);
937 struct bpf_map *map;
939 map = __bpf_map_get(f);
940 if (IS_ERR(map)) {
941 return map;
944 bpf_map_inc(map);
947 return map;
953 struct bpf_map *map;
955 map = __bpf_map_get(f);
956 if (IS_ERR(map)) {
957 return map;
960 bpf_map_inc_with_uref(map);
963 return map;
967 static struct bpf_map *_bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
971 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
976 atomic64_inc(&map->usercnt);
979 return map;
982 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
985 map = _bpf_map_inc_not_zero(map, false);
988 return map;
992 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1018 struct bpf_map *map;
1033 map = __bpf_map_get(f);
1034 if (IS_ERR(map)) {
1035 return PTR_ERR(map);
1037 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1042 if ((attr->flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)) {
1047 key = __bpf_copy_key(ukey, map->key_size);
1053 value_size = bpf_map_value_size(map);
1061 err = bpf_map_copy_value(map, key, value, attr->flags);
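When the value type embeds a struct bpf_spin_lock, the lookup path honours BPF_F_LOCK and copies under the lock (copy_map_value_locked at 247). A hedged sketch using libbpf's bpf_map_lookup_elem_flags(), assuming the counter_val layout sketched after map_check_btf above:

/* Snapshot a locked value consistently from userspace. */
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int read_counters_locked(int map_fd, __u32 key, struct counter_val *out)
{
        return bpf_map_lookup_elem_flags(map_fd, &key, out, BPF_F_LOCK);
}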
1089 struct bpf_map *map;
1100 map = __bpf_map_get(f);
1101 if (IS_ERR(map)) {
1102 return PTR_ERR(map);
1104 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1109 if ((attr->flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)) {
1114 key = __bpf_copy_key(ukey, map->key_size);
1120 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
1121 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
1122 value_size = round_up(map->value_size, 8) * num_possible_cpus();
1124 value_size = map->value_size;
1138 err = bpf_map_update_value(map, f, key, value, attr->flags);
1155 struct bpf_map *map;
1165 map = __bpf_map_get(f);
1166 if (IS_ERR(map)) {
1167 return PTR_ERR(map);
1169 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1174 key = __bpf_copy_key(ukey, map->key_size);
1180 if (bpf_map_is_dev_bound(map)) {
1181 err = bpf_map_offload_delete_elem(map, key);
1183 } else if (IS_FD_PROG_ARRAY(map) || map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1185 err = map->ops->map_delete_elem(map, key);
1191 err = map->ops->map_delete_elem(map, key);
1194 maybe_wait_bpf_programs(map);
1210 struct bpf_map *map;
1220 map = __bpf_map_get(f);
1221 if (IS_ERR(map)) {
1222 return PTR_ERR(map);
1224 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1230 key = __bpf_copy_key(ukey, map->key_size);
1240 next_key = kmalloc(map->key_size, GFP_USER);
1245 if (bpf_map_is_dev_bound(map)) {
1246 err = bpf_map_offload_get_next_key(map, key, next_key);
1251 err = map->ops->map_get_next_key(map, key, next_key);
1259 if (copy_to_user(unext_key, next_key, map->key_size) != 0) {
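map_get_next_key is the building block for iterating a map from userspace: a NULL previous key yields the first key, and each returned key is fed back in until the kernel reports ENOENT. A hedged sketch via libbpf (count_keys is illustrative and assumes __u32 keys):

/* Walk all keys; deleting concurrently may skip or repeat entries. */
#include <bpf/bpf.h>

static int count_keys(int map_fd)
{
        __u32 key, next_key;
        __u32 *prev = NULL;
        int n = 0;

        while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
                key = next_key;
                prev = &key;
                n++;
        }
        return n;
}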
1274 int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr)
1285 if ((attr->batch.elem_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)) {
1294 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1301 if (copy_from_user(key, keys + cp * map->key_size, map->key_size)) {
1305 if (bpf_map_is_dev_bound(map)) {
1306 err = bpf_map_offload_delete_elem(map, key);
1312 err = map->ops->map_delete_elem(map, key);
1315 maybe_wait_bpf_programs(map);
1328 int generic_map_update_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr)
1342 if ((attr->batch.elem_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)) {
1346 value_size = bpf_map_value_size(map);
1353 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1367 if (copy_from_user(key, keys + cp * map->key_size, map->key_size) ||
1372 err = bpf_map_update_value(map, f, key, value, attr->batch.elem_flags);
1392 int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr)
1406 if ((attr->batch.elem_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)) {
1410 value_size = bpf_map_value_size(map);
1421 buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1426 buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1434 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) {
1438 value = key + map->key_size;
1445 err = map->ops->map_get_next_key(map, prev_key, key);
1450 err = bpf_map_copy_value(map, key, value, attr->batch.elem_flags);
1465 if (copy_to_user(keys + cp * map->key_size, key, map->key_size)) {
1489 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) {
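generic_map_lookup_batch() walks the map with get_next_key, copies up to count key/value pairs, and hands back an out_batch cursor for the next call; ENOENT marks the final batch. A hedged sketch of a single call through libbpf's bpf_map_lookup_batch(), assuming __u32 keys, __u64 values, and caller-provided arrays of *count entries:

#include <bpf/bpf.h>
#include <errno.h>

static int dump_first_batch(int map_fd, __u32 *keys, __u64 *vals, __u32 *count)
{
        __u32 out_batch;
        int err = bpf_map_lookup_batch(map_fd, NULL /* start */, &out_batch,
                                       keys, vals, count, NULL /* default opts */);

        return (err && errno != ENOENT) ? err : 0;      /* ENOENT: map exhausted */
}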
1506 struct bpf_map *map;
1517 map = __bpf_map_get(f);
1518 if (IS_ERR(map)) {
1519 return PTR_ERR(map);
1521 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1526 key = __bpf_copy_key(ukey, map->key_size);
1532 value_size = map->value_size;
1540 if (map->map_type == BPF_MAP_TYPE_QUEUE || map->map_type == BPF_MAP_TYPE_STACK) {
1541 err = map->ops->map_pop_elem(map, value);
1571 struct bpf_map *map;
1579 map = __bpf_map_get(f);
1580 if (IS_ERR(map)) {
1581 return PTR_ERR(map);
1584 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1589 mutex_lock(&map->freeze_mutex);
1591 if (map->writecnt) {
1595 if (READ_ONCE(map->frozen)) {
1604 WRITE_ONCE(map->frozen, true);
1606 mutex_unlock(&map->freeze_mutex);
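map_freeze() refuses maps with outstanding writable mmaps (writecnt at 1591) and then sets map->frozen, after which map_get_sys_perms() at 525 no longer grants syscall-side write access. A hedged raw-syscall sketch (freeze_map is a made-up helper):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int freeze_map(int map_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;

        return syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
}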
3215 struct bpf_map *map;
3220 map = idr_get_next(&map_idr, id);
3221 if (map) {
3222 map = _bpf_map_inc_not_zero(map, false);
3223 if (IS_ERR(map)) {
3232 return map;
3308 struct bpf_map *map;
3327 map = idr_find(&map_idr, id);
3328 if (map) {
3329 map = _bpf_map_inc_not_zero(map, true);
3331 map = ERR_PTR(-ENOENT);
3335 if (IS_ERR(map)) {
3336 return PTR_ERR(map);
3339 fd = bpf_map_new_fd(map, f_flags);
3341 bpf_map_put_with_uref(map);
3349 const struct bpf_map *map;
3354 map = prog->aux->used_maps[i];
3355 if (map == (void *)addr) {
3359 if (!map->ops->map_direct_value_meta) {
3362 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3367 map = NULL;
3371 return map;
3376 const struct bpf_map *map;
3415 map = bpf_map_from_imm(prog, imm, &off, &type);
3416 if (map) {
3418 insns[i].imm = map->id;
3749 static int bpf_map_get_info_by_fd(struct file *file, struct bpf_map *map, const union bpf_attr *attr,
3764 info.type = map->map_type;
3765 info.id = map->id;
3766 info.key_size = map->key_size;
3767 info.value_size = map->value_size;
3768 info.max_entries = map->max_entries;
3769 info.map_flags = map->map_flags;
3770 memcpy(info.name, map->name, sizeof(map->name));
3772 if (map->btf) {
3773 info.btf_id = btf_id(map->btf);
3774 info.btf_key_type_id = map->btf_key_type_id;
3775 info.btf_value_type_id = map->btf_value_type_id;
3777 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3779 if (bpf_map_is_dev_bound(map)) {
3780 err = bpf_map_offload_info_fill(&info, map);
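bpf_map_get_info_by_fd() fills struct bpf_map_info with the fields listed above (type, id, key/value sizes, flags, name, BTF ids). Userspace fetches it with BPF_OBJ_GET_INFO_BY_FD; a hedged sketch via libbpf's bpf_obj_get_info_by_fd() (get_map_type is illustrative):

/* The kernel copies at most *info_len bytes back and updates *info_len. */
#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <string.h>

static int get_map_type(int map_fd, __u32 *type)
{
        struct bpf_map_info info;
        __u32 len = sizeof(info);
        int err;

        memset(&info, 0, sizeof(info));
        err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
        if (!err)
                *type = info.type;
        return err;
}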
4050 err = fn(map, attr, uattr); \
4055 struct bpf_map *map;
4065 map = __bpf_map_get(f);
4066 if (IS_ERR(map)) {
4067 return PTR_ERR(map);
4071 !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4076 if (cmd != BPF_MAP_LOOKUP_BATCH && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4082 BPF_DO_BATCH(map->ops->map_lookup_batch);
4084 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4086 BPF_DO_BATCH(map->ops->map_update_batch);
4088 BPF_DO_BATCH(map->ops->map_delete_batch);
4405 struct bpf_map *map;
4422 map = bpf_map_get(attr->prog_bind_map.map_fd);
4423 if (IS_ERR(map)) {
4424 ret = PTR_ERR(map);
4433 if (used_maps_old[i] == map) {
4434 bpf_map_put(map);
4446 used_maps_new[prog->aux->used_map_cnt] = map;
4457 bpf_map_put(map);
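The final fragment is BPF_PROG_BIND_MAP: the map is added to the program's used_maps[] (used_maps_new at 4446) so it stays alive as long as the program does, even without any other reference. A hedged raw-syscall sketch (bind_map_to_prog is a made-up helper; the command requires a kernel that provides BPF_PROG_BIND_MAP):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bind_map_to_prog(int prog_fd, int map_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_bind_map.prog_fd = prog_fd;
        attr.prog_bind_map.map_fd  = map_fd;

        return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}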