Lines in include/linux/bpf.h matching references to "map"

55 /* map is generic key/value storage optionally accessible by eBPF programs */
60 void (*map_release)(struct bpf_map *map, struct file *map_file);
61 void (*map_free)(struct bpf_map *map);
62 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
63 void (*map_release_uref)(struct bpf_map *map);
64 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
65 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
66 int (*map_lookup_and_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
67 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
68 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
71 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
72 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
73 int (*map_delete_elem)(struct bpf_map *map, void *key);
74 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
75 int (*map_pop_elem)(struct bpf_map *map, void *value);
76 int (*map_peek_elem)(struct bpf_map *map, void *value);
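
Taken together, the element callbacks above form the per-map-type vtable. A minimal sketch of how a map type fills them in, loosely modeled on array_map_ops in kernel/bpf/arraymap.c; the my_* names and the elem_size field are hypothetical stand-ins:

    /* Sketch only: a hypothetical map type wiring up the element ops. */
    static void *my_map_lookup_elem(struct bpf_map *map, void *key)
    {
            struct my_map *m = container_of(map, struct my_map, map);
            u32 index = *(u32 *)key;

            if (index >= map->max_entries)
                    return NULL;            /* NULL means "element not found" */
            return m->values + (u64)index * m->elem_size;
    }

    const struct bpf_map_ops my_map_ops = {
            .map_lookup_elem  = my_map_lookup_elem,
            .map_update_elem  = my_map_update_elem,   /* hypothetical */
            .map_delete_elem  = my_map_delete_elem,   /* hypothetical */
            .map_get_next_key = my_map_get_next_key,  /* hypothetical */
    };
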
78 /* funcs called by prog_array and perf_event_array map */
79 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd);
81 int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
83 void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m);
84 int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type,
88 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
89 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
90 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, struct bpf_prog *new);
93 int (*map_direct_value_addr)(const struct bpf_map *map, u64 *imm, u32 off);
94 int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off);
95 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
96 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, struct poll_table_struct *pts);
104 * used as an inner map. It is a runtime check to ensure
105 * an inner map can be inserted into an outer map.
107 * Some properties of the inner map have been used at
108 * verification time. When inserting an inner map at runtime,
109 * map_meta_equal has to ensure the inserted map has the same
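
For reference, the runtime check itself is small. A sketch matching the rough shape of bpf_map_meta_equal() in kernel/bpf/map_in_map.c (the exact field list varies by kernel version):

    bool bpf_map_meta_equal(const struct bpf_map *meta0,
                            const struct bpf_map *meta1)
    {
            /* The map being inserted must match the template the verifier
             * saw: same type, key/value layout and flags. */
            return meta0->map_type == meta1->map_type &&
                   meta0->key_size == meta1->key_size &&
                   meta0->value_size == meta1->value_size &&
                   meta0->map_flags == meta1->map_flags;
    }
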
164 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
166 return map->spin_lock_off >= 0;
169 static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
171 if (likely(!map_value_has_spin_lock(map))) {
174 *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = (struct bpf_spin_lock) {};
178 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
180 if (unlikely(map_value_has_spin_lock(map))) {
181 u32 off = map->spin_lock_off;
185 map->value_size - off - sizeof(struct bpf_spin_lock));
187 memcpy(dst, src, map->value_size);
190 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src);
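
The search only surfaced fragments of copy_map_value(); reconstructed from the same header, the helper copies around the embedded lock so the copy never clobbers a struct bpf_spin_lock that may be held concurrently:

    static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
    {
            if (unlikely(map_value_has_spin_lock(map))) {
                    u32 off = map->spin_lock_off;

                    memcpy(dst, src, off);  /* bytes before the lock */
                    memcpy(dst + off + sizeof(struct bpf_spin_lock),
                           src + off + sizeof(struct bpf_spin_lock),
                           map->value_size - off - sizeof(struct bpf_spin_lock));
            } else {
                    memcpy(dst, src, map->value_size);
            }
    }
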
197 int (*map_get_next_key)(struct bpf_offloaded_map *map, void *key, void *next_key);
198 int (*map_lookup_elem)(struct bpf_offloaded_map *map, void *key, void *value);
199 int (*map_update_elem)(struct bpf_offloaded_map *map, void *key, void *value, u64 flags);
200 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
204 struct bpf_map map;
211 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
213 return container_of(map, struct bpf_offloaded_map, map);
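
The four offloaded-map callbacks above belong to struct bpf_map_dev_ops, which a device driver supplies. A sketch in the style of the netdevsim driver; the my_dev_* helpers are hypothetical:

    static const struct bpf_map_dev_ops my_dev_map_ops = {
            .map_get_next_key = my_dev_map_get_next_key,
            .map_lookup_elem  = my_dev_map_lookup_elem,
            .map_update_elem  = my_dev_map_update_elem,
            .map_delete_elem  = my_dev_map_delete_elem,
    };
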
216 static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
218 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
221 static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
223 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && map->ops->map_seq_show_elem;
226 int map_check_no_btf(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type,
274 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
275 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
276 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
281 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
322 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
402 PTR_TO_MAP_VALUE, /* reg points to map element value */
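
These tags are consumed by the verifier: every helper publishes a struct bpf_func_proto describing its argument and return types, and call sites are checked against it. This mirrors bpf_map_lookup_elem_proto in kernel/bpf/helpers.c (field values as of this kernel era):

    const struct bpf_func_proto bpf_map_lookup_elem_proto = {
            .func       = bpf_map_lookup_elem,
            .gpl_only   = false,
            .pkt_access = true,
            .ret_type   = RET_PTR_TO_MAP_VALUE_OR_NULL,
            .arg1_type  = ARG_CONST_MAP_PTR,
            .arg2_type  = ARG_PTR_TO_MAP_KEY,
    };
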
760 struct bpf_map *map;
861 * is going to use this map or by the first program whose FD is
862 * stored in the map to make sure that all callers and callees have
872 struct bpf_map *map;
928 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value);
961 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value)
968 struct bpf_map map;
987 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
989 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
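
Only the first statement of bpf_map_flags_to_cap() matched the search; the remainder, reconstructed from the same header, maps the two program-side access flags onto capabilities, with neither flag meaning full access:

    if (access_flags & BPF_F_RDONLY_PROG)
            return BPF_MAP_CAN_READ;
    else if (access_flags & BPF_F_WRONLY_PROG)
            return BPF_MAP_CAN_WRITE;
    else
            return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
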
1025 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size,
1162 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
1163 * these events can happen inside a region which holds a map bucket lock
1235 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
1240 void bpf_map_inc(struct bpf_map *map);
1241 void bpf_map_inc_with_uref(struct bpf_map *map);
1242 struct bpf_map *__must_check bpf_map_inc_not_zero(struct bpf_map *map);
1243 void bpf_map_put_with_uref(struct bpf_map *map);
1244 void bpf_map_put(struct bpf_map *map);
1245 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
1246 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
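
A sketch of the usual refcount discipline with this API; "candidate" is a hypothetical borrowed pointer that may be racing with teardown:

    struct bpf_map *map = bpf_map_inc_not_zero(candidate);

    if (IS_ERR(map))
            return PTR_ERR(map);    /* refcount already hit zero */
    /* ... map is guaranteed alive here ... */
    bpf_map_put(map);               /* drop our reference */
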
1253 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
1254 int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
1255 int generic_map_update_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
1256 int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
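
Map types that need no special batching can point the batch callbacks from the ops table straight at these generic implementations, as kernel/bpf/arraymap.c does (a sketch; my_ops is hypothetical):

    const struct bpf_map_ops my_ops = {
            /* ... element ops ... */
            .map_lookup_batch = generic_map_lookup_batch,
            .map_update_batch = generic_map_update_batch,
    };
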
1287 int bpf_map_new_fd(struct bpf_map *map, int flags);
1313 struct bpf_map *map;
1342 __bpf_md_ptr(struct bpf_map *, map);
1358 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1359 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1360 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, u64 flags);
1361 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags);
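
These copy/update helpers back the syscall path for per-cpu maps, where the user-visible value is one round_up(value_size, 8) slot per possible CPU. A condensed sketch of the dispatch pattern in kernel/bpf/syscall.c:

    if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
        map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH)
            err = bpf_percpu_hash_copy(map, key, value);
    else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
            err = bpf_percpu_array_copy(map, key, value);
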
1363 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
1365 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags);
1366 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1367 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags);
1368 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1403 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
1404 struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
1409 bool dev_map_can_have_prog(struct bpf_map *map);
1411 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
1414 bool cpu_map_prog_allowed(struct bpf_map *map);
1416 /* Return the map's NUMA node, as specified by userspace */
1528 static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
1533 static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
1537 static inline bool dev_map_can_have_prog(struct bpf_map *map)
1566 static inline struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
1580 static inline bool cpu_map_prog_allowed(struct bpf_map *map)
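
For context, the devmap lookups above are what an XDP redirect ends up consulting. A hypothetical program-side counterpart in libbpf style (assumes <linux/bpf.h> and <bpf/bpf_helpers.h>; tx_ports is populated with ifindexes from userspace):

    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 64);
            __type(key, __u32);
            __type(value, __u32);
    } tx_ports SEC(".maps");

    SEC("xdp")
    int xdp_redirect_prog(struct xdp_md *ctx)
    {
            /* Resolved in-kernel via __dev_map_lookup_elem();
             * XDP_PASS is the fallback if the lookup fails. */
            return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
    }
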
1614 static inline void bpf_map_put(struct bpf_map *map)
1648 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1650 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
1651 int bpf_map_offload_update_elem(struct bpf_map *map, void *key, void *value, u64 flags);
1652 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
1653 int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key);
1655 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
1674 static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
1676 return unlikely(map->ops == &bpf_map_offload_ops);
1680 void bpf_map_offload_map_free(struct bpf_map *map);
1692 static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
1702 static inline void bpf_map_offload_map_free(struct bpf_map *map)
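
Callers split on device-bound maps before touching element data, following the pattern in kernel/bpf/syscall.c (a condensed sketch):

    if (bpf_map_is_dev_bound(map))
            return bpf_map_offload_lookup_elem(map, key, value);
    /* ... otherwise fall through to the normal in-kernel lookup ... */
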
1708 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which);
1711 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
1715 static inline int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which)
1730 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags)
1738 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value);
1739 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags);
1746 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value)
1751 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)