Lines Matching defs:map

86 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'

87 * and the range of [ptr, ptr + map's value_size) is accessible.
95 * 'pointer to map element key'
102 * ret_type says that this function returns 'pointer to map elem value or null'
105 * the helper function as a pointer to map element key.
110 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
114 * here kernel can access 'key' and 'map' pointers safely, knowing that
115 * [key, key + map->key_size) bytes are valid and were initialized on
126 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
129 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
134 * returns either pointer to map value or NULL.
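
The excerpted header comment above (verifier.c lines 86-134) describes the pattern the verifier enforces around bpf_map_lookup_elem(): the key must sit in initialized stack memory covering map->key_size bytes, and the returned pointer must be NULL-checked before any access within value_size. A minimal sketch in BPF C of code that satisfies those checks, assuming libbpf's bpf_helpers.h and hypothetical map/program names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* hypothetical hash map: 4-byte key, 8-byte value */
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 128);
        __type(key, __u32);
        __type(value, __u64);
} counters SEC(".maps");

SEC("socket")
int count_packets(struct __sk_buff *skb)
{
        __u32 key = 0;  /* key lives on the stack and is initialized, so
                         * [key, key + map->key_size) is readable */
        __u64 *val;

        val = bpf_map_lookup_elem(&counters, &key);
        if (!val)       /* PTR_TO_MAP_VALUE_OR_NULL must be checked first */
                return 0;
        __sync_fetch_and_add(val, 1);   /* access stays within value_size */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
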
210 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, const struct bpf_map *map, bool unpriv)
214 aux->map_ptr_state = (unsigned long)map | (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
452 static bool is_acquire_function(enum bpf_func_id func_id, const struct bpf_map *map)
454 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
2765 struct bpf_map *map = regs[regno].map_ptr;
2766 u32 cap = bpf_map_flags_to_cap(map);
2768 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", map->value_size, off, size);
2772 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", map->value_size, off, size);
2779 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
2793 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", mem_size, off, size);
2861 /* check read/write into a map element with possible variable offset */
2867 struct bpf_map *map = reg->map_ptr;
2870 err = check_mem_region_access(env, regno, off, size, map->value_size, zero_size_allowed);
2875 if (map_value_has_spin_lock(map)) {
2876 u32 lock = map->spin_lock_off;
3469 static bool bpf_map_is_rdonly(const struct bpf_map *map)
3471 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
3474 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
3480 err = map->ops->map_direct_value_addr(map, &addr, off);
3552 struct bpf_map *map = reg->map_ptr;
3563 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
3564 verbose(env, "map_ptr access not supported for map type %d\n", map->map_type);
3568 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
3713 verbose(env, "R%d leaks addr into map\n", value_regno);
3722 struct bpf_map *map = reg->map_ptr;
3724 /* if map is read-only, track its contents as scalars */
3725 if (tnum_is_const(reg->var_off) && bpf_map_is_rdonly(map) && map->ops->map_direct_value_addr) {
3729 err = bpf_map_direct_read(map, map_off, size, &val);
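
bpf_map_is_rdonly() only returns true when the map was created with BPF_F_RDONLY_PROG and has also been frozen against user-space writes; only then can bpf_map_direct_read() treat the contents as verification-time constants. A sketch of the user-space setup, assuming a recent libbpf that provides bpf_map_create()/bpf_map_freeze(), with a hypothetical map name (array maps implement map_direct_value_addr):

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Create a map the verifier can treat as read-only: BPF_F_RDONLY_PROG
 * forbids writes from the program, bpf_map_freeze() forbids further
 * writes from user space; bpf_map_is_rdonly() requires both.
 */
int make_frozen_config_map(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_RDONLY_PROG);
        __u32 key = 0;
        __u64 value = 42;
        int fd;

        fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "config", sizeof(key),
                            sizeof(value), 1, &opts);
        if (fd < 0)
                return fd;

        /* populate before freezing; afterwards the contents are immutable
         * and loads from the map value can be tracked as known scalars */
        if (bpf_map_update_elem(fd, &key, &value, BPF_ANY) || bpf_map_freeze(fd))
                return -1;

        return fd;
}
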
4125 * the range of access to valid map value pointer and doesn't care about actual
4126 * address of the map element.
4127 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
4136 * cur_state->active_spin_lock remembers which map value element got locked
4144 struct bpf_map *map = reg->map_ptr;
4151 if (!map->btf) {
4152 verbose(env, "map '%s' has to have BTF in order to use bpf_spin_lock\n", map->name);
4155 if (!map_value_has_spin_lock(map)) {
4156 if (map->spin_lock_off == -E2BIG) {
4157 verbose(env, "map '%s' has more than one 'struct bpf_spin_lock'\n", map->name);
4158 } else if (map->spin_lock_off == -ENOENT) {
4159 verbose(env, "map '%s' doesn't have 'struct bpf_spin_lock'\n", map->name);
4161 verbose(env, "map '%s' is not a struct type or bpf_spin_lock is mangled\n", map->name);
4165 if (map->spin_lock_off != val + reg->off) {
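
The spin-lock handling above requires the map to carry BTF describing exactly one struct bpf_spin_lock in the value (that is what sets map->spin_lock_off), and the pointer passed to the helper must land exactly at that offset. A minimal BPF C sketch with hypothetical names, assuming a BTF-defined map so the value layout is visible to the verifier:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* value layout: exactly one struct bpf_spin_lock, described via BTF */
struct val_t {
        struct bpf_spin_lock lock;
        __u64 cnt;
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, struct val_t);
} vals SEC(".maps");

SEC("tc")
int bump(struct __sk_buff *skb)
{
        __u32 key = 0;
        struct val_t *v = bpf_map_lookup_elem(&vals, &key);

        if (!v)
                return 0;
        bpf_spin_lock(&v->lock);        /* offset must equal map->spin_lock_off */
        v->cnt++;
        bpf_spin_unlock(&v->lock);      /* must be released before returning */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
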
4225 verbose(env, "invalid map_ptr to access map->type\n");
4503 * check that [key, key + map->key_size) are within
4512 verbose(env, "invalid map_ptr to access map->key\n");
4522 * check [value, value + map->value_size) validity
4526 verbose(env, "invalid map_ptr to access map->value\n");
4658 static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id)
4660 if (!map) {
4664 /* We need a two way check, first is from map perspective ... */
4665 switch (map->map_type) {
4770 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4783 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
4790 if (map->map_type != BPF_MAP_TYPE_RINGBUF) {
4795 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) {
4801 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) {
4806 if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
4807 map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) {
4814 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) {
4821 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) {
4826 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
4831 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && map->map_type != BPF_MAP_TYPE_SOCKMAP &&
4832 map->map_type != BPF_MAP_TYPE_SOCKHASH) {
4839 if (map->map_type != BPF_MAP_TYPE_QUEUE && map->map_type != BPF_MAP_TYPE_STACK) {
4845 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
4851 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) {
4861 verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id);
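
check_map_func_compatibility() is a two-way check: a helper that expects a particular map type rejects any other map, and special-purpose maps reject unrelated helpers. For example, bpf_tail_call() is only accepted together with a BPF_MAP_TYPE_PROG_ARRAY; pairing it with any other map type hits the "cannot pass map_type ... into func" error above. A sketch of the compatible pairing, with hypothetical names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* bpf_tail_call() is only compatible with BPF_MAP_TYPE_PROG_ARRAY; any other
 * map type fails with "cannot pass map_type %d into func bpf_tail_call#12".
 */
struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 4);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatch(struct xdp_md *ctx)
{
        bpf_tail_call(ctx, &jmp_table, 0);      /* falls through if slot 0 is empty */
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
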
5225 struct bpf_map *map = meta->map_ptr;
5233 if (map == NULL) {
5240 * state of the map from program side.
5242 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
5245 verbose(env, "write into map forbidden\n");
5261 struct bpf_map *map = meta->map_ptr;
5268 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
5275 max = map->max_entries;
5401 /* check that flags argument in get_local_storage(map, flags) is 0,
5431 * to map element returned from bpf_map_lookup_elem()
5924 "R%d pointer arithmetic of map value goes out of range, "
7645 const struct bpf_map *map = reg->map_ptr;
7647 if (map->inner_map_meta) {
7649 reg->map_ptr = map->inner_map_meta;
7650 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
7652 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || map->map_type == BPF_MAP_TYPE_SOCKHASH) {
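
The lines above show how the type of a lookup result depends on the map: for map-in-map types the destination register becomes a pointer to the inner map (taken from map->inner_map_meta), so it can be fed to another lookup, while XSKMAP/SOCKMAP/SOCKHASH lookups yield socket pointers. A map-in-map sketch with hypothetical names, assuming libbpf's BTF-defined map syntax (__array declares the inner-map template):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* inner map template: the verifier types the outer lookup result using
 * the outer map's inner_map_meta derived from this declaration */
struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 8);
        __uint(key_size, sizeof(__u32));
        __array(values, struct inner_map);
} outer SEC(".maps");

SEC("xdp")
int use_inner(struct xdp_md *ctx)
{
        __u32 key = 0;
        __u64 *cnt;
        void *inner;

        inner = bpf_map_lookup_elem(&outer, &key);      /* CONST_PTR_TO_MAP or NULL */
        if (!inner)
                return XDP_PASS;
        cnt = bpf_map_lookup_elem(inner, &key);         /* second lookup on the inner map */
        if (cnt)
                (*cnt)++;
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
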
7987 struct bpf_map *map;
8036 map = env->used_maps[aux->map_index];
8037 dst_reg->map_ptr = map;
8042 if (map_value_has_spin_lock(map)) {
8860 * so, we require the new id to match; otherwise, we add the id pair to the map.
9035 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
9055 * bpf_spin_lock inside map element and in such cases if
9056 * the rest of the prog is valid for one map element then
9057 * it's valid for all map elements regardless of the key
9784 * src_reg == stack|map in some other branch.
10040 static bool check_map_prealloc(struct bpf_map *map)
10042 return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
10043 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
10044 !(map->map_flags & BPF_F_NO_PREALLOC);
10060 static bool is_preallocated_map(struct bpf_map *map)
10062 if (!check_map_prealloc(map)) {
10065 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) {
10071 static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog)
10092 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
10094 verbose(env, "perf_event programs can only use preallocated hash map\n");
10098 verbose(env, "trace type programs can only use preallocated hash map\n");
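
The messages above come from the rule that hash maps used by tracing programs must be preallocated, i.e. created without BPF_F_NO_PREALLOC, because map updates from tracing context must not go through run-time allocation. A sketch with hypothetical names; hash maps are preallocated by default, so simply omitting the flag satisfies the check:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hash maps are preallocated unless BPF_F_NO_PREALLOC is set; adding
 * __uint(map_flags, BPF_F_NO_PREALLOC) here would trip the preallocation
 * check above for this tracepoint program.
 */
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u32);
        __type(value, __u64);
} opens_per_pid SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_openat")
int count_opens(void *ctx)
{
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
        __u64 *cnt, one = 1;

        cnt = bpf_map_lookup_elem(&opens_per_pid, &pid);
        if (cnt)
                (*cnt)++;
        else
                bpf_map_update_elem(&opens_per_pid, &pid, &one, BPF_ANY);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
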
10107 if ((is_tracing_prog_type(prog_type) || prog_type == BPF_PROG_TYPE_SOCKET_FILTER) && map_value_has_spin_lock(map)) {
10112 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) {
10113 verbose(env, "offload device mismatch between prog and map\n");
10117 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
10118 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
10123 switch (map->map_type) {
10127 if (!is_preallocated_map(map)) {
10141 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
10143 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
10148 * 1. if it accesses map FD, replace it with actual map pointer.
10178 struct bpf_map *map;
10212 map = __bpf_map_get(f);
10213 if (IS_ERR(map)) {
10215 return PTR_ERR(map);
10218 err = check_map_prog_compatibility(env, map, env->prog);
10226 addr = (unsigned long)map;
10236 if (!map->ops->map_direct_value_addr) {
10237 verbose(env, "no direct value access support for this map type\n");
10242 err = map->ops->map_direct_value_addr(map, &addr, off);
10244 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", map->value_size, off);
10256 /* check whether we recorded this map already */
10258 if (env->used_maps[j] == map) {
10270 /* hold the map. If the program is rejected by verifier,
10271 * the map will be released by release_maps() or it
10275 bpf_map_inc(map);
10278 env->used_maps[env->used_map_cnt++] = map;
10280 if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog->aux, map)) {
10302 * These pointers will be used later by verifier to validate map access.
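
resolve_pseudo_ldimm64() is what turns a map file descriptor embedded in the program into a struct bpf_map pointer: a BPF_LD_IMM64 instruction whose src_reg is BPF_PSEUDO_MAP_FD carries the fd in its immediate, and the verifier replaces the 64-bit immediate with the address of the held map. A minimal sketch of how a loader might emit that pseudo instruction; emit_ld_map_fd() is a hypothetical helper, not a kernel or libbpf API:

#include <linux/bpf.h>

/* Emit the two-slot BPF_LD_IMM64 pseudo instruction that carries a map fd.
 * src_reg = BPF_PSEUDO_MAP_FD tells the verifier to resolve the fd, take a
 * reference on the map, and rewrite the immediate with the map pointer.
 */
static void emit_ld_map_fd(struct bpf_insn insn[2], __u8 dst_reg, int map_fd)
{
        insn[0] = (struct bpf_insn) {
                .code    = BPF_LD | BPF_DW | BPF_IMM,
                .dst_reg = dst_reg,
                .src_reg = BPF_PSEUDO_MAP_FD,
                .imm     = map_fd,              /* low 32 bits hold the fd */
        };
        insn[1] = (struct bpf_insn) { 0 };      /* high 32 bits of imm are zero */
}
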
11149 map_ptr = prog->aux->poke_tab[i].tail_call.map;
11387 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
11419 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, container_of(map_ptr, struct bpf_array, map)->index_mask);
11469 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL));
11470 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, (int (*)(struct bpf_map *map, void *key))NULL));
11472 (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL));
11474 !__same_type(ops->map_push_elem, (int (*)(struct bpf_map *map, void *value, u64 flags))NULL));
11475 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, (int (*)(struct bpf_map *map, void *value))NULL));
11476 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, (int (*)(struct bpf_map *map, void *value))NULL));
11539 map_ptr = prog->aux->poke_tab[i].tail_call.map;
12355 /* if we didn't copy map pointers into bpf_prog_info, release