Lines Matching defs:meta (uses of struct bpf_call_arg_meta in kernel/bpf/verifier.c)

2639 struct bpf_call_arg_meta *meta);

2667 /* Note that we pass a NULL meta, so raw access will not be permitted.
2898 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta,
2923 if (meta) {
2924 return meta->pkt_access;
3949 struct bpf_call_arg_meta *meta)
4003 if (meta && meta->raw_mode) {
4004 meta = NULL;
4011 if (meta && meta->raw_mode) {
4012 meta->access_size = access_size;
4013 meta->regno = regno;
4074 struct bpf_call_arg_meta *meta)
4086 meta && meta->raw_mode ? BPF_WRITE : BPF_READ)) {
4094 if (meta && meta->raw_mode) {
4107 meta);
4220 static int resolve_map_arg_type(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta,
4223 if (!meta->map_ptr) {
4229 switch (meta->map_ptr->map_type) {
4409 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, struct bpf_call_arg_meta *meta,
4435 if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) {
4441 err = resolve_map_arg_type(env, meta, &arg_type);
4490 if (meta->ref_obj_id) {
4492 reg->ref_obj_id, meta->ref_obj_id);
4495 meta->ref_obj_id = reg->ref_obj_id;
4500 meta->map_ptr = reg->map_ptr;
4506 if (!meta->map_ptr) {
4515 err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL);
4524 if (!meta->map_ptr) {
4529 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
4530 err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, meta);
4536 meta->ret_btf_id = reg->btf_id;
4538 if (meta->func_id == BPF_FUNC_spin_lock) {
4542 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
4554 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
4566 meta->msize_max_value = reg->umax_value;
4577 meta = NULL;
4586 err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta);
4596 err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta);
4605 meta->mem_size = reg->var_off.value;
4609 err = check_helper_mem_access(env, regno, size, false, meta);
5205 struct bpf_call_arg_meta *meta)
5215 ret_reg->smax_value = meta->msize_max_value;
5216 ret_reg->s32_max_value = meta->msize_max_value;
5222 static int record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx)
5225 struct bpf_map *map = meta->map_ptr;
5250 bpf_map_ptr_store(aux, meta->map_ptr, !meta->map_ptr->bypass_spec_v1);
5251 } else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) {
5252 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, !meta->map_ptr->bypass_spec_v1);
5257 static int record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx)
5261 struct bpf_map *map = meta->map_ptr;
5312 struct bpf_call_arg_meta meta;
5348 memset(&meta, 0, sizeof(meta));
5349 meta.pkt_access = fn->pkt_access;
5357 meta.func_id = func_id;
5360 err = check_func_arg(env, i, &meta, fn);
5366 err = record_func_map(env, &meta, func_id, insn_idx);
5371 err = record_func_key(env, &meta, func_id, insn_idx);
5379 for (i = 0; i < meta.access_size; i++) {
5380 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1, false);
5393 err = release_reference(env, meta.ref_obj_id);
5433 if (meta.map_ptr == NULL) {
5437 regs[BPF_REG_0].map_ptr = meta.map_ptr;
5439 if (!type_may_be_null(ret_type) && map_value_has_spin_lock(meta.map_ptr)) {
5454 regs[BPF_REG_0].mem_size = meta.mem_size;
5459 t = btf_type_skip_modifiers(btf_vmlinux, meta.ret_btf_id, NULL);
5483 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
5507 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
5508 } else if (is_acquire_function(func_id, meta.map_ptr)) {
5519 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
5521 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
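The lines above trace one pattern end to end: check_helper_call() zeroes a struct bpf_call_arg_meta on its stack (the memset at 5348), the per-argument checks in check_func_arg() and check_helper_mem_access() record what the later stages need (raw_mode, access_size/regno, map_ptr, ref_obj_id, msize_max_value), and the post-argument passes consume it (record_func_map()/record_func_key(), the write-marking loop at 5379, the return-type setup, do_refine_retval_range()). Below is a minimal userspace sketch of that shape; it is an illustration only, and every name in it (call_meta, check_arg, check_call) is invented for the sketch rather than taken from the verifier.

/*
 * Simplified, userspace-only sketch of the scratch-struct pattern above.
 * Nothing here is verifier code: call_meta, check_arg() and check_call()
 * are invented stand-ins for struct bpf_call_arg_meta, check_func_arg()
 * and check_helper_call().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct call_meta {
        bool raw_mode;          /* argument points at uninitialized memory */
        int  regno;             /* register holding that memory argument   */
        int  access_size;       /* bytes the helper is allowed to write    */
        long msize_max_value;   /* bound later used to refine the retval   */
};

/* Per-argument check: validate, then record facts for the later passes. */
static int check_arg(struct call_meta *meta, int regno, bool uninit, int size)
{
        if (size <= 0)
                return -1;              /* reject an empty memory argument */

        meta->raw_mode = uninit;
        meta->regno = regno;
        meta->access_size = size;
        if ((long)size > meta->msize_max_value)
                meta->msize_max_value = size;
        return 0;
}

/* Call-level check: zero the meta, run the argument checks, consume it. */
static int check_call(void)
{
        struct call_meta meta;

        memset(&meta, 0, sizeof(meta));         /* cf. memset at line 5348 */

        if (check_arg(&meta, 2, true, 16))      /* arg2: 16 uninit bytes   */
                return -1;

        if (meta.raw_mode) {
                /* Post-argument pass: the helper filled these bytes, so a
                 * verifier-like caller would now mark them initialized
                 * (cf. the loop over meta.access_size at line 5379).
                 */
                printf("r%d: %d bytes written by the helper\n",
                       meta.regno, meta.access_size);
        }

        /* cf. do_refine_retval_range() consuming msize_max_value */
        printf("return value bounded by %ld\n", meta.msize_max_value);
        return 0;
}

int main(void)
{
        return check_call();
}

Reading the listing this way, meta is the channel through which per-argument facts (memory sizes, the map pointer, an acquired reference) reach the passes that run only after every argument has been accepted.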