Lines Matching refs:reg

76 * Meaning that this reg is a pointer to the stack plus a known immediate constant.

136 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
411 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
413 return reg->type == PTR_TO_MAP_VALUE && map_value_has_spin_lock(reg->map_ptr);
548 static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg)
552 return cur->frame[reg->frameno];
562 const struct bpf_reg_state *reg;
570 reg = &state->regs[i];
571 t = reg->type;
576 print_liveness(env, reg->live);
578 if (t == SCALAR_VALUE && reg->precise) {
581 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) {
582 /* reg->off should be 0 for SCALAR_VALUE */
583 verbose(env, "%lld", reg->var_off.value + reg->off);
586 verbose(env, "%s", kernel_type_name(reg->btf_id));
588 verbose(env, "(id=%d", reg->id);
590 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
593 verbose(env, ",off=%d", reg->off);
596 verbose(env, ",r=%d", reg->range);
598 verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size);
600 if (tnum_is_const(reg->var_off)) {
603 * for reg->off
605 verbose(env, ",imm=%llx", reg->var_off.value);
607 if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) {
608 verbose(env, ",smin_value=%lld", (long long)reg->smin_value);
610 if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) {
611 verbose(env, ",smax_value=%lld", (long long)reg->smax_value);
613 if (reg->umin_value != 0) {
614 verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value);
616 if (reg->umax_value != U64_MAX) {
617 verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value);
619 if (!tnum_is_unknown(reg->var_off)) {
622 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
625 if (reg->s32_min_value != reg->smin_value && reg->s32_min_value != S32_MIN) {
626 verbose(env, ",s32_min_value=%d", (int)(reg->s32_min_value));
628 if (reg->s32_max_value != reg->smax_value && reg->s32_max_value != S32_MAX) {
629 verbose(env, ",s32_max_value=%d", (int)(reg->s32_max_value));
631 if (reg->u32_min_value != reg->umin_value && reg->u32_min_value != U32_MIN) {
632 verbose(env, ",u32_min_value=%d", (int)(reg->u32_min_value));
634 if (reg->u32_max_value != reg->umax_value && reg->u32_max_value != U32_MAX) {
635 verbose(env, ",u32_max_value=%d", (int)(reg->u32_max_value));
659 reg = &state->stack[i].spilled_ptr;
660 t = reg->type;
662 if (t == SCALAR_VALUE && reg->precise) {
665 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
666 verbose(env, "%lld", reg->var_off.value + reg->off);
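
The printing helpers above (lines 548-666) render each live register and spilled slot; note how a fully known scalar prints as one concrete value (lines 581-583 and 665-666) rather than a set of bounds. A minimal standalone sketch of that case, with a simplified tnum where mask == 0 means every bit is known (illustrative, not the kernel's code):

    #include <stdio.h>
    #include <stdint.h>

    struct tnum { uint64_t value, mask; }; /* mask bit set => bit unknown */

    static int tnum_is_const(struct tnum t) { return t.mask == 0; }

    int main(void)
    {
        struct tnum var_off = { .value = 42, .mask = 0 };
        int32_t off = 0; /* reg->off should be 0 for SCALAR_VALUE */

        if (tnum_is_const(var_off))
            printf("%lld\n", (long long)(var_off.value + off));
        return 0;
    }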
1005 static void verifier_mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg);
1007 /* This helper doesn't clear reg->id */
1008 static void verifier2_mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1010 reg->var_off = tnum_const(imm);
1011 reg->smin_value = (s64)imm;
1012 reg->smax_value = (s64)imm;
1013 reg->umin_value = imm;
1014 reg->umax_value = imm;
1016 reg->s32_min_value = (s32)imm;
1017 reg->s32_max_value = (s32)imm;
1018 reg->u32_min_value = (u32)imm;
1019 reg->u32_max_value = (u32)imm;
1025 static void verifier_mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1028 memset(((u8 *)reg) + sizeof(reg->type), 0, offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1029 verifier2_mark_reg_known(reg, imm);
1032 static void verifier_mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1034 reg->var_off = tnum_const_subreg(reg->var_off, imm);
1035 reg->s32_min_value = (s32)imm;
1036 reg->s32_max_value = (s32)imm;
1037 reg->u32_min_value = (u32)imm;
1038 reg->u32_max_value = (u32)imm;
1044 static void verifier_mark_reg_known_zero(struct bpf_reg_state *reg)
1046 verifier_mark_reg_known(reg, 0);
1049 static void verifier_mark_reg_const_zero(struct bpf_reg_state *reg)
1051 verifier_mark_reg_known(reg, 0);
1052 reg->type = SCALAR_VALUE;
1068 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1070 return type_is_pkt_pointer(reg->type);
1073 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1075 return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END;
1079 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which)
1085 return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0);
1089 static void verifier_mark_reg_unbounded(struct bpf_reg_state *reg)
1091 reg->smin_value = S64_MIN;
1092 reg->smax_value = S64_MAX;
1093 reg->umin_value = 0;
1094 reg->umax_value = U64_MAX;
1096 reg->s32_min_value = S32_MIN;
1097 reg->s32_max_value = S32_MAX;
1098 reg->u32_min_value = 0;
1099 reg->u32_max_value = U32_MAX;
1102 static void verifier_mark_reg64_unbounded(struct bpf_reg_state *reg)
1104 reg->smin_value = S64_MIN;
1105 reg->smax_value = S64_MAX;
1106 reg->umin_value = 0;
1107 reg->umax_value = U64_MAX;
1110 static void verifier_mark_reg32_unbounded(struct bpf_reg_state *reg)
1112 reg->s32_min_value = S32_MIN;
1113 reg->s32_max_value = S32_MAX;
1114 reg->u32_min_value = 0;
1115 reg->u32_max_value = U32_MAX;
1118 static void verifier_update_reg32_bounds(struct bpf_reg_state *reg)
1120 struct tnum var32_off = tnum_subreg(reg->var_off);
1123 reg->s32_min_value = max_t(s32, reg->s32_min_value, var32_off.value | (var32_off.mask & S32_MIN));
1125 reg->s32_max_value = min_t(s32, reg->s32_max_value, var32_off.value | (var32_off.mask & S32_MAX));
1126 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1127 reg->u32_max_value = min(reg->u32_max_value, (u32)(var32_off.value | var32_off.mask));
1130 static void verifier_update_reg64_bounds(struct bpf_reg_state *reg)
1133 reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN));
1135 reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX));
1136 reg->umin_value = max(reg->umin_value, reg->var_off.value);
1137 reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask);
1140 static void verifier_update_reg_bounds(struct bpf_reg_state *reg)
1142 verifier_update_reg32_bounds(reg);
1143 verifier_update_reg64_bounds(reg);
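
The derivations at lines 1123-1137 follow from the tnum representation: a mask bit set means that bit may be 0 or 1, so the smallest unsigned value clears every unknown bit (just value) and the largest sets them all (value | mask); for signed bounds only the sign bit needs special treatment, forced on for the minimum and off for the maximum. A standalone sketch (simplified struct, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value, mask; };

    int main(void)
    {
        struct tnum v = { .value = 0x10, .mask = 0x0f }; /* covers 0x10..0x1f */
        uint64_t umin = v.value;
        uint64_t umax = v.value | v.mask;
        int64_t  smin = (int64_t)(v.value | (v.mask & (1ULL << 63)));
        int64_t  smax = (int64_t)(v.value | (v.mask & ~(1ULL << 63)));

        printf("umin=%llu umax=%llu smin=%lld smax=%lld\n",
               (unsigned long long)umin, (unsigned long long)umax,
               (long long)smin, (long long)smax);
        return 0; /* prints umin=16 umax=31 smin=16 smax=31 */
    }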
1147 static void verifier_reg32_deduce_bounds(struct bpf_reg_state *reg)
1154 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1155 reg->s32_min_value = reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
1156 reg->s32_max_value = reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
1162 if ((s32)reg->u32_max_value >= 0) {
1166 reg->s32_min_value = reg->u32_min_value;
1167 reg->s32_max_value = reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
1168 } else if ((s32)reg->u32_min_value < 0) {
1172 reg->s32_min_value = reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
1173 reg->s32_max_value = reg->u32_max_value;
1177 static void verifier_reg64_deduce_bounds(struct bpf_reg_state *reg)
1184 if (reg->smin_value >= 0 || reg->smax_value < 0) {
1185 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
1186 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
1192 if ((s64)reg->umax_value >= 0) {
1196 reg->smin_value = reg->umin_value;
1197 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
1198 } else if ((s64)reg->umin_value < 0) {
1202 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
1203 reg->smax_value = reg->umax_value;
1207 static void verifier_reg_deduce_bounds(struct bpf_reg_state *reg)
1209 verifier_reg32_deduce_bounds(reg);
1210 verifier_reg64_deduce_bounds(reg);
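
The deduction at lines 1184-1203 rests on one observation: when a signed range lies entirely on one side of zero, signed and unsigned comparison order the same values identically, so each pair of bounds can tighten the other. A standalone sketch of the non-negative case (field meanings as above, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t  smin = 5, smax = INT64_MAX;
        uint64_t umin = 0, umax = 10;

        if (smin >= 0 || smax < 0) {           /* range does not cross zero */
            uint64_t lo = (uint64_t)smin > umin ? (uint64_t)smin : umin;
            uint64_t hi = (uint64_t)smax < umax ? (uint64_t)smax : umax;
            smin = (int64_t)(umin = lo);
            smax = (int64_t)(umax = hi);
        }
        printf("signed [%lld, %lld], unsigned [%llu, %llu]\n",
               (long long)smin, (long long)smax,
               (unsigned long long)umin, (unsigned long long)umax);
        return 0; /* both ranges collapse to [5, 10] */
    }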
1214 static void verifier_reg_bound_offset(struct bpf_reg_state *reg)
1216 struct tnum var64_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value));
1218 tnum_intersect(tnum_subreg(reg->var_off), tnum_range(reg->u32_min_value, reg->u32_max_value));
1220 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1223 static void reg_bounds_sync(struct bpf_reg_state *reg)
1226 verifier_update_reg_bounds(reg);
1228 verifier_reg_deduce_bounds(reg);
1230 verifier_reg_bound_offset(reg);
1235 verifier_update_reg_bounds(reg);
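
Calling verifier_update_reg_bounds() both before and after the deduce/offset steps (lines 1226 and 1235) appears deliberate: verifier_reg_bound_offset() intersects var_off with the min/max ranges, and the refined var_off can in turn tighten the ranges once more, so a final update pass picks up whatever the intermediate steps learned.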
1242 static void verifier_reg_assign_32_into_64(struct bpf_reg_state *reg)
1244 reg->umin_value = reg->u32_min_value;
1245 reg->umax_value = reg->u32_max_value;
1251 if (verifier_reg32_bound_s64(reg->s32_min_value) && verifier_reg32_bound_s64(reg->s32_max_value)) {
1252 reg->smin_value = reg->s32_min_value;
1253 reg->smax_value = reg->s32_max_value;
1255 reg->smin_value = 0;
1256 reg->smax_value = U32_MAX;
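
A worked example for the fallback at lines 1255-1256: if the 32-bit signed range is [-1, 5], the zero-extended 64-bit values are {0..5, 0xffffffff}, so no s32-derived bound is usable and the safe 64-bit signed range is [0, U32_MAX]. The unsigned bounds (lines 1244-1245) transfer unconditionally because zero extension preserves unsigned order.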
1260 static void verifier_reg_combine_32_into_64(struct bpf_reg_state *reg)
1266 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1267 verifier_reg_assign_32_into_64(reg);
1276 verifier_mark_reg64_unbounded(reg);
1277 verifier_update_reg_bounds(reg);
1284 reg_bounds_sync(reg);
1297 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1299 verifier_mark_reg32_unbounded(reg);
1301 if (verifier_reg64_bound_s32(reg->smin_value) && verifier_reg64_bound_s32(reg->smax_value)) {
1302 reg->s32_min_value = (s32)reg->smin_value;
1303 reg->s32_max_value = (s32)reg->smax_value;
1305 if (verifier_reg64_bound_u32(reg->umin_value) && verifier_reg64_bound_u32(reg->umax_value)) {
1306 reg->u32_min_value = (u32)reg->umin_value;
1307 reg->u32_max_value = (u32)reg->umax_value;
1314 reg_bounds_sync(reg);
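
The verifier_reg64_bound_s32()/verifier_reg64_bound_u32() predicates used at lines 1301 and 1305 are not among the matches; a plausible minimal version (an assumption, modeled on the kernel's __reg64_bound_s32()/__reg64_bound_u32()) just tests whether a 64-bit bound fits the 32-bit type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool reg64_bound_s32(int64_t a)
    {
        return a >= INT32_MIN && a <= INT32_MAX;
    }

    static bool reg64_bound_u32(uint64_t a)
    {
        return a <= UINT32_MAX;
    }

    int main(void)
    {
        printf("%d %d\n", reg64_bound_s32(-5), reg64_bound_s32(1LL << 40));  /* 1 0 */
        printf("%d %d\n", reg64_bound_u32(7), reg64_bound_u32(1ULL << 40)); /* 1 0 */
        return 0;
    }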
1318 static void __mark_reg_unknown(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1324 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1325 reg->type = SCALAR_VALUE;
1326 reg->var_off = tnum_unknown;
1327 reg->frameno = 0;
1328 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
1329 verifier_mark_reg_unbounded(reg);
1345 static void verifier_mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1347 __mark_reg_unknown(env, reg);
1348 reg->type = NOT_INIT;
1590 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, u32 regno, struct bpf_reg_state *reg,
1644 if (reg->type != SCALAR_VALUE) {
1698 static void mark_insn_zext(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1700 s32 def_idx = reg->subreg_def;
1708 reg->subreg_def = DEF_NOT_SUBREG;
1716 struct bpf_reg_state *reg, *regs = state->regs;
1724 reg = &regs[regno];
1725 rw64 = is_reg64(env, insn, regno, reg, t);
1728 if (reg->type == NOT_INIT) {
1738 mark_insn_zext(env, reg);
1741 return mark_reg_read(env, reg, reg->parent, rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
1748 reg->live |= REG_LIVE_WRITTEN;
1749 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
1999 struct bpf_reg_state *reg;
2009 reg = &func->regs[j];
2010 if (reg->type != SCALAR_VALUE) {
2013 reg->precise = true;
2019 reg = &func->stack[j].spilled_ptr;
2020 if (reg->type != SCALAR_VALUE) {
2023 reg->precise = true;
2035 struct bpf_reg_state *reg;
2048 reg = &func->regs[regno];
2049 if (reg->type != SCALAR_VALUE) {
2053 if (!reg->precise) {
2058 reg->precise = true;
2066 reg = &func->stack[spi].spilled_ptr;
2067 if (reg->type != SCALAR_VALUE) {
2071 if (!reg->precise) {
2076 reg->precise = true;
2139 reg = &func->regs[i];
2140 if (reg->type != SCALAR_VALUE) {
2144 if (!reg->precise) {
2147 reg->precise = true;
2175 reg = &func->stack[i].spilled_ptr;
2176 if (reg->type != SCALAR_VALUE) {
2180 if (!reg->precise) {
2183 reg->precise = true;
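
The precise flags set throughout lines 1999-2183 feed state pruning: a scalar marked precise has to match a cached state exactly (or be a strict subset of it) for two states to be considered equivalent, while imprecise scalars compare as equal regardless of their bounds. Marking every scalar precise, as the fallback paths above do, is therefore always safe but prunes fewer states.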
2240 static bool register_is_null(struct bpf_reg_state *reg)
2242 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2245 static bool register_is_const(struct bpf_reg_state *reg)
2247 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2250 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2252 return tnum_is_unknown(reg->var_off) && reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2253 reg->umin_value == 0 && reg->umax_value == U64_MAX && reg->s32_min_value == S32_MIN &&
2254 reg->s32_max_value == S32_MAX && reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2257 static bool register_is_bounded(struct bpf_reg_state *reg)
2259 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2262 static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg)
2268 return reg->type != SCALAR_VALUE;
2271 static void save_register_state(struct bpf_func_state *state, int spi, struct bpf_reg_state *reg)
2275 state->stack[spi].spilled_ptr = *reg;
2293 struct bpf_reg_state *reg = NULL;
2309 reg = &cur->regs[value_regno];
2312 bool sanitize = reg && is_spillable_regtype(reg->type);
2326 if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) {
2332 * that contributed into 'reg' being a constant.
2339 save_register_state(state, spi, reg);
2340 } else if (reg && is_spillable_regtype(reg->type)) {
2347 if (state != cur && reg->type == PTR_TO_STACK) {
2351 save_register_state(state, spi, reg);
2377 if (reg && register_is_null(reg)) {
2501 * so is the destination reg. Otherwise, the register is considered to be
2549 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
2550 * spilled reg.
2564 struct bpf_reg_state *reg;
2568 reg = &reg_state->stack[spi].spilled_ptr;
2572 if (reg->type != SCALAR_VALUE) {
2581 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2593 state->regs[dst_regno] = *reg;
2594 /* mark reg as written since spilled pointer state likely
2596 * which resets stack/reg liveness for state transitions
2599 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
2609 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2624 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
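
Read together, lines 2271-2624 are the spill/fill machinery: a bounded scalar or spillable pointer written with a full 8-byte (BPF_REG_SIZE) store keeps its whole verifier state in the stack slot via save_register_state() (lines 2339 and 2351) and is restored verbatim on a fill (line 2593), while narrower stores fall through to the generic path and the slot's tracked state is lost.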
2650 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
2662 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
2663 struct bpf_func_state *ptr_state = func(env, reg);
2674 min_off = reg->smin_value + off;
2675 max_off = reg->smax_value + off;
2691 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
2692 struct bpf_func_state *state = func(env, reg);
2695 bool var_off = !tnum_is_const(reg->var_off);
2703 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2715 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2721 off += reg->var_off.value;
2746 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
2747 struct bpf_func_state *state = func(env, reg);
2750 if (tnum_is_const(reg->var_off)) {
2751 off += reg->var_off.value;
2784 struct bpf_reg_state *reg;
2790 reg = &cur_regs(env)[regno];
2791 switch (reg->type) {
2799 reg->id, off, mem_size);
2815 struct bpf_reg_state *reg = &state->regs[regno];
2832 if (reg->smin_value < 0 &&
2833 (reg->smin_value == S64_MIN || (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2834 reg->smin_value + off < 0)) {
2838 err = __check_mem_access(env, regno, reg->smin_value + off, size, mem_size, zero_size_allowed);
2846 * If reg->umax_value + off could overflow, treat that as unbounded too.
2848 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
2852 err = __check_mem_access(env, regno, reg->umax_value + off, size, mem_size, zero_size_allowed);
2866 struct bpf_reg_state *reg = &state->regs[regno];
2867 struct bpf_map *map = reg->map_ptr;
2883 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && lock < reg->umax_value + off + size) {
2945 struct bpf_reg_state *reg = &regs[regno];
2949 * reg->range we have comes after that. We are only checking the fixed
2956 if (reg->smin_value < 0) {
2960 err = __check_mem_access(env, regno, off, size, reg->range, zero_size_allowed);
2967 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2970 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2972 env->prog->aux->max_pkt_offset = max_t(u32, env->prog->aux->max_pkt_offset, off + reg->umax_value + size - 1);
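
The no-overflow argument at lines 2967-2970 is simple arithmetic: with reg->umax_value capped at MAX_PACKET_OFF (0xffff) and off and size both small verifier-checked quantities, off + reg->umax_value + size - 1 stays far below U32_MAX, so the u32 max_t() at line 2972 cannot wrap.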
3025 struct bpf_reg_state *reg = &regs[regno];
3029 if (reg->smin_value < 0) {
3034 switch (reg->type) {
3056 verbose(env, "R%d invalid %s access off=%d size=%d\n", regno, reg_type_str(env, reg->type), off, size);
3068 const struct bpf_reg_state *reg = reg_state(env, regno);
3070 return reg->type == PTR_TO_CTX;
3075 const struct bpf_reg_state *reg = reg_state(env, regno);
3077 return type_is_sk_pointer(reg->type);
3082 const struct bpf_reg_state *reg = reg_state(env, regno);
3084 return type_is_pkt_pointer(reg->type);
3089 const struct bpf_reg_state *reg = reg_state(env, regno);
3092 return reg->type == PTR_TO_FLOW_KEYS;
3095 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size,
3116 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
3120 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3121 verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size);
3128 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg,
3138 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
3142 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3143 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size);
3150 static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size,
3156 switch (reg->type) {
3162 return check_pkt_ptr_alignment(env, reg, off, size, strict);
3195 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict);
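
The alignment checks at lines 3095-3143 add every known offset component into one tnum and then test whether the low bits could be nonzero. A standalone sketch of that test (the kernel expresses it with tnum_is_aligned(); simplified struct, illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value, mask; };

    /* aligned only if neither a known-one bit nor a maybe-one bit
     * of the effective offset falls inside (size - 1) */
    static bool tnum_aligned(struct tnum a, uint64_t size)
    {
        if (!size)
            return true;
        return ((a.value | a.mask) & (size - 1)) == 0;
    }

    int main(void)
    {
        struct tnum off = { .value = 8, .mask = 0x30 }; /* 8, 24, 40 or 56 */
        printf("4-byte aligned: %d\n", tnum_aligned(off, 4));   /* 1 */
        printf("16-byte aligned: %d\n", tnum_aligned(off, 16)); /* 0 */
        return 0;
    }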
3349 static int __check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno,
3356 if (!fixed_off_ok && reg->off) {
3357 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", reg_type_str(env, reg->type), regno,
3358 reg->off);
3362 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3365 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3366 verbose(env, "variable %s access var_off=%s disallowed\n", reg_type_str(env, reg->type), tn_buf);
3373 int check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno)
3375 return __check_ptr_off_reg(env, reg, regno, false);
3378 static int __check_buffer_access(struct bpf_verifier_env *env, const char *buf_info, const struct bpf_reg_state *reg,
3385 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3388 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3396 static int check_tp_buffer_access(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, int off,
3401 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
3413 static int check_buffer_access(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, int off,
3418 err = __check_buffer_access(env, buf_info, reg, regno, off, size);
3431 static void zext_32_to_64(struct bpf_reg_state *reg)
3433 reg->var_off = tnum_subreg(reg->var_off);
3434 verifier_reg_assign_32_into_64(reg);
3440 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
3445 reg->var_off = tnum_cast(reg->var_off, size);
3449 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
3450 reg->umin_value &= mask;
3451 reg->umax_value &= mask;
3453 reg->umin_value = 0;
3454 reg->umax_value = mask;
3456 reg->smin_value = reg->umin_value;
3457 reg->smax_value = reg->umax_value;
3466 __reg_combine_64_into_32(reg);
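
coerce_reg_to_size() (lines 3440-3457) handles narrowing casts: after truncating var_off, the old unsigned bounds survive only if they agreed on every discarded high bit; otherwise the truncated value can wrap anywhere in [0, mask]. A standalone sketch of that decision (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t umin = 0x100000005ULL, umax = 0x100000007ULL;
        int size = 4; /* truncate to 4 bytes */
        uint64_t mask = (size == 8) ? ~0ULL : (1ULL << (size * 8)) - 1;

        if ((umin & ~mask) == (umax & ~mask)) {
            umin &= mask;            /* high bits agree: keep low bits */
            umax &= mask;
        } else {
            umin = 0;                /* value may wrap: widen fully */
            umax = mask;
        }
        printf("[%llu, %llu]\n", (unsigned long long)umin,
               (unsigned long long)umax); /* prints [5, 7] */
        return 0;
    }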
3508 struct bpf_reg_state *reg = regs + regno;
3509 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
3518 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3521 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3551 struct bpf_reg_state *reg = regs + regno;
3552 struct bpf_map *map = reg->map_ptr;
3629 struct bpf_reg_state *reg = regs + regno;
3630 struct bpf_func_state *state = func(env, reg);
3644 if (tnum_is_const(reg->var_off)) {
3645 min_off = reg->var_off.value + off;
3652 if (reg->smax_value >= BPF_MAX_VAR_OFF || reg->smin_value <= -BPF_MAX_VAR_OFF) {
3656 min_off = reg->smin_value + off;
3658 max_off = reg->smax_value + off + access_size - 1;
3670 if (tnum_is_const(reg->var_off)) {
3675 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3693 struct bpf_reg_state *reg = regs + regno;
3702 /* alignment checks will add in reg->off themselves */
3703 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
3708 /* for access checks, reg->off is just part of off */
3709 off += reg->off;
3711 if (reg->type == PTR_TO_MAP_VALUE) {
3722 struct bpf_map *map = reg->map_ptr;
3725 if (tnum_is_const(reg->var_off) && bpf_map_is_rdonly(map) && map->ops->map_direct_value_addr) {
3726 int map_off = off + reg->var_off.value;
3740 } else if (base_type(reg->type) == PTR_TO_MEM) {
3741 bool rdonly_mem = type_is_rdonly_mem(reg->type);
3743 if (type_may_be_null(reg->type)) {
3744 verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str(env, reg->type));
3749 verbose(env, "R%d cannot write into %s\n", regno, reg_type_str(env, reg->type));
3758 err = check_mem_region_access(env, regno, off, size, reg->mem_size, false);
3762 } else if (reg->type == PTR_TO_CTX) {
3771 err = check_ptr_off_reg(env, reg, regno);
3804 } else if (reg->type == PTR_TO_STACK) {
3810 state = func(env, reg);
3821 } else if (reg_is_pkt_pointer(reg)) {
3834 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3844 } else if (type_is_sk_pointer(reg->type)) {
3846 verbose(env, "R%d cannot write into %s\n", regno, reg_type_str(env, reg->type));
3853 } else if (reg->type == PTR_TO_TP_BUFFER) {
3854 err = check_tp_buffer_access(env, reg, regno, off, size);
3858 } else if (reg->type == PTR_TO_BTF_ID) {
3860 } else if (reg->type == CONST_PTR_TO_MAP) {
3862 } else if (base_type(reg->type) == PTR_TO_BUF) {
3863 bool rdonly_mem = type_is_rdonly_mem(reg->type);
3869 verbose(env, "R%d cannot write into %s\n", regno, reg_type_str(env, reg->type));
3879 err = check_buffer_access(env, reg, regno, off, size, false, buf_info, max_access);
3884 verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str(env, reg->type));
3951 struct bpf_reg_state *reg = reg_state(env, regno);
3952 struct bpf_func_state *state = func(env, reg);
3981 if (tnum_is_const(reg->var_off)) {
3982 min_off = max_off = reg->var_off.value + off;
3992 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4007 min_off = reg->smin_value + off;
4008 max_off = reg->smax_value + off;
4053 if (tnum_is_const(reg->var_off)) {
4059 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4076 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4080 switch (base_type(reg->type)) {
4083 return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed);
4085 if (check_map_access_type(env, regno, reg->off, access_size,
4089 return check_map_access(env, regno, reg->off, access_size, zero_size_allowed);
4091 return check_mem_region_access(env, regno, reg->off, access_size, reg->mem_size, zero_size_allowed);
4093 if (type_is_rdonly_mem(reg->type)) {
4104 return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, buf_info, max_access);
4106 return check_stack_range_initialized(env, regno, reg->off, access_size, zero_size_allowed, ACCESS_HELPER,
4110 if (zero_size_allowed && access_size == 0 && register_is_null(reg)) {
4114 verbose(env, "R%d type=%s ", regno, reg_type_str(env, reg->type));
4122 * Two bpf_map_lookups (even with the same key) will have different reg->id.
4123 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
4128 * reg->id > 0 after value_or_null->value transition. By doing so
4141 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4143 bool is_const = tnum_is_const(reg->var_off);
4144 struct bpf_map *map = reg->map_ptr;
4145 u64 val = reg->var_off.value;
4165 if (map->spin_lock_off != val + reg->off) {
4166 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", val + reg->off);
4174 cur->active_spin_lock = reg->id;
4180 if (cur->active_spin_lock != reg->id) {
4343 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4344 enum bpf_reg_type expected, type = reg->type;
4382 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
4390 if (reg->type == PTR_TO_BTF_ID) {
4399 if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id, *arg_btf_id)) {
4400 verbose(env, "R%d is of type %s but %s is expected\n", regno, kernel_type_name(reg->btf_id),
4413 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4415 enum bpf_reg_type type = reg->type;
4447 if (register_is_null(reg) && type_may_be_null(arg_type)) {
4461 /* Pointer types where reg offset is explicitly allowed: */
4481 err = __check_ptr_off_reg(env, reg, regno, type == PTR_TO_BTF_ID);
4489 if (reg->ref_obj_id) {
4492 reg->ref_obj_id, meta->ref_obj_id);
4495 meta->ref_obj_id = reg->ref_obj_id;
4500 meta->map_ptr = reg->map_ptr;
4517 if (type_may_be_null(arg_type) && register_is_null(reg)) {
4532 if (!reg->btf_id) {
4536 meta->ret_btf_id = reg->btf_id;
4566 meta->msize_max_value = reg->umax_value;
4571 if (!tnum_is_const(reg->var_off)) {
4580 if (reg->smin_value < 0) {
4585 if (reg->umin_value == 0) {
4592 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
4596 err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta);
4601 if (!tnum_is_const(reg->var_off)) {
4605 meta->mem_size = reg->var_off.value;
4613 err = check_ptr_alignment(env, reg, 0, size, true);
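
Lines 4566-4596 are why helpers taking a variable size need a bounded register: the memory access is validated against the whole proven range, up to reg->umax_value (line 4596), not the runtime value. Concretely, passing a 64-byte stack buffer with a length register whose range is [1, 128] fails verification even if the value is always small at runtime, which is why BPF programs conventionally mask or compare the length immediately before the call.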
4978 struct bpf_reg_state *regs = state->regs, *reg;
4987 bpf_for_each_spilled_reg(i, state, reg)
4989 if (!reg) {
4992 if (reg_is_pkt_pointer_any(reg)) {
4993 __mark_reg_unknown(env, reg);
5010 struct bpf_reg_state *regs = state->regs, *reg;
5019 bpf_for_each_spilled_reg(i, state, reg)
5021 if (!reg) {
5024 if (reg->ref_obj_id == ref_obj_id) {
5025 __mark_reg_unknown(env, reg);
5260 struct bpf_reg_state *regs = cur_regs(env), *reg;
5273 reg = &regs[BPF_REG_3];
5274 val = reg->var_off.value;
5277 if (!(register_is_const(reg) && val < max)) {
5598 static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type)
5600 bool known = tnum_is_const(reg->var_off);
5601 s64 val = reg->var_off.value;
5602 s64 smin = reg->smin_value;
5609 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
5610 verbose(env, "%s pointer offset %d is not allowed\n", reg_type_str(env, type), reg->off);
5812 * and truncated reg-based in the other in order to explore
5871 /* check that stack access falls within stack limits and that 'reg' doesn't
5879 * 'off' includes 'reg->off'.
5882 const struct bpf_reg_state *reg, int off)
5884 if (!tnum_is_const(reg->var_off)) {
5887 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6102 * nonnegative, then any reg->range we had before is still good.
6781 * the reg unbounded in the subreg bound space and use the resulting
6928 /* Pretend the src is a reg with a known value, since we only
7021 * copy register state to dest reg
7056 * remember the value we stored into this reg
7120 struct bpf_reg_state *reg;
7124 reg = &state->regs[i];
7125 if (reg->type == type && reg->id == dst_reg->id) {
7127 reg->range = max(reg->range, new_range);
7131 bpf_for_each_spilled_reg(i, state, reg)
7133 if (!reg) {
7136 if (reg->type == type && reg->id == dst_reg->id) {
7137 reg->range = max(reg->range, new_range);
7208 * don't care about the other reg's fixed offset, since if it's too big
7217 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
7219 struct tnum subreg = tnum_subreg(reg->var_off);
7242 if (reg->u32_min_value > val) {
7244 } else if (reg->u32_max_value <= val) {
7249 if (reg->s32_min_value > sval) {
7251 } else if (reg->s32_max_value <= sval) {
7256 if (reg->u32_max_value < val) {
7258 } else if (reg->u32_min_value >= val) {
7263 if (reg->s32_max_value < sval) {
7265 } else if (reg->s32_min_value >= sval) {
7270 if (reg->u32_min_value >= val) {
7272 } else if (reg->u32_max_value < val) {
7277 if (reg->s32_min_value >= sval) {
7279 } else if (reg->s32_max_value < sval) {
7284 if (reg->u32_max_value <= val) {
7286 } else if (reg->u32_min_value > val) {
7291 if (reg->s32_max_value <= sval) {
7293 } else if (reg->s32_min_value > sval) {
7302 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
7308 if (tnum_is_const(reg->var_off)) {
7309 return !!tnum_equals_const(reg->var_off, val);
7313 if (tnum_is_const(reg->var_off)) {
7314 return !tnum_equals_const(reg->var_off, val);
7318 if ((~reg->var_off.mask & reg->var_off.value) & val) {
7321 if (!((reg->var_off.mask | reg->var_off.value) & val)) {
7326 if (reg->umin_value > val) {
7328 } else if (reg->umax_value <= val) {
7333 if (reg->smin_value > sval) {
7335 } else if (reg->smax_value <= sval) {
7340 if (reg->umax_value < val) {
7342 } else if (reg->umin_value >= val) {
7347 if (reg->smax_value < sval) {
7349 } else if (reg->smin_value >= sval) {
7354 if (reg->umin_value >= val) {
7356 } else if (reg->umax_value < val) {
7361 if (reg->smin_value >= sval) {
7363 } else if (reg->smax_value < sval) {
7368 if (reg->umax_value <= val) {
7370 } else if (reg->umin_value > val) {
7375 if (reg->smax_value <= sval) {
7377 } else if (reg->smin_value > sval) {
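
The BPF_JSET handling at lines 7318-7321 is a compact tnum trick: a bit known to be one (~mask & value) overlapping val makes "reg & val" nonzero on every path, while if even the optimistic bit pattern (mask | value) misses val the test can never pass. A standalone sketch (simplified struct, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value, mask; };

    /* 1 = always taken, 0 = never taken, -1 = unknown */
    static int jset_taken(struct tnum r, uint64_t val)
    {
        if ((~r.mask & r.value) & val)
            return 1;   /* a known-one bit overlaps val */
        if (!((r.mask | r.value) & val))
            return 0;   /* no possibly-one bit overlaps val */
        return -1;
    }

    int main(void)
    {
        struct tnum r = { .value = 0x4, .mask = 0x3 }; /* 0b1?? */
        printf("%d %d %d\n", jset_taken(r, 0x4), jset_taken(r, 0x8),
               jset_taken(r, 0x1)); /* prints 1 0 -1 */
        return 0;
    }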
7386 /* compute branch direction of the expression "if (reg opcode val) goto target;"
7390 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
7393 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, bool is_jmp32)
7395 if (__is_pointer_value(false, reg)) {
7396 if (!reg_type_not_null(reg->type)) {
7418 return is_branch32_taken(reg, val, opcode);
7420 return is_branch64_taken(reg, val, opcode);
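
The tri-state result documented at lines 7386-7393 is what lets the verifier prune dead branches: a return of 1 or 0 means only one successor needs to be explored. A minimal standalone re-creation of one comparison case from is_branch64_taken() (lines 7326-7328; simplified bounds struct, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct bounds { uint64_t umin, umax; };

    /* direction of "if (reg > val)": 1 always, 0 never, -1 unknown */
    static int branch_jgt(struct bounds r, uint64_t val)
    {
        if (r.umin > val)
            return 1;
        if (r.umax <= val)
            return 0;
        return -1;
    }

    int main(void)
    {
        struct bounds r = { .umin = 3, .umax = 10 };
        printf("%d %d %d\n", branch_jgt(r, 2), branch_jgt(r, 10),
               branch_jgt(r, 5)); /* prints 1 0 -1 */
        return 0;
    }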
7575 * the variable reg.
7629 static void mark_ptr_or_null_reg(struct bpf_func_state *state, struct bpf_reg_state *reg, u32 id, bool is_null)
7631 if (type_may_be_null(reg->type) && reg->id == id &&
7632 !WARN_ON_ONCE(!reg->id)) {
7633 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
7634 !tnum_equals_const(reg->var_off, 0) || reg->off)) {
7643 reg->type = SCALAR_VALUE;
7644 } else if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
7645 const struct bpf_map *map = reg->map_ptr;
7648 reg->type = CONST_PTR_TO_MAP;
7649 reg->map_ptr = map->inner_map_meta;
7651 reg->type = PTR_TO_XDP_SOCK;
7653 reg->type = PTR_TO_SOCKET;
7655 reg->type = PTR_TO_MAP_VALUE;
7658 reg->type &= ~PTR_MAYBE_NULL;
7666 reg->id = 0;
7667 reg->ref_obj_id = 0;
7668 } else if (!reg_may_point_to_spin_lock(reg)) {
7669 /* For not-NULL ptr, reg->ref_obj_id will be reset
7672 * reg->id is still used by spin_lock ptr. Other
7673 * than spin_lock ptr type, reg->id can be reset.
7675 reg->id = 0;
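
mark_ptr_or_null_reg() (lines 7629-7675) is the mechanism behind the comment at line 136: after a NULL check, the OR_NULL register becomes a known-zero scalar on one edge and a plain pointer on the other. The everyday BPF C pattern that relies on it, as a sketch assuming a libbpf-style build (map name and section are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
    } counts SEC(".maps");

    SEC("socket")
    int prog(void *ctx)
    {
        __u32 key = 0;
        /* returns PTR_TO_MAP_VALUE_OR_NULL */
        __u64 *val = bpf_map_lookup_elem(&counts, &key);

        if (!val)       /* NULL edge: val is now a known-zero scalar */
            return 0;
        (*val)++;       /* non-NULL edge: val is PTR_TO_MAP_VALUE */
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";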
7682 struct bpf_reg_state *reg;
7689 bpf_for_each_spilled_reg(i, state, reg)
7691 if (!reg) {
7694 mark_ptr_or_null_reg(state, reg, id, is_null);
7798 struct bpf_reg_state *reg;
7804 reg = &state->regs[j];
7805 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) {
7806 *reg = *known_reg;
7810 bpf_for_each_spilled_reg(j, state, reg)
7812 if (!reg) {
7815 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) {
7816 *reg = *known_reg;
8164 struct bpf_reg_state *reg;
8192 reg = cur_regs(env) + BPF_REG_0;
8194 if (reg->type != SCALAR_VALUE) {
8196 reg_type_str(env, reg->type));
8257 if (reg->type != SCALAR_VALUE) {
8258 verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str(env, reg->type));
8262 if (!tnum_in(range, reg->var_off)) {
8266 if (!tnum_is_unknown(reg->var_off)) {
8267 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
8277 if (!tnum_is_unknown(enforce_attach_type_range) && tnum_in(enforce_attach_type_range, reg->var_off)) {
8855 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
9269 static int propagate_liveness_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
9273 u8 flag = reg->live & REG_LIVE_READ;
9288 err = mark_reg_read(env, reg, parent_reg, flag);
10733 /* NOTE: arg "reg" (the fourth one) is only used for