Lines Matching defs:old
1231 /* Intersecting with the old var_off might have improved our bounds
1280 /* Intersecting with the old var_off might have improved our bounds
1310 /* Intersecting with the old var_off might have improved our bounds
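
The three hits above (1231, 1280, 1310) are the same truncated comment: after a register's var_off (its known-bits tnum) has been intersected with the old var_off, the numeric bounds can be re-derived and may tighten. A minimal self-contained sketch of that effect, using a hypothetical toy_tnum type rather than the kernel's struct tnum / struct bpf_reg_state:

#include <stdint.h>
#include <stdio.h>

/* toy (value, mask) pair: mask bits are unknown, the rest equal 'value' */
struct toy_tnum { uint64_t value; uint64_t mask; };

static struct toy_tnum toy_tnum_intersect(struct toy_tnum a, struct toy_tnum b)
{
	uint64_t v = a.value | b.value;
	uint64_t mu = a.mask & b.mask;	/* unknown only where both were unknown */

	return (struct toy_tnum){ .value = v & ~mu, .mask = mu };
}

int main(void)
{
	/* current knowledge: low byte completely unknown, so umax is 0xff */
	struct toy_tnum var_off = { .value = 0, .mask = 0xff };
	uint64_t umax = 0xff;
	/* the old var_off additionally knew the top nibble of that byte is 0 */
	struct toy_tnum old_var_off = { .value = 0, .mask = 0x0f };

	var_off = toy_tnum_intersect(var_off, old_var_off);
	/* re-derive the unsigned max from the improved var_off: 0xff -> 0x0f */
	if ((var_off.value | var_off.mask) < umax)
		umax = var_off.value | var_off.mask;

	printf("umax = 0x%llx\n", (unsigned long long)umax);
	return 0;
}
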
4973 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
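
Line 4973 belongs to the invalidation path after a helper call that may move or reallocate packet data: every register still typed as a packet pointer (and the bounds proven against it) is stale and must be forgotten. A rough sketch of that rule with a hypothetical reduced register model; in the kernel the walk covers every frame and every spilled register and demotes them with mark_reg_unknown():

#include <stdbool.h>
#include <stddef.h>

/* hypothetical reduced register model, not the kernel's bpf_reg_state */
enum toy_reg_type {
	TOY_SCALAR,
	TOY_PTR_TO_PACKET,
	TOY_PTR_TO_PACKET_META,
	TOY_PTR_TO_PACKET_END,
};

struct toy_reg { enum toy_reg_type type; int range; };

static bool toy_is_pkt_pointer_any(const struct toy_reg *reg)
{
	return reg->type == TOY_PTR_TO_PACKET ||
	       reg->type == TOY_PTR_TO_PACKET_META ||
	       reg->type == TOY_PTR_TO_PACKET_END;
}

/* packet data may have moved: demote every derived packet pointer, and the
 * range proven against it, to an unknown scalar */
static void toy_clear_pkt_pointers(struct toy_reg *regs, size_t nregs)
{
	for (size_t i = 0; i < nregs; i++) {
		if (toy_is_pkt_pointer_any(&regs[i])) {
			regs[i].type = TOY_SCALAR;
			regs[i].range = 0;
		}
	}
}
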
8843 /* check %cur's range satisfies %old's */
8844 static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur)
8846 return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value &&
8847 old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value &&
8848 old->u32_min_value <= cur->u32_min_value && old->u32_max_value >= cur->u32_max_value &&
8849 old->s32_min_value <= cur->s32_min_value && old->s32_max_value >= cur->s32_max_value;
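
Lines 8843-8849 are the complete subsumption check used during pruning: the already verified old range must contain the current range on every axis (u64, s64, u32, s32), otherwise the current path could reach values the old walk never proved safe. A standalone illustration with a hypothetical reduced struct covering only the 64-bit axes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_range { uint64_t umin, umax; int64_t smin, smax; };

/* old subsumes cur on both the unsigned and the signed axis */
static bool toy_range_within(const struct toy_range *old,
			     const struct toy_range *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

int main(void)
{
	struct toy_range old      = { .umin = 0,  .umax = 100, .smin = 0,  .smax = 100 };
	struct toy_range narrower = { .umin = 10, .umax = 50,  .smin = 10, .smax = 50 };
	struct toy_range wider    = { .umin = 0,  .umax = 200, .smin = 0,  .smax = 200 };

	printf("narrower safe: %d\n", toy_range_within(&old, &narrower)); /* 1 */
	printf("wider safe:    %d\n", toy_range_within(&old, &wider));    /* 0 */
	return 0;
}
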
8852 /* If in the old state two registers had the same id, then they need to have
8854 * the old state, so we need to track the mapping from old to new ids.
8855 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
8856 * regs with old id 5 must also have new id 9 for the new state to be safe. But
8857 * regs with a different old id could still have new id 9, we don't care about
8859 * So we look through our idmap to see if this old id has been seen before. If
8867 if (!idmap[i].old) {
8869 idmap[i].old = old_id;
8873 if (idmap[i].old == old_id) {
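
The comment at 8852-8859 and the code at 8867-8873 implement the old-id -> new-id consistency check: the first time an old id is seen it is paired with the current id, and from then on every register carrying that old id must carry the same current id. A self-contained sketch of that lookup, assuming a hypothetical fixed-size toy_id_pair table in place of the verifier's idmap entries:

#include <stdbool.h>
#include <stdint.h>

#define TOY_ID_MAP_SIZE 64

struct toy_id_pair { uint32_t old, cur; };

static bool toy_check_ids(uint32_t old_id, uint32_t cur_id,
			  struct toy_id_pair *idmap)
{
	for (unsigned int i = 0; i < TOY_ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {		/* empty slot: record new mapping */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)	/* seen before: must match */
			return idmap[i].cur == cur_id;
	}
	return false;				/* table full: be conservative */
}
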
9021 /* new val must satisfy old val knowledge */
9036 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
9052 /* If the new min/max/var_off satisfy the old ones and
9067 /* We must have at least as much range as the old ptr
9069 * still safe. This is true even if old range < old off,
9086 /* new val must satisfy old val knowledge */
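
Lines 9021-9086 are the per-register rules in regsafe(): for scalars, every value the current register can hold must already have been possible in the old one; for packet pointers, the current proven range must be at least as large as the old range so earlier accesses stay safe. A compressed sketch of just the scalar case with hypothetical toy fields (the real check is range_within() plus tnum_in(), and also looks at pointer types, ids and precision marks):

#include <stdbool.h>
#include <stdint.h>

/* toy scalar state: value ranges plus (value, mask) known-bits tracking */
struct toy_scalar {
	uint64_t umin, umax;
	int64_t smin, smax;
	uint64_t var_val, var_mask;	/* bits outside var_mask are known */
};

static bool toy_scalar_safe(const struct toy_scalar *old,
			    const struct toy_scalar *cur)
{
	/* old's ranges must contain cur's */
	bool range_ok = old->umin <= cur->umin && old->umax >= cur->umax &&
			old->smin <= cur->smin && old->smax >= cur->smax;
	/* cur may only be unknown where old was unknown, and must agree with
	 * old on every bit old had pinned down */
	bool bits_ok = (cur->var_mask & ~old->var_mask) == 0 &&
		       ((cur->var_val ^ old->var_val) & ~old->var_mask) == 0;

	return range_ok && bits_ok;
}
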
9109 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur,
9118 for (i = 0; i < old->allocated_stack; i++) {
9121 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
9127 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) {
9138 /* if old state was safe with misc data in the stack
9142 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
9146 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) {
9147 /* Ex: old explored (safe) state has STACK_SPILL in
9157 if (old->stack[spi].slot_type[0] != STACK_SPILL) {
9160 if (!regsafe(env, &old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) {
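
Lines 9109-9160 compare the two stacks byte by byte. Sketched below with hypothetical toy tags, the per-byte rule is: bytes the old state never read are ignored, an old STACK_INVALID byte accepts anything, zeroed stack is acceptable where the old state only had misc data, anything else must match exactly, and spill slots additionally push the spilled registers through regsafe() (omitted here):

#include <stdbool.h>

/* toy per-byte stack tags, loosely modeled on the verifier's STACK_* values */
enum toy_slot { TOY_INVALID, TOY_MISC, TOY_ZERO, TOY_SPILL };

static bool toy_stack_byte_safe(enum toy_slot old, enum toy_slot cur,
				bool old_was_read)
{
	if (!old_was_read)		/* nobody read it: contents irrelevant */
		return true;
	if (old == TOY_INVALID)		/* old state made no promise here */
		return true;
	if (old == TOY_MISC && cur == TOY_ZERO)
		return true;		/* zeroed stack is safe where misc data was */
	return old == cur;		/* otherwise the tags must match exactly */
}
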
9177 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
9179 if (old->acquired_refs != cur->acquired_refs) {
9182 return !memcmp(old->refs, cur->refs, sizeof(*old->refs) * old->acquired_refs);
9191 * execution popped from the state stack. If it sees an old state that has
9204 * valid slots than old one that already passed validation, it means
9211 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur)
9217 if (!regsafe(env, &old->regs[i], &cur->regs[i], env->idmap_scratch)) {
9222 if (!stacksafe(env, old, cur, env->idmap_scratch)) {
9226 if (!refsafe(old, cur)) {
9233 static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
9237 if (old->curframe != cur->curframe) {
9244 if (old->speculative && !cur->speculative) {
9248 if (old->active_spin_lock != cur->active_spin_lock) {
9255 for (i = 0; i <= old->curframe; i++) {
9256 if (old->frame[i]->callsite != cur->frame[i]->callsite) {
9259 if (!func_states_equal(env, old->frame[i], cur->frame[i])) {
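
Lines 9233-9259 are the whole-state comparison used for pruning: same call depth, a speculative old state must never prune a non-speculative current one, same spin-lock status, and every frame up to curframe has to match on callsite and pass func_states_equal() (which in turn runs regsafe() on all registers, then stacksafe() and refsafe(), lines 9211-9226). A structural sketch with hypothetical toy types, where toy_func_states_equal() merely stands in for those per-frame checks:

#include <stdbool.h>
#include <stddef.h>

#define TOY_MAX_FRAMES 8

struct toy_func_state { long callsite; /* plus regs, stack, refs */ };

struct toy_verifier_state {
	int curframe;
	bool speculative;
	void *active_spin_lock;
	struct toy_func_state *frame[TOY_MAX_FRAMES];
};

/* placeholder for the real per-frame regsafe/stacksafe/refsafe checks */
static bool toy_func_states_equal(const struct toy_func_state *old,
				  const struct toy_func_state *cur)
{
	(void)old;
	(void)cur;
	return true;
}

static bool toy_states_equal(const struct toy_verifier_state *old,
			     const struct toy_verifier_state *cur)
{
	if (old->curframe != cur->curframe)
		return false;
	if (old->speculative && !cur->speculative)
		return false;
	if (old->active_spin_lock != cur->active_spin_lock)
		return false;
	for (int i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!toy_func_states_equal(old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}
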
9348 static int propagate_precision(struct bpf_verifier_env *env, const struct bpf_verifier_state *old)
9354 state = old->frame[old->curframe];
9388 static bool states_maybe_looping(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
9393 if (old->curframe != fr) {
9397 fold = old->frame[fr];
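
Lines 9388-9397 belong to a cheap heuristic run before the full equivalence check: if the current state sits in the same frame as an already explored state and its registers are byte-for-byte identical, the program may be going around a loop instead of making progress. A sketch of that comparison with hypothetical toy types (the kernel memcmp()s each bpf_reg_state, excluding its parent/liveness bookkeeping):

#include <stdbool.h>
#include <string.h>

#define TOY_MAX_REG    11
#define TOY_MAX_FRAMES 8

struct toy_reg_state { int type; long long value; };

struct toy_frame { struct toy_reg_state regs[TOY_MAX_REG]; };

struct toy_state { int curframe; struct toy_frame *frame[TOY_MAX_FRAMES]; };

static bool toy_states_maybe_looping(const struct toy_state *old,
				     const struct toy_state *cur)
{
	int fr = cur->curframe;

	if (old->curframe != fr)
		return false;
	/* identical registers in the active frame => possibly looping */
	return memcmp(old->frame[fr]->regs, cur->frame[fr]->regs,
		      sizeof(old->frame[fr]->regs)) == 0;
}
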
10342 * original insn at old prog.