Lines matching references to "prog" in the x86-64 BPF JIT compiler (arch/x86/net/bpf_jit_comp.c); each entry is the source line number followed by the matching line.

34 	do { prog = emit_code(prog, bytes, len); } while (0)
260 u8 *prog = *pprog;
270 *pprog = prog;
275 u8 *prog = *pprog;
285 *pprog = prog;
296 u8 *prog = *pprog;
302 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
303 prog += X86_PATCH_SIZE;
321 *pprog = prog;
326 u8 *prog = *pprog;
335 *pprog = prog;
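
The bracketing visible above, "u8 *prog = *pprog;" on entry and "*pprog = prog;" on exit, is the cursor convention every emit_* helper in this file follows: the caller passes a pointer to its write cursor, the helper appends machine-code bytes (typically through the EMIT() macros built on emit_code(), line 34) and advances its local copy, then stores the advanced cursor back. A minimal sketch of a helper written in that style; the function name and the byte it emits are illustrative, not taken from the file:

/* Minimal sketch of the emit-helper convention used throughout this
 * file; the function name and the opcode below are illustrative only.
 */
static void emit_example_nop(u8 **pprog)
{
	u8 *prog = *pprog;	/* local copy of the caller's write cursor */

	*prog++ = 0x90;		/* append one byte: x86 NOP */

	*pprog = prog;		/* hand the advanced cursor back */
}
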
362 u8 *prog;
367 prog = old_insn;
369 emit_call(&prog, old_addr, ip) :
370 emit_jump(&prog, old_addr, ip);
377 prog = new_insn;
379 emit_call(&prog, new_addr, ip) :
380 emit_jump(&prog, new_addr, ip);
421 u8 *prog = *pprog;
429 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
431 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
438 *pprog = prog;
443 u8 *prog = *pprog;
446 emit_jump(&prog, x86_return_thunk, ip);
453 *pprog = prog;
464 * prog = array->ptrs[index];
465 * if (prog == NULL)
467 * goto *(prog->bpf_func + prologue_size);
475 u8 *prog = *pprog, *start = *pprog;
492 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
502 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
507 /* prog = array->ptrs[index]; */
512 * if (prog == NULL)
517 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
520 pop_callee_regs(&prog, callee_regs_used);
527 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
535 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
537 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
540 ctx->tail_call_indirect_label = prog - start;
541 *pprog = prog;
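
The comment excerpted at lines 464-467 describes the dispatch that emit_bpf_tail_call_indirect() open-codes. Below is a C-level model of that run-time logic; the bounds and tail-call-count checks belong to the real emitted sequence but only the prog-related lines appear in this listing, so treat those details as reconstructed assumptions:

/* C-level model of the emitted tail-call dispatch (sketch only);
 * a NULL return means "fall through to the next BPF instruction".
 */
static void *tail_call_dispatch_model(struct bpf_array *array, u32 index,
				      u32 *tail_call_cnt)
{
	struct bpf_prog *prog;

	if (index >= array->map.max_entries)		/* out-of-bounds index */
		return NULL;
	if ((*tail_call_cnt)++ >= MAX_TAIL_CALL_CNT)	/* depth limit */
		return NULL;

	prog = array->ptrs[index];
	if (prog == NULL)				/* empty slot */
		return NULL;

	/* jump past the target program's prologue */
	return (u8 *)prog->bpf_func + X86_TAIL_CALL_OFFSET;
}
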
550 u8 *prog = *pprog, *start = *pprog;
560 offset = ctx->tail_call_direct_label - (prog + 2 - start);
565 poke->tailcall_bypass = ip + (prog - start);
570 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
573 pop_callee_regs(&prog, callee_regs_used);
578 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
579 prog += X86_PATCH_SIZE;
582 ctx->tail_call_direct_label = prog - start;
584 *pprog = prog;
587 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
594 for (i = 0; i < prog->aux->size_poke_tab; i++) {
595 poke = &prog->aux->poke_tab[i];
596 if (poke->aux && poke->aux != prog->aux)
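
bpf_tail_call_direct_fixup() runs once the final image is in place and patches the NOP5 placeholder emitted at lines 578-579 into a direct jump to the now-known tail-call target. A sketch of that pass follows; locking and the tailcall_bypass handling are omitted, and the descriptor field names are reconstructed from memory, so treat them as assumptions:

/* Sketch of the direct tail-call fixup pass (locking and bypass
 * patching omitted; field names are assumptions).
 */
static void tail_call_fixup_model(struct bpf_prog *prog)
{
	int i;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		struct bpf_jit_poke_descriptor *poke = &prog->aux->poke_tab[i];
		struct bpf_array *array;
		struct bpf_prog *target;

		if (poke->aux && poke->aux != prog->aux)
			continue;	/* descriptor owned by another prog */

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		target = array->ptrs[poke->tail_call.key];
		if (target)
			/* turn the NOP5 at tailcall_target into a jump */
			bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP,
					   NULL,
					   (u8 *)target->bpf_func + poke->adj_off);
	}
}
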
627 u8 *prog = *pprog;
661 *pprog = prog;
667 u8 *prog = *pprog;
676 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
684 *pprog = prog;
689 u8 *prog = *pprog;
701 *pprog = prog;
707 u8 *prog = *pprog;
733 *pprog = prog;
739 u8 *prog = *pprog;
753 *pprog = prog;
761 u8 *prog = *pprog;
767 *pprog = prog;
775 u8 *prog = *pprog;
781 *pprog = prog;
787 u8 *prog = *pprog;
810 emit_insn_suffix(&prog, src_reg, dst_reg, off);
811 *pprog = prog;
817 u8 *prog = *pprog;
833 emit_insn_suffix(&prog, src_reg, dst_reg, off);
834 *pprog = prog;
840 u8 *prog = *pprog;
867 emit_insn_suffix(&prog, dst_reg, src_reg, off);
868 *pprog = prog;
874 u8 *prog = *pprog;
878 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
906 emit_insn_suffix(&prog, dst_reg, src_reg, off);
908 *pprog = prog;
943 u8 *prog = *pprog;
957 *pprog = prog;
974 u8 *prog = *pprog;
1004 *pprog = prog;
1010 u8 *prog = *pprog;
1014 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1016 *pprog = prog;
1019 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1037 u8 *prog = temp;
1043 /* tail call's presence in current prog implies it is reachable */
1046 emit_prologue(&prog, bpf_prog->aux->stack_depth,
1049 push_callee_regs(&prog, callee_regs_used);
1051 ilen = prog - temp;
1056 prog = temp;
1082 maybe_emit_mod(&prog, dst_reg, src_reg,
1091 emit_mov_reg(&prog,
1095 emit_movsx_reg(&prog, insn->off,
1103 maybe_emit_1mod(&prog, dst_reg,
1118 maybe_emit_1mod(&prog, dst_reg,
1158 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1163 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1199 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1209 maybe_emit_1mod(&prog, src_reg, is64);
1218 maybe_emit_1mod(&prog, src_reg, is64);
1225 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1229 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1240 maybe_emit_mod(&prog, dst_reg, dst_reg,
1256 maybe_emit_mod(&prog, src_reg, dst_reg,
1270 maybe_emit_1mod(&prog, dst_reg,
1304 emit_shiftx(&prog, dst_reg, src_reg, w, op);
1323 maybe_emit_1mod(&prog, dst_reg,
1437 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1481 maybe_emit_1mod(&prog, src_reg, true);
1486 maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1491 end_of_jmp = prog;
1494 emit_mov_imm32(&prog, false, dst_reg, 0);
1499 start_of_ldx = prog;
1504 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1506 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1514 start_of_ldx[-1] = prog - start_of_ldx;
1524 maybe_emit_1mod(&prog, src_reg, true);
1561 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
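
The fixup word built at line 1561 packs two values: the low byte is the length of the faulting load (how far to advance RIP past it) and the next byte is the pt_regs offset of the destination register. A sketch of how the fault handler consumes it, modeled on the kernel's ex_handler_bpf():

/* Sketch of the exception-table fixup consumer, modeled on
 * ex_handler_bpf(); treat the details as an assumption.
 */
static bool ex_handler_bpf_model(const struct exception_table_entry *x,
				 struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;	/* pt_regs offset of dst_reg */

	/* a faulting BPF_PROBE_MEM load reads as zero ... */
	*(unsigned long *)((void *)regs + reg) = 0;
	/* ... and execution resumes just past the load */
	regs->ip += x->fixup & 0xff;
	return true;
}
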
1581 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1587 branch_target = prog;
1589 emit_ldx(&prog, BPF_SIZE(insn->code),
1595 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1596 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1600 err = emit_atomic(&prog, BPF_CMPXCHG,
1610 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1612 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1614 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
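
Lines 1581-1614 emit a compare-and-exchange retry loop for BPF_FETCH atomics that have no single x86 instruction (and, or, xor with fetch): load the old value, apply the ALU op in a scratch register, and retry the cmpxchg until no other CPU raced with the update. A C model of what the emitted sequence does at run time, sketched under that reading:

/* Model of the emitted cmpxchg retry loop for fetching atomics
 * (sketch; op stands for the BPF ALU operation being emulated).
 */
static u64 atomic_fetch_op_model(u64 *addr, u64 src, u64 (*op)(u64, u64))
{
	u64 old, new;

	do {
		old = READ_ONCE(*addr);	/* the emit_ldx() of the old value */
		new = op(old, src);	/* ALU op on the scratch register */
	} while (cmpxchg(addr, old, new) != old);	/* X86_JNE loops back */

	return old;	/* BPF_FETCH: the old value ends up in src_reg */
}
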
1618 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1633 offs = 7 + x86_call_depth_emit_accounting(&prog, func);
1637 offs = x86_call_depth_emit_accounting(&prog, func);
1639 if (emit_call(&prog, func, image + addrs[i - 1] + offs))
1647 &prog, image + addrs[i - 1],
1652 emit_bpf_tail_call_indirect(&prog,
1681 maybe_emit_mod(&prog, dst_reg, src_reg,
1689 maybe_emit_mod(&prog, dst_reg, src_reg,
1697 maybe_emit_1mod(&prog, dst_reg,
1724 maybe_emit_mod(&prog, dst_reg, dst_reg,
1731 maybe_emit_1mod(&prog, dst_reg,
1807 emit_nops(&prog, nops);
1861 emit_nops(&prog, nops);
1886 emit_nops(&prog, INSN_SZ_DIFF - 2);
1905 pop_callee_regs(&prog, callee_regs_used);
1907 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1921 ilen = prog - temp;
1945 prog = temp;
1960 u8 *prog;
1992 prog = *pprog;
1999 *pprog = prog;
2020 static void save_args(const struct btf_func_model *m, u8 **prog,
2063 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2065 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2085 emit_stx(prog, BPF_DW, BPF_REG_FP,
2094 clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2097 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2113 emit_ldx(prog, BPF_DW,
2133 u8 *prog = *pprog;
2136 struct bpf_prog *p = l->link.prog;
2140 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2149 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2152 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2159 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
2161 /* remember prog start time returned by __bpf_prog_enter */
2162 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2164 /* if (__bpf_prog_enter*(prog) == 0)
2169 jmp_insn = prog;
2170 emit_nops(&prog, 2);
2179 emit_mov_imm64(&prog, BPF_REG_2,
2183 if (emit_rsb_call(&prog, p->bpf_func, prog))
2192 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2195 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2199 jmp_insn[1] = prog - jmp_insn - 2;
2202 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2204 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2210 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
2213 *pprog = prog;
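
invoke_bpf_prog() emits, for each attached program, the enter/call/exit bracket the lines above show: store the BPF cookie into the run context, call the __bpf_prog_enter*() variant returned by bpf_trampoline_enter(), skip the program entirely if that returns 0, otherwise call p->bpf_func and then the matching exit handler. A run-time model of the emitted sequence; helper and field names are reconstructed from memory, so treat them as assumptions:

/* Run-time model of the code invoke_bpf_prog() emits for one
 * fentry/fexit program (sketch; names are assumptions).
 */
static void invoke_bpf_prog_model(struct bpf_tramp_link *l, void *ctx,
				  struct bpf_tramp_run_ctx *run_ctx)
{
	struct bpf_prog *p = l->link.prog;
	u64 start;

	run_ctx->bpf_cookie = l->cookie;	/* the stx at -run_ctx_off */

	start = bpf_trampoline_enter(p)(p, run_ctx);	/* __bpf_prog_enter*() */
	if (start) {				/* 0 means "skip this program" */
		p->bpf_func(ctx, p->insnsi);	/* the JITed or interpreted body */
		bpf_trampoline_exit(p)(p, start, run_ctx);	/* __bpf_prog_exit*() */
	}
}
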
2219 u8 *target, *prog = *pprog;
2221 target = PTR_ALIGN(prog, align);
2222 if (target != prog)
2223 emit_nops(&prog, target - prog);
2225 *pprog = prog;
2230 u8 *prog = *pprog;
2239 *pprog = prog;
2248 u8 *prog = *pprog;
2251 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2255 *pprog = prog;
2263 u8 *prog = *pprog;
2269 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2270 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2272 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
2275 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2287 branches[i] = prog;
2288 emit_nops(&prog, 4 + 2);
2291 *pprog = prog;
2308 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2311 * mov rsi, rbx // prog start time
2333 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2334 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2336 * mov rsi, rbx // prog start time
2344 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2345 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2347 * mov rsi, rbx // prog start time
2367 u8 *prog;
2408 /* room for return value of orig_call or fentry prog */
2454 prog = image;
2461 x86_call_depth_emit_accounting(&prog, NULL);
2473 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
2479 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
2480 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
2487 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2488 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2491 save_args(m, &prog, regs_off, false);
2495 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2496 if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
2503 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2513 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2521 restore_regs(m, &prog, regs_off);
2522 save_args(m, &prog, arg_stack_off, true);
2531 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
2535 if (emit_rsb_call(&prog, orig_call, prog)) {
2540 /* remember return value in a stack for bpf prog to access */
2541 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2542 im->ip_after_call = prog;
2543 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2544 prog += X86_PATCH_SIZE;
2553 emit_align(&prog, 16);
2558 emit_cond_near_jump(&branches[i], prog, branches[i],
2563 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
2569 restore_regs(m, &prog, regs_off);
2576 im->ip_epilogue = prog;
2578 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2579 if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
2589 /* restore return value of orig_call or fentry prog back into RAX */
2591 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2593 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
2598 emit_return(&prog, prog);
2600 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2604 ret = prog - (u8 *)image;
2613 u8 *jg_reloc, *prog = *pprog;
2626 err = emit_cond_near_jump(&prog, /* je func */
2627 (void *)progs[a], image + (prog - buf),
2632 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
2634 *pprog = prog;
2654 jg_reloc = prog;
2656 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
2666 emit_align(&prog, 16);
2667 jg_offset = prog - jg_reloc;
2670 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2675 *pprog = prog;
2693 u8 *prog = buf;
2696 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
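
emit_bpf_dispatcher() unrolls a binary search over the sorted program addresses: compare the target address handed over in rdx against the pivot entry, continue into the lower or upper half, and at a leaf either take the direct je to the matched program or fall through to the indirect (thunked) jump emitted at the end. A C model of that dispatch decision, with illustrative names:

/* Run-time model of the unrolled binary-search dispatch (sketch; in
 * the emitted trampoline the search is straight-line code and the
 * calls below are direct or retpoline-safe indirect jumps).
 */
static unsigned int dispatch_model(void *progs[], int a, int b,
				   unsigned int (*target)(const void *),
				   const void *ctx)
{
	while (a < b) {
		int pivot = a + (b - a) / 2;

		if ((void *)target <= progs[pivot])
			b = pivot;		/* lower_part */
		else
			a = pivot + 1;		/* upper_part (the jg branch) */
	}

	if (progs[a] == (void *)target)		/* the "je func" case */
		return ((unsigned int (*)(const void *))progs[a])(ctx);

	return target(ctx);			/* fallback: indirect jump via rdx */
}
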
2711 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2715 struct bpf_prog *tmp, *orig_prog = prog;
2728 if (!prog->jit_requested)
2731 tmp = bpf_jit_blind_constants(prog);
2738 if (tmp != prog) {
2740 prog = tmp;
2743 jit_data = prog->aux->jit_data;
2747 prog = orig_prog;
2750 prog->aux->jit_data = jit_data;
2764 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2766 prog = orig_prog;
2774 for (proglen = 0, i = 0; i <= prog->len; i++) {
2790 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
2800 prog = orig_prog;
2802 prog->bpf_func = NULL;
2803 prog->jited = 0;
2804 prog->jited_len = 0;
2824 u32 extable_size = prog->aux->num_exentries *
2832 prog = orig_prog;
2835 prog->aux->extable = (void *) image + roundup(proglen, align);
2842 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
2845 if (!prog->is_func || extra_pass) {
2853 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
2859 bpf_tail_call_direct_fixup(prog);
2868 prog->bpf_func = (void *)image;
2869 prog->jited = 1;
2870 prog->jited_len = proglen;
2872 prog = orig_prog;
2875 if (!image || !prog->is_func || extra_pass) {
2877 bpf_prog_fill_jited_linfo(prog, addrs + 1);
2881 prog->aux->jit_data = NULL;
2885 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2887 return prog;
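
bpf_int_jit_compile() first seeds addrs[] with a generous per-instruction size estimate (the loop at line 2774), then calls do_jit() repeatedly with no output image until the generated length stops changing; only after convergence is the image allocated and a final pass emits the code for real. A simplified model of that loop, with allocation, extable sizing and error paths reduced to comments and the pass limit treated as an assumption:

/* Simplified shape of the size-convergence loop inside
 * bpf_int_jit_compile() (sketch only).
 */
for (proglen = 0, i = 0; i <= prog->len; i++) {
	proglen += 64;			/* worst-case bytes per BPF insn */
	addrs[i] = proglen;
}

oldproglen = 0;
for (pass = 0; pass < 20 /* assumed pass limit */; pass++) {
	proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
	if (proglen <= 0)
		break;			/* JIT failed, fall back */
	if (image)
		break;			/* final emitting pass is done */
	if (proglen == oldproglen) {
		/* sizes converged: allocate image and rw_image here,
		 * then loop once more to emit into them
		 */
	}
	oldproglen = proglen;
}
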
2908 void bpf_jit_free(struct bpf_prog *prog)
2910 if (prog->jited) {
2911 struct x64_jit_data *jit_data = prog->aux->jit_data;
2920 bpf_jit_binary_pack_finalize(prog, jit_data->header,
2925 hdr = bpf_jit_binary_pack_hdr(prog);
2927 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
2930 bpf_prog_unlock_free(prog);