Lines matching refs:prog

33 	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
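
This listing cross-references `prog` in what looks like the kernel's x86-64 BPF JIT (arch/x86/net/bpf_jit_comp.c). Line 33 is the heart of the EMIT() macro family: every emitter advances the `prog` cursor through emit_code() and tallies the byte count in a local `cnt`. A minimal sketch of emit_code() consistent with that macro (byte-at-a-time for clarity; the in-tree version batches u16/u32 stores):

    static u8 *emit_code(u8 *prog, u32 bytes, unsigned int len)
    {
        /* append the low 'len' bytes of 'bytes' at the cursor */
        while (len--) {
            *prog++ = bytes;
            bytes >>= 8;
        }
        return prog;
    }
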
236 u8 *prog = *pprog;
247 *pprog = prog;
252 u8 *prog = *pprog;
263 *pprog = prog;
274 u8 *prog = *pprog;
280 memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
281 prog += cnt;
295 *pprog = prog;
300 u8 *prog = *pprog;
310 *pprog = prog;
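
Lines 236-310 all open and close the same way: each emitter takes `u8 **pprog`, snapshots the cursor into a local `prog`, emits bytes, and publishes the advanced cursor back through `*pprog` so the caller sees how far the image grew. A hypothetical helper showing just that shape (emit_nop3 is made up for illustration; the local `cnt` is required by the EMIT() macro on line 33):

    static void emit_nop3(u8 **pprog)
    {
        u8 *prog = *pprog;      /* take the cursor */
        int cnt = 0;            /* tallied by the EMIT() macros */

        EMIT3(0x0F, 0x1F, 0x00);        /* nopl (%rax) */

        *pprog = prog;          /* publish the advanced cursor */
    }
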
331 u8 *prog;
336 prog = old_insn;
338 emit_call(&prog, old_addr, ip) :
339 emit_jump(&prog, old_addr, ip);
346 prog = new_insn;
348 emit_call(&prog, new_addr, ip) :
349 emit_jump(&prog, new_addr, ip);
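
Lines 331-349 belong to the text-poking path: the same call/jump emitters run twice, once into old_insn to reconstruct the bytes expected at `ip` and once into new_insn for the replacement, so the patcher can verify before swapping. Both should bottom out in a rel32 emitter; a sketch assuming the usual 5-byte E8 (call) / E9 (jmp) encodings, with is_simm32() and X86_PATCH_SIZE as in the file:

    static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
    {
        u8 *prog = *pprog;
        int cnt = 0;
        s64 offset;

        /* rel32 is measured from the end of the 5-byte instruction */
        offset = func - (ip + X86_PATCH_SIZE);
        if (!is_simm32(offset))
            return -ERANGE;
        EMIT1_off32(opcode, offset);
        *pprog = prog;
        return 0;
    }
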
386 u8 *prog = *pprog;
394 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
399 *pprog = prog;
404 u8 *prog = *pprog;
408 emit_jump(&prog, &__x86_return_thunk, ip);
415 *pprog = prog;
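
Lines 386-415 are the speculative-execution mitigations: indirect jumps are routed through the matching entry of __x86_indirect_thunk_array, and returns through __x86_return_thunk. A simplified sketch of the jump side, assuming a plain `jmp *%reg` is emitted when retpolines are disabled (the in-tree version also handles the LFENCE variant):

    static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
    {
        u8 *prog = *pprog;
        int cnt = 0;

        if (cpu_feature_enabled(X86_FEATURE_RETPOLINE))
            emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
        else
            EMIT2(0xFF, 0xE0 + reg);    /* jmp *%reg */

        *pprog = prog;
    }
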
426 * prog = array->ptrs[index];
427 * if (prog == NULL)
429 * goto *(prog->bpf_func + prologue_size);
437 u8 *prog = *pprog, *start = *pprog;
454 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
464 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
469 /* prog = array->ptrs[index]; */
474 * if (prog == NULL)
479 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
482 pop_callee_regs(&prog, callee_regs_used);
489 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
497 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
499 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
502 ctx->tail_call_indirect_label = prog - start;
503 *pprog = prog;
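
Lines 426-503 implement the indirect tail call. Putting the comment fragments and the three short-jump fixups together (lines 454, 464, 479, where the `prog + 2` accounts for a 2-byte jcc rel8), the emitted sequence behaves like this pseudo-C (a sketch of the semantics, not the verbatim emitted bytes):

    /*
     * if (index >= array->map.max_entries)
     *         goto out;                    // bounds check
     * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
     *         goto out;                    // recursion limit
     * tail_call_cnt++;
     * prog = array->ptrs[index];
     * if (prog == NULL)
     *         goto out;                    // empty slot
     * pop_callee_regs();                   // unwind our frame (line 482)
     * goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET);  // skip target's prologue
     */

All three `goto out` branches target tail_call_indirect_label, which is recorded once the fall-through code ends (line 502).
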
512 u8 *prog = *pprog, *start = *pprog;
522 offset = ctx->tail_call_direct_label - (prog + 2 - start);
527 poke->tailcall_bypass = ip + (prog - start);
532 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
535 pop_callee_regs(&prog, callee_regs_used);
540 memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
541 prog += X86_PATCH_SIZE;
544 ctx->tail_call_direct_label = prog - start;
546 *pprog = prog;
549 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
556 for (i = 0; i < prog->aux->size_poke_tab; i++) {
557 poke = &prog->aux->poke_tab[i];
558 if (poke->aux && poke->aux != prog->aux)
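
Lines 512-546 are the direct variant: when the verifier pins the map slot at JIT time, the jump can be patched in place. The emitter leaves an X86_PATCH_SIZE atomic-NOP pad (lines 540-541) plus a bypass jump (line 527), both recorded in a poke descriptor; bpf_tail_call_direct_fixup() (lines 549-558) rewrites them once the image is final. A sketch of that fixup loop, assuming bpf_arch_text_poke() as the patching primitive, with locking and the bypass handling elided:

    for (i = 0; i < prog->aux->size_poke_tab; i++) {
        poke = &prog->aux->poke_tab[i];
        /* skip descriptors owned by a different (sub)program */
        if (poke->aux && poke->aux != prog->aux)
            continue;

        target = array->ptrs[poke->tail_call.key];
        if (target) {
            /* turn the NOP pad into 'jmp target + prologue offset' */
            ret = bpf_arch_text_poke(poke->tailcall_target,
                                     BPF_MOD_JUMP, NULL,
                                     (u8 *)target->bpf_func +
                                     X86_TAIL_CALL_OFFSET);
            WARN_ON_ONCE(ret < 0);
        }
    }
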
598 u8 *prog = *pprog;
633 *pprog = prog;
639 u8 *prog = *pprog;
649 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
657 *pprog = prog;
662 u8 *prog = *pprog;
675 *pprog = prog;
681 u8 *prog = *pprog;
714 *pprog = prog;
720 u8 *prog = *pprog;
752 *pprog = prog;
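
Lines 598-752 are the move/immediate emitters. Line 649 shows the 64-bit loader delegating to the 32-bit one when the value zero-extends, dodging the 10-byte movabs. A sketch under that reading (is_uimm32(), add_1mod(), reg2hex[] as in the file):

    static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
                               const u32 imm32_hi, const u32 imm32_lo)
    {
        u8 *prog = *pprog;
        int cnt = 0;

        if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
            /* high half is zero: 'mov %e_x, imm32' zero-extends */
            emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
        } else {
            /* movabs $imm64, %reg */
            EMIT2(add_1mod(0x48, dst_reg), 0xB8 + reg2hex[dst_reg]);
            EMIT(imm32_lo, 4);
            EMIT(imm32_hi, 4);
        }

        *pprog = prog;
    }
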
798 u8 *prog = temp;
803 /* tail call's presence in current prog implies it is reachable */
806 emit_prologue(&prog, bpf_prog->aux->stack_depth,
809 push_callee_regs(&prog, callee_regs_used);
810 addrs[0] = prog - temp;
850 emit_mov_reg(&prog,
917 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
922 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
992 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
994 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1176 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1188 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1229 ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1257 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1260 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1268 &prog, image + addrs[i - 1],
1273 emit_bpf_tail_call_indirect(&prog,
1460 pop_callee_regs(&prog, callee_regs_used);
1462 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1476 ilen = prog - temp;
1500 prog = temp;
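
Lines 798-1500 are do_jit()'s main loop. Each BPF instruction is emitted into the on-stack `temp` buffer first (line 798), measured as `prog - temp` (line 1476), and only then copied into the real image; line 1500 resets the cursor for the next instruction. The per-instruction tail looks roughly like this (a sketch; `addrs[]` records cumulative offsets used for jump targets):

    ilen = prog - temp;
    if (image) {
        /* the image was sized by a previous pass; growing is a bug */
        if (unlikely(proglen + ilen > oldproglen))
            return -EFAULT;
        memcpy(image + proglen, temp, ilen);
    }
    proglen += ilen;
    addrs[i] = proglen;     /* offset just past this insn */
    prog = temp;            /* reuse the temp buffer */
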
1510 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1520 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1526 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1537 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
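
Lines 1510-1537: on entry the trampoline spills the traced function's argument registers to its stack frame (save_regs) and reloads them before calling the original function (restore_regs), so attached programs see a stable args array. The save side, reconstructed from lines 1510 and 1520 (the sixth argument lives in r9, outside the BPF_REG_1..5 mapping of rdi, rsi, rdx, rcx, r8):

    static void save_regs(const struct btf_func_model *m, u8 **prog,
                          int nr_args, int stack_size)
    {
        int i;

        /* mov QWORD PTR [rbp - stack_size + i*8], arg_reg[i] */
        for (i = 0; i < min(nr_args, 6); i++)
            emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
                     BPF_REG_FP,
                     i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
                     -(stack_size - i * 8));
    }
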
1546 u8 *prog = *pprog;
1550 if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
1553 if (emit_call(&prog, __bpf_prog_enter, prog))
1555 /* remember prog start time returned by __bpf_prog_enter */
1556 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1563 emit_mov_imm64(&prog, BPF_REG_2,
1567 if (emit_call(&prog, p->bpf_func, prog))
1576 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1579 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1582 if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
1586 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
1589 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1590 if (emit_call(&prog, __bpf_prog_exit, prog))
1594 *pprog = prog;
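
Lines 1546-1594 bracket each attached program between the accounting helpers: __bpf_prog_enter (or its sleepable variant) returns a start timestamp, which is parked in rbx/BPF_REG_6 (line 1556) and handed to __bpf_prog_exit as the second argument (line 1589) for runtime stats. In outline (simplified; the in-tree code also skips execution when the enter helper returns 0, and passes p->insnsi for the interpreter case):

    /*
     * rdi = p; call __bpf_prog_enter[_sleepable]   // returns start time
     * rbx = rax                                    // park start time (line 1556)
     * rdi = &ctx on the trampoline stack
     * call p->bpf_func
     * if (save_ret) [rbp - 8] = rax                // expose retval (line 1579)
     * rdi = p; rsi = rbx                           // (lines 1586-1589)
     * call __bpf_prog_exit[_sleepable]
     */
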
1601 u8 *prog = *pprog;
1615 *pprog = prog;
1620 u8 *target, *prog = *pprog;
1622 target = PTR_ALIGN(prog, align);
1623 if (target != prog)
1624 emit_nops(&prog, target - prog);
1626 *pprog = prog;
1631 u8 *prog = *pprog;
1641 *pprog = prog;
1650 u8 *prog = *pprog;
1653 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
1657 *pprog = prog;
1665 u8 *prog = *pprog;
1671 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1672 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1674 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1677 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1689 branches[i] = prog;
1690 emit_nops(&prog, 4 + 2);
1693 *pprog = prog;
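
Lines 1665-1693 handle fmod_ret programs: each return value lands in [rbp - 8] and is tested, but the branch target (the start of the fexit path) is not known yet, so line 1690 emits a 6-byte placeholder (2-byte near-jcc opcode + rel32) and line 1689 remembers its address. The placeholders become real branches later, once the fall-through path is finished and aligned (lines 1863-1868). A sketch of that patch step, assuming X86_JNE as the condition:

    /* after the fall-through code, at the future do_fexit label: */
    emit_align(&prog, 16);
    for (i = 0; i < fmod_ret->nr_progs; i++)
        emit_cond_near_jump(&branches[i], prog, branches[i],
                            X86_JNE);   /* retval != 0 -> skip orig_call */
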
1727 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1730 * mov rsi, rbx // prog start time
1752 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1753 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
1755 * mov rsi, rbx // prog start time
1763 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1764 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
1766 * mov rsi, rbx // prog start time
1785 u8 *prog;
1795 /* room for return value of orig_call or fentry prog */
1806 prog = image;
1813 save_regs(m, &prog, nr_args, stack_size);
1817 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
1818 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
1825 if (invoke_bpf(m, &prog, fentry, stack_size,
1835 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
1843 restore_regs(m, &prog, nr_args, stack_size);
1846 if (emit_call(&prog, orig_call, prog)) {
1850 /* remember return value in a stack for bpf prog to access */
1851 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1852 im->ip_after_call = prog;
1853 memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
1854 prog += X86_PATCH_SIZE;
1863 emit_align(&prog, 16);
1868 emit_cond_near_jump(&branches[i], prog, branches[i],
1873 if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
1879 restore_regs(m, &prog, nr_args, stack_size);
1886 im->ip_epilogue = prog;
1888 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
1889 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
1894 /* restore return value of orig_call or fentry prog back into RAX */
1896 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
1903 emit_return(&prog, prog);
1905 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
1909 ret = prog - (u8 *)image;
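
Lines 1785-1909 assemble the whole trampoline from the pieces above; the generated code matches the worked examples in the comment block at lines 1727-1766. Roughly, as a stage-order sketch:

    /*
     * prologue; save_regs()                  // spill args (line 1813)
     * rdi = im; call __bpf_tramp_enter       // pin the image (lines 1817-1818)
     * invoke_bpf(fentry progs)               // (line 1825)
     * invoke_bpf_mod_ret(fmod_ret progs)     // may divert to fexit (line 1835)
     * restore_regs(); call orig_call         // if BPF_TRAMP_F_CALL_ORIG
     * [rbp - 8] = rax; 5-byte NOP pad        // ip_after_call patch site
     * invoke_bpf(fexit progs)                // (line 1873)
     * restore_regs()                         // (line 1879)
     * rdi = im; call __bpf_tramp_exit        // ip_epilogue (lines 1886-1889)
     * rax = [rbp - 8]; epilogue; ret         // (lines 1896, 1903)
     */
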
1918 u8 *jg_reloc, *prog = *pprog;
1931 err = emit_cond_near_jump(&prog, /* je func */
1932 (void *)progs[a], prog,
1937 emit_indirect_jump(&prog, 2 /* rdx */, prog);
1939 *pprog = prog;
1959 jg_reloc = prog;
1961 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
1971 emit_align(&prog, 16);
1972 jg_offset = prog - jg_reloc;
1975 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
1980 *pprog = prog;
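
Lines 1918-1980 build the BPF dispatcher as a balanced binary search over the sorted target addresses, with the live target in rdx. Leaves emit one `je func` per candidate and fall back to a retpoline-safe indirect jump (line 1937); interior nodes compare against a pivot, emit a `jg` whose displacement is patched after the lower half is laid down, and align the upper half to 16 bytes (lines 1959-1975). The recursion shape in pseudo-C (a sketch, not the exact emitter):

    /* emit_bpf_dispatcher(&prog, a, b, progs) */
    if (a == b) {
        /* cmp rdx, progs[a]; je progs[a]; then the miss path:
         * retpoline-safe jmp *%rdx via emit_indirect_jump() */
        return;
    }
    pivot = (b - a) / 2;
    /* cmp rdx, progs[a + pivot] */
    jg_reloc = prog;                /* jg <patched below> */
    emit_bpf_dispatcher(&prog, a, a + pivot, progs);      /* lower half */
    emit_align(&prog, 16);
    /* patch jg displacement: jg_offset = prog - jg_reloc */
    emit_bpf_dispatcher(&prog, a + pivot + 1, b, progs);  /* upper half */
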
1998 u8 *prog = image;
2001 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2012 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2015 struct bpf_prog *tmp, *orig_prog = prog;
2026 if (!prog->jit_requested)
2029 tmp = bpf_jit_blind_constants(prog);
2036 if (tmp != prog) {
2038 prog = tmp;
2041 jit_data = prog->aux->jit_data;
2045 prog = orig_prog;
2048 prog->aux->jit_data = jit_data;
2059 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2061 prog = orig_prog;
2069 for (proglen = 0, i = 0; i <= prog->len; i++) {
2083 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
2089 prog = orig_prog;
2108 u32 extable_size = prog->aux->num_exentries *
2115 prog = orig_prog;
2118 prog->aux->extable = (void *) image + roundup(proglen, align);
2125 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2128 if (!prog->is_func || extra_pass) {
2129 bpf_tail_call_direct_fixup(prog);
2138 prog->bpf_func = (void *)image;
2139 prog->jited = 1;
2140 prog->jited_len = proglen;
2142 prog = orig_prog;
2145 if (!image || !prog->is_func || extra_pass) {
2147 bpf_prog_fill_jited_linfo(prog, addrs + 1);
2151 prog->aux->jit_data = NULL;
2155 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2157 return prog;
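
Lines 2012-2157 drive the whole JIT: constants are blinded first (line 2029), then do_jit() is rerun until the emitted length stops changing; jump offsets can shrink between passes as addrs[] tightens, so only a converged layout is safe to commit. Once lengths match, the image (plus the exception table sized at line 2108) is allocated and one final pass writes the code. A sketch of the fixed-point loop, with error paths reduced to breaks:

    for (pass = 0; pass < 20; pass++) {
        proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
        if (proglen <= 0)
            break;                  /* emission failed: bail out */
        if (image)
            break;                  /* final fill-in pass is done */
        if (proglen == oldproglen) {
            /* layout converged: allocate the image, then run one
             * more pass to actually write the machine code */
            header = bpf_jit_binary_alloc(proglen, &image,
                                          align, jit_fill_hole);
            if (!header)
                break;
        }
        oldproglen = proglen;
        cond_resched();
    }
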