Lines Matching refs:prog

1621 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1634 if (!bpf_prog_is_dev_bound(prog->aux)) {
1635 prog->aux->ops = ops;
1637 prog->aux->ops = &bpf_offload_prog_ops;
1639 prog->type = type;
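
The matches at 1621–1639 are find_prog_type(): look up the per-type ops table, substitute bpf_offload_prog_ops for device-bound programs, and stamp prog->type. A minimal, self-contained sketch of the same bounds-checked table dispatch (every name below is illustrative, not the kernel's):

    #include <errno.h>
    #include <stddef.h>

    /* Illustrative stand-ins for the kernel's per-type ops tables. */
    struct prog_ops { int (*test_run)(void); };

    enum prog_type { PROG_TYPE_UNSPEC, PROG_TYPE_FILTER, NR_PROG_TYPES };

    static const struct prog_ops *prog_type_ops[NR_PROG_TYPES];

    static int find_ops(enum prog_type type, const struct prog_ops **ops)
    {
            if ((unsigned int)type >= NR_PROG_TYPES || !prog_type_ops[type])
                    return -EINVAL;   /* unknown or unconfigured type */
            *ops = prog_type_ops[type];
            return 0;
    }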
1654 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1672 audit_log_format(ab, "prog-id=%u op=%s", prog->aux->id, bpf_audit_str[op]);
1699 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1704 ret = __bpf_prog_charge(user, prog->pages);
1710 prog->aux->user = user;
1714 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1716 struct user_struct *user = prog->aux->user;
1718 __bpf_prog_uncharge(user, prog->pages);
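
bpf_prog_charge_memlock()/bpf_prog_uncharge_memlock() (1699–1718) account prog->pages against the loading user's RLIMIT_MEMLOCK; on this kernel generation (before memcg-based accounting) the load fails once the per-user charge would exceed the limit. Hence the loader-side idiom, sketched here:

    #include <sys/resource.h>

    /* Raise RLIMIT_MEMLOCK before BPF_PROG_LOAD so
     * bpf_prog_charge_memlock() does not fail the load. */
    static int bump_memlock_rlimit(void)
    {
            struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

            return setrlimit(RLIMIT_MEMLOCK, &r);
    }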
1722 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1728 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1730 prog->aux->id = id;
1743 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1750 if (!prog->aux->id) {
1760 idr_remove(&prog_idr, prog->aux->id);
1761 prog->aux->id = 0;
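
bpf_prog_alloc_id()/bpf_prog_free_id() (1722–1761) manage program IDs with idr_alloc_cyclic(), which scans forward from the most recently issued ID so a just-freed ID is not immediately reused. A userspace analogue of that cyclic policy, assuming a caller-supplied in_use() predicate:

    #include <limits.h>

    /* Cyclic ID allocator: advance a cursor through [1, INT_MAX] and
     * wrap, skipping IDs still in use. Returns 0 when exhausted. */
    static unsigned int next_id = 1;

    static unsigned int alloc_id_cyclic(int (*in_use)(unsigned int))
    {
            unsigned int start = next_id;

            do {
                    unsigned int id = next_id;

                    next_id = (next_id == INT_MAX) ? 1 : next_id + 1;
                    if (!in_use(id))
                            return id;
            } while (next_id != start);
            return 0;
    }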
1776 bpf_prog_uncharge_memlock(aux->prog);
1778 bpf_prog_free(aux->prog);
1781 static void _bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1783 bpf_prog_kallsyms_del_all(prog);
1784 btf_put(prog->aux->btf);
1785 bpf_prog_free_linfo(prog);
1788 if (prog->aux->sleepable) {
1789 call_rcu_tasks_trace(&prog->aux->rcu, _bpf_prog_put_rcu);
1791 call_rcu(&prog->aux->rcu, _bpf_prog_put_rcu);
1794 _bpf_prog_put_rcu(&prog->aux->rcu);
1798 static void _bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1800 if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1801 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1802 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1804 bpf_prog_free_id(prog, do_idr_lock);
1805 _bpf_prog_put_noref(prog, true);
1809 void bpf_prog_put(struct bpf_prog *prog)
1811 _bpf_prog_put(prog, true);
1817 struct bpf_prog *prog = filp->private_data;
1819 bpf_prog_put(prog);
1823 static void bpf_prog_get_stats(const struct bpf_prog *prog, struct bpf_prog_stats *stats)
1834 st = per_cpu_ptr(prog->aux->stats, cpu);
1850 const struct bpf_prog *prog = filp->private_data;
1851 char prog_tag[sizeof(prog->tag) * 2 + 1] = {};
1854 bpf_prog_get_stats(prog, &stats);
1855 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1864 prog->type, prog->jited, prog_tag, prog->pages * 1ULL << PAGE_SHIFT, prog->aux->id, stats.nsecs,
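
bpf_prog_show_fdinfo() (1850–1864, via bpf_prog_get_stats()) backs /proc/<pid>/fdinfo/<fd> for a program fd: prog_type, prog_jited, prog_tag, memlock and prog_id, plus run stats where enabled, one key/value pair per line. A small reader, assuming the fd is a bpf-prog fd in the current process:

    #include <stdio.h>

    static void dump_prog_fdinfo(int fd)
    {
            char path[64], line[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
            f = fopen(path, "r");
            if (!f)
                    return;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
    }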
1878 int bpf_prog_new_fd(struct bpf_prog *prog)
1882 ret = security_bpf_prog(prog);
1887 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
1903 void bpf_prog_add(struct bpf_prog *prog, int i)
1905 atomic64_add(i, &prog->aux->refcnt);
1909 void bpf_prog_sub(struct bpf_prog *prog, int i)
1916 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1920 void bpf_prog_inc(struct bpf_prog *prog)
1922 atomic64_inc(&prog->aux->refcnt);
1927 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1931 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1936 return prog;
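
The refcount helpers at 1903–1936 are plain atomic64 operations on prog->aux->refcnt; the subtle one is bpf_prog_inc_not_zero(), which must never resurrect a program whose last reference is concurrently being dropped by _bpf_prog_put() above. A C11-atomics analogue of the pattern (userspace sketch, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ref { _Atomic int64_t cnt; };

    static void ref_inc(struct ref *r)
    {
            atomic_fetch_add(&r->cnt, 1);      /* bpf_prog_inc() */
    }

    /* bpf_prog_inc_not_zero(): succeed only while a reference is held. */
    static bool ref_inc_not_zero(struct ref *r)
    {
            int64_t old = atomic_load(&r->cnt);

            while (old != 0)
                    if (atomic_compare_exchange_weak(&r->cnt, &old, old + 1))
                            return true;
            return false;
    }

    /* _bpf_prog_put(): whoever drops the count to zero does the free. */
    static bool ref_dec_and_test(struct ref *r)
    {
            return atomic_fetch_sub(&r->cnt, 1) == 1;
    }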
1940 bool bpf_prog_get_ok(struct bpf_prog *prog, enum bpf_prog_type *attach_type, bool attach_drv)
1947 if (prog->type != *attach_type) {
1950 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) {
1960 struct bpf_prog *prog;
1962 prog = i_bpf_prog_get(f);
1963 if (IS_ERR(prog)) {
1964 return prog;
1966 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1967 prog = ERR_PTR(-EINVAL);
1971 bpf_prog_inc(prog);
1974 return prog;
1997 * prog type requires it but has some attach types that have to be backward
2115 case BPF_PROG_TYPE_EXT: /* extends any prog */
2137 case BPF_PROG_TYPE_EXT: /* extends any prog */
2150 struct bpf_prog *prog;
2198 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2199 if (!prog) {
2203 prog->expected_attach_type = attr->expected_attach_type;
2204 prog->aux->attach_btf_id = attr->attach_btf_id;
2213 prog->aux->dst_prog = dst_prog;
2216 prog->aux->offload_requested = !!attr->prog_ifindex;
2217 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2219 err = security_bpf_prog_alloc(prog->aux);
2224 err = bpf_prog_charge_memlock(prog);
2229 prog->len = attr->insn_cnt;
2232 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), bpf_prog_insn_size(prog)) != 0) {
2236 prog->orig_prog = NULL;
2237 prog->jited = 0;
2239 atomic64_set(&prog->aux->refcnt, 1);
2240 prog->gpl_compatible = is_gpl ? 1 : 0;
2242 if (bpf_prog_is_dev_bound(prog->aux)) {
2243 err = bpf_prog_offload_init(prog, attr);
2250 err = find_prog_type(type, prog);
2255 prog->aux->load_time = ktime_get_boottime_ns();
2256 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, sizeof(attr->prog_name));
2262 err = bpf_check(&prog, attr, uattr);
2267 prog = bpf_prog_select_runtime(prog, &err);
2272 err = bpf_prog_alloc_id(prog);
2277 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2291 bpf_prog_kallsyms_add(prog);
2292 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2293 bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2295 err = bpf_prog_new_fd(prog);
2297 bpf_prog_put(prog);
2306 _bpf_prog_put_noref(prog, prog->aux->func_cnt);
2309 bpf_prog_uncharge_memlock(prog);
2311 security_bpf_prog_free(prog->aux);
2313 bpf_prog_free(prog);
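
The block from 2150 to 2313 is the BPF_PROG_LOAD handler: allocate the prog, copy instructions from userspace, charge memlock, run the verifier (bpf_check()), select a runtime/JIT, allocate an ID, and only then publish an fd. A minimal userspace counterpart drives the same path with a raw bpf(2) call; this sketch loads a two-instruction "return 0" socket filter (the pointer casts assume a 64-bit build):

    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
    {
            return syscall(__NR_bpf, cmd, attr, size);
    }

    int main(void)
    {
            struct bpf_insn insns[2];
            union bpf_attr attr;
            int fd;

            memset(insns, 0, sizeof(insns));
            insns[0].code = BPF_ALU64 | BPF_MOV | BPF_K;  /* r0 = 0 */
            insns[1].code = BPF_JMP | BPF_EXIT;           /* return r0 */

            memset(&attr, 0, sizeof(attr));
            attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
            attr.insn_cnt  = 2;
            attr.insns     = (unsigned long)insns;
            attr.license   = (unsigned long)"GPL"; /* gpl_compatible = 1 */

            fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
            if (fd < 0) {
                    perror("BPF_PROG_LOAD");
                    return 1;
            }
            printf("loaded, fd=%d\n", fd);
            close(fd);
            return 0;
    }

The later sketches below reuse this sys_bpf() wrapper and these includes.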
2338 struct bpf_prog *prog)
2344 link->prog = prog;
2367 primer->link->prog = NULL;
2382 if (link->prog) {
2385 bpf_prog_put(link->prog);
2438 const struct bpf_prog *prog = link->prog;
2439 char prog_tag[sizeof(prog->tag) * 2 + 1] = {};
2441 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2447 bpf_link_type_strs[link->type], link->id, prog_tag, prog->aux->id);
2567 WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog, tr_link->trampoline));
2607 static int bpf_tracing_prog_attach(struct bpf_prog *prog, int tgt_prog_fd, u32 btf_id)
2616 switch (prog->type) {
2618 if (prog->expected_attach_type != BPF_TRACE_FENTRY && prog->expected_attach_type != BPF_TRACE_FEXIT &&
2619 prog->expected_attach_type != BPF_MODIFY_RETURN) {
2625 if (prog->expected_attach_type != 0) {
2631 if (prog->expected_attach_type != BPF_LSM_MAC) {
2648 if (prog->type != BPF_PROG_TYPE_EXT) {
2668 bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, &bpf_tracing_link_lops, prog);
2669 link->attach_type = prog->expected_attach_type;
2671 mutex_lock(&prog->aux->dst_mutex);
2675 * - if prog->aux->dst_trampoline is set, the program was just loaded
2677 * in prog->aux
2679 * - if prog->aux->dst_trampoline is NULL, the program has already been
2686 * raw_tracepoint_open API, and we need a target from prog->aux
2688 * The combination of no saved target in prog->aux, and no target
2691 if (!prog->aux->dst_trampoline && !tgt_prog) {
2696 if (!prog->aux->dst_trampoline || (key && key != prog->aux->dst_trampoline->key)) {
2703 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, &tgt_info);
2719 * prog->aux are cleared below.
2721 tr = prog->aux->dst_trampoline;
2722 tgt_prog = prog->aux->dst_prog;
2730 err = bpf_trampoline_link_prog(prog, tr);
2740 /* Always clear the trampoline and target prog from prog->aux to make
2744 if (prog->aux->dst_prog && (tgt_prog_fd || tr != prog->aux->dst_trampoline)) {
2745 /* got extra prog ref from syscall, or attaching to different prog */
2746 bpf_prog_put(prog->aux->dst_prog);
2748 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) {
2750 bpf_trampoline_put(prog->aux->dst_trampoline);
2753 prog->aux->dst_prog = NULL;
2754 prog->aux->dst_trampoline = NULL;
2755 mutex_unlock(&prog->aux->dst_mutex);
2759 if (tr && tr != prog->aux->dst_trampoline) {
2762 mutex_unlock(&prog->aux->dst_mutex);
2780 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2849 struct bpf_prog *prog;
2858 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2859 if (IS_ERR(prog)) {
2860 return PTR_ERR(prog);
2863 switch (prog->type) {
2874 if (prog->type == BPF_PROG_TYPE_TRACING && prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2875 tp_name = prog->aux->attach_func_name;
2878 err = bpf_tracing_prog_attach(prog, 0, 0);
2908 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, &bpf_raw_tp_link_lops, prog);
2917 err = bpf_probe_register(link->btp, prog);
2928 bpf_prog_put(prog);
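
bpf_raw_tracepoint_open (2849–2928) resolves the tracepoint either from prog->aux->attach_func_name (BPF_TRACE_RAW_TP programs carry their target from load time) or from the user-supplied name, then registers the program through a bpf_link. The userspace side, assuming sys_bpf() from the load sketch and an already-loaded BPF_PROG_TYPE_RAW_TRACEPOINT program:

    /* Attach prog_fd to a named raw tracepoint; returns a link fd.
     * The attachment lasts until the returned fd is closed. */
    static int raw_tp_open(int prog_fd, const char *tp_name)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.raw_tracepoint.name    = (unsigned long)tp_name;
            attr.raw_tracepoint.prog_fd = prog_fd;

            return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
    }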
2932 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, enum bpf_attach_type attach_type)
2934 switch (prog->type) {
2939 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2947 return prog->enforce_expected_attach_type && prog->expected_attach_type != attach_type ? -EINVAL : 0;
3013 struct bpf_prog *prog;
3029 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3030 if (IS_ERR(prog)) {
3031 return PTR_ERR(prog);
3034 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3035 bpf_prog_put(prog);
3042 ret = sock_map_get_from_fd(attr, prog);
3045 ret = lirc_prog_attach(attr, prog);
3048 ret = netns_bpf_prog_attach(attr, prog);
3057 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3064 bpf_prog_put(prog);
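
bpf_prog_attach (3013–3064) is the legacy, non-link attach path: look the program up by fd, check it against the requested attach type, then dispatch to the sockmap, lirc, netns or cgroup code. A cgroup ingress example, assuming sys_bpf() from above, an open fd on a cgroup v2 directory, and a BPF_PROG_TYPE_CGROUP_SKB program:

    static int cgroup_attach_ingress(int cgroup_fd, int prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.target_fd     = cgroup_fd;
            attr.attach_bpf_fd = prog_fd;
            attr.attach_type   = BPF_CGROUP_INET_INGRESS;

            return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
    }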
3155 struct bpf_prog *prog;
3170 prog = bpf_prog_get(attr->test.prog_fd);
3171 if (IS_ERR(prog)) {
3172 return PTR_ERR(prog);
3175 if (prog->aux->ops->test_run) {
3176 ret = prog->aux->ops->test_run(prog, attr, uattr);
3179 bpf_prog_put(prog);
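
bpf_prog_test_run (3155–3179) simply forwards to prog->aux->ops->test_run() when the program type provides one. From userspace, assuming sys_bpf() from above and a type that supports test runs (e.g. a socket filter fed a dummy packet):

    /* Run the program once over a zeroed 64-byte buffer; return the
     * program's return value, or -1 on error. */
    static int prog_test_run(int prog_fd)
    {
            unsigned char data[64] = { 0 };
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.test.prog_fd      = prog_fd;
            attr.test.data_in      = (unsigned long)data;
            attr.test.data_size_in = sizeof(data);
            attr.test.repeat       = 1;

            if (sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
                    return -1;
            return attr.test.retval;
    }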
3237 struct bpf_prog *prog;
3241 prog = idr_get_next(&prog_idr, id);
3242 if (prog) {
3243 prog = bpf_prog_inc_not_zero(prog);
3244 if (IS_ERR(prog)) {
3253 return prog;
3260 struct bpf_prog *prog;
3267 prog = idr_find(&prog_idr, id);
3268 if (prog) {
3269 prog = bpf_prog_inc_not_zero(prog);
3271 prog = ERR_PTR(-ENOENT);
3274 return prog;
3279 struct bpf_prog *prog;
3291 prog = bpf_prog_by_id(id);
3292 if (IS_ERR(prog)) {
3293 return PTR_ERR(prog);
3296 fd = bpf_prog_new_fd(prog);
3298 bpf_prog_put(prog);
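
The functions at 3237–3298 implement ID-based lookup: idr_find()/idr_get_next() under the IDR lock, bpf_prog_inc_not_zero() to guard against a concurrent unload, and bpf_prog_get_fd_by_id() to mint a fresh fd (CAP_SYS_ADMIN required). Enumerating programs the way bpftool does, assuming sys_bpf() and the includes from the load sketch:

    static void list_progs(void)
    {
            union bpf_attr attr;
            __u32 id = 0;
            int fd;

            for (;;) {
                    memset(&attr, 0, sizeof(attr));
                    attr.start_id = id;
                    if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
                            break;          /* ENOENT: no more programs */
                    id = attr.next_id;

                    memset(&attr, 0, sizeof(attr));
                    attr.prog_id = id;
                    fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
                    if (fd < 0)
                            continue;       /* raced with an unload */
                    printf("prog id %u -> fd %d\n", id, fd);
                    close(fd);
            }
    }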
3347 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, unsigned long addr, u32 *off, u32 *type)
3352 mutex_lock(&prog->aux->used_maps_mutex);
3353 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3354 map = prog->aux->used_maps[i];
3370 mutex_unlock(&prog->aux->used_maps_mutex);
3374 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, const struct cred *f_cred)
3383 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), GFP_USER);
3388 for (i = 0; i < prog->len; i++) {
3415 map = bpf_map_from_imm(prog, imm, &off, &type);
3459 static int bpf_prog_get_info_by_fd(struct file *file, struct bpf_prog *prog, const union bpf_attr *attr,
3481 info.type = prog->type;
3482 info.id = prog->aux->id;
3483 info.load_time = prog->aux->load_time;
3484 info.created_by_uid = from_kuid_munged(current_user_ns(), prog->aux->user->uid);
3485 info.gpl_compatible = prog->gpl_compatible;
3487 memcpy(info.tag, prog->tag, sizeof(prog->tag));
3488 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3490 mutex_lock(&prog->aux->used_maps_mutex);
3492 info.nr_map_ids = prog->aux->used_map_cnt;
3499 if (put_user(prog->aux->used_maps[i]->id, &user_map_ids[i])) {
3500 mutex_unlock(&prog->aux->used_maps_mutex);
3505 mutex_unlock(&prog->aux->used_maps_mutex);
3512 bpf_prog_get_stats(prog, &stats);
3528 info.xlated_prog_len = bpf_prog_insn_size(prog);
3533 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3537 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3550 if (bpf_prog_is_dev_bound(prog->aux)) {
3551 err = bpf_prog_offload_info_fill(&info, prog);
3563 if (prog->aux->func_cnt) {
3567 for (i = 0; i < prog->aux->func_cnt; i++) {
3568 info.jited_prog_len += prog->aux->func[i]->jited_len;
3571 info.jited_prog_len = prog->jited_len;
3582 if (prog->aux->func_cnt) {
3587 for (i = 0; i < prog->aux->func_cnt; i++) {
3588 len = prog->aux->func[i]->jited_len;
3590 img = (u8 *)prog->aux->func[i]->bpf_func;
3601 if (copy_to_user(uinsns, prog->bpf_func, ulen)) {
3611 info.nr_jited_ksyms = prog->aux->func_cnt ?: 1;
3623 if (prog->aux->func_cnt) {
3625 ksym_addr = (unsigned long)prog->aux->func[i]->bpf_func;
3631 ksym_addr = (unsigned long)prog->bpf_func;
3642 info.nr_jited_func_lens = prog->aux->func_cnt ?: 1;
3651 if (prog->aux->func_cnt) {
3653 func_len = prog->aux->func[i]->jited_len;
3659 func_len = prog->jited_len;
3669 if (prog->aux->btf) {
3670 info.btf_id = btf_id(prog->aux->btf);
3674 info.nr_func_info = prog->aux->func_info_cnt;
3680 if (copy_to_user(user_finfo, prog->aux->func_info, info.func_info_rec_size * ulen)) {
3686 info.nr_line_info = prog->aux->nr_linfo;
3692 if (copy_to_user(user_linfo, prog->aux->linfo, info.line_info_rec_size * ulen)) {
3698 if (prog->aux->jited_linfo) {
3699 info.nr_jited_line_info = prog->aux->nr_linfo;
3711 if (put_user((__u64)(long)prog->aux->jited_linfo[i], &user_linfo[i])) {
3721 info.nr_prog_tags = prog->aux->func_cnt ?: 1;
3728 if (prog->aux->func_cnt) {
3730 if (copy_to_user(user_prog_tags[i], prog->aux->func[i]->tag, BPF_TAG_SIZE)) {
3735 if (copy_to_user(user_prog_tags[0], prog->tag, BPF_TAG_SIZE)) {
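
bpf_prog_get_info_by_fd (3459–3735) fills struct bpf_prog_info field by field; note the recurring pattern of truncating each variable-length array to the user-supplied count, so old binaries and new kernels interoperate. Fetching the fixed-size part from userspace, assuming sys_bpf() from above:

    static int print_prog_info(int prog_fd)
    {
            struct bpf_prog_info info;
            union bpf_attr attr;

            memset(&info, 0, sizeof(info));
            memset(&attr, 0, sizeof(attr));
            attr.info.bpf_fd   = prog_fd;
            attr.info.info_len = sizeof(info);
            attr.info.info     = (unsigned long)&info;

            if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
                    return -1;
            printf("id=%u type=%u name=%s\n", info.id, info.type,
                   info.name);
            return 0;
    }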
3829 info.prog_id = link->prog->aux->id;
4014 err = bpf_task_fd_query_copy(attr, uattr, raw_tp->link.prog->aux->id, BPF_FD_TYPE_RAW_TRACEPOINT,
4096 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4098 if (attr->link_create.attach_type != prog->expected_attach_type) {
4102 if (prog->expected_attach_type == BPF_TRACE_ITER) {
4103 return bpf_iter_link_attach(attr, prog);
4104 } else if (prog->type == BPF_PROG_TYPE_EXT) {
4105 return bpf_tracing_prog_attach(prog, attr->link_create.target_fd, attr->link_create.target_btf_id);
4114 struct bpf_prog *prog;
4121 prog = bpf_prog_get(attr->link_create.prog_fd);
4122 if (IS_ERR(prog)) {
4123 return PTR_ERR(prog);
4126 ret = bpf_prog_attach_check_attach_type(prog, attr->link_create.attach_type);
4131 if (prog->type == BPF_PROG_TYPE_EXT) {
4132 ret = tracing_bpf_link_attach(attr, prog);
4137 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4150 ret = cgroup_bpf_link_attach(attr, prog);
4153 ret = tracing_bpf_link_attach(attr, prog);
4157 ret = netns_bpf_link_create(attr, prog);
4161 ret = bpf_xdp_link_attach(attr, prog);
4170 bpf_prog_put(prog);
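
link_create (4114–4170) is the link-based successor to BPF_PROG_ATTACH: the attachment is owned by a new link fd, and closing that fd detaches unless the link was pinned in bpffs. The cgroup variant, assuming sys_bpf() from above:

    static int cgroup_link_create(int cgroup_fd, int prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.link_create.prog_fd     = prog_fd;
            attr.link_create.target_fd   = cgroup_fd;
            attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

            return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
    }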
4404 struct bpf_prog *prog;
4417 prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4418 if (IS_ERR(prog)) {
4419 return PTR_ERR(prog);
4428 mutex_lock(&prog->aux->used_maps_mutex);
4430 used_maps_old = prog->aux->used_maps;
4432 for (i = 0; i < prog->aux->used_map_cnt; i++) {
4439 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, sizeof(used_maps_new[0]), GFP_KERNEL);
4445 memcpy(used_maps_new, used_maps_old, sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4446 used_maps_new[prog->aux->used_map_cnt] = map;
4448 prog->aux->used_map_cnt++;
4449 prog->aux->used_maps = used_maps_new;
4454 mutex_unlock(&prog->aux->used_maps_mutex);
4460 bpf_prog_put(prog);
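
bpf_prog_bind_map (4404–4460) appends one map to prog->aux->used_maps[] under used_maps_mutex, tying the map's lifetime to the program even though no instruction references it (useful for metadata maps). The userspace side, assuming sys_bpf() from above:

    static int prog_bind_map(int prog_fd, int map_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.prog_bind_map.prog_fd = prog_fd;
            attr.prog_bind_map.map_fd  = map_fd;

            return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
    }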