Lines Matching defs:obj
75 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
76 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
446 struct bpf_object *obj;
518 struct bpf_object *obj;
592 * 0 for vmlinux BTF, index in obj->fd_array for module
717 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
718 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
720 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
721 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
724 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
726 static Elf64_Shdr *elf_sec_hdr_by_idx(const struct bpf_object *obj, size_t idx, Elf64_Shdr *sheader);
729 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
731 static const char *elf_sec_name_by_idx(const struct bpf_object *obj, size_t idx);
734 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
736 static Elf_Data *elf_sec_data_by_name(const struct bpf_object *obj, const char *name, Elf_Data *data);
737 static Elf_Data *elf_sec_data_by_idx(const struct bpf_object *obj, size_t idx, Elf_Data *data);
739 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
790 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
801 prog->obj = obj;
828 prog->log_level = obj->log_level;
851 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
854 Elf_Data *symbols = obj->efile.symbols;
862 progs = obj->programs;
863 nr_progs = obj->nr_programs;
867 sym = elf_sym_by_idx(obj, i);
877 name = elf_sym_str(obj, sym->st_name);
890 if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
901 * In this case the original obj->programs
909 obj->programs = progs;
913 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
931 obj->nr_programs = nr_progs;
1165 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1171 for (i = 0; i < obj->nr_maps; i++) {
1172 map = &obj->maps[i];
1177 err = bpf_map__init_kern_struct_ops(map, obj->btf,
1178 obj->btf_vmlinux);
1186 static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
1201 btf = obj->btf;
1213 type = btf__type_by_id(obj->btf, vsi->type);
1214 var_name = btf__name_by_offset(obj->btf, type->name_off);
1216 type_id = btf__resolve_type(obj->btf, vsi->type);
1223 type = btf__type_by_id(obj->btf, type_id);
1224 tname = btf__name_by_offset(obj->btf, type->name_off);
1234 map = bpf_object__add_map(obj);
1281 static int bpf_object_init_struct_ops(struct bpf_object *obj)
1285 err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
1286 obj->efile.st_ops_data, 0);
1287 err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
1288 obj->efile.st_ops_link_shndx,
1289 obj->efile.st_ops_link_data,
1299 struct bpf_object *obj;
1302 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1303 if (!obj) {
1308 strcpy(obj->path, path);
1310 libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1313 libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1314 end = strchr(obj->name, '.');
1319 obj->efile.fd = -1;
1326 obj->efile.obj_buf = obj_buf;
1327 obj->efile.obj_buf_sz = obj_buf_sz;
1328 obj->efile.btf_maps_shndx = -1;
1329 obj->efile.st_ops_shndx = -1;
1330 obj->efile.st_ops_link_shndx = -1;
1331 obj->kconfig_map_idx = -1;
1333 obj->kern_version = get_kernel_version();
1334 obj->loaded = false;
1336 return obj;
1339 static void bpf_object__elf_finish(struct bpf_object *obj)
1341 if (!obj->efile.elf)
1344 elf_end(obj->efile.elf);
1346 if (obj->efile.shstring) {
1347 elfio_string_section_accessor_delete(obj->efile.shstring);
1349 if (obj->efile.strstring) {
1350 elfio_string_section_accessor_delete(obj->efile.strstring);
1352 elfio_delete(obj->efile.elf);
1354 obj->efile.elf = NULL;
1355 obj->efile.symbols = NULL;
1356 obj->efile.st_ops_data = NULL;
1357 obj->efile.st_ops_link_data = NULL;
1359 zfree(&obj->efile.secs);
1360 obj->efile.sec_cnt = 0;
1361 zclose(obj->efile.fd);
1362 obj->efile.obj_buf = NULL;
1363 obj->efile.obj_buf_sz = 0;
1366 static int bpf_object__elf_init(struct bpf_object *obj)
1376 if (obj->efile.elf) {
1381 if (obj->efile.obj_buf_sz > 0) {
1384 elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1389 ftruncate(fdm, obj->efile.obj_buf_sz);
1390 write(fdm, (char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1395 obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1396 if (obj->efile.fd < 0) {
1401 pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1405 elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1410 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1415 obj->efile.elf = elf;
1419 pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1428 pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1432 obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1434 obj->efile.ehdr = ehdr = (Elf64_Ehdr*)obj->efile.obj_buf;
1436 if (!obj->efile.ehdr) {
1437 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1443 if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1445 obj->path, elf_errmsg(-1));
1451 if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1453 obj->path, elf_errmsg(-1));
1458 obj->efile.shstrndx = elfio_get_section_name_str_index(elf);
1462 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1469 bpf_object__elf_finish(obj);
1473 static int bpf_object__check_endianness(struct bpf_object *obj)
1476 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
1479 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
1484 pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1489 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1492 pr_warn("invalid license section in %s\n", obj->path);
1498 libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1499 pr_debug("license of %s is %s\n", obj->path, obj->license);
1504 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1509 pr_warn("invalid kver section in %s\n", obj->path);
1513 obj->kern_version = kver;
1514 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1526 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1536 scn = elf_sec_by_name(obj, name);
1537 data = elf_sec_data(obj, scn);
1541 data = elf_sec_data_by_name(obj, name, data);
1551 static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1553 Elf_Data *symbols = obj->efile.symbols;
1558 Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1567 sname = elf_sym_str(obj, sym->st_name);
1579 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1584 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1585 sizeof(*obj->maps), obj->nr_maps + 1);
1589 map = &obj->maps[obj->nr_maps++];
1590 map->obj = obj;
1628 static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1673 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1675 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1687 map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1694 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1703 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1709 vt = btf__type_by_id(obj->btf, vsi->type);
1721 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1729 map = bpf_object__add_map(obj);
1737 map->name = internal_map_name(obj, real_name);
1753 (void) map_fill_btf_type_info(obj, map);
1755 if (map_is_mmapable(obj, map))
1777 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1781 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1788 * Populate obj->maps with libbpf internal maps.
1790 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1791 sec_desc = &obj->efile.secs[sec_idx];
1800 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1802 sec_name = elf_sec_name_by_idx(obj, sec_idx);
1804 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1810 obj->has_rodata = true;
1812 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1814 sec_name = elf_sec_name_by_idx(obj, sec_idx);
1816 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1823 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1825 sec_name = elf_sec_name_by_idx(obj, sec_idx);
1827 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1843 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1848 for (i = 0; i < obj->nr_extern; i++) {
1849 if (strcmp(obj->externs[i].name, name) == 0)
1850 return &obj->externs[i];
2005 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2035 ext = find_extern_by_name(obj, buf);
2069 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2094 err = bpf_object__process_kconfig_line(obj, buf, data);
2107 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2122 err = bpf_object__process_kconfig_line(obj, buf, data);
2134 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
2140 for (i = 0; i < obj->nr_extern; i++) {
2141 ext = &obj->externs[i];
2150 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
2151 ".kconfig", obj->efile.symbols_shndx,
2156 obj->kconfig_map_idx = obj->nr_maps - 1;
2612 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2627 var = btf__type_by_id(obj->btf, vi->type);
2629 map_name = btf__name_by_offset(obj->btf, var->name_off);
2650 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2661 map = bpf_object__add_map(obj);
2677 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2705 err = map_fill_btf_type_info(obj, map);
2712 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2724 if (obj->efile.btf_maps_shndx < 0)
2727 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2728 data = elf_sec_data(obj, scn);
2732 data = elf_sec_data_by_idx(obj, obj->efile.btf_maps_shndx, &realdata);
2736 MAPS_ELF_SEC, obj->path);
2740 nr_types = btf__type_cnt(obj->btf);
2742 t = btf__type_by_id(obj->btf, i);
2745 name = btf__name_by_offset(obj->btf, t->name_off);
2748 obj->efile.btf_maps_sec_btf_id = i;
2760 err = bpf_object__init_user_btf_map(obj, sec, i,
2761 obj->efile.btf_maps_shndx,
2771 static int bpf_object__init_maps(struct bpf_object *obj,
2781 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2782 err = err ?: bpf_object__init_global_data_maps(obj);
2783 err = err ?: bpf_object__init_kconfig_map(obj);
2784 err = err ?: bpf_object_init_struct_ops(obj);
2789 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2793 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2796 sh = elf_sec_hdr_by_idx(obj, idx, &header);
2804 static bool btf_needs_sanitization(struct bpf_object *obj)
2806 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2807 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2808 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2809 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2810 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2811 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2812 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2818 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2820 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2821 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2822 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2823 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2824 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2825 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2826 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2918 static bool libbpf_needs_btf(const struct bpf_object *obj)
2920 return obj->efile.btf_maps_shndx >= 0 ||
2921 obj->efile.st_ops_shndx >= 0 ||
2922 obj->efile.st_ops_link_shndx >= 0 ||
2923 obj->nr_extern > 0;
2926 static bool kernel_needs_btf(const struct bpf_object *obj)
2928 return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
2931 static int bpf_object__init_btf(struct bpf_object *obj,
2938 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2939 err = libbpf_get_error(obj->btf);
2941 obj->btf = NULL;
2946 btf__set_pointer_size(obj->btf, 8);
2952 if (!obj->btf) {
2957 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2958 err = libbpf_get_error(obj->btf_ext);
2962 obj->btf_ext = NULL;
2967 ext_segs[0] = &obj->btf_ext->func_info;
2968 ext_segs[1] = &obj->btf_ext->line_info;
2969 ext_segs[2] = &obj->btf_ext->core_relo_info;
2996 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3000 scn = elf_sec_by_name(obj, sec_name);
3004 pelfio_t elf = obj->efile.elf;
3018 if (err && libbpf_needs_btf(obj)) {
3033 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3065 err = find_elf_sec_sz(obj, sec_name, &size);
3099 sym = find_elf_var_sym(obj, var_name);
3126 static int bpf_object_fixup_btf(struct bpf_object *obj)
3130 if (!obj->btf)
3133 n = btf__type_cnt(obj->btf);
3135 struct btf_type *t = btf_type_by_id(obj->btf, i);
3143 err = btf_fixup_datasec(obj, obj->btf, t);
3167 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3175 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3179 for (i = 0; i < obj->nr_extern; i++) {
3182 ext = &obj->externs[i];
3187 bpf_object__for_each_program(prog, obj) {
3197 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3202 if (obj->btf_vmlinux || obj->gen_loader)
3205 if (!force && !obj_needs_vmlinux_btf(obj))
3208 obj->btf_vmlinux = btf__load_vmlinux_btf();
3209 err = libbpf_get_error(obj->btf_vmlinux);
3212 obj->btf_vmlinux = NULL;
3218 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3220 struct btf *kern_btf = obj->btf;
3224 if (!obj->btf)
3227 if (!kernel_supports(obj, FEAT_BTF)) {
3228 if (kernel_needs_btf(obj)) {
3244 for (i = 0; i < obj->nr_programs; i++) {
3245 struct bpf_program *prog = &obj->programs[i];
3250 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3253 n = btf__type_cnt(obj->btf);
3255 t = btf_type_by_id(obj->btf, j);
3259 name = btf__str_by_offset(obj->btf, t->name_off);
3268 if (!kernel_supports(obj, FEAT_BTF_DECL_TAG))
3270 for (i = 0; i < obj->nr_programs; i++) {
3271 struct bpf_program *prog = &obj->programs[i];
3274 if (prog_is_subprog(obj, prog))
3276 n = btf__type_cnt(obj->btf);
3282 t = btf_type_by_id(obj->btf, j);
3286 name = btf__str_by_offset(obj->btf, t->name_off);
3290 t = btf_type_by_id(obj->btf, t->type);
3296 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)))
3314 for (k = 0; k < obj->nr_programs; k++) {
3315 struct bpf_program *subprog = &obj->programs[k];
3317 if (!prog_is_subprog(obj, subprog))
3348 sanitize = btf_needs_sanitization(obj);
3354 raw_data = btf__raw_data(obj->btf, &sz);
3361 btf__set_pointer_size(obj->btf, 8);
3362 err = bpf_object__sanitize_btf(obj, kern_btf);
3367 if (obj->gen_loader) {
3373 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3380 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3381 obj->log_level ? 1 : 0);
3386 btf__set_fd(obj->btf, btf__fd(kern_btf));
3393 btf_mandatory = kernel_needs_btf(obj);
3403 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3407 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3409 name = elfio_string_get_string(obj->efile.strstring, off);
3413 off, obj->path, elf_errmsg(-1));
3420 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3424 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3426 name = elfio_string_get_string(obj->efile.shstring, off);
3431 off, obj->path, elf_errmsg(-1));
3439 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3443 scn = elf_getscn(obj->efile.elf, idx);
3446 idx, obj->path, elf_errmsg(-1));
3452 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3455 Elf *elf = obj->efile.elf;
3459 sec_name = elf_sec_name(obj, scn);
3471 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3481 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3488 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3496 sh = elf_sec_hdr(obj, scn);
3500 name = elf_sec_str(obj, sh->sh_name);
3503 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3510 static Elf64_Shdr *elf_sec_hdr_by_idx(const struct bpf_object *obj, size_t idx, Elf64_Shdr *sheader)
3512 psection_t psection = elfio_get_section_by_index(obj->efile.elf, idx);
3528 static const char *elf_sec_name_by_idx(const struct bpf_object *obj, size_t idx)
3533 elf_sec_hdr_by_idx(obj, idx, &sh);
3535 name = elf_sec_str(obj, sh.sh_name);
3538 idx, obj->path, elf_errmsg(-1));
3547 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3557 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3558 obj->path, elf_errmsg(-1));
3565 static Elf_Data *elf_sec_data_by_name(const struct bpf_object *obj, const char *name, Elf_Data *data)
3567 pelfio_t elf = obj->efile.elf;
3575 static Elf_Data *elf_sec_data_by_idx(const struct bpf_object *obj, size_t idx, Elf_Data *data)
3577 pelfio_t elf = obj->efile.elf;
3586 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3588 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3591 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3654 static int bpf_object__elf_collect(struct bpf_object *obj)
3658 Elf *elf = obj->efile.elf;
3660 pelfio_t elf = obj->efile.elf;
3682 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3684 obj->path, elf_errmsg(-1));
3688 obj->efile.sec_cnt = elfio_get_sections_num(elf);
3690 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3691 if (!obj->efile.secs)
3700 sh = elf_sec_hdr(obj, scn);
3705 sh = elf_sec_hdr_by_idx(obj, i, sh);
3711 if (obj->efile.symbols) {
3712 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3716 data = elf_sec_data(obj, scn);
3718 data = elf_sec_data_by_idx(obj, i, &realdata);
3727 obj->efile.symbols = data;
3729 obj->efile.realsymbols.d_buf = data->d_buf;
3730 obj->efile.realsymbols.d_size = data->d_size;
3731 obj->efile.symbols = &(obj->efile.realsymbols);
3735 obj->efile.symbols_shndx = idx;
3737 obj->efile.symbols_shndx = i;
3739 obj->efile.strtabidx = sh->sh_link;
3747 psection_t psection = elfio_get_section_by_index(elf, obj->efile.strtabidx);
3752 psection = elfio_get_section_by_index(elf, obj->efile.shstrndx);
3759 obj->efile.strstring = strstring;
3760 obj->efile.shstring = shstring;
3763 if (!obj->efile.symbols) {
3765 obj->path);
3775 elf_sec_hdr_by_idx(obj, i, sh);
3783 sec_desc = &obj->efile.secs[idx];
3786 sh = elf_sec_hdr(obj, scn);
3788 sh = elf_sec_hdr_by_idx(obj, i, sh);
3794 name = elf_sec_str(obj, sh->sh_name);
3802 data = elf_sec_data(obj, scn);
3804 data = elf_sec_data_by_idx(obj, i, &sec_desc->realdata);
3815 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3819 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3826 obj->efile.btf_maps_shndx = idx;
3840 obj->efile.text_shndx = idx;
3841 err = bpf_object__add_programs(obj, data, name, idx);
3871 obj->efile.st_ops_data = data;
3873 obj->efile.realst_ops_data.d_buf = data->d_buf;
3874 obj->efile.realst_ops_data.d_size = data->d_size;
3875 obj->efile.st_ops_data = &(obj->efile.realst_ops_data);
3877 obj->efile.st_ops_shndx = idx;
3886 targ_sec_idx >= obj->efile.sec_cnt)
3890 if (!section_have_execinstr(obj, targ_sec_idx) &&
3896 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3900 elf_sec_name_by_idx(obj, targ_sec_idx) ?: "<?>");
3927 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3928 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3935 if (obj->nr_programs)
3936 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3938 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
4147 static int bpf_object__collect_externs(struct bpf_object *obj)
4161 if (!obj->efile.symbols)
4165 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4166 sh = elf_sec_hdr(obj, scn);
4169 sh = elf_sec_hdr_by_idx(obj, obj->efile.symbols_shndx, sh);
4175 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4183 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4189 ext_name = elf_sym_str(obj, sym->st_name);
4193 ext = obj->externs;
4194 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4197 obj->externs = ext;
4198 ext = &ext[obj->nr_extern];
4200 obj->nr_extern++;
4202 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4208 t = btf__type_by_id(obj->btf, ext->btf_id);
4209 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4221 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4227 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4228 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4238 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4244 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4250 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4259 skip_mods_and_typedefs(obj->btf, t->type,
4266 pr_debug("collected %d externs total\n", obj->nr_extern);
4268 if (!obj->nr_extern)
4272 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4282 int int_btf_id = find_int_btf_id(obj->btf);
4290 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4291 for (i = 0; i < obj->nr_extern; i++) {
4292 ext = &obj->externs[i];
4305 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4306 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4307 ext = find_extern_by_name(obj, ext_name);
4318 func_proto = btf__type_by_id(obj->btf,
4345 for (i = 0; i < obj->nr_extern; i++) {
4346 ext = &obj->externs[i];
4360 t = btf__type_by_id(obj->btf, vs->type);
4361 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4362 ext = find_extern_by_name(obj, ext_name);
4375 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4377 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
4381 bpf_object__find_program_by_name(const struct bpf_object *obj,
4386 bpf_object__for_each_program(prog, obj) {
4387 if (prog_is_subprog(obj, prog))
4395 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4398 switch (obj->efile.secs[shndx].sec_type) {
4408 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4411 return shndx == obj->efile.btf_maps_shndx;
4415 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4417 if (shndx == obj->efile.symbols_shndx)
4420 switch (obj->efile.secs[shndx].sec_type) {
4438 size_t map_idx, nr_maps = prog->obj->nr_maps;
4439 struct bpf_object *obj = prog->obj;
4453 int i, n = obj->nr_extern;
4457 ext = &obj->externs[i];
4484 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4486 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4488 sym_sec_name = elf_sec_name_by_idx(obj, shdr_idx);
4512 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4528 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4530 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4532 sym_sec_name = elf_sec_name_by_idx(obj, shdr_idx);
4536 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4542 map = &obj->maps[map_idx];
4565 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4571 map = &obj->maps[map_idx];
4598 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4601 int l = 0, r = obj->nr_programs - 1, m;
4604 if (!obj->nr_programs)
4609 prog = &obj->programs[m];
4620 prog = &obj->programs[l];
4627 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4643 if (sec_idx >= obj->efile.sec_cnt)
4647 scn = elf_sec_by_idx(obj, sec_idx);
4648 scn_data = elf_sec_data(obj, scn);
4650 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4651 sec_name = elf_sec_name(obj, scn);
4656 scn_data = elf_sec_data_by_idx(obj, sec_idx, &realdata);
4658 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4659 sec_name = elf_sec_name_by_idx(obj, sec_idx);
4676 sym = elf_sym_by_idx(obj, sym_idx);
4683 if (sym->st_shndx >= obj->efile.sec_cnt) {
4704 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4706 sym_name = elf_sec_name_by_idx(obj, sym->st_shndx);
4709 sym_name = elf_sym_str(obj, sym->st_name);
4715 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4740 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4744 if (!obj->btf)
4751 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4761 id = btf__find_by_name(obj->btf, map->real_name);
4813 if (map->obj->loaded)
4897 if (map->obj->loaded)
4910 bpf_object__probe_loading(struct bpf_object *obj)
4919 if (obj->gen_loader)
5376 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
5381 if (obj && obj->gen_loader)
5467 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5473 if (obj->gen_loader) {
5474 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5477 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5505 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
5512 if (kernel_supports(obj, FEAT_PROG_NAME))
5522 if (obj->btf && btf__fd(obj->btf) >= 0) {
5523 create_attr.btf_fd = btf__fd(obj->btf);
5530 err = bpf_object__create_map(obj, map->inner_map, true);
5566 if (obj->gen_loader) {
5567 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5569 &create_attr, is_inner ? -1 : map - obj->maps);
5600 if (obj->gen_loader)
5609 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5622 if (obj->gen_loader) {
5623 bpf_gen__populate_outer_map(obj->gen_loader,
5624 map - obj->maps, i,
5625 targ_map - obj->maps);
5645 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5651 if (obj->gen_loader)
5678 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5683 for (i = 0; i < obj->nr_maps; i++) {
5684 map = &obj->maps[i];
5689 err = init_prog_array_slots(obj, map);
5717 bpf_object__create_maps(struct bpf_object *obj)
5725 for (i = 0; i < obj->nr_maps; i++) {
5726 map = &obj->maps[i];
5742 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5775 err = bpf_object__create_map(obj, map, false);
5783 err = bpf_object__populate_internal_map(obj, map);
5791 err = init_map_in_map_slots(obj, map);
5821 zclose(obj->maps[j].fd);
5910 static int load_module_btfs(struct bpf_object *obj)
5919 if (obj->btf_modules_loaded)
5922 if (obj->gen_loader)
5926 obj->btf_modules_loaded = true;
5929 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5973 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5981 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5982 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5986 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
6007 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
6033 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
6043 if (obj->btf_vmlinux_override)
6047 err = load_module_btfs(obj);
6051 for (i = 0; i < obj->btf_module_cnt; i++) {
6053 obj->btf_modules[i].btf,
6054 obj->btf_modules[i].name,
6055 btf__type_cnt(obj->btf_vmlinux),
6167 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
6186 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6199 if (obj->btf_ext->core_relo_info.len == 0)
6204 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6206 err = libbpf_get_error(obj->btf_vmlinux_override);
6219 seg = &obj->btf_ext->core_relo_info;
6225 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6237 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6273 if (prog->obj->gen_loader)
6276 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
6293 /* obj->btf_vmlinux and module BTFs are freed after object load */
6294 btf__free(obj->btf_vmlinux_override);
6295 obj->btf_vmlinux_override = NULL;
6328 * where lower 123 is map index into obj->maps[] array
6355 * where lower 123 is extern index into obj->externs[] array
6366 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6378 map = &obj->maps[relo->map_idx];
6379 if (obj->gen_loader) {
6391 map = &obj->maps[relo->map_idx];
6393 if (obj->gen_loader) {
6405 ext = &obj->externs[relo->ext_idx];
6407 if (obj->gen_loader) {
6409 insn[0].imm = obj->kconfig_map_idx;
6412 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6427 ext = &obj->externs[relo->ext_idx];
6461 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6530 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6539 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6548 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6576 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6649 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6681 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6690 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6744 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6762 err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6765 err = bpf_object__reloc_code(obj, main_prog, subprog);
6871 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6879 for (i = 0; i < obj->nr_programs; i++) {
6880 subprog = &obj->programs[i];
6881 if (!prog_is_subprog(obj, subprog))
6887 err = bpf_object__reloc_code(obj, prog, prog);
6895 bpf_object__free_relocs(struct bpf_object *obj)
6901 for (i = 0; i < obj->nr_programs; i++) {
6902 prog = &obj->programs[i];
6923 static void bpf_object__sort_relos(struct bpf_object *obj)
6927 for (i = 0; i < obj->nr_programs; i++) {
6928 struct bpf_program *p = &obj->programs[i];
6938 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6944 if (obj->btf_ext) {
6945 err = bpf_object__relocate_core(obj, targ_btf_path);
6951 bpf_object__sort_relos(obj);
6961 for (i = 0; i < obj->nr_programs; i++) {
6962 prog = &obj->programs[i];
6980 for (i = 0; i < obj->nr_programs; i++) {
6981 prog = &obj->programs[i];
6985 if (prog_is_subprog(obj, prog))
6990 err = bpf_object__relocate_calls(obj, prog);
6999 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7007 err = bpf_object__append_subprog_code(obj, prog, subprog);
7010 err = bpf_object__reloc_code(obj, prog, subprog);
7017 for (i = 0; i < obj->nr_programs; i++) {
7018 prog = &obj->programs[i];
7019 if (prog_is_subprog(obj, prog))
7023 err = bpf_object__relocate_data(obj, prog);
7034 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7037 static int bpf_object__collect_map_relos(struct bpf_object *obj,
7054 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7056 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7068 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7074 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7080 for (j = 0; j < obj->nr_maps; j++) {
7081 map = &obj->maps[j];
7082 if (map->sec_idx != obj->efile.btf_maps_shndx)
7090 if (j == obj->nr_maps) {
7100 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7111 targ_map = bpf_object__find_map_by_name(obj, name);
7118 targ_prog = bpf_object__find_program_by_name(obj, name);
7126 prog_is_subprog(obj, targ_prog)) {
7135 var = btf__type_by_id(obj->btf, vi->type);
7136 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7140 mname = btf__name_by_offset(obj->btf, member->name_off);
7174 static int bpf_object__collect_relos(struct bpf_object *obj)
7178 for (i = 0; i < obj->efile.sec_cnt; i++) {
7179 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7191 shdr = elf_sec_hdr_by_idx(obj, i, &shdrelf);
7201 if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
7202 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7203 else if (idx == obj->efile.btf_maps_shndx)
7204 err = bpf_object__collect_map_relos(obj, shdr, data);
7206 err = bpf_object__collect_prog_relos(obj, shdr, data);
7211 bpf_object__sort_relos(obj);
7228 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7234 if (obj->gen_loader)
7248 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7253 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7273 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7283 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7328 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7354 if (kernel_supports(obj, FEAT_PROG_NAME))
7363 btf_fd = bpf_object__btf_fd(obj);
7364 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7375 load_attr.fd_array = obj->fd_array;
7392 if (obj->gen_loader) {
7393 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7395 prog - obj->programs);
7411 } else if (obj->log_buf) {
7412 log_buf = obj->log_buf;
7413 log_buf_size = obj->log_size;
7439 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7443 for (i = 0; i < obj->nr_maps; i++) {
7444 map = &prog->obj->maps[i];
7573 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7596 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7598 struct bpf_object *obj = prog->obj;
7607 if (map_idx < 0 || map_idx >= obj->nr_maps)
7609 map = &obj->maps[map_idx];
7629 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7631 struct bpf_object *obj = prog->obj;
7640 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7642 ext = &obj->externs[ext_idx];
7704 struct bpf_object *obj = prog->obj;
7709 struct extern_desc *ext = &obj->externs[relo->ext_idx];
7716 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7718 bpf_gen__record_extern(obj->gen_loader, ext->name,
7723 bpf_gen__record_extern(obj->gen_loader, ext->name,
7735 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7746 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7752 for (i = 0; i < obj->nr_programs; i++) {
7753 prog = &obj->programs[i];
7754 err = bpf_object__sanitize_prog(obj, prog);
7759 for (i = 0; i < obj->nr_programs; i++) {
7760 prog = &obj->programs[i];
7761 if (prog_is_subprog(obj, prog))
7769 if (obj->gen_loader)
7772 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7773 obj->license, obj->kern_version, &prog->fd);
7780 bpf_object__free_relocs(obj);
7786 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7791 bpf_object__for_each_program(prog, obj) {
7823 struct bpf_object *obj;
7861 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7862 if (IS_ERR(obj))
7863 return obj;
7865 obj->log_buf = log_buf;
7866 obj->log_size = log_size;
7867 obj->log_level = log_level;
7875 obj->btf_custom_path = strdup(btf_tmp_path);
7876 if (!obj->btf_custom_path) {
7884 obj->kconfig = strdup(kconfig);
7885 if (!obj->kconfig) {
7891 err = bpf_object__elf_init(obj);
7892 err = err ? : bpf_object__check_endianness(obj);
7893 err = err ? : bpf_object__elf_collect(obj);
7894 err = err ? : bpf_object__collect_externs(obj);
7895 err = err ? : bpf_object_fixup_btf(obj);
7896 err = err ? : bpf_object__init_maps(obj, opts);
7897 err = err ? : bpf_object_init_progs(obj, opts);
7898 err = err ? : bpf_object__collect_relos(obj);
7902 bpf_object__elf_finish(obj);
7904 return obj;
7906 bpf_object__close(obj);
7936 static int bpf_object_unload(struct bpf_object *obj)
7940 if (!obj)
7943 for (i = 0; i < obj->nr_maps; i++) {
7944 zclose(obj->maps[i].fd);
7945 if (obj->maps[i].st_ops)
7946 zfree(&obj->maps[i].st_ops->kern_vdata);
7949 for (i = 0; i < obj->nr_programs; i++)
7950 bpf_program__unload(&obj->programs[i]);
7955 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7959 bpf_object__for_each_map(m, obj) {
7962 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
8006 struct bpf_object *obj = ctx;
8010 ext = find_extern_by_name(obj, sym_name);
8014 t = btf__type_by_id(obj->btf, ext->btf_id);
8031 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8033 return libbpf_kallsyms_parse(kallsyms_cb, obj);
8036 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8044 btf = obj->btf_vmlinux;
8049 err = load_module_btfs(obj);
8053 for (i = 0; i < obj->btf_module_cnt; i++) {
8055 mod_btf = &obj->btf_modules[i];
8070 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8080 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8097 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8103 local_type = btf__type_by_id(obj->btf, local_type_id);
8104 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8123 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8134 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8147 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8162 if (obj->fd_array_cnt == INT16_MAX) {
8168 if (!obj->fd_array_cnt)
8169 obj->fd_array_cnt = 1;
8171 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8172 obj->fd_array_cnt + 1);
8175 mod_btf->fd_array_idx = obj->fd_array_cnt;
8177 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8195 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8201 for (i = 0; i < obj->nr_extern; i++) {
8202 ext = &obj->externs[i];
8206 if (obj->gen_loader) {
8212 t = btf__type_by_id(obj->btf, ext->btf_id);
8214 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8216 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8223 static int bpf_object__resolve_externs(struct bpf_object *obj,
8232 if (obj->nr_extern == 0)
8235 if (obj->kconfig_map_idx >= 0)
8236 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8238 for (i = 0; i < obj->nr_extern; i++) {
8239 ext = &obj->externs[i];
8265 value = kernel_supports(obj, FEAT_BPF_COOKIE);
8267 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8291 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8295 for (i = 0; i < obj->nr_extern; i++) {
8296 ext = &obj->externs[i];
8304 err = bpf_object__read_kconfig_file(obj, kcfg_data);
8309 err = bpf_object__read_kallsyms_file(obj);
8314 err = bpf_object__resolve_ksyms_btf_id(obj);
8318 for (i = 0; i < obj->nr_extern; i++) {
8319 ext = &obj->externs[i];
8353 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8357 for (i = 0; i < obj->nr_maps; i++)
8358 if (bpf_map__is_struct_ops(&obj->maps[i]))
8359 bpf_map_prepare_vdata(&obj->maps[i]);
8364 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8368 if (!obj)
8371 if (obj->loaded) {
8372 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8376 if (obj->gen_loader)
8377 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8379 err = bpf_object__probe_loading(obj);
8380 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8381 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8382 err = err ? : bpf_object__sanitize_and_load_btf(obj);
8383 err = err ? : bpf_object__sanitize_maps(obj);
8384 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8385 err = err ? : bpf_object__create_maps(obj);
8386 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8387 err = err ? : bpf_object__load_progs(obj, extra_log_level);
8388 err = err ? : bpf_object_init_prog_arrays(obj);
8389 err = err ? : bpf_object_prepare_struct_ops(obj);
8391 if (obj->gen_loader) {
8393 if (obj->btf)
8394 btf__set_fd(obj->btf, -1);
8395 for (i = 0; i < obj->nr_maps; i++)
8396 obj->maps[i].fd = -1;
8398 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8402 zfree(&obj->fd_array);
8405 for (i = 0; i < obj->btf_module_cnt; i++) {
8406 close(obj->btf_modules[i].fd);
8407 btf__free(obj->btf_modules[i].btf);
8408 free(obj->btf_modules[i].name);
8410 free(obj->btf_modules);
8413 btf__free(obj->btf_vmlinux);
8414 obj->btf_vmlinux = NULL;
8416 obj->loaded = true; /* doesn't matter if successfully or not */
8424 for (i = 0; i < obj->nr_maps; i++)
8425 if (obj->maps[i].pinned && !obj->maps[i].reused)
8426 bpf_map__unpin(&obj->maps[i], NULL);
8428 bpf_object_unload(obj);
8429 pr_warn("failed to load object '%s'\n", obj->path);
8433 int bpf_object__load(struct bpf_object *obj)
8435 return bpf_object_load(obj, 0, NULL);
8675 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8680 if (!obj)
8683 if (!obj->loaded) {
8688 bpf_object__for_each_map(map, obj) {
8713 while ((map = bpf_object__prev_map(obj, map))) {
8723 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8728 if (!obj)
8731 bpf_object__for_each_map(map, obj) {
8753 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8759 if (!obj)
8762 if (!obj->loaded) {
8767 bpf_object__for_each_program(prog, obj) {
8780 while ((prog = bpf_object__prev_program(obj, prog))) {
8790 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8795 if (!obj)
8798 bpf_object__for_each_program(prog, obj) {
8813 int bpf_object__pin(struct bpf_object *obj, const char *path)
8817 err = bpf_object__pin_maps(obj, path);
8821 err = bpf_object__pin_programs(obj, path);
8823 bpf_object__unpin_maps(obj, path);
8830 int bpf_object__unpin(struct bpf_object *obj, const char *path)
8834 err = bpf_object__unpin_programs(obj, path);
8838 err = bpf_object__unpin_maps(obj, path);
8878 void bpf_object__close(struct bpf_object *obj)
8882 if (IS_ERR_OR_NULL(obj))
8885 usdt_manager_free(obj->usdt_man);
8886 obj->usdt_man = NULL;
8888 bpf_gen__free(obj->gen_loader);
8889 bpf_object__elf_finish(obj);
8890 bpf_object_unload(obj);
8891 btf__free(obj->btf);
8892 btf__free(obj->btf_vmlinux);
8893 btf_ext__free(obj->btf_ext);
8895 for (i = 0; i < obj->nr_maps; i++)
8896 bpf_map__destroy(&obj->maps[i]);
8898 zfree(&obj->btf_custom_path);
8899 zfree(&obj->kconfig);
8901 for (i = 0; i < obj->nr_extern; i++)
8902 zfree(&obj->externs[i].essent_name);
8904 zfree(&obj->externs);
8905 obj->nr_extern = 0;
8907 zfree(&obj->maps);
8908 obj->nr_maps = 0;
8910 if (obj->programs && obj->nr_programs) {
8911 for (i = 0; i < obj->nr_programs; i++)
8912 bpf_program__exit(&obj->programs[i]);
8914 zfree(&obj->programs);
8916 free(obj);
8919 const char *bpf_object__name(const struct bpf_object *obj)
8921 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8924 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8926 return obj ? obj->kern_version : 0;
8929 struct btf *bpf_object__btf(const struct bpf_object *obj)
8931 return obj ? obj->btf : NULL;
8934 int bpf_object__btf_fd(const struct bpf_object *obj)
8936 return obj->btf ? btf__fd(obj->btf) : -1;
8939 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8941 if (obj->loaded)
8944 obj->kern_version = kern_version;
8949 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8961 obj->gen_loader = gen;
8966 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8969 size_t nr_programs = obj->nr_programs;
8977 return forward ? &obj->programs[0] :
8978 &obj->programs[nr_programs - 1];
8980 if (p->obj != obj) {
8985 idx = (p - obj->programs) + (forward ? 1 : -1);
8986 if (idx >= obj->nr_programs || idx < 0)
8988 return &obj->programs[idx];
8992 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
8997 prog = __bpf_program__iter(prog, obj, true);
8998 } while (prog && prog_is_subprog(obj, prog));
9004 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
9009 prog = __bpf_program__iter(prog, obj, false);
9010 } while (prog && prog_is_subprog(obj, prog));
9037 if (prog->obj->loaded)
9069 if (prog->obj->loaded)
9112 if (prog->obj->loaded)
9143 if (prog->obj->loaded)
9157 if (prog->obj->loaded)
9171 if (prog->obj->loaded)
9190 if (prog->obj->loaded)
9560 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9567 for (i = 0; i < obj->nr_maps; i++) {
9568 map = &obj->maps[i];
9581 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9597 btf = obj->btf;
9606 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9613 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9614 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9658 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9811 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9817 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9826 ret = load_module_btfs(obj);
9830 for (i = 0; i < obj->btf_module_cnt; i++) {
9831 const struct module_btf *mod = &obj->btf_modules[i];
9873 if (prog->obj->gen_loader) {
9874 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9878 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
10030 btf = bpf_object__btf(map->obj);
10186 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10191 if (!obj || !obj->maps)
10194 s = obj->maps;
10195 e = obj->maps + obj->nr_maps;
10203 idx = (m - obj->maps) + i;
10204 if (idx >= obj->nr_maps || idx < 0)
10206 return &obj->maps[idx];
10210 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10213 return obj->maps;
10215 return __bpf_map__iter(prev, obj, 1);
10219 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10222 if (!obj->nr_maps)
10224 return obj->maps + obj->nr_maps - 1;
10227 return __bpf_map__iter(next, obj, -1);
10231 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10235 bpf_object__for_each_map(pos, obj) {
10258 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10260 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10595 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
10998 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11081 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11904 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
12050 struct bpf_object *obj = prog->obj;
12080 if (IS_ERR(obj->usdt_man))
12081 return libbpf_ptr(obj->usdt_man);
12082 if (!obj->usdt_man) {
12083 obj->usdt_man = usdt_manager_new(obj);
12084 if (IS_ERR(obj->usdt_man))
12085 return libbpf_ptr(obj->usdt_man);
12089 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
13326 if (prog->obj->loaded)
13347 err = bpf_object__load_vmlinux_btf(prog->obj, true);
13350 err = find_kernel_btf_id(prog->obj, attach_func_name,
13466 static int populate_skeleton_maps(const struct bpf_object *obj,
13477 *map = bpf_object__find_map_by_name(obj, name);
13490 static int populate_skeleton_progs(const struct bpf_object *obj,
13500 *prog = bpf_object__find_program_by_name(obj, name);
13515 struct bpf_object *obj;
13530 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
13531 err = libbpf_get_error(obj);
13538 *s->obj = obj;
13539 err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
13545 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
13565 if (!s->obj)
13568 btf = bpf_object__btf(s->obj);
13571 bpf_object__name(s->obj));
13575 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
13581 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
13628 err = bpf_object__load(*s->obj);
13736 if (s->obj)
13737 bpf_object__close(*s->obj);
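
The listing above traces the internal bpf_object lifecycle (open and ELF collection, extern/BTF fixup, relocation, map creation, program load, pin, close). As a caller-side point of reference only, here is a minimal C sketch of that lifecycle using public entry points that also appear in the listing (bpf_object__load, bpf_object__find_program_by_name, bpf_object__find_map_by_name, bpf_object__close). bpf_object__open_file() is assumed as the file-based counterpart of bpf_object__open_mem() seen at line 13530; the object path and the program/map names are hypothetical, and error handling is abbreviated. It assumes libbpf >= 1.0 semantics (NULL return plus errno on open failure).

/* Hypothetical caller of the bpf_object API indexed above; not part of
 * the listed source. Names "prog.bpf.o", "my_prog", "my_map" are made up. */
#include <stdio.h>
#include <errno.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_map *map;
	int err;

	/* Opens and parses the ELF object; internally this walks
	 * bpf_object__elf_init() / bpf_object__elf_collect() from the listing. */
	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (!obj) {
		fprintf(stderr, "open failed: %d\n", -errno);
		return 1;
	}

	/* bpf_object__load() drives bpf_object_load(): probe_loading,
	 * resolve_externs, sanitize_and_load_btf, create_maps, relocate,
	 * load_progs, as shown around lines 8364-8433. */
	err = bpf_object__load(obj);
	if (err) {
		fprintf(stderr, "load failed: %d\n", err);
		goto out;
	}

	/* Lookups by name correspond to bpf_object__find_program_by_name()
	 * and bpf_object__find_map_by_name() in the listing. */
	prog = bpf_object__find_program_by_name(obj, "my_prog");
	map = bpf_object__find_map_by_name(obj, "my_map");
	if (prog && map)
		printf("prog fd=%d, map fd=%d\n",
		       bpf_program__fd(prog), bpf_map__fd(map));

out:
	/* bpf_object__close() unloads programs, destroys maps and frees the
	 * object, as at the end of the listing (lines 8878-8916). */
	bpf_object__close(obj);
	return err ? 1 : 0;
}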