Lines Matching defs:obj

75 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
271 struct bpf_object *obj;
478 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
479 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
480 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
481 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
482 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
483 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
484 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
485 static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
559 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
570 prog->obj = obj;
609 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
619 progs = obj->programs;
620 nr_progs = obj->nr_programs;
624 if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
632 name = elf_sym_str(obj, sym.st_name);
651 * In this case the original obj->programs
659 obj->programs = progs;
663 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
669 obj->nr_programs = nr_progs;
916 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
922 for (i = 0; i < obj->nr_maps; i++) {
923 map = &obj->maps[i];
928 err = bpf_map__init_kern_struct_ops(map, obj->btf,
929 obj->btf_vmlinux);
937 static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
948 if (obj->efile.st_ops_shndx == -1)
951 btf = obj->btf;
963 type = btf__type_by_id(obj->btf, vsi->type);
964 var_name = btf__name_by_offset(obj->btf, type->name_off);
966 type_id = btf__resolve_type(obj->btf, vsi->type);
973 type = btf__type_by_id(obj->btf, type_id);
974 tname = btf__name_by_offset(obj->btf, type->name_off);
984 map = bpf_object__add_map(obj);
988 map->sec_idx = obj->efile.st_ops_shndx;
1010 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
1017 obj->efile.st_ops_data->d_buf + vsi->offset,
1035 struct bpf_object *obj;
1038 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1039 if (!obj) {
1044 strcpy(obj->path, path);
1046 strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
1047 obj->name[sizeof(obj->name) - 1] = 0;
1050 strncpy(obj->name, basename((void *)path),
1051 sizeof(obj->name) - 1);
1052 end = strchr(obj->name, '.');
1057 obj->efile.fd = -1;
1064 obj->efile.obj_buf = obj_buf;
1065 obj->efile.obj_buf_sz = obj_buf_sz;
1066 obj->efile.maps_shndx = -1;
1067 obj->efile.btf_maps_shndx = -1;
1068 obj->efile.data_shndx = -1;
1069 obj->efile.rodata_shndx = -1;
1070 obj->efile.bss_shndx = -1;
1071 obj->efile.st_ops_shndx = -1;
1072 obj->kconfig_map_idx = -1;
1073 obj->rodata_map_idx = -1;
1075 obj->kern_version = get_kernel_version();
1076 obj->loaded = false;
1078 INIT_LIST_HEAD(&obj->list);
1079 list_add(&obj->list, &bpf_objects_list);
1080 return obj;
1083 static void bpf_object__elf_finish(struct bpf_object *obj)
1085 if (!obj_elf_valid(obj))
1088 if (obj->efile.elf) {
1089 elf_end(obj->efile.elf);
1090 obj->efile.elf = NULL;
1092 obj->efile.symbols = NULL;
1093 obj->efile.data = NULL;
1094 obj->efile.rodata = NULL;
1095 obj->efile.bss = NULL;
1096 obj->efile.st_ops_data = NULL;
1098 zfree(&obj->efile.reloc_sects);
1099 obj->efile.nr_reloc_sects = 0;
1100 zclose(obj->efile.fd);
1101 obj->efile.obj_buf = NULL;
1102 obj->efile.obj_buf_sz = 0;
1110 static int bpf_object__elf_init(struct bpf_object *obj)
1115 if (obj_elf_valid(obj)) {
1120 if (obj->efile.obj_buf_sz > 0) {
1125 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
1126 obj->efile.obj_buf_sz);
1128 obj->efile.fd = open(obj->path, O_RDONLY);
1129 if (obj->efile.fd < 0) {
1134 pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1138 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1141 if (!obj->efile.elf) {
1142 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1147 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
1148 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1152 ep = &obj->efile.ehdr;
1154 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
1156 obj->path, elf_errmsg(-1));
1162 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
1164 obj->path, elf_errmsg(-1));
1172 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1179 bpf_object__elf_finish(obj);
1183 static int bpf_object__check_endianness(struct bpf_object *obj)
1186 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
1189 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
1194 pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1199 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1201 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
1202 pr_debug("license of %s is %s\n", obj->path, obj->license);
1207 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1212 pr_warn("invalid kver section in %s\n", obj->path);
1216 obj->kern_version = kver;
1217 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1229 int bpf_object__section_size(const struct bpf_object *obj, const char *name,
1238 if (obj->efile.data)
1239 *size = obj->efile.data->d_size;
1241 if (obj->efile.bss)
1242 *size = obj->efile.bss->d_size;
1244 if (obj->efile.rodata)
1245 *size = obj->efile.rodata->d_size;
1247 if (obj->efile.st_ops_data)
1248 *size = obj->efile.st_ops_data->d_size;
1250 Elf_Scn *scn = elf_sec_by_name(obj, name);
1251 Elf_Data *data = elf_sec_data(obj, scn);
1262 int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
1265 Elf_Data *symbols = obj->efile.symbols;
1281 sname = elf_sym_str(obj, sym.st_name);
1296 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1302 if (obj->nr_maps < obj->maps_cap)
1303 return &obj->maps[obj->nr_maps++];
1305 new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
1306 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
1312 obj->maps_cap = new_cap;
1313 obj->maps = new_maps;
1316 memset(obj->maps + obj->nr_maps, 0,
1317 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
1322 for (i = obj->nr_maps; i < obj->maps_cap; i++) {
1323 obj->maps[i].fd = -1;
1324 obj->maps[i].inner_map_fd = -1;
1327 return &obj->maps[obj->nr_maps++];
1340 static char *internal_map_name(struct bpf_object *obj,
1347 strlen(obj->name));
1349 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1361 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1368 map = bpf_object__add_map(obj);
1375 map->name = internal_map_name(obj, type);
1407 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1411 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1416 * Populate obj->maps with libbpf internal maps.
1418 if (obj->efile.data_shndx >= 0) {
1419 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1420 obj->efile.data_shndx,
1421 obj->efile.data->d_buf,
1422 obj->efile.data->d_size);
1426 if (obj->efile.rodata_shndx >= 0) {
1427 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1428 obj->efile.rodata_shndx,
1429 obj->efile.rodata->d_buf,
1430 obj->efile.rodata->d_size);
1434 obj->rodata_map_idx = obj->nr_maps - 1;
1436 if (obj->efile.bss_shndx >= 0) {
1437 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1438 obj->efile.bss_shndx,
1440 obj->efile.bss->d_size);
1448 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1453 for (i = 0; i < obj->nr_extern; i++) {
1454 if (strcmp(obj->externs[i].name, name) == 0)
1455 return &obj->externs[i];
1594 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1624 ext = find_extern_by_name(obj, buf);
1655 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1680 err = bpf_object__process_kconfig_line(obj, buf, data);
1693 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1708 err = bpf_object__process_kconfig_line(obj, buf, data);
1720 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1726 for (i = 0; i < obj->nr_extern; i++) {
1727 ext = &obj->externs[i];
1736 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1737 obj->efile.symbols_shndx,
1742 obj->kconfig_map_idx = obj->nr_maps - 1;
1747 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1749 Elf_Data *symbols = obj->efile.symbols;
1754 if (obj->efile.maps_shndx < 0)
1761 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1762 data = elf_sec_data(obj, scn);
1765 obj->path);
1782 if (sym.st_shndx != obj->efile.maps_shndx)
1788 nr_maps, data->d_size, obj->path);
1792 obj->path);
1797 /* Fill obj->maps using data in "maps" section. */
1806 if (sym.st_shndx != obj->efile.maps_shndx)
1809 map = bpf_object__add_map(obj);
1813 map_name = elf_sym_str(obj, sym.st_name);
1815 pr_warn("failed to get map #%d name sym string for obj %s\n",
1816 i, obj->path);
1827 obj->path, map_name);
1859 obj->path, map_name);
1979 static int parse_btf_map_def(struct bpf_object *obj,
1992 const char *name = btf__name_by_offset(obj->btf, m->name_off);
1999 if (!get_map_field_int(map->name, obj->btf, m,
2005 if (!get_map_field_int(map->name, obj->btf, m,
2011 if (!get_map_field_int(map->name, obj->btf, m,
2017 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
2023 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2036 t = btf__type_by_id(obj->btf, m->type);
2047 sz = btf__resolve_size(obj->btf, t->type);
2065 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2078 t = btf__type_by_id(obj->btf, m->type);
2089 sz = btf__resolve_size(obj->btf, t->type);
2129 t = btf__type_by_id(obj->btf, m->type);
2140 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
2147 t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
2157 map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
2164 err = parse_btf_map_def(obj, map->inner_map, t, strict,
2177 if (!get_map_field_int(map->name, obj->btf, m, &val))
2215 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2228 var = btf__type_by_id(obj->btf, vi->type);
2230 map_name = btf__name_by_offset(obj->btf, var->name_off);
2252 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2263 map = bpf_object__add_map(obj);
2279 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
2282 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2292 if (obj->efile.btf_maps_shndx < 0)
2295 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2296 data = elf_sec_data(obj, scn);
2299 MAPS_ELF_SEC, obj->path);
2303 nr_types = btf__get_nr_types(obj->btf);
2305 t = btf__type_by_id(obj->btf, i);
2308 name = btf__name_by_offset(obj->btf, t->name_off);
2311 obj->efile.btf_maps_sec_btf_id = i;
2323 err = bpf_object__init_user_btf_map(obj, sec, i,
2324 obj->efile.btf_maps_shndx,
2334 static int bpf_object__init_maps(struct bpf_object *obj,
2344 err = bpf_object__init_user_maps(obj, strict);
2345 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2346 err = err ?: bpf_object__init_global_data_maps(obj);
2347 err = err ?: bpf_object__init_kconfig_map(obj);
2348 err = err ?: bpf_object__init_struct_ops_maps(obj);
2355 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2359 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2365 static bool btf_needs_sanitization(struct bpf_object *obj)
2374 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2434 static bool libbpf_needs_btf(const struct bpf_object *obj)
2436 return obj->efile.btf_maps_shndx >= 0 ||
2437 obj->efile.st_ops_shndx >= 0 ||
2438 obj->nr_extern > 0;
2441 static bool kernel_needs_btf(const struct bpf_object *obj)
2443 return obj->efile.st_ops_shndx >= 0;
2446 static int bpf_object__init_btf(struct bpf_object *obj,
2453 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2454 if (IS_ERR(obj->btf)) {
2455 err = PTR_ERR(obj->btf);
2456 obj->btf = NULL;
2462 btf__set_pointer_size(obj->btf, 8);
2466 if (!obj->btf) {
2471 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2473 if (IS_ERR(obj->btf_ext)) {
2475 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
2476 obj->btf_ext = NULL;
2481 if (err && libbpf_needs_btf(obj)) {
2488 static int bpf_object__finalize_btf(struct bpf_object *obj)
2492 if (!obj->btf)
2495 err = btf__finalize_data(obj, obj->btf);
2519 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
2526 if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2530 for (i = 0; i < obj->nr_extern; i++) {
2533 ext = &obj->externs[i];
2540 bpf_object__for_each_program(prog, obj) {
2552 obj->btf_vmlinux = libbpf_find_kernel_btf();
2553 if (IS_ERR(obj->btf_vmlinux)) {
2554 err = PTR_ERR(obj->btf_vmlinux);
2556 obj->btf_vmlinux = NULL;
2562 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2564 struct btf *kern_btf = obj->btf;
2568 if (!obj->btf)
2572 if (kernel_needs_btf(obj)) {
2580 sanitize = btf_needs_sanitization(obj);
2586 raw_data = btf__get_raw_data(obj->btf, &sz);
2592 btf__set_pointer_size(obj->btf, 8);
2593 bpf_object__sanitize_btf(obj, kern_btf);
2600 btf__set_fd(obj->btf, btf__fd(kern_btf));
2607 btf_mandatory = kernel_needs_btf(obj);
2617 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2621 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2624 off, obj->path, elf_errmsg(-1));
2631 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2635 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2638 off, obj->path, elf_errmsg(-1));
2645 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2649 scn = elf_getscn(obj->efile.elf, idx);
2652 idx, obj->path, elf_errmsg(-1));
2658 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2661 Elf *elf = obj->efile.elf;
2665 sec_name = elf_sec_name(obj, scn);
2677 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2684 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2691 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2699 if (elf_sec_hdr(obj, scn, &sh))
2702 name = elf_sec_str(obj, sh.sh_name);
2705 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2712 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2722 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2723 obj->path, elf_errmsg(-1));
2730 static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
2733 Elf_Data *symbols = obj->efile.symbols;
2802 static int bpf_object__elf_collect(struct bpf_object *obj)
2804 Elf *elf = obj->efile.elf;
2818 if (elf_sec_hdr(obj, scn, &sh))
2822 if (obj->efile.symbols) {
2823 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2827 data = elf_sec_data(obj, scn);
2831 obj->efile.symbols = data;
2832 obj->efile.symbols_shndx = elf_ndxscn(scn);
2833 obj->efile.strtabidx = sh.sh_link;
2841 if (elf_sec_hdr(obj, scn, &sh))
2844 name = elf_sec_str(obj, sh.sh_name);
2851 data = elf_sec_data(obj, scn);
2861 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
2865 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
2869 obj->efile.maps_shndx = idx;
2871 obj->efile.btf_maps_shndx = idx;
2881 obj->efile.text_shndx = idx;
2882 err = bpf_object__add_programs(obj, data, name, idx);
2886 obj->efile.data = data;
2887 obj->efile.data_shndx = idx;
2889 obj->efile.rodata = data;
2890 obj->efile.rodata_shndx = idx;
2892 obj->efile.st_ops_data = data;
2893 obj->efile.st_ops_shndx = idx;
2899 int nr_sects = obj->efile.nr_reloc_sects;
2900 void *sects = obj->efile.reloc_sects;
2904 if (!section_have_execinstr(obj, sec) &&
2909 elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
2914 sizeof(*obj->efile.reloc_sects));
2918 obj->efile.reloc_sects = sects;
2919 obj->efile.nr_reloc_sects++;
2921 obj->efile.reloc_sects[nr_sects].shdr = sh;
2922 obj->efile.reloc_sects[nr_sects].data = data;
2924 obj->efile.bss = data;
2925 obj->efile.bss_shndx = idx;
2932 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
2933 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
2939 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
2941 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3086 static int bpf_object__collect_externs(struct bpf_object *obj)
3096 if (!obj->efile.symbols)
3099 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3100 if (elf_sec_hdr(obj, scn, &sh))
3109 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3113 ext_name = elf_sym_str(obj, sym.st_name);
3117 ext = obj->externs;
3118 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3121 obj->externs = ext;
3122 ext = &ext[obj->nr_extern];
3124 obj->nr_extern++;
3126 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3132 t = btf__type_by_id(obj->btf, ext->btf_id);
3133 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3137 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3143 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3144 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3149 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3155 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3161 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3170 skip_mods_and_typedefs(obj->btf, t->type,
3177 pr_debug("collected %d externs total\n", obj->nr_extern);
3179 if (!obj->nr_extern)
3183 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3193 int int_btf_id = find_int_btf_id(obj->btf);
3195 for (i = 0; i < obj->nr_extern; i++) {
3196 ext = &obj->externs[i];
3209 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3210 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3211 ext = find_extern_by_name(obj, ext_name);
3229 for (i = 0; i < obj->nr_extern; i++) {
3230 ext = &obj->externs[i];
3244 t = btf__type_by_id(obj->btf, vs->type);
3245 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3246 ext = find_extern_by_name(obj, ext_name);
3260 bpf_object__find_program_by_title(const struct bpf_object *obj,
3265 bpf_object__for_each_program(pos, obj) {
3272 static bool prog_is_subprog(const struct bpf_object *obj,
3287 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3291 bpf_object__find_program_by_name(const struct bpf_object *obj,
3296 bpf_object__for_each_program(prog, obj) {
3297 if (prog_is_subprog(obj, prog))
3305 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3308 return shndx == obj->efile.data_shndx ||
3309 shndx == obj->efile.bss_shndx ||
3310 shndx == obj->efile.rodata_shndx;
3313 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3316 return shndx == obj->efile.maps_shndx ||
3317 shndx == obj->efile.btf_maps_shndx;
3321 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3323 if (shndx == obj->efile.data_shndx)
3325 else if (shndx == obj->efile.bss_shndx)
3327 else if (shndx == obj->efile.rodata_shndx)
3329 else if (shndx == obj->efile.symbols_shndx)
3341 size_t map_idx, nr_maps = prog->obj->nr_maps;
3342 struct bpf_object *obj = prog->obj;
3357 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3358 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3382 int i, n = obj->nr_extern;
3386 ext = &obj->externs[i];
3409 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3410 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3414 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3420 map = &obj->maps[map_idx];
3443 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3449 map = &obj->maps[map_idx];
3476 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3479 int l = 0, r = obj->nr_programs - 1, m;
3482 if (!obj->nr_programs)
3487 prog = &obj->programs[m];
3498 prog = &obj->programs[l];
3505 bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3507 Elf_Data *symbols = obj->efile.symbols;
3518 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3519 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
3551 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3553 sym_name = elf_sym_str(obj, sym.st_name);
3559 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3584 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3594 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3599 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3607 ret = btf__find_by_name(obj->btf,
3738 bpf_object__probe_loading(struct bpf_object *obj)
4141 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4172 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
4211 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4212 create_attr.btf_fd = btf__fd(obj->btf);
4219 err = bpf_object__create_map(obj, map->inner_map);
4289 bpf_object__create_maps(struct bpf_object *obj)
4297 for (i = 0; i < obj->nr_maps; i++) {
4298 map = &obj->maps[i];
4321 err = bpf_object__create_map(obj, map);
4329 err = bpf_object__populate_internal_map(obj, map);
4367 zclose(obj->maps[j].fd);
5894 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5906 if (obj->btf_ext->core_relo_info.len == 0)
5912 targ_btf = obj->btf_vmlinux;
5924 seg = &obj->btf_ext->core_relo_info;
5926 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5938 for (i = 0; i < obj->nr_programs; i++) {
5939 if (strcmp(obj->programs[i].sec_name, sec_name) == 0) {
5940 prog = &obj->programs[i];
5955 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5975 err = bpf_core_apply_relo(prog, rec, i, obj->btf,
5986 /* obj->btf_vmlinux is freed at the end of object load phase */
5987 if (targ_btf != obj->btf_vmlinux)
6004 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6016 insn[0].imm = obj->maps[relo->map_idx].fd;
6022 insn[0].imm = obj->maps[relo->map_idx].fd;
6026 ext = &obj->externs[relo->sym_off];
6029 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6055 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6069 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6125 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6134 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6143 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6171 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6213 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6222 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6257 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6292 err = bpf_object__reloc_code(obj, main_prog, subprog);
6400 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6408 for (i = 0; i < obj->nr_programs; i++) {
6409 subprog = &obj->programs[i];
6410 if (!prog_is_subprog(obj, subprog))
6419 err = bpf_object__reloc_code(obj, prog, prog);
6428 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6434 if (obj->btf_ext) {
6435 err = bpf_object__relocate_core(obj, targ_btf_path);
6446 for (i = 0; i < obj->nr_programs; i++) {
6447 prog = &obj->programs[i];
6448 err = bpf_object__relocate_data(obj, prog);
6460 for (i = 0; i < obj->nr_programs; i++) {
6461 prog = &obj->programs[i];
6465 if (prog_is_subprog(obj, prog))
6468 err = bpf_object__relocate_calls(obj, prog);
6476 for (i = 0; i < obj->nr_programs; i++) {
6477 prog = &obj->programs[i];
6484 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6487 static int bpf_object__collect_map_relos(struct bpf_object *obj,
6503 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6505 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6509 symbols = obj->efile.symbols;
6521 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
6522 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6532 for (j = 0; j < obj->nr_maps; j++) {
6533 map = &obj->maps[j];
6534 if (map->sec_idx != obj->efile.btf_maps_shndx)
6542 if (j == obj->nr_maps) {
6557 targ_map = bpf_object__find_map_by_name(obj, name);
6561 var = btf__type_by_id(obj->btf, vi->type);
6562 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6566 mname = btf__name_by_offset(obj->btf, member->name_off);
6615 static int bpf_object__collect_relos(struct bpf_object *obj)
6619 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6620 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6621 Elf_Data *data = obj->efile.reloc_sects[i].data;
6629 if (idx == obj->efile.st_ops_shndx)
6630 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6631 else if (idx == obj->efile.btf_maps_shndx)
6632 err = bpf_object__collect_map_relos(obj, shdr, data);
6634 err = bpf_object__collect_prog_relos(obj, shdr, data);
6639 for (i = 0; i < obj->nr_programs; i++) {
6640 struct bpf_program *p = &obj->programs[i];
6663 static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
6733 btf_fd = bpf_object__btf_fd(prog->obj);
6761 if (prog->obj->rodata_map_idx >= 0 &&
6764 &prog->obj->maps[prog->obj->rodata_map_idx];
6825 if (prog->obj->loaded) {
6910 bpf_object__load_progs(struct bpf_object *obj, int log_level)
6916 for (i = 0; i < obj->nr_programs; i++) {
6917 prog = &obj->programs[i];
6918 err = bpf_object__sanitize_prog(obj, prog);
6923 for (i = 0; i < obj->nr_programs; i++) {
6924 prog = &obj->programs[i];
6925 if (prog_is_subprog(obj, prog))
6932 err = bpf_program__load(prog, obj->license, obj->kern_version);
6947 struct bpf_object *obj;
6972 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
6973 if (IS_ERR(obj))
6974 return obj;
6978 obj->kconfig = strdup(kconfig);
6979 if (!obj->kconfig) {
6985 err = bpf_object__elf_init(obj);
6986 err = err ? : bpf_object__check_endianness(obj);
6987 err = err ? : bpf_object__elf_collect(obj);
6988 err = err ? : bpf_object__collect_externs(obj);
6989 err = err ? : bpf_object__finalize_btf(obj);
6990 err = err ? : bpf_object__init_maps(obj, opts);
6991 err = err ? : bpf_object__collect_relos(obj);
6994 bpf_object__elf_finish(obj);
6996 bpf_object__for_each_program(prog, obj) {
7013 return obj;
7015 bpf_object__close(obj);
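
The lines above trace the internal open pipeline (bpf_object__new through bpf_object__collect_relos, with bpf_object__close on the error path). A minimal usage sketch of the public entry point that drives it, assuming a hypothetical object file name; this is illustrative only, not part of the listing:

    #include <bpf/libbpf.h>

    /* Sketch only; "my_prog.bpf.o" is a hypothetical file name.
     * bpf_object__open_file() returns an ERR_PTR-style pointer, so it is
     * checked with libbpf_get_error(). */
    static struct bpf_object *open_example(void)
    {
            struct bpf_object *obj;

            obj = bpf_object__open_file("my_prog.bpf.o", NULL);
            if (libbpf_get_error(obj))
                    return NULL;
            return obj;
    }
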
7087 int bpf_object__unload(struct bpf_object *obj)
7091 if (!obj)
7094 for (i = 0; i < obj->nr_maps; i++) {
7095 zclose(obj->maps[i].fd);
7096 if (obj->maps[i].st_ops)
7097 zfree(&obj->maps[i].st_ops->kern_vdata);
7100 for (i = 0; i < obj->nr_programs; i++)
7101 bpf_program__unload(&obj->programs[i]);
7106 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7110 bpf_object__for_each_map(m, obj) {
7124 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7150 ext = find_extern_by_name(obj, sym_name);
7172 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7177 for (i = 0; i < obj->nr_extern; i++) {
7183 ext = &obj->externs[i];
7187 id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name,
7199 targ_var = btf__type_by_id(obj->btf_vmlinux, id);
7200 targ_var_name = btf__name_by_offset(obj->btf_vmlinux,
7202 targ_type = skip_mods_and_typedefs(obj->btf_vmlinux,
7206 ret = bpf_core_types_are_compat(obj->btf, local_type_id,
7207 obj->btf_vmlinux, targ_type_id);
7212 local_type = btf__type_by_id(obj->btf, local_type_id);
7213 local_name = btf__name_by_offset(obj->btf,
7215 targ_name = btf__name_by_offset(obj->btf_vmlinux,
7233 static int bpf_object__resolve_externs(struct bpf_object *obj,
7242 if (obj->nr_extern == 0)
7245 if (obj->kconfig_map_idx >= 0)
7246 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7248 for (i = 0; i < obj->nr_extern; i++) {
7249 ext = &obj->externs[i];
7278 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7282 for (i = 0; i < obj->nr_extern; i++) {
7283 ext = &obj->externs[i];
7291 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7296 err = bpf_object__read_kallsyms_file(obj);
7301 err = bpf_object__resolve_ksyms_btf_id(obj);
7305 for (i = 0; i < obj->nr_extern; i++) {
7306 ext = &obj->externs[i];
7322 struct bpf_object *obj;
7327 obj = attr->obj;
7328 if (!obj)
7331 if (obj->loaded) {
7332 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7336 err = bpf_object__probe_loading(obj);
7337 err = err ? : bpf_object__load_vmlinux_btf(obj);
7338 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7339 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7340 err = err ? : bpf_object__sanitize_maps(obj);
7341 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7342 err = err ? : bpf_object__create_maps(obj);
7343 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7344 err = err ? : bpf_object__load_progs(obj, attr->log_level);
7346 btf__free(obj->btf_vmlinux);
7347 obj->btf_vmlinux = NULL;
7349 obj->loaded = true; /* doesn't matter if successfully or not */
7357 for (i = 0; i < obj->nr_maps; i++)
7358 if (obj->maps[i].pinned && !obj->maps[i].reused)
7359 bpf_map__unpin(&obj->maps[i], NULL);
7361 bpf_object__unload(obj);
7362 pr_warn("failed to load object '%s'\n", obj->path);
7366 int bpf_object__load(struct bpf_object *obj)
7369 .obj = obj,
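
bpf_object__load() above wraps the attr-based load, which runs the err ? : chain shown earlier (probe_loading through load_progs). A hedged sketch of loading an already-opened object and looking up one of its programs, assuming a hypothetical program name:

    #include <errno.h>
    #include <bpf/libbpf.h>

    /* Sketch only; "handle_exec" is a hypothetical program name. */
    static int load_example(struct bpf_object *obj)
    {
            struct bpf_program *prog;
            int err;

            err = bpf_object__load(obj);
            if (err)
                    return err;

            prog = bpf_object__find_program_by_name(obj, "handle_exec");
            if (!prog)
                    return -ENOENT;

            return bpf_program__fd(prog);   /* fd of the loaded program */
    }
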
7734 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7739 if (!obj)
7742 if (!obj->loaded) {
7747 bpf_object__for_each_map(map, obj) {
7777 while ((map = bpf_map__prev(map, obj))) {
7787 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7792 if (!obj)
7795 bpf_object__for_each_map(map, obj) {
7822 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
7827 if (!obj)
7830 if (!obj->loaded) {
7835 bpf_object__for_each_program(prog, obj) {
7857 while ((prog = bpf_program__prev(prog, obj))) {
7874 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
7879 if (!obj)
7882 bpf_object__for_each_program(prog, obj) {
7901 int bpf_object__pin(struct bpf_object *obj, const char *path)
7905 err = bpf_object__pin_maps(obj, path);
7909 err = bpf_object__pin_programs(obj, path);
7911 bpf_object__unpin_maps(obj, path);
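
bpf_object__pin() above pins the maps first, then the programs, and unpins the maps again if pinning the programs fails; both pin paths check obj->loaded. A short sketch, assuming a hypothetical bpffs directory:

    /* Sketch only; "/sys/fs/bpf/myobj" is a hypothetical pin directory and the
     * object is assumed to be loaded already. */
    static int pin_example(struct bpf_object *obj)
    {
            return bpf_object__pin(obj, "/sys/fs/bpf/myobj");
    }
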
7952 void bpf_object__close(struct bpf_object *obj)
7956 if (IS_ERR_OR_NULL(obj))
7959 if (obj->clear_priv)
7960 obj->clear_priv(obj, obj->priv);
7962 bpf_object__elf_finish(obj);
7963 bpf_object__unload(obj);
7964 btf__free(obj->btf);
7965 btf_ext__free(obj->btf_ext);
7967 for (i = 0; i < obj->nr_maps; i++)
7968 bpf_map__destroy(&obj->maps[i]);
7970 zfree(&obj->kconfig);
7971 zfree(&obj->externs);
7972 obj->nr_extern = 0;
7974 zfree(&obj->maps);
7975 obj->nr_maps = 0;
7977 if (obj->programs && obj->nr_programs) {
7978 for (i = 0; i < obj->nr_programs; i++)
7979 bpf_program__exit(&obj->programs[i]);
7981 zfree(&obj->programs);
7983 list_del(&obj->list);
7984 free(obj);
8006 const char *bpf_object__name(const struct bpf_object *obj)
8008 return obj ? obj->name : ERR_PTR(-EINVAL);
8011 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8013 return obj ? obj->kern_version : 0;
8016 struct btf *bpf_object__btf(const struct bpf_object *obj)
8018 return obj ? obj->btf : NULL;
8021 int bpf_object__btf_fd(const struct bpf_object *obj)
8023 return obj->btf ? btf__fd(obj->btf) : -1;
8026 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8029 if (obj->priv && obj->clear_priv)
8030 obj->clear_priv(obj, obj->priv);
8032 obj->priv = priv;
8033 obj->clear_priv = clear_priv;
8037 void *bpf_object__priv(const struct bpf_object *obj)
8039 return obj ? obj->priv : ERR_PTR(-EINVAL);
8043 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8046 size_t nr_programs = obj->nr_programs;
8054 return forward ? &obj->programs[0] :
8055 &obj->programs[nr_programs - 1];
8057 if (p->obj != obj) {
8062 idx = (p - obj->programs) + (forward ? 1 : -1);
8063 if (idx >= obj->nr_programs || idx < 0)
8065 return &obj->programs[idx];
8069 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8074 prog = __bpf_program__iter(prog, obj, true);
8075 } while (prog && prog_is_subprog(obj, prog));
8081 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8086 prog = __bpf_program__iter(prog, obj, false);
8087 } while (prog && prog_is_subprog(obj, prog));
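
__bpf_program__iter() and bpf_program__next() above are what the bpf_object__for_each_program() macro expands to; subprograms are skipped. A small illustrative sketch:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* Sketch only: prints the name of every non-subprogram program in an object. */
    static void list_progs_example(const struct bpf_object *obj)
    {
            struct bpf_program *prog;

            bpf_object__for_each_program(prog, obj)
                    printf("prog: %s\n", bpf_program__name(prog));
    }
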
8146 if (prog->obj->loaded)
8538 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8544 for (i = 0; i < obj->nr_maps; i++) {
8545 map = &obj->maps[i];
8557 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8574 symbols = obj->efile.symbols;
8575 btf = obj->btf;
8589 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
8590 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8634 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8786 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
8982 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
8987 if (!obj || !obj->maps)
8990 s = obj->maps;
8991 e = obj->maps + obj->nr_maps;
8999 idx = (m - obj->maps) + i;
9000 if (idx >= obj->nr_maps || idx < 0)
9002 return &obj->maps[idx];
9006 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9009 return obj->maps;
9011 return __bpf_map__iter(prev, obj, 1);
9015 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9018 if (!obj->nr_maps)
9020 return obj->maps + obj->nr_maps - 1;
9023 return __bpf_map__iter(next, obj, -1);
9027 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9031 bpf_object__for_each_map(pos, obj) {
9039 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9041 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
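
Likewise, __bpf_map__iter() and bpf_map__next() back the bpf_object__for_each_map() macro, and bpf_object__find_map_fd_by_name() above is the usual way to hand a map fd to the plain bpf syscall wrappers. A sketch, assuming a hypothetical map name:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* Sketch only; "events" is a hypothetical map name. */
    static int find_map_example(const struct bpf_object *obj)
    {
            struct bpf_map *map;

            bpf_object__for_each_map(map, obj)
                    printf("map: %s\n", bpf_map__name(map));

            return bpf_object__find_map_fd_by_name(obj, "events");
    }
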
9045 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9073 struct bpf_object *obj;
9085 obj = bpf_object__open_xattr(&open_attr);
9086 if (IS_ERR_OR_NULL(obj))
9089 bpf_object__for_each_program(prog, obj) {
9106 bpf_object__close(obj);
9117 bpf_object__for_each_map(map, obj) {
9124 bpf_object__close(obj);
9128 err = bpf_object__load(obj);
9130 bpf_object__close(obj);
9134 *pobj = obj;
10794 struct bpf_object *obj;
10809 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
10810 if (IS_ERR(obj)) {
10812 s->name, PTR_ERR(obj));
10813 return PTR_ERR(obj);
10816 *s->obj = obj;
10823 *map = bpf_object__find_map_by_name(obj, name);
10838 *prog = bpf_object__find_program_by_name(obj, name);
10852 err = bpf_object__load(*s->obj);
10947 if (s->obj)
10948 bpf_object__close(*s->obj);
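
The skeleton lines above (bpf_object__open_skeleton resolving maps and programs by name, loading *s->obj, and closing it on destroy) are what a bpftool-generated skeleton drives. A hedged sketch using hypothetical generated names; a real skeleton header and its wrapper functions come from "bpftool gen skeleton":

    #include "my_prog.skel.h"   /* hypothetical generated header */

    /* Sketch only: open, load, attach, and tear down via the generated wrappers,
     * which call bpf_object__open_skeleton()/load_skeleton()/destroy_skeleton(). */
    static int skel_example(void)
    {
            struct my_prog_bpf *skel;
            int err;

            skel = my_prog_bpf__open();
            if (!skel)
                    return -1;

            err = my_prog_bpf__load(skel);
            if (!err)
                    err = my_prog_bpf__attach(skel);

            my_prog_bpf__destroy(skel);
            return err;
    }
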