1// SPDX-License-Identifier: GPL-2.0 2#include <errno.h> 3#include <stdlib.h> 4#include <bpf/bpf.h> 5#include <bpf/btf.h> 6#include <bpf/libbpf.h> 7#include <linux/btf.h> 8#include <linux/err.h> 9#include <linux/string.h> 10#include <internal/lib.h> 11#include <symbol/kallsyms.h> 12#include "bpf-event.h" 13#include "debug.h" 14#include "dso.h" 15#include "symbol.h" 16#include "machine.h" 17#include "env.h" 18#include "session.h" 19#include "map.h" 20#include "evlist.h" 21#include "record.h" 22#include "util/synthetic-events.h" 23 24#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) 25 26static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len) 27{ 28 int ret = 0; 29 size_t i; 30 31 for (i = 0; i < len; i++) 32 ret += snprintf(buf + ret, size - ret, "%02x", data[i]); 33 return ret; 34} 35 36static int machine__process_bpf_event_load(struct machine *machine, 37 union perf_event *event, 38 struct perf_sample *sample __maybe_unused) 39{ 40 struct bpf_prog_info_linear *info_linear; 41 struct bpf_prog_info_node *info_node; 42 struct perf_env *env = machine->env; 43 int id = event->bpf.id; 44 unsigned int i; 45 46 /* perf-record, no need to handle bpf-event */ 47 if (env == NULL) 48 return 0; 49 50 info_node = perf_env__find_bpf_prog_info(env, id); 51 if (!info_node) 52 return 0; 53 info_linear = info_node->info_linear; 54 55 for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) { 56 u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms); 57 u64 addr = addrs[i]; 58 struct map *map = maps__find(&machine->kmaps, addr); 59 60 if (map) { 61 map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO; 62 map->dso->bpf_prog.id = id; 63 map->dso->bpf_prog.sub_id = i; 64 map->dso->bpf_prog.env = env; 65 } 66 } 67 return 0; 68} 69 70int machine__process_bpf(struct machine *machine, union perf_event *event, 71 struct perf_sample *sample) 72{ 73 if (dump_trace) 74 perf_event__fprintf_bpf(event, stdout); 75 76 switch (event->bpf.type) { 77 case PERF_BPF_EVENT_PROG_LOAD: 78 return machine__process_bpf_event_load(machine, event, sample); 79 80 case PERF_BPF_EVENT_PROG_UNLOAD: 81 /* 82 * Do not free bpf_prog_info and btf of the program here, 83 * as annotation still need them. They will be freed at 84 * the end of the session. 85 */ 86 break; 87 default: 88 pr_debug("unexpected bpf event type of %d\n", event->bpf.type); 89 break; 90 } 91 return 0; 92} 93 94static int perf_env__fetch_btf(struct perf_env *env, 95 u32 btf_id, 96 struct btf *btf) 97{ 98 struct btf_node *node; 99 u32 data_size; 100 const void *data; 101 102 data = btf__get_raw_data(btf, &data_size); 103 104 node = malloc(data_size + sizeof(struct btf_node)); 105 if (!node) 106 return -1; 107 108 node->id = btf_id; 109 node->data_size = data_size; 110 memcpy(node->data, data, data_size); 111 112 if (!perf_env__insert_btf(env, node)) { 113 /* Insertion failed because of a duplicate. */ 114 free(node); 115 return -1; 116 } 117 return 0; 118} 119 120static int synthesize_bpf_prog_name(char *buf, int size, 121 struct bpf_prog_info *info, 122 struct btf *btf, 123 u32 sub_id) 124{ 125 u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags); 126 void *func_infos = (void *)(uintptr_t)(info->func_info); 127 u32 sub_prog_cnt = info->nr_jited_ksyms; 128 const struct bpf_func_info *finfo; 129 const char *short_name = NULL; 130 const struct btf_type *t; 131 int name_len; 132 133 name_len = snprintf(buf, size, "bpf_prog_"); 134 name_len += snprintf_hex(buf + name_len, size - name_len, 135 prog_tags[sub_id], BPF_TAG_SIZE); 136 if (btf) { 137 finfo = func_infos + sub_id * info->func_info_rec_size; 138 t = btf__type_by_id(btf, finfo->type_id); 139 short_name = btf__name_by_offset(btf, t->name_off); 140 } else if (sub_id == 0 && sub_prog_cnt == 1) { 141 /* no subprog */ 142 if (info->name[0]) 143 short_name = info->name; 144 } else 145 short_name = "F"; 146 if (short_name) 147 name_len += snprintf(buf + name_len, size - name_len, 148 "_%s", short_name); 149 return name_len; 150} 151 152/* 153 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf 154 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And 155 * one PERF_RECORD_KSYMBOL is generated for each sub program. 156 * 157 * Returns: 158 * 0 for success; 159 * -1 for failures; 160 * -2 for lack of kernel support. 161 */ 162static int perf_event__synthesize_one_bpf_prog(struct perf_session *session, 163 perf_event__handler_t process, 164 struct machine *machine, 165 int fd, 166 union perf_event *event, 167 struct record_opts *opts) 168{ 169 struct perf_record_ksymbol *ksymbol_event = &event->ksymbol; 170 struct perf_record_bpf_event *bpf_event = &event->bpf; 171 struct bpf_prog_info_linear *info_linear; 172 struct perf_tool *tool = session->tool; 173 struct bpf_prog_info_node *info_node; 174 struct bpf_prog_info *info; 175 struct btf *btf = NULL; 176 struct perf_env *env; 177 u32 sub_prog_cnt, i; 178 int err = 0; 179 u64 arrays; 180 181 /* 182 * for perf-record and perf-report use header.env; 183 * otherwise, use global perf_env. 184 */ 185 env = session->data ? &session->header.env : &perf_env; 186 187 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS; 188 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; 189 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; 190 arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS; 191 arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS; 192 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; 193 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; 194 195 info_linear = bpf_program__get_prog_info_linear(fd, arrays); 196 if (IS_ERR_OR_NULL(info_linear)) { 197 info_linear = NULL; 198 pr_debug("%s: failed to get BPF program info. aborting\n", __func__); 199 return -1; 200 } 201 202 if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) { 203 pr_debug("%s: the kernel is too old, aborting\n", __func__); 204 return -2; 205 } 206 207 info = &info_linear->info; 208 209 /* number of ksyms, func_lengths, and tags should match */ 210 sub_prog_cnt = info->nr_jited_ksyms; 211 if (sub_prog_cnt != info->nr_prog_tags || 212 sub_prog_cnt != info->nr_jited_func_lens) 213 return -1; 214 215 /* check BTF func info support */ 216 if (info->btf_id && info->nr_func_info && info->func_info_rec_size) { 217 /* btf func info number should be same as sub_prog_cnt */ 218 if (sub_prog_cnt != info->nr_func_info) { 219 pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__); 220 err = -1; 221 goto out; 222 } 223 if (btf__get_from_id(info->btf_id, &btf)) { 224 pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id); 225 err = -1; 226 btf = NULL; 227 goto out; 228 } 229 perf_env__fetch_btf(env, info->btf_id, btf); 230 } 231 232 /* Synthesize PERF_RECORD_KSYMBOL */ 233 for (i = 0; i < sub_prog_cnt; i++) { 234 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens); 235 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms); 236 int name_len; 237 238 *ksymbol_event = (struct perf_record_ksymbol) { 239 .header = { 240 .type = PERF_RECORD_KSYMBOL, 241 .size = offsetof(struct perf_record_ksymbol, name), 242 }, 243 .addr = prog_addrs[i], 244 .len = prog_lens[i], 245 .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF, 246 .flags = 0, 247 }; 248 249 name_len = synthesize_bpf_prog_name(ksymbol_event->name, 250 KSYM_NAME_LEN, info, btf, i); 251 ksymbol_event->header.size += PERF_ALIGN(name_len + 1, 252 sizeof(u64)); 253 254 memset((void *)event + event->header.size, 0, machine->id_hdr_size); 255 event->header.size += machine->id_hdr_size; 256 err = perf_tool__process_synth_event(tool, event, 257 machine, process); 258 } 259 260 if (!opts->no_bpf_event) { 261 /* Synthesize PERF_RECORD_BPF_EVENT */ 262 *bpf_event = (struct perf_record_bpf_event) { 263 .header = { 264 .type = PERF_RECORD_BPF_EVENT, 265 .size = sizeof(struct perf_record_bpf_event), 266 }, 267 .type = PERF_BPF_EVENT_PROG_LOAD, 268 .flags = 0, 269 .id = info->id, 270 }; 271 memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE); 272 memset((void *)event + event->header.size, 0, machine->id_hdr_size); 273 event->header.size += machine->id_hdr_size; 274 275 /* save bpf_prog_info to env */ 276 info_node = malloc(sizeof(struct bpf_prog_info_node)); 277 if (!info_node) { 278 err = -1; 279 goto out; 280 } 281 282 info_node->info_linear = info_linear; 283 perf_env__insert_bpf_prog_info(env, info_node); 284 info_linear = NULL; 285 286 /* 287 * process after saving bpf_prog_info to env, so that 288 * required information is ready for look up 289 */ 290 err = perf_tool__process_synth_event(tool, event, 291 machine, process); 292 } 293 294out: 295 free(info_linear); 296 free(btf); 297 return err ? -1 : 0; 298} 299 300struct kallsyms_parse { 301 union perf_event *event; 302 perf_event__handler_t process; 303 struct machine *machine; 304 struct perf_tool *tool; 305}; 306 307static int 308process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data) 309{ 310 struct machine *machine = data->machine; 311 union perf_event *event = data->event; 312 struct perf_record_ksymbol *ksymbol; 313 int len; 314 315 ksymbol = &event->ksymbol; 316 317 *ksymbol = (struct perf_record_ksymbol) { 318 .header = { 319 .type = PERF_RECORD_KSYMBOL, 320 .size = offsetof(struct perf_record_ksymbol, name), 321 }, 322 .addr = addr, 323 .len = page_size, 324 .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF, 325 .flags = 0, 326 }; 327 328 len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name); 329 ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64)); 330 memset((void *) event + event->header.size, 0, machine->id_hdr_size); 331 event->header.size += machine->id_hdr_size; 332 333 return perf_tool__process_synth_event(data->tool, event, machine, 334 data->process); 335} 336 337static int 338kallsyms_process_symbol(void *data, const char *_name, 339 char type __maybe_unused, u64 start) 340{ 341 char disp[KSYM_NAME_LEN]; 342 char *module, *name; 343 unsigned long id; 344 int err = 0; 345 346 module = strchr(_name, '\t'); 347 if (!module) 348 return 0; 349 350 /* We are going after [bpf] module ... */ 351 if (strcmp(module + 1, "[bpf]")) 352 return 0; 353 354 name = memdup(_name, (module - _name) + 1); 355 if (!name) 356 return -ENOMEM; 357 358 name[module - _name] = 0; 359 360 /* .. and only for trampolines and dispatchers */ 361 if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) || 362 (sscanf(name, "bpf_dispatcher_%s", disp) == 1)) 363 err = process_bpf_image(name, start, data); 364 365 free(name); 366 return err; 367} 368 369int perf_event__synthesize_bpf_events(struct perf_session *session, 370 perf_event__handler_t process, 371 struct machine *machine, 372 struct record_opts *opts) 373{ 374 const char *kallsyms_filename = "/proc/kallsyms"; 375 struct kallsyms_parse arg; 376 union perf_event *event; 377 __u32 id = 0; 378 int err; 379 int fd; 380 381 event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size); 382 if (!event) 383 return -1; 384 385 /* Synthesize all the bpf programs in system. */ 386 while (true) { 387 err = bpf_prog_get_next_id(id, &id); 388 if (err) { 389 if (errno == ENOENT) { 390 err = 0; 391 break; 392 } 393 pr_debug("%s: can't get next program: %s%s\n", 394 __func__, strerror(errno), 395 errno == EINVAL ? " -- kernel too old?" : ""); 396 /* don't report error on old kernel or EPERM */ 397 err = (errno == EINVAL || errno == EPERM) ? 0 : -1; 398 break; 399 } 400 fd = bpf_prog_get_fd_by_id(id); 401 if (fd < 0) { 402 pr_debug("%s: failed to get fd for prog_id %u\n", 403 __func__, id); 404 continue; 405 } 406 407 err = perf_event__synthesize_one_bpf_prog(session, process, 408 machine, fd, 409 event, opts); 410 close(fd); 411 if (err) { 412 /* do not return error for old kernel */ 413 if (err == -2) 414 err = 0; 415 break; 416 } 417 } 418 419 /* Synthesize all the bpf images - trampolines/dispatchers. */ 420 if (symbol_conf.kallsyms_name != NULL) 421 kallsyms_filename = symbol_conf.kallsyms_name; 422 423 arg = (struct kallsyms_parse) { 424 .event = event, 425 .process = process, 426 .machine = machine, 427 .tool = session->tool, 428 }; 429 430 if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) { 431 pr_err("%s: failed to synthesize bpf images: %s\n", 432 __func__, strerror(errno)); 433 } 434 435 free(event); 436 return err; 437} 438 439static void perf_env__add_bpf_info(struct perf_env *env, u32 id) 440{ 441 struct bpf_prog_info_linear *info_linear; 442 struct bpf_prog_info_node *info_node; 443 struct btf *btf = NULL; 444 u64 arrays; 445 u32 btf_id; 446 int fd; 447 448 fd = bpf_prog_get_fd_by_id(id); 449 if (fd < 0) 450 return; 451 452 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS; 453 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; 454 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; 455 arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS; 456 arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS; 457 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; 458 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; 459 460 info_linear = bpf_program__get_prog_info_linear(fd, arrays); 461 if (IS_ERR_OR_NULL(info_linear)) { 462 pr_debug("%s: failed to get BPF program info. aborting\n", __func__); 463 goto out; 464 } 465 466 btf_id = info_linear->info.btf_id; 467 468 info_node = malloc(sizeof(struct bpf_prog_info_node)); 469 if (info_node) { 470 info_node->info_linear = info_linear; 471 perf_env__insert_bpf_prog_info(env, info_node); 472 } else 473 free(info_linear); 474 475 if (btf_id == 0) 476 goto out; 477 478 if (btf__get_from_id(btf_id, &btf)) { 479 pr_debug("%s: failed to get BTF of id %u, aborting\n", 480 __func__, btf_id); 481 goto out; 482 } 483 perf_env__fetch_btf(env, btf_id, btf); 484 485out: 486 free(btf); 487 close(fd); 488} 489 490static int bpf_event__sb_cb(union perf_event *event, void *data) 491{ 492 struct perf_env *env = data; 493 494 if (event->header.type != PERF_RECORD_BPF_EVENT) 495 return -1; 496 497 switch (event->bpf.type) { 498 case PERF_BPF_EVENT_PROG_LOAD: 499 perf_env__add_bpf_info(env, event->bpf.id); 500 501 case PERF_BPF_EVENT_PROG_UNLOAD: 502 /* 503 * Do not free bpf_prog_info and btf of the program here, 504 * as annotation still need them. They will be freed at 505 * the end of the session. 506 */ 507 break; 508 default: 509 pr_debug("unexpected bpf event type of %d\n", event->bpf.type); 510 break; 511 } 512 513 return 0; 514} 515 516int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env) 517{ 518 struct perf_event_attr attr = { 519 .type = PERF_TYPE_SOFTWARE, 520 .config = PERF_COUNT_SW_DUMMY, 521 .sample_id_all = 1, 522 .watermark = 1, 523 .bpf_event = 1, 524 .size = sizeof(attr), /* to capture ABI version */ 525 }; 526 527 /* 528 * Older gcc versions don't support designated initializers, like above, 529 * for unnamed union members, such as the following: 530 */ 531 attr.wakeup_watermark = 1; 532 533 return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env); 534} 535 536void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, 537 struct perf_env *env, 538 FILE *fp) 539{ 540 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens); 541 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms); 542 char name[KSYM_NAME_LEN]; 543 struct btf *btf = NULL; 544 u32 sub_prog_cnt, i; 545 546 sub_prog_cnt = info->nr_jited_ksyms; 547 if (sub_prog_cnt != info->nr_prog_tags || 548 sub_prog_cnt != info->nr_jited_func_lens) 549 return; 550 551 if (info->btf_id) { 552 struct btf_node *node; 553 554 node = __perf_env__find_btf(env, info->btf_id); 555 if (node) 556 btf = btf__new((__u8 *)(node->data), 557 node->data_size); 558 } 559 560 if (sub_prog_cnt == 1) { 561 synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0); 562 fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n", 563 info->id, name, prog_addrs[0], prog_lens[0]); 564 goto out; 565 } 566 567 fprintf(fp, "# bpf_prog_info %u:\n", info->id); 568 for (i = 0; i < sub_prog_cnt; i++) { 569 synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i); 570 571 fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n", 572 i, name, prog_addrs[i], prog_lens[i]); 573 } 574out: 575 btf__free(btf); 576} 577