Lines matching refs:trace in tools/perf/builtin-trace.c

2  * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
52 #include "trace/beauty/beauty.h"
53 #include "trace-event.h"
119 struct trace {
291 * The evsel->priv as used by 'perf trace'
721 #include "trace/beauty/generated/fsconfig_arrays.c"
891 #include "trace/beauty/arch_errno_names.c"
892 #include "trace/beauty/eventfd.c"
893 #include "trace/beauty/futex_op.c"
894 #include "trace/beauty/futex_val3.c"
895 #include "trace/beauty/mmap.c"
896 #include "trace/beauty/mode_t.c"
897 #include "trace/beauty/msg_flags.c"
898 #include "trace/beauty/open_flags.c"
899 #include "trace/beauty/perf_event_open.c"
900 #include "trace/beauty/pid.c"
901 #include "trace/beauty/sched_policy.c"
902 #include "trace/beauty/seccomp.c"
903 #include "trace/beauty/signum.c"
904 #include "trace/beauty/socket_type.c"
905 #include "trace/beauty/waitid_options.c"
1400 struct trace *trace)
1404 if (ttrace == NULL || trace->fd_path_disabled)
1411 if (!trace->live)
1413 ++trace->stats.proc_getname;
1425 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1433 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1436 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1439 const char *path = thread__fd_path(thread, fd, trace);
1496 if (!arg->trace->vfs_getname)
1503 static bool trace__filter_duration(struct trace *trace, double t)
1505 return t < (trace->duration_filter * NSEC_PER_MSEC);
1508 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1510 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1521 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1524 return __trace__fprintf_tstamp(trace, tstamp, fp);
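Taken together, these matches are the timestamp column: __trace__fprintf_tstamp() prints the delta from trace->base_time in milliseconds. A minimal standalone sketch of that conversion, with plain arguments instead of struct trace (the exact column format is my reading of the output, not copied from the source):

    #include <stdio.h>

    typedef unsigned long long u64;
    #define NSEC_PER_MSEC 1000000ULL

    /* Sketch of __trace__fprintf_tstamp(): milliseconds since the first
     * event, three decimal places, right-aligned in a 10-char column. */
    static size_t fprintf_tstamp(FILE *fp, u64 tstamp, u64 base_time)
    {
            double ts = (double)(tstamp - base_time) / NSEC_PER_MSEC;

            return fprintf(fp, "%10.3f ", ts);
    }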
1538 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1542 if (trace->multiple_threads) {
1543 if (trace->show_comm)
1551 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1556 if (trace->show_tstamp)
1557 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1558 if (trace->show_duration)
1560 return printed + trace__fprintf_comm_tid(trace, thread, fp);
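The entry head pairs that timestamp with a comm/tid column which trace__fprintf_comm_tid() only emits when more than one thread is being traced. A hedged sketch, with the thread object replaced by plain comm/tid arguments:

    #include <stdio.h>

    /* Sketch of trace__fprintf_comm_tid(): print "comm/tid " (comm only
     * if requested), and only when multiple threads are being traced. */
    static size_t fprintf_comm_tid(FILE *fp, int multiple_threads,
                                   int show_comm, const char *comm, int tid)
    {
            size_t printed = 0;

            if (multiple_threads) {
                    if (show_comm)
                            printed += fprintf(fp, "%s/", comm);
                    printed += fprintf(fp, "%d ", tid);
            }
            return printed;
    }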
1563 static int trace__process_event(struct trace *trace, struct machine *machine,
1570 color_fprintf(trace->output, PERF_COLOR_RED,
1587 struct trace *trace = container_of(tool, struct trace, tool);
1588 return trace__process_event(trace, machine, event, sample);
1609 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1616 trace->host = machine__new_host();
1617 if (trace->host == NULL)
1620 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1624 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1634 static void trace__symbols__exit(struct trace *trace)
1636 machine__exit(trace->host);
1637 trace->host = NULL;
1747 static int trace__read_syscall_info(struct trace *trace, int id)
1751 const char *name = syscalltbl__name(trace->sctbl, id);
1754 if (trace->syscalls.table == NULL) {
1755 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1756 if (trace->syscalls.table == NULL)
1760 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1762 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1768 if (trace->syscalls.table == NULL)
1771 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1773 trace->syscalls.table = table;
1774 trace->sctbl->syscalls.max_id = id;
1777 sc = trace->syscalls.table + id;
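This cluster is trace__read_syscall_info() growing its syscall table on demand: when an id beyond the current maximum shows up, the table is realloc()ed and the newly added tail is memset() to zero so unseen ids read as "no info yet". A self-contained sketch of the same idiom (illustrative entry type, not the perf structs; *max_id starts at -1 for an empty table):

    #include <stdlib.h>
    #include <string.h>

    struct entry { const char *name; };

    /* Grow *table to hold ids 0..id, zeroing the entries that did not
     * exist before, mirroring the realloc()+memset() pair above. */
    static int table_grow(struct entry **table, int *max_id, int id)
    {
            struct entry *t;

            if (*table != NULL && id <= *max_id)
                    return 0;

            t = realloc(*table, (id + 1) * sizeof(*t));
            if (t == NULL)
                    return -1;

            memset(t + *max_id + 1, 0, (id - *max_id) * sizeof(*t));
            *table = t;
            *max_id = id;
            return 0;
    }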
1798 * Fails to read trace point format via sysfs node, so the trace point
1846 static int trace__validate_ev_qualifier(struct trace *trace)
1851 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1853 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1854 sizeof(trace->ev_qualifier_ids.entries[0]));
1856 if (trace->ev_qualifier_ids.entries == NULL) {
1858 trace->output);
1863 strlist__for_each_entry(pos, trace->ev_qualifier) {
1865 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1868 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1883 trace->ev_qualifier_ids.entries[nr_used++] = id;
1888 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1895 entries = realloc(trace->ev_qualifier_ids.entries,
1896 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1899 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1902 trace->ev_qualifier_ids.entries = entries;
1904 trace->ev_qualifier_ids.entries[nr_used++] = id;
1908 trace->ev_qualifier_ids.nr = nr_used;
1909 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1915 zfree(&trace->ev_qualifier_ids.entries);
1916 trace->ev_qualifier_ids.nr = 0;
1920 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1924 if (trace->ev_qualifier_ids.nr == 0)
1927 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1928 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1931 return !trace->not_ev_qualifier;
1933 return trace->not_ev_qualifier;
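The qualifier ids collected by trace__validate_ev_qualifier() are qsort()ed once precisely so that trace__syscall_enabled() can bsearch() them per event, honoring the not_ev_qualifier negation. A self-contained sketch of that lookup (comparator written out here; fine for small syscall ids):

    #include <stdlib.h>
    #include <stdbool.h>

    /* Comparator shared by qsort() at setup and bsearch() per event. */
    static int intcmp(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    /* Sketch of trace__syscall_enabled(): no qualifier means everything
     * is on; otherwise membership in the sorted id array decides,
     * inverted when the qualifier was negated. */
    static bool syscall_enabled(const int *ids, size_t nr, bool negated, int id)
    {
            bool found;

            if (nr == 0)
                    return true;

            found = bsearch(&id, ids, nr, sizeof(int), intcmp) != NULL;
            return negated ? !found : found;
    }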
1965 * in tools/perf/trace/beauty/mount_flags.c
1989 struct trace *trace, struct thread *thread)
2002 .trace = trace,
2004 .show_string_prefix = trace->show_string_prefix,
2037 !trace->show_zeros &&
2047 if (trace->show_arg_names)
2076 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2080 static struct syscall *trace__syscall_info(struct trace *trace,
2099 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2108 if (id > trace->sctbl->syscalls.max_id) {
2110 if (id >= trace->sctbl->syscalls.max_id) {
2116 err = trace__read_syscall_info(trace, id);
2122 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2123 (err = trace__read_syscall_info(trace, id)) != 0)
2126 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2129 return &trace->syscalls.table[id];
2134 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2135 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2136 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2137 fputs(" information\n", trace->output);
2204 static int trace__printf_interrupted_entry(struct trace *trace)
2210 if (trace->failure_only || trace->current == NULL)
2213 ttrace = thread__priv(trace->current);
2218 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2219 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2221 if (len < trace->args_alignment - 4)
2222 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2224 printed += fprintf(trace->output, " ...\n");
2227 ++trace->nr_events_printed;
2232 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2237 if (trace->print_sample) {
2240 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2275 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2286 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2292 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2293 ttrace = thread__trace(thread, trace->output);
2297 trace__fprintf_sample(trace, evsel, sample, thread);
2307 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2308 trace__printf_interrupted_entry(trace);
2319 if (evsel != trace->syscalls.events.sys_enter)
2320 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2326 args, augmented_args, augmented_args_size, trace, thread);
2329 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2332 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2333 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2334 if (trace->args_alignment > printed)
2335 alignment = trace->args_alignment - printed;
2336 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2344 if (trace->current != thread) {
2345 thread__put(trace->current);
2346 trace->current = thread__get(thread);
2354 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2360 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2368 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2369 ttrace = thread__trace(thread, trace->output);
2378 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2379 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2380 fprintf(trace->output, "%s", msg);
2387 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2394 trace->max_stack;
2397 if (machine__resolve(trace->host, &al, sample) < 0)
2405 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2412 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2423 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2432 int alignment = trace->args_alignment;
2433 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2439 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2440 ttrace = thread__trace(thread, trace->output);
2444 trace__fprintf_sample(trace, evsel, sample, thread);
2448 if (trace->summary)
2449 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2451 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2454 ++trace->stats.vfs_getname;
2459 if (trace__filter_duration(trace, duration))
2462 } else if (trace->duration_filter)
2466 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2468 if (callchain_cursor.nr < trace->min_stack)
2474 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2477 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2480 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2482 printed += fprintf(trace->output, " ... [");
2483 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2485 printed += fprintf(trace->output, "]: %s()", sc->name);
2495 fprintf(trace->output, ")%*s= ", alignment, " ");
2501 fprintf(trace->output, "%ld", ret);
2508 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2511 fprintf(trace->output, "0 (Timeout)");
2517 .trace = trace,
2521 fprintf(trace->output, "%s", bf);
2523 fprintf(trace->output, "%#lx", ret);
2525 struct thread *child = machine__find_thread(trace->host, ret, ret);
2528 fprintf(trace->output, "%ld", ret);
2530 fprintf(trace->output, " (%s)", thread__comm_str(child));
2536 fputc('\n', trace->output);
2542 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2546 trace__fprintf_callchain(trace, sample);
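Strung together, the sys_enter/sys_exit printing above (entry head, saved entry_str, the aligned "= " column, errno beautification, "continued" for interrupted entries) yields lines of roughly this shape; illustrative only, not verbatim perf output:

     123.456 ( 0.004 ms): cat/1234 openat(dfd: CWD, filename: "/etc/passwd", flags: RDONLY) = 3
     123.460 ( 0.002 ms): cat/1234 openat(dfd: CWD, filename: "/etc/nope", flags: RDONLY) = -1 ENOENT (No such file or directory)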
2557 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2561 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2618 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2624 struct thread *thread = machine__findnew_thread(trace->host,
2627 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2633 trace->runtime_ms += runtime_ms;
2639 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2672 static void bpf_output__fprintf(struct trace *trace,
2676 bpf_output__printer, NULL, trace->output);
2677 ++trace->nr_events_printed;
2680 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
2697 .trace = trace,
2699 .show_string_prefix = trace->show_string_prefix,
2732 !trace->show_zeros &&
2745 if (1 || trace->show_arg_names)
2751 return printed + fprintf(trace->output, "%s", bf);
2754 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2769 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2772 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2774 if (callchain_cursor.nr < trace->min_stack)
2780 trace__printf_interrupted_entry(trace);
2781 trace__fprintf_tstamp(trace, sample->time, trace->output);
2783 if (trace->trace_syscalls && trace->show_duration)
2784 fprintf(trace->output, "( ): ");
2787 trace__fprintf_comm_tid(trace, thread, trace->output);
2789 if (evsel == trace->syscalls.events.augmented) {
2791 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2794 fprintf(trace->output, "%s(", sc->name);
2795 trace__fprintf_sys_enter(trace, evsel, sample);
2796 fputc(')', trace->output);
2807 fprintf(trace->output, "%s(", evsel->name);
2810 bpf_output__fprintf(trace, sample);
2813 trace__fprintf_sys_enter(trace, evsel, sample)) {
2814 if (trace->libtraceevent_print) {
2817 trace->output);
2819 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2825 fprintf(trace->output, ")\n");
2828 trace__fprintf_callchain(trace, sample);
2832 ++trace->nr_events_printed;
2860 static int trace__pgfault(struct trace *trace,
2872 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2875 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2877 if (callchain_cursor.nr < trace->min_stack)
2883 ttrace = thread__trace(thread, trace->output);
2892 if (trace->summary_only)
2897 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2899 fprintf(trace->output, "%sfault [",
2903 print_location(trace->output, sample, &al, false, true);
2905 fprintf(trace->output, "] => ");
2918 print_location(trace->output, sample, &al, true, false);
2920 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2923 trace__fprintf_callchain(trace, sample);
2927 ++trace->nr_events_printed;
2935 static void trace__set_base_time(struct trace *trace,
2947 if (trace->base_time == 0 && !trace->full_time &&
2949 trace->base_time = sample->time;
2958 struct trace *trace = container_of(tool, struct trace, tool);
2964 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2968 trace__set_base_time(trace, evsel, sample);
2971 ++trace->nr_events;
2972 handler(trace, evsel, event, sample);
2979 static int trace__record(struct trace *trace, int argc, const char **argv)
3011 if (trace->trace_syscalls) {
3029 if (trace->trace_pgfaults & TRACE_PFMAJ)
3033 if (trace->trace_pgfaults & TRACE_PFMIN)
3047 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3104 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3110 trace__process_event(trace, trace->host, event, sample);
3114 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
3116 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3120 if (evswitch__discard(&trace->evswitch, evsel))
3123 trace__set_base_time(trace, evsel, sample);
3127 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3132 handler(trace, evsel, event, sample);
3135 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3139 static int trace__add_syscall_newtp(struct trace *trace)
3142 struct evlist *evlist = trace->evlist;
3159 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3160 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3165 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3174 trace->syscalls.events.sys_enter = sys_enter;
3175 trace->syscalls.events.sys_exit = sys_exit;
3188 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3192 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3193 trace->ev_qualifier_ids.nr,
3194 trace->ev_qualifier_ids.entries);
3199 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3200 sys_exit = trace->syscalls.events.sys_exit;
3213 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3215 if (trace->bpf_obj == NULL)
3218 return bpf_object__find_map_by_name(trace->bpf_obj, name);
3221 static void trace__set_bpf_map_filtered_pids(struct trace *trace)
3223 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3226 static void trace__set_bpf_map_syscalls(struct trace *trace)
3228 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
3229 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3230 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
3233 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3235 if (trace->bpf_obj == NULL)
3238 return bpf_object__find_program_by_title(trace->bpf_obj, name);
3241 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3249 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3254 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3261 prog = trace__find_bpf_program_by_title(trace, prog_name);
3271 return trace->syscalls.unaugmented_prog;
3274 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3276 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3281 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3282 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3285 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3287 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3288 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3291 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3293 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3294 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3297 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
3299 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3316 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
3318 int fd = bpf_map__fd(trace->syscalls.map);
3320 .enabled = !trace->not_ev_qualifier,
3325 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
3326 int key = trace->ev_qualifier_ids.entries[i];
3329 trace__init_bpf_map_syscall_args(trace, key, &value);
3330 trace__init_syscall_bpf_progs(trace, key);
3341 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
3343 int fd = bpf_map__fd(trace->syscalls.map);
3349 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3351 trace__init_bpf_map_syscall_args(trace, key, &value);
3361 static int trace__init_syscalls_bpf_map(struct trace *trace)
3365 if (trace->ev_qualifier_ids.nr)
3366 enabled = trace->not_ev_qualifier;
3368 return __trace__init_syscalls_bpf_map(trace, enabled);
3371 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
3387 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3388 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3393 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3445 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3446 if (pair_prog == trace->syscalls.unaugmented_prog)
3459 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3461 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3462 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3465 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3468 if (!trace__syscall_enabled(trace, key))
3471 trace__init_syscall_bpf_progs(trace, key);
3474 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3478 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3512 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3513 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3524 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3531 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
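The prog_array matches above are perf trace wiring one BPF program fd per syscall id into BPF_MAP_TYPE_PROG_ARRAY maps (syscalls_sys_enter / syscalls_sys_exit), so the raw_syscalls hooks can tail-call a per-syscall augmenter, falling back to the unaugmented program. A hedged sketch of a single such update using plain libbpf calls, assuming map and prog come from an already loaded bpf_object:

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* Write prog's fd at key (a syscall id) into a prog-array map,
     * making it the tail-call target for that syscall. */
    static int wire_syscall_prog(struct bpf_map *map,
                                 struct bpf_program *prog, int key)
    {
            int prog_fd = bpf_program__fd(prog);

            return bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd,
                                       BPF_ANY);
    }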
3551 static void trace__delete_augmented_syscalls(struct trace *trace)
3555 evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3556 evsel__delete(trace->syscalls.events.augmented);
3557 trace->syscalls.events.augmented = NULL;
3559 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3560 if (evsel->bpf_obj == trace->bpf_obj) {
3561 evlist__remove(trace->evlist, evsel);
3567 bpf_object__close(trace->bpf_obj);
3568 trace->bpf_obj = NULL;
3571 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3577 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3581 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3585 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3590 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3595 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3601 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3606 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3611 static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
3615 evlist__for_each_entry(trace->evlist, evsel) {
3616 if (evsel == trace->syscalls.events.augmented ||
3617 evsel->bpf_obj == trace->bpf_obj)
3626 static int trace__set_ev_qualifier_filter(struct trace *trace)
3628 if (trace->syscalls.map)
3629 return trace__set_ev_qualifier_bpf_filter(trace);
3630 if (trace->syscalls.events.sys_enter)
3631 return trace__set_ev_qualifier_tp_filter(trace);
3653 static int trace__set_filter_loop_pids(struct trace *trace)
3659 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3662 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3675 err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3676 if (!err && trace->filter_pids.map)
3677 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3682 static int trace__set_filter_pids(struct trace *trace)
3691 if (trace->filter_pids.nr > 0) {
3692 err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3693 trace->filter_pids.entries);
3694 if (!err && trace->filter_pids.map) {
3695 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3696 trace->filter_pids.entries);
3698 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3699 err = trace__set_filter_loop_pids(trace);
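trace__set_filter_pids() / trace__set_filter_loop_pids() keep perf trace from tracing itself by appending a tracepoint filter over the offending pids (and mirroring them into the BPF pids_filtered map when present). A sketch of the kind of filter string this installs; build_pid_filter() is a hypothetical helper for illustration, not a perf function:

    #include <stdio.h>

    /* Build "common_pid != A && common_pid != B ..." in the style of the
     * append_tp_filter_pids path above (buffer handling simplified). */
    static void build_pid_filter(char *buf, size_t size,
                                 const int *pids, int nr)
    {
            size_t len = 0;
            int i;

            for (i = 0; i < nr && len < size; i++)
                    len += snprintf(buf + len, size - len,
                                    "%scommon_pid != %d",
                                    i ? " && " : "", pids[i]);
    }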
3705 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3707 struct evlist *evlist = trace->evlist;
3713 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3715 trace__handle_event(trace, event, &sample);
3720 static int __trace__flush_events(struct trace *trace)
3722 u64 first = ordered_events__first_time(&trace->oe.data);
3723 u64 flush = trace->oe.last - NSEC_PER_SEC;
3727 return ordered_events__flush_time(&trace->oe.data, flush);
3732 static int trace__flush_events(struct trace *trace)
3734 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3737 static int trace__deliver_event(struct trace *trace, union perf_event *event)
3741 if (!trace->sort_events)
3742 return __trace__deliver_event(trace, event);
3744 err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3748 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3752 return trace__flush_events(trace);
3758 struct trace *trace = container_of(oe, struct trace, oe.data);
3760 return __trace__deliver_event(trace, event->event);
3778 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3876 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3878 struct evlist *evlist = trace->evlist;
3885 if (trace__expand_filter(trace, evsel)) {
3894 static int trace__run(struct trace *trace, int argc, const char **argv)
3896 struct evlist *evlist = trace->evlist;
3903 trace->live = true;
3905 if (!trace->raw_augmented_syscalls) {
3906 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3909 if (trace->trace_syscalls)
3910 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3913 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3917 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3921 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3925 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3929 if (trace->sched &&
3936 * trace -G A -e sched:*switch
3941 * trace -e sched:*switch -G A
3949 * trace -G A -e sched:*switch -G B
3957 if (trace->cgroup)
3958 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3960 err = perf_evlist__create_maps(evlist, &trace->opts.target);
3962 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3966 err = trace__symbols_init(trace, evlist);
3968 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3972 perf_evlist__config(evlist, &trace->opts, &callchain_param);
3978 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3981 fprintf(trace->output, "Couldn't run the workload!\n");
4000 err = trace__set_filter_pids(trace);
4004 if (trace->syscalls.map)
4005 trace__init_syscalls_bpf_map(trace);
4007 if (trace->syscalls.prog_array.sys_enter)
4008 trace__init_syscalls_bpf_prog_array_maps(trace);
4010 if (trace->ev_qualifier_ids.nr > 0) {
4011 err = trace__set_ev_qualifier_filter(trace);
4015 if (trace->syscalls.events.sys_exit) {
4017 trace->syscalls.events.sys_exit->filter);
4032 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4034 err = trace__expand_filters(trace, &evsel);
4041 if (trace->dump.map)
4042 bpf_map__fprintf(trace->dump.map, trace->output);
4044 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4048 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
4054 if (trace->opts.initial_delay) {
4055 usleep(trace->opts.initial_delay * 1000);
4059 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4072 evsel->core.attr.sample_max_stack = trace->max_stack;
4075 before = trace->nr_events;
4086 ++trace->nr_events;
4088 err = trace__deliver_event(trace, event);
4105 if (trace->nr_events == before) {
4114 if (trace__flush_events(trace))
4122 thread__zput(trace->current);
4126 if (trace->sort_events)
4127 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4130 if (trace->summary)
4131 trace__fprintf_thread_summary(trace, trace->output);
4133 if (trace->show_tool_stats) {
4134 fprintf(trace->output, "Stats:\n "
4137 trace->stats.vfs_getname,
4138 trace->stats.proc_getname);
4143 trace__symbols__exit(trace);
4146 cgroup__put(trace->cgroup);
4147 trace->evlist = NULL;
4148 trace->live = false;
4169 fprintf(trace->output, "%s\n", errbuf);
4173 fprintf(trace->output,
4180 fprintf(trace->output, "Not enough memory to run!\n");
4184 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4188 static int trace__replay(struct trace *trace)
4196 .force = trace->force,
4202 trace->tool.sample = trace__process_sample;
4203 trace->tool.mmap = perf_event__process_mmap;
4204 trace->tool.mmap2 = perf_event__process_mmap2;
4205 trace->tool.comm = perf_event__process_comm;
4206 trace->tool.exit = perf_event__process_exit;
4207 trace->tool.fork = perf_event__process_fork;
4208 trace->tool.attr = perf_event__process_attr;
4209 trace->tool.tracing_data = perf_event__process_tracing_data;
4210 trace->tool.build_id = perf_event__process_build_id;
4211 trace->tool.namespaces = perf_event__process_namespaces;
4213 trace->tool.ordered_events = true;
4214 trace->tool.ordering_requires_timestamps = true;
4217 trace->multiple_threads = true;
4219 session = perf_session__new(&data, false, &trace->tool);
4223 if (trace->opts.target.pid)
4224 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4226 if (trace->opts.target.tid)
4227 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4232 trace->host = &session->machines.host;
4278 else if (trace->summary)
4279 trace__fprintf_thread_summary(trace, trace->output);
4311 struct trace *trace, FILE *fp)
4339 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4345 if (trace->errno_summary && stats->nr_failures) {
4346 const char *arch_name = perf_env__arch(trace->host->env);
4363 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4372 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4381 if (trace->sched)
4386 printed += thread__dump_stats(ttrace, trace, fp);
4403 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4410 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4418 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4428 struct trace *trace = opt->value;
4430 trace->duration_filter = atof(str);
4439 struct trace *trace = opt->value;
4449 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4450 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4452 if (trace->filter_pids.entries == NULL)
4455 trace->filter_pids.entries[0] = getpid();
4457 for (i = 1; i < trace->filter_pids.nr; ++i)
4458 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4466 static int trace__open_output(struct trace *trace, const char *filename)
4478 trace->output = fopen(filename, "w");
4480 return trace->output == NULL ? -errno : 0;
4568 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4576 struct trace *trace = (struct trace *)opt->value;
4589 trace->not_ev_qualifier = true;
4597 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4598 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4634 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4635 if (trace->ev_qualifier == NULL) {
4636 fputs("Not enough memory to parse event qualifier", trace->output);
4640 if (trace__validate_ev_qualifier(trace))
4642 trace->trace_syscalls = true;
4649 .value = &trace->evlist,
4662 struct trace *trace = opt->value;
4664 if (!list_empty(&trace->evlist->core.entries)) {
4666 .value = &trace->evlist,
4670 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4677 struct trace *trace = arg;
4680 if (!strcmp(var, "trace.add_events")) {
4681 trace->perfconfig_events = strdup(value);
4682 if (trace->perfconfig_events == NULL) {
4683 pr_err("Not enough memory for %s\n", "trace.add_events");
4686 } else if (!strcmp(var, "trace.show_timestamp")) {
4687 trace->show_tstamp = perf_config_bool(var, value);
4688 } else if (!strcmp(var, "trace.show_duration")) {
4689 trace->show_duration = perf_config_bool(var, value);
4690 } else if (!strcmp(var, "trace.show_arg_names")) {
4691 trace->show_arg_names = perf_config_bool(var, value);
4692 if (!trace->show_arg_names)
4693 trace->show_zeros = true;
4694 } else if (!strcmp(var, "trace.show_zeros")) {
4696 if (!trace->show_arg_names && !new_show_zeros) {
4697 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4700 trace->show_zeros = new_show_zeros;
4701 } else if (!strcmp(var, "trace.show_prefix")) {
4702 trace->show_string_prefix = perf_config_bool(var, value);
4703 } else if (!strcmp(var, "trace.no_inherit")) {
4704 trace->opts.no_inherit = perf_config_bool(var, value);
4705 } else if (!strcmp(var, "trace.args_alignment")) {
4708 trace->args_alignment = args_alignment;
4709 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4711 trace->libtraceevent_print = true;
4713 trace->libtraceevent_print = false;
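Each strcmp() branch in trace__config() maps to a key in the [trace] section of perfconfig. An illustrative snippet using only the keys handled above (values are examples; per the code, disabling show_arg_names forces show_zeros on):

    [trace]
            add_events = probe:vfs_getname
            show_timestamp = yes
            show_duration = yes
            show_arg_names = no
            show_zeros = yes
            show_prefix = no
            no_inherit = no
            args_alignment = 70
            tracepoint_beautifiers = libtraceevent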
4722 "perf trace [<options>] [<command>]",
4723 "perf trace [<options>] -- <command> [<options>]",
4724 "perf trace record [<options>] [<command>]",
4725 "perf trace record [<options>] -- <command> [<options>]",
4728 struct trace trace = {
4753 OPT_CALLBACK('e', "event", &trace, "event",
4756 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4758 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4760 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4761 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4765 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4766 "trace events on existing process id"),
4767 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4768 "trace events on existing thread id"),
4769 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4771 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4773 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4775 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4777 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4780 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4782 OPT_CALLBACK(0, "duration", &trace, "float",
4788 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4790 OPT_BOOLEAN('T', "time", &trace.full_time,
4792 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4794 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4796 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4798 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4800 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4802 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4803 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4804 OPT_CALLBACK(0, "call-graph", &trace.opts,
4807 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4809 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4811 OPT_ULONG(0, "max-events", &trace.max_events,
4813 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4816 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4820 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4822 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4826 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4828 OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4831 OPTS_EVSWITCH(&trace.evswitch),
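For orientation, the option table above corresponds to invocations like the following; illustrative examples, not an exhaustive synopsis:

    # openat-family syscalls of an existing pid, with a summary at the end
    perf trace -e open* -p 1234 -S

    # system-wide, print failing syscalls only, stop after 100 events
    perf trace -a --failure --max-events 100

    # page faults (major and minor) with DWARF call graphs
    perf trace -F all --call-graph dwarf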
4844 trace.evlist = evlist__new();
4845 trace.sctbl = syscalltbl__new();
4847 if (trace.evlist == NULL || trace.sctbl == NULL) {
4857 * global setting. If it fails we'll get something in 'perf trace -v'
4862 err = perf_config(trace__config, &trace);
4878 * .perfconfig trace.add_events, and filter those out.
4880 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4881 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4882 trace.trace_syscalls = true;
4890 if (trace.perfconfig_events != NULL) {
4894 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4896 parse_events_print_error(&parse_err, trace.perfconfig_events);
4901 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4906 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4908 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4909 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4914 trace.syscalls.events.augmented = evsel;
4916 evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4927 trace.bpf_obj = evsel->bpf_obj;
4934 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4935 trace.trace_syscalls = true;
4942 * This is more to fix the current .perfconfig trace.add_events
4947 * to trace.add_events in the form of
4948 * trace.bpf_augmented_syscalls, that will be only parsed if we
4951 * .perfconfig trace.add_events is still useful if we want, for
4953 * 'perf trace --config determinism.profile' mode, where for some
4961 if (!trace.trace_syscalls) {
4962 trace__delete_augmented_syscalls(&trace);
4964 trace__set_bpf_map_filtered_pids(&trace);
4965 trace__set_bpf_map_syscalls(&trace);
4966 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4970 err = bpf__setup_stdout(trace.evlist);
4972 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4980 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4981 if (trace.dump.map == NULL) {
4987 if (trace.trace_pgfaults) {
4988 trace.opts.sample_address = true;
4989 trace.opts.sample_time = true;
4992 if (trace.opts.mmap_pages == UINT_MAX)
4995 if (trace.max_stack == UINT_MAX) {
4996 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5001 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5002 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5008 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5013 if (trace.evlist->core.nr_entries > 0) {
5014 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5015 if (evlist__set_syscall_tp_fields(trace.evlist)) {
5021 if (trace.sort_events) {
5022 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5023 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5037 if (trace.syscalls.events.augmented) {
5038 evlist__for_each_entry(trace.evlist, evsel) {
5042 trace.raw_augmented_syscalls = true;
5046 if (trace.syscalls.events.augmented->priv == NULL &&
5048 struct evsel *augmented = trace.syscalls.events.augmented;
5095 if (trace.raw_augmented_syscalls)
5096 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5104 return trace__record(&trace, argc-1, &argv[1]);
5107 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5108 trace.summary_only = true;
5111 if (trace.summary_only)
5112 trace.summary = trace.summary_only;
5115 err = trace__open_output(&trace, output_name);
5122 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5126 err = target__validate(&trace.opts.target);
5128 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5129 fprintf(trace.output, "%s", bf);
5133 err = target__parse_uid(&trace.opts.target);
5135 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5136 fprintf(trace.output, "%s", bf);
5140 if (!argc && target__none(&trace.opts.target))
5141 trace.opts.target.system_wide = true;
5144 err = trace__replay(&trace);
5146 err = trace__run(&trace, argc, argv);
5150 fclose(trace.output);
5152 zfree(&trace.perfconfig_events);