Lines matching refs:trace — references to 'trace' in tools/perf/builtin-trace.c
2 * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
57 #include "trace/beauty/beauty.h"
58 #include "trace-event.h"
128 struct trace {
293 * The evsel->priv as used by 'perf trace'
729 #include "trace/beauty/generated/fsconfig_arrays.c"
899 #include "trace/beauty/arch_errno_names.c"
900 #include "trace/beauty/eventfd.c"
901 #include "trace/beauty/futex_op.c"
902 #include "trace/beauty/futex_val3.c"
903 #include "trace/beauty/mmap.c"
904 #include "trace/beauty/mode_t.c"
905 #include "trace/beauty/msg_flags.c"
906 #include "trace/beauty/open_flags.c"
907 #include "trace/beauty/perf_event_open.c"
908 #include "trace/beauty/pid.c"
909 #include "trace/beauty/sched_policy.c"
910 #include "trace/beauty/seccomp.c"
911 #include "trace/beauty/signum.c"
912 #include "trace/beauty/socket_type.c"
913 #include "trace/beauty/waitid_options.c"
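
Each trace/beauty/*.c file included above contributes a pretty-printer
("beautifier") for one kind of syscall argument, all following the
scnprintf-style shape declared in trace/beauty/beauty.h. A toy beautifier
in that shape (illustrative only, with a stand-in struct and plain
snprintf instead of perf's scnprintf):

    #include <stdio.h>

    /* stand-in for the real struct syscall_arg from beauty.h */
    struct syscall_arg { unsigned long val; };

    static size_t syscall_arg__scnprintf_bool(char *bf, size_t size,
                                              struct syscall_arg *arg)
    {
            return snprintf(bf, size, "%s", arg->val ? "true" : "false");
    }
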
1436 struct trace *trace)
1440 if (ttrace == NULL || trace->fd_path_disabled)
1447 if (!trace->live)
1449 ++trace->stats.proc_getname;
1461 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1469 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1472 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1475 const char *path = thread__fd_path(thread, fd, trace);
1532 if (!arg->trace->vfs_getname)
1539 static bool trace__filter_duration(struct trace *trace, double t)
1541 return t < (trace->duration_filter * NSEC_PER_MSEC);
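
trace__filter_duration() mixes units on purpose: 't' is a syscall duration
in nanoseconds (the difference of two sample timestamps) while --duration
is given in milliseconds, hence the NSEC_PER_MSEC scaling. A minimal
sketch of the same check, names assumed:

    #include <stdbool.h>

    #define NSEC_PER_MSEC 1000000ULL

    /* true when the event is faster than the --duration threshold,
     * i.e. it should be dropped from the output */
    static bool below_duration_filter(double t_ns, double filter_ms)
    {
            return t_ns < filter_ms * NSEC_PER_MSEC;
    }
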
1544 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1546 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1557 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1560 return __trace__fprintf_tstamp(trace, tstamp, fp);
1581 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1585 if (trace->multiple_threads) {
1586 if (trace->show_comm)
1594 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1599 if (trace->show_tstamp)
1600 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1601 if (trace->show_duration)
1603 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1606 static int trace__process_event(struct trace *trace, struct machine *machine,
1613 color_fprintf(trace->output, PERF_COLOR_RED,
1630 struct trace *trace = container_of(tool, struct trace, tool);
1631 return trace__process_event(trace, machine, event, sample);
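
The container_of() call above recovers the enclosing struct trace from its
embedded perf_tool when the generic tool callback fires. The same
embed-and-recover idiom in miniature (hypothetical types, standard
offsetof arithmetic):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tool { int flags; };

    struct trace_like {
            unsigned long nr_events;
            struct tool tool;       /* embedded, not a pointer */
    };

    static struct trace_like *tool_to_trace(struct tool *t)
    {
            return container_of(t, struct trace_like, tool);
    }
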
1652 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1659 trace->host = machine__new_host();
1660 if (trace->host == NULL)
1665 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1669 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1679 static void trace__symbols__exit(struct trace *trace)
1681 machine__exit(trace->host);
1682 trace->host = NULL;
1794 static int trace__read_syscall_info(struct trace *trace, int id)
1798 const char *name = syscalltbl__name(trace->sctbl, id);
1801 if (trace->syscalls.table == NULL) {
1802 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1803 if (trace->syscalls.table == NULL)
1807 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1809 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1815 if (trace->syscalls.table == NULL)
1818 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1820 trace->syscalls.table = table;
1821 trace->sctbl->syscalls.max_id = id;
1824 sc = trace->syscalls.table + id;
1845 * Fails to read trace point format via sysfs node, so the trace point
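
trace__read_syscall_info() above grows the syscall table on demand: when
an id beyond the known maximum appears, it realloc()s to id + 1 entries,
zeroes the gap between the old max_id and the new id so stale bytes never
masquerade as valid entries, then publishes the new table and max_id. A
hedged sketch of that idiom with illustrative names:

    #include <stdlib.h>
    #include <string.h>

    static int grow_table(void **tablep, size_t elem_size,
                          int *max_id, int id)
    {
            char *table = realloc(*tablep, (size_t)(id + 1) * elem_size);

            if (table == NULL)
                    return -1;
            /* zero the entries between the old maximum and the new id */
            memset(table + (size_t)(*max_id + 1) * elem_size, 0,
                   (size_t)(id - *max_id) * elem_size);
            *tablep = table;
            *max_id = id;
            return 0;
    }
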
1893 static int trace__validate_ev_qualifier(struct trace *trace)
1898 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1900 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1901 sizeof(trace->ev_qualifier_ids.entries[0]));
1903 if (trace->ev_qualifier_ids.entries == NULL) {
1905 trace->output);
1910 strlist__for_each_entry(pos, trace->ev_qualifier) {
1912 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1915 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1930 trace->ev_qualifier_ids.entries[nr_used++] = id;
1935 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1942 entries = realloc(trace->ev_qualifier_ids.entries,
1943 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1946 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1949 trace->ev_qualifier_ids.entries = entries;
1951 trace->ev_qualifier_ids.entries[nr_used++] = id;
1955 trace->ev_qualifier_ids.nr = nr_used;
1956 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1962 zfree(&trace->ev_qualifier_ids.entries);
1963 trace->ev_qualifier_ids.nr = 0;
1967 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1971 if (trace->ev_qualifier_ids.nr == 0)
1974 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1975 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1978 return !trace->not_ev_qualifier;
1980 return trace->not_ev_qualifier;
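
Putting the qualifier pieces together: trace__validate_ev_qualifier()
resolves names and globs to ids and qsort()s them with intcmp, so
trace__syscall_enabled() can test membership with bsearch(), and
not_ev_qualifier flips the sense for negated qualifiers. A compact sketch,
assuming intcmp is a plain integer comparator:

    #include <stdbool.h>
    #include <stdlib.h>

    static int intcmp(const void *a, const void *b)
    {
            /* fine for small values like syscall ids */
            return *(const int *)a - *(const int *)b;
    }

    static bool syscall_enabled(const int *ids, size_t nr,
                                bool negated, int id)
    {
            bool found;

            if (nr == 0)
                    return true;    /* no qualifier: trace everything */
            found = bsearch(&id, ids, nr, sizeof(int), intcmp) != NULL;
            return negated ? !found : found;
    }
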
2012 * in tools/perf/trace/beauty/mount_flags.c
2036 struct trace *trace, struct thread *thread)
2049 .trace = trace,
2051 .show_string_prefix = trace->show_string_prefix,
2084 !trace->show_zeros &&
2094 if (trace->show_arg_names)
2123 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2127 static struct syscall *trace__syscall_info(struct trace *trace,
2146 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2155 if (id > trace->sctbl->syscalls.max_id) {
2157 if (id >= trace->sctbl->syscalls.max_id) {
2163 err = trace__read_syscall_info(trace, id);
2169 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2170 (err = trace__read_syscall_info(trace, id)) != 0)
2173 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2176 return &trace->syscalls.table[id];
2181 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2182 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2183 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2184 fputs(" information\n", trace->output);
2249 static int trace__printf_interrupted_entry(struct trace *trace)
2255 if (trace->failure_only || trace->current == NULL)
2258 ttrace = thread__priv(trace->current);
2263 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2264 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2266 if (len < trace->args_alignment - 4)
2267 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2269 printed += fprintf(trace->output, " ...\n");
2272 ++trace->nr_events_printed;
2277 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2282 if (trace->print_sample) {
2285 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2328 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2339 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2345 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2346 ttrace = thread__trace(thread, trace->output);
2350 trace__fprintf_sample(trace, evsel, sample, thread);
2360 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2361 trace__printf_interrupted_entry(trace);
2372 if (evsel != trace->syscalls.events.sys_enter)
2373 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2379 args, augmented_args, augmented_args_size, trace, thread);
2382 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2385 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2386 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2387 if (trace->args_alignment > printed)
2388 alignment = trace->args_alignment - printed;
2389 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2397 if (trace->current != thread) {
2398 thread__put(trace->current);
2399 trace->current = thread__get(thread);
2407 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2413 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2421 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2422 ttrace = thread__trace(thread, trace->output);
2431 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2432 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2433 fprintf(trace->output, "%s", msg);
2440 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2447 trace->max_stack;
2451 if (machine__resolve(trace->host, &al, sample) < 0)
2460 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2467 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2478 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2487 int alignment = trace->args_alignment;
2488 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2494 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2495 ttrace = thread__trace(thread, trace->output);
2499 trace__fprintf_sample(trace, evsel, sample, thread);
2503 if (trace->summary)
2504 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2506 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2509 ++trace->stats.vfs_getname;
2514 if (trace__filter_duration(trace, duration))
2517 } else if (trace->duration_filter)
2523 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2525 if (cursor->nr < trace->min_stack)
2531 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2534 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2537 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2539 printed += fprintf(trace->output, " ... [");
2540 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2542 printed += fprintf(trace->output, "]: %s()", sc->name);
2552 fprintf(trace->output, ")%*s= ", alignment, " ");
2558 fprintf(trace->output, "%ld", ret);
2565 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2568 fprintf(trace->output, "0 (Timeout)");
2574 .trace = trace,
2578 fprintf(trace->output, "%s", bf);
2580 fprintf(trace->output, "%#lx", ret);
2582 struct thread *child = machine__find_thread(trace->host, ret, ret);
2585 fprintf(trace->output, "%ld", ret);
2587 fprintf(trace->output, " (%s)", thread__comm_str(child));
2593 fputc('\n', trace->output);
2599 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2603 trace__fprintf_callchain(trace, sample);
2614 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2618 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2675 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2681 struct thread *thread = machine__findnew_thread(trace->host,
2684 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2690 trace->runtime_ms += runtime_ms;
2696 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2729 static void bpf_output__fprintf(struct trace *trace,
2733 bpf_output__printer, NULL, trace->output);
2734 ++trace->nr_events_printed;
2737 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
2754 .trace = trace,
2756 .show_string_prefix = trace->show_string_prefix,
2791 !trace->show_zeros &&
2800 if (trace->show_arg_names)
2806 return printed + fprintf(trace->output, "%s", bf);
2809 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2824 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2829 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2831 if (cursor->nr < trace->min_stack)
2837 trace__printf_interrupted_entry(trace);
2838 trace__fprintf_tstamp(trace, sample->time, trace->output);
2840 if (trace->trace_syscalls && trace->show_duration)
2841 fprintf(trace->output, "( ): ");
2844 trace__fprintf_comm_tid(trace, thread, trace->output);
2846 if (evsel == trace->syscalls.events.bpf_output) {
2848 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2851 fprintf(trace->output, "%s(", sc->name);
2852 trace__fprintf_sys_enter(trace, evsel, sample);
2853 fputc(')', trace->output);
2864 fprintf(trace->output, "%s(", evsel->name);
2867 bpf_output__fprintf(trace, sample);
2870 trace__fprintf_sys_enter(trace, evsel, sample)) {
2871 if (trace->libtraceevent_print) {
2874 trace->output);
2876 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2882 fprintf(trace->output, ")\n");
2885 trace__fprintf_callchain(trace, sample);
2889 ++trace->nr_events_printed;
2917 static int trace__pgfault(struct trace *trace,
2930 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2935 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2937 if (cursor->nr < trace->min_stack)
2943 ttrace = thread__trace(thread, trace->output);
2952 if (trace->summary_only)
2957 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2959 fprintf(trace->output, "%sfault [",
2963 print_location(trace->output, sample, &al, false, true);
2965 fprintf(trace->output, "] => ");
2978 print_location(trace->output, sample, &al, true, false);
2980 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2983 trace__fprintf_callchain(trace, sample);
2987 ++trace->nr_events_printed;
2996 static void trace__set_base_time(struct trace *trace,
3008 if (trace->base_time == 0 && !trace->full_time &&
3010 trace->base_time = sample->time;
3019 struct trace *trace = container_of(tool, struct trace, tool);
3025 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3029 trace__set_base_time(trace, evsel, sample);
3032 ++trace->nr_events;
3033 handler(trace, evsel, event, sample);
3040 static int trace__record(struct trace *trace, int argc, const char **argv)
3072 if (trace->trace_syscalls) {
3090 if (trace->trace_pgfaults & TRACE_PFMAJ)
3094 if (trace->trace_pgfaults & TRACE_PFMIN)
3108 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3171 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3177 trace__process_event(trace, trace->host, event, sample);
3181 evsel = evlist__id2evsel(trace->evlist, sample->id);
3183 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3187 if (evswitch__discard(&trace->evswitch, evsel))
3190 trace__set_base_time(trace, evsel, sample);
3194 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3199 handler(trace, evsel, event, sample);
3202 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3206 static int trace__add_syscall_newtp(struct trace *trace)
3209 struct evlist *evlist = trace->evlist;
3226 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3227 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3232 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3241 trace->syscalls.events.sys_enter = sys_enter;
3242 trace->syscalls.events.sys_exit = sys_exit;
3255 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3259 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3260 trace->ev_qualifier_ids.nr,
3261 trace->ev_qualifier_ids.entries);
3266 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3267 sys_exit = trace->syscalls.events.sys_exit;
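
asprintf_expr_inout_ints() above renders the sorted qualifier ids as a
tracepoint filter expression, "id == a || id == b || ..." in the normal
case or "id != a && id != b && ..." when the qualifier is negated, and the
result is appended to the sys_enter/sys_exit tracepoint filters. A rough
illustration of building such a string (not the real helper):

    #define _GNU_SOURCE
    #include <stdbool.h>
    #include <stdio.h>

    static char *build_id_filter(bool in, const int *ids, size_t nr)
    {
            char *buf = NULL;
            size_t len, i;
            FILE *f = open_memstream(&buf, &len);

            if (f == NULL)
                    return NULL;
            for (i = 0; i < nr; i++)
                    fprintf(f, "%sid %s %d",
                            i ? (in ? " || " : " && ") : "",
                            in ? "==" : "!=", ids[i]);
            fclose(f);
            return buf;     /* caller frees */
    }
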
3280 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3285 if (trace->skel->obj == NULL)
3288 bpf_object__for_each_program(pos, trace->skel->obj) {
3299 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3307 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3312 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3319 prog = trace__find_bpf_program_by_title(trace, prog_name);
3329 return trace->skel->progs.syscall_unaugmented;
3332 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3334 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3339 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3340 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3343 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3345 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3346 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3349 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3351 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3352 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3355 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
3371 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3372 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3377 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
3442 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3443 if (pair_prog == trace->skel->progs.syscall_unaugmented)
3456 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3458 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3459 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
3462 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3465 if (!trace__syscall_enabled(trace, key))
3468 trace__init_syscall_bpf_progs(trace, key);
3471 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3475 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3509 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3510 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3521 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
3528 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3548 static int trace__set_ev_qualifier_filter(struct trace *trace)
3550 if (trace->syscalls.events.sys_enter)
3551 return trace__set_ev_qualifier_tp_filter(trace);
3573 static int trace__set_filter_loop_pids(struct trace *trace)
3579 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3582 struct thread *parent = machine__find_thread(trace->host,
3597 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3598 if (!err && trace->filter_pids.map)
3599 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3604 static int trace__set_filter_pids(struct trace *trace)
3613 if (trace->filter_pids.nr > 0) {
3614 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3615 trace->filter_pids.entries);
3616 if (!err && trace->filter_pids.map) {
3617 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3618 trace->filter_pids.entries);
3620 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3621 err = trace__set_filter_loop_pids(trace);
3627 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3629 struct evlist *evlist = trace->evlist;
3634 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3636 trace__handle_event(trace, event, &sample);
3641 static int __trace__flush_events(struct trace *trace)
3643 u64 first = ordered_events__first_time(&trace->oe.data);
3644 u64 flush = trace->oe.last - NSEC_PER_SEC;
3648 return ordered_events__flush_time(&trace->oe.data, flush);
3653 static int trace__flush_events(struct trace *trace)
3655 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3658 static int trace__deliver_event(struct trace *trace, union perf_event *event)
3662 if (!trace->sort_events)
3663 return __trace__deliver_event(trace, event);
3665 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3669 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
3673 return trace__flush_events(trace);
3679 struct trace *trace = container_of(oe, struct trace, oe.data);
3681 return __trace__deliver_event(trace, event->event);
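
The --sort-events path above queues every event by timestamp into
trace->oe.data and keeps roughly a one-second reordering window:
__trace__flush_events() computes a cutoff of oe.last - NSEC_PER_SEC and
flushes only when the oldest queued event has fallen behind it. The cutoff
test in isolation (assumed names):

    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* newest: timestamp of the most recently queued event (oe.last)
     * first:  timestamp of the oldest still-queued event, 0 if none */
    static bool should_flush(uint64_t newest, uint64_t first)
    {
            return first && first < newest - NSEC_PER_SEC;
    }
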
3699 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3797 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3799 struct evlist *evlist = trace->evlist;
3806 if (trace__expand_filter(trace, evsel)) {
3815 static int trace__run(struct trace *trace, int argc, const char **argv)
3817 struct evlist *evlist = trace->evlist;
3824 trace->live = true;
3826 if (!trace->raw_augmented_syscalls) {
3827 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3830 if (trace->trace_syscalls)
3831 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3834 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3838 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3842 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3846 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3851 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
3853 if (trace->sched &&
3860 * trace -G A -e sched:*switch
3865 * trace -e sched:*switch -G A
3873 * trace -G A -e sched:*switch -G B
3881 if (trace->cgroup)
3882 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3884 err = evlist__create_maps(evlist, &trace->opts.target);
3886 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3890 err = trace__symbols_init(trace, evlist);
3892 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3896 evlist__config(evlist, &trace->opts, &callchain_param);
3899 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
3901 fprintf(trace->output, "Couldn't run the workload!\n");
3911 if (trace->syscalls.events.bpf_output) {
3918 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
3919 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
3921 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
3927 err = trace__set_filter_pids(trace);
3932 if (trace->skel && trace->skel->progs.sys_enter)
3933 trace__init_syscalls_bpf_prog_array_maps(trace);
3936 if (trace->ev_qualifier_ids.nr > 0) {
3937 err = trace__set_ev_qualifier_filter(trace);
3941 if (trace->syscalls.events.sys_exit) {
3943 trace->syscalls.events.sys_exit->filter);
3958 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3960 err = trace__expand_filters(trace, &evsel);
3967 err = evlist__mmap(evlist, trace->opts.mmap_pages);
3971 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
3977 if (trace->opts.target.initial_delay) {
3978 usleep(trace->opts.target.initial_delay * 1000);
3982 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3995 evsel->core.attr.sample_max_stack = trace->max_stack;
3998 before = trace->nr_events;
4009 ++trace->nr_events;
4011 err = trace__deliver_event(trace, event);
4028 if (trace->nr_events == before) {
4037 if (trace__flush_events(trace))
4045 thread__zput(trace->current);
4049 if (trace->sort_events)
4050 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4053 if (trace->summary)
4054 trace__fprintf_thread_summary(trace, trace->output);
4056 if (trace->show_tool_stats) {
4057 fprintf(trace->output, "Stats:\n "
4060 trace->stats.vfs_getname,
4061 trace->stats.proc_getname);
4066 trace__symbols__exit(trace);
4069 cgroup__put(trace->cgroup);
4070 trace->evlist = NULL;
4071 trace->live = false;
4092 fprintf(trace->output, "%s\n", errbuf);
4096 fprintf(trace->output,
4103 fprintf(trace->output, "Not enough memory to run!\n");
4107 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4111 static int trace__replay(struct trace *trace)
4119 .force = trace->force,
4125 trace->tool.sample = trace__process_sample;
4126 trace->tool.mmap = perf_event__process_mmap;
4127 trace->tool.mmap2 = perf_event__process_mmap2;
4128 trace->tool.comm = perf_event__process_comm;
4129 trace->tool.exit = perf_event__process_exit;
4130 trace->tool.fork = perf_event__process_fork;
4131 trace->tool.attr = perf_event__process_attr;
4132 trace->tool.tracing_data = perf_event__process_tracing_data;
4133 trace->tool.build_id = perf_event__process_build_id;
4134 trace->tool.namespaces = perf_event__process_namespaces;
4136 trace->tool.ordered_events = true;
4137 trace->tool.ordering_requires_timestamps = true;
4140 trace->multiple_threads = true;
4142 session = perf_session__new(&data, &trace->tool);
4146 if (trace->opts.target.pid)
4147 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4149 if (trace->opts.target.tid)
4150 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4155 trace->host = &session->machines.host;
4162 trace->syscalls.events.sys_enter = evsel;
4175 trace->syscalls.events.sys_exit = evsel;
4199 else if (trace->summary)
4200 trace__fprintf_thread_summary(trace, trace->output);
4232 struct trace *trace, FILE *fp)
4260 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4266 if (trace->errno_summary && stats->nr_failures) {
4267 const char *arch_name = perf_env__arch(trace->host->env);
4284 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4293 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4302 if (trace->sched)
4307 printed += thread__dump_stats(ttrace, trace, fp);
4326 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4333 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4341 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4351 struct trace *trace = opt->value;
4353 trace->duration_filter = atof(str);
4362 struct trace *trace = opt->value;
4372 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4373 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4375 if (trace->filter_pids.entries == NULL)
4378 trace->filter_pids.entries[0] = getpid();
4380 for (i = 1; i < trace->filter_pids.nr; ++i)
4381 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
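
The --filter-pids parser above reserves entry 0 for getpid() so perf trace
always filters out its own pid, then copies the user's CSV list after it.
An illustrative re-implementation with plain strtok_r() instead of perf's
intlist:

    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>
    #include <unistd.h>

    static pid_t *parse_filter_pids(char *csv, size_t *nrp)
    {
            size_t nr = 1, i = 1;
            pid_t *entries;
            char *tok, *saveptr;

            for (tok = csv; *tok; tok++)
                    nr += (*tok == ',');
            entries = calloc(nr + 1, sizeof(*entries));
            if (entries == NULL)
                    return NULL;
            entries[0] = getpid();  /* never trace ourselves */
            for (tok = strtok_r(csv, ",", &saveptr); tok != NULL;
                 tok = strtok_r(NULL, ",", &saveptr))
                    entries[i++] = (pid_t)atoi(tok);
            *nrp = i;
            return entries;
    }
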
4389 static int trace__open_output(struct trace *trace, const char *filename)
4401 trace->output = fopen(filename, "w");
4403 return trace->output == NULL ? -errno : 0;
4491 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4499 struct trace *trace = (struct trace *)opt->value;
4512 trace->not_ev_qualifier = true;
4520 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4521 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4557 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4558 if (trace->ev_qualifier == NULL) {
4559 fputs("Not enough memory to parse event qualifier", trace->output);
4563 if (trace__validate_ev_qualifier(trace))
4565 trace->trace_syscalls = true;
4572 .evlistp = &trace->evlist,
4591 struct trace *trace = opt->value;
4593 if (!list_empty(&trace->evlist->core.entries)) {
4595 .value = &trace->evlist,
4599 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4606 struct trace *trace = arg;
4609 if (!strcmp(var, "trace.add_events")) {
4610 trace->perfconfig_events = strdup(value);
4611 if (trace->perfconfig_events == NULL) {
4612 pr_err("Not enough memory for %s\n", "trace.add_events");
4615 } else if (!strcmp(var, "trace.show_timestamp")) {
4616 trace->show_tstamp = perf_config_bool(var, value);
4617 } else if (!strcmp(var, "trace.show_duration")) {
4618 trace->show_duration = perf_config_bool(var, value);
4619 } else if (!strcmp(var, "trace.show_arg_names")) {
4620 trace->show_arg_names = perf_config_bool(var, value);
4621 if (!trace->show_arg_names)
4622 trace->show_zeros = true;
4623 } else if (!strcmp(var, "trace.show_zeros")) {
4625 if (!trace->show_arg_names && !new_show_zeros) {
4626 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4629 trace->show_zeros = new_show_zeros;
4630 } else if (!strcmp(var, "trace.show_prefix")) {
4631 trace->show_string_prefix = perf_config_bool(var, value);
4632 } else if (!strcmp(var, "trace.no_inherit")) {
4633 trace->opts.no_inherit = perf_config_bool(var, value);
4634 } else if (!strcmp(var, "trace.args_alignment")) {
4637 trace->args_alignment = args_alignment;
4638 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4640 trace->libtraceevent_print = true;
4642 trace->libtraceevent_print = false;
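
trace__config() above is the perf_config() callback that maps "[trace]"
section keys from ~/.perfconfig onto struct trace fields; note the
coupling where show_arg_names=no forces show_zeros on. A fragment
exercising those keys (values illustrative; the booleans go through
perf_config_bool(), so yes/no/true/false all work):

    [trace]
            # add_events = <events appended to those given with -e>
            show_timestamp = yes
            show_duration = yes
            show_arg_names = yes
            show_zeros = no
            show_prefix = no
            no_inherit = no
            args_alignment = 70
            tracepoint_beautifiers = libtraceevent
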
4648 static void trace__exit(struct trace *trace)
4652 strlist__delete(trace->ev_qualifier);
4653 zfree(&trace->ev_qualifier_ids.entries);
4654 if (trace->syscalls.table) {
4655 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4656 syscall__exit(&trace->syscalls.table[i]);
4657 zfree(&trace->syscalls.table);
4659 syscalltbl__delete(trace->sctbl);
4660 zfree(&trace->perfconfig_events);
4678 "perf trace [<options>] [<command>]",
4679 "perf trace [<options>] -- <command> [<options>]",
4680 "perf trace record [<options>] [<command>]",
4681 "perf trace record [<options>] -- <command> [<options>]",
4684 struct trace trace = {
4708 OPT_CALLBACK('e', "event", &trace, "event",
4711 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4713 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4715 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4716 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4720 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4721 "trace events on existing process id"),
4722 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4723 "trace events on existing thread id"),
4724 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4726 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4728 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4730 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4732 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4734 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4736 OPT_CALLBACK(0, "duration", &trace, "float",
4739 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4741 OPT_BOOLEAN('T', "time", &trace.full_time,
4743 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4745 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4747 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4749 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4751 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4753 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4754 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4755 OPT_CALLBACK(0, "call-graph", &trace.opts,
4758 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4760 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4762 OPT_ULONG(0, "max-events", &trace.max_events,
4764 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4767 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4771 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4773 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4777 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4779 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
4782 OPTS_EVSWITCH(&trace.evswitch),
4802 trace.evlist = evlist__new();
4803 trace.sctbl = syscalltbl__new();
4805 if (trace.evlist == NULL || trace.sctbl == NULL) {
4815 * global setting. If it fails we'll get something in 'perf trace -v'
4820 err = perf_config(trace__config, &trace);
4836 * .perfconfig trace.add_events, and filter those out.
4838 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4839 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4840 trace.trace_syscalls = true;
4848 if (trace.perfconfig_events != NULL) {
4852 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4854 parse_events_error__print(&parse_err, trace.perfconfig_events);
4860 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4866 if (!trace.trace_syscalls)
4869 trace.skel = augmented_raw_syscalls_bpf__open();
4870 if (!trace.skel) {
4879 bpf_object__for_each_program(prog, trace.skel->obj) {
4880 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
4884 err = augmented_raw_syscalls_bpf__load(trace.skel);
4890 augmented_raw_syscalls_bpf__attach(trace.skel);
4891 trace__add_syscall_newtp(&trace);
4895 err = bpf__setup_bpf_output(trace.evlist);
4901 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
4902 assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
4907 if (trace.trace_pgfaults) {
4908 trace.opts.sample_address = true;
4909 trace.opts.sample_time = true;
4912 if (trace.opts.mmap_pages == UINT_MAX)
4915 if (trace.max_stack == UINT_MAX) {
4916 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4921 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4922 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4928 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
4933 if (trace.evlist->core.nr_entries > 0) {
4934 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
4935 if (evlist__set_syscall_tp_fields(trace.evlist)) {
4941 if (trace.sort_events) {
4942 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4943 ordered_events__set_copy_on_queue(&trace.oe.data, true);
4957 if (trace.syscalls.events.bpf_output) {
4958 evlist__for_each_entry(trace.evlist, evsel) {
4962 trace.raw_augmented_syscalls = true;
4966 if (trace.syscalls.events.bpf_output->priv == NULL &&
4968 struct evsel *augmented = trace.syscalls.events.bpf_output;
5015 if (trace.raw_augmented_syscalls)
5016 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5024 return trace__record(&trace, argc-1, &argv[1]);
5027 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5028 trace.summary_only = true;
5031 if (trace.summary_only)
5032 trace.summary = trace.summary_only;
5035 err = trace__open_output(&trace, output_name);
5042 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5046 err = target__validate(&trace.opts.target);
5048 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5049 fprintf(trace.output, "%s", bf);
5053 err = target__parse_uid(&trace.opts.target);
5055 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5056 fprintf(trace.output, "%s", bf);
5060 if (!argc && target__none(&trace.opts.target))
5061 trace.opts.target.system_wide = true;
5064 err = trace__replay(&trace);
5066 err = trace__run(&trace, argc, argv);
5070 fclose(trace.output);
5072 trace__exit(&trace);
5074 augmented_raw_syscalls_bpf__destroy(trace.skel);