Lines Matching defs:syscalls
138 } syscalls;
294 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
318 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
471 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
473 evsel = evsel__newtp("syscalls", direction);
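
The matches at 471-473 (and again at 3079-3082) show a fallback from the raw_syscalls tracepoints to the per-syscall "syscalls" group on older kernels. A minimal standalone sketch of that kind of probe, assuming the usual tracefs mount point; the helper name is made up for the example, this is not perf code:

/*
 * Illustrative sketch only: pick the tracepoint group the running kernel
 * provides by probing tracefs. Path and helper name are assumptions.
 */
#include <stdio.h>
#include <unistd.h>

static int tp_exists(const char *sys, const char *name)
{
	char path[256];

	/* newer kernels mount tracefs here; older ones use /sys/kernel/debug/tracing */
	snprintf(path, sizeof(path), "/sys/kernel/tracing/events/%s/%s/format", sys, name);
	return access(path, R_OK) == 0;
}

int main(void)
{
	/* prefer raw_syscalls:*, fall back to syscalls:* as on older kernels (e.g., RHEL6) */
	const char *sys = tp_exists("raw_syscalls", "sys_enter") ? "raw_syscalls" :
			  tp_exists("syscalls", "sys_enter")     ? "syscalls" : NULL;

	if (sys)
		printf("would use %s:sys_enter and %s:sys_exit\n", sys, sys);
	else
		fprintf(stderr, "neither raw_syscalls nor syscalls tracepoints found\n");
	return 0;
}
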
1513 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1763 * /sys/kernel/tracing/events/syscalls/sys_enter*
1801 if (trace->syscalls.table == NULL) {
1802 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1803 if (trace->syscalls.table == NULL)
1807 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1809 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1815 if (trace->syscalls.table == NULL)
1818 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1820 trace->syscalls.table = table;
1821 trace->sctbl->syscalls.max_id = id;
1824 sc = trace->syscalls.table + id;
1837 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1841 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
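
The matches at 1801-1824 show a syscall table indexed by id, allocated lazily and grown with realloc when an id beyond the current maximum shows up, zeroing only the newly appended slots. A reduced, self-contained sketch of that pattern; the struct contents are trimmed to what the example needs:

/* sketch of the grow-on-demand, id-indexed table pattern seen around 1801-1821 */
#include <stdlib.h>
#include <string.h>

struct syscall { const char *name; };

struct sc_table {
	struct syscall *table;
	int max_id;	/* highest id the table currently covers */
};

static struct syscall *sc_table__get(struct sc_table *t, int id)
{
	if (t->table == NULL) {
		t->table = calloc(id + 1, sizeof(*t->table));
		if (t->table == NULL)
			return NULL;
		t->max_id = id;
	} else if (id > t->max_id) {
		struct syscall *table = realloc(t->table, (id + 1) * sizeof(*table));

		if (table == NULL)
			return NULL;
		/* zero only the newly appended slots */
		memset(table + t->max_id + 1, 0, (id - t->max_id) * sizeof(*table));
		t->table = table;
		t->max_id = id;
	}
	return &t->table[id];
}

int main(void)
{
	struct sc_table t = { .table = NULL, .max_id = -1 };

	if (sc_table__get(&t, 300))	/* forces the initial allocation */
		sc_table__get(&t, 450);	/* forces the realloc + memset path */
	free(t.table);
	return 0;
}
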
1920 pr_debug("Skipping unknown syscalls: ");
2058 * not needed for syscalls that always return a given type, say an fd.
2155 if (id > trace->sctbl->syscalls.max_id) {
2157 if (id >= trace->sctbl->syscalls.max_id) {
2161 * go on reading syscalls.
2169 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2173 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2176 return &trace->syscalls.table[id];
2182 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2183 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2308 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
2366 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
2370 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
2372 if (evsel != trace->syscalls.events.sys_enter)
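
The comment at 2366 mentions computing syscall->args_size from each syscalls:sys_enter_NAME tracefs format file. A rough standalone sketch of reading such a format file and taking the end of the last field as the payload size; the tracepoint name is only an example, and tracefs usually needs root to read:

/* sketch only: payload size = offset + size of the last field in the format file */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/events/syscalls/sys_enter_openat/format";
	FILE *fp = fopen(path, "r");
	char line[512];
	int offset = 0, size = 0, end = 0;

	if (fp == NULL) {
		perror(path);
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		/* field lines look like: "\tfield:int dfd;\toffset:16;\tsize:8;\tsigned:0;" */
		char *p = strstr(line, "offset:");

		if (p && sscanf(p, "offset:%d; size:%d;", &offset, &size) == 2 && offset + size > end)
			end = offset + size;
	}
	fclose(fp);

	printf("payload ends at byte %d\n", end);
	return 0;
}
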
2846 if (evsel == trace->syscalls.events.bpf_output) {
3079 else if (is_valid_tracepoint("syscalls:sys_enter"))
3080 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3082 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3241 trace->syscalls.events.sys_enter = sys_enter;
3242 trace->syscalls.events.sys_exit = sys_exit;
3266 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3267 sys_exit = trace->syscalls.events.sys_exit;
3306 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
3311 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
3326 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
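
The matches at 3306-3326 build a BPF program section name of the form tp/syscalls/sys_{enter,exit}_NAME, retrying with an alias before giving up on augmenting. A sketch of that naming scheme with a stand-in lookup; the concrete syscall and alias names are placeholders, and the lookup function is not a real libbpf call:

/* sketch: compose a per-syscall section name, then retry with an alias */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* stand-in for "does the BPF object contain a program with this section name?" */
static bool prog_exists(const char *sec)
{
	static const char *known[] = { "tp/syscalls/sys_enter_renameat2" };

	for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++)
		if (!strcmp(sec, known[i]))
			return true;
	return false;
}

static bool find_prog(char *sec, size_t len, const char *type,
		      const char *name, const char *alias)
{
	snprintf(sec, len, "tp/syscalls/sys_%s_%s", type, name);
	if (prog_exists(sec))
		return true;
	if (alias) {
		snprintf(sec, len, "tp/syscalls/sys_%s_%s", type, alias);
		return prog_exists(sec);
	}
	return false;
}

int main(void)
{
	char sec[256];

	if (find_prog(sec, sizeof(sec), "enter", "renameat", "renameat2"))
		printf("would attach %s\n", sec);
	else
		printf("no augmenter found, not augmenting\n");
	return 0;
}
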
3361 * We're only interested in syscalls that have a pointer:
3371 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3423 * more than what is common to the two syscalls.
3438 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3462 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3482 * Now lets do a second pass looking for enabled syscalls without
3509 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3525 * Look at all the other syscalls for one that has a signature
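
The matches around 3423-3525 describe a second pass that lets a syscall without its own augmenter reuse the BPF program of another syscall with a matching argument signature. A toy sketch of that idea; the signature representation and the table contents are invented for the example:

/* sketch: borrow an augmenter from another syscall with the same arg-type signature */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct sc_sig {
	const char *name;
	const char *arg_types[6];	/* NULL-terminated list of arg type strings */
	bool has_augmenter;
};

static bool sig_matches(const struct sc_sig *a, const struct sc_sig *b)
{
	for (int i = 0; i < 6; i++) {
		if (a->arg_types[i] == NULL || b->arg_types[i] == NULL)
			return a->arg_types[i] == b->arg_types[i];
		if (strcmp(a->arg_types[i], b->arg_types[i]))
			return false;
	}
	return true;
}

int main(void)
{
	struct sc_sig table[] = {
		{ "open",  { "const char *", "int", "umode_t" }, true  },
		{ "creat", { "const char *", "umode_t" },         false },
		{ "chmod", { "const char *", "umode_t" },         true  },
	};
	const int n = sizeof(table) / sizeof(table[0]);

	/* second pass: syscalls without an augmenter borrow one with the same signature */
	for (int key = 0; key < n; key++) {
		if (table[key].has_augmenter)
			continue;
		for (int cand = 0; cand < n; cand++) {
			if (cand != key && table[cand].has_augmenter &&
			    sig_matches(&table[key], &table[cand])) {
				printf("%s reuses the augmenter of %s\n",
				       table[key].name, table[cand].name);
				break;
			}
		}
	}
	return 0;
}
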
3550 if (trace->syscalls.events.sys_enter)
3911 if (trace->syscalls.events.bpf_output) {
3918 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
3921 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
3941 if (trace->syscalls.events.sys_exit) {
3943 trace->syscalls.events.sys_exit->filter);
3951 * syscalls opening a pathname and associating it with a descriptor or
4162 trace->syscalls.events.sys_enter = evsel;
4163 /* older kernels have syscalls tp versus raw_syscalls */
4165 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
4175 trace->syscalls.events.sys_exit = evsel;
4177 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
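
The matches at 4162-4177 look up the sys_enter/sys_exit events in a recorded session, falling back from raw_syscalls:sys_enter to syscalls:sys_enter for data recorded on older kernels. A trivial sketch of that fallback, with a plain string array standing in for the session's event list:

/* sketch: prefer raw_syscalls:sys_enter, accept syscalls:sys_enter from older kernels */
#include <stdio.h>
#include <string.h>

static int find_event(const char **names, int n, const char *name)
{
	for (int i = 0; i < n; i++)
		if (!strcmp(names[i], name))
			return i;
	return -1;
}

int main(void)
{
	const char *recorded[] = { "cycles", "syscalls:sys_enter", "syscalls:sys_exit" };
	const int n = sizeof(recorded) / sizeof(recorded[0]);
	int idx = find_event(recorded, n, "raw_syscalls:sys_enter");

	/* older kernels recorded syscalls:sys_enter instead */
	if (idx < 0)
		idx = find_event(recorded, n, "syscalls:sys_enter");

	if (idx >= 0)
		printf("sys_enter events come from \"%s\"\n", recorded[idx]);
	return 0;
}
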
4260 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4460 if (strcmp(evsel->tp_format->system, "syscalls")) {
4489 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4654 if (trace->syscalls.table) {
4655 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4656 syscall__exit(&trace->syscalls.table[i]);
4657 zfree(&trace->syscalls.table);
4716 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4744 "Show only syscalls that failed"),
4748 "Show all syscalls and summary with statistics"),
4753 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4871 pr_debug("Failed to open augmented syscalls BPF skeleton");
4888 pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
4901 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
4902 assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
4936 perror("failed to set syscalls:* tracepoint fields");
4947 * If we are augmenting syscalls, then combine what we put in the
4949 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
4957 if (trace.syscalls.events.bpf_output) {
4966 if (trace.syscalls.events.bpf_output->priv == NULL &&
4967 strstr(evsel__name(evsel), "syscalls:sys_enter")) {
4968 struct evsel *augmented = trace.syscalls.events.bpf_output;
4975 * that we get from syscalls:sys_enter tracefs format file.
4979 * Now we do the same for the *syscalls:sys_enter event so that
4990 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
5011 * syscalls:sys_enter_NAME, so that we reduce