Lines Matching defs:sample
490 pr_err("Display of symbols requested but neither sample IP nor "
491 "sample address\navailable. Hence, no addresses to convert "
506 pr_err("Display of source line number requested but sample IP is not\n"
759 static int perf_sample__fprintf_iregs(struct perf_sample *sample,
762 return perf_sample__fprintf_regs(&sample->intr_regs,
766 static int perf_sample__fprintf_uregs(struct perf_sample *sample,
769 return perf_sample__fprintf_regs(&sample->user_regs,
774 struct perf_sample *sample,
785 if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
786 printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
789 if (PRINT_FIELD(VCPU) && sample->machine_pid)
790 printed += fprintf(fp, "VCPU:%03d ", sample->vcpu);
804 printed += fprintf(fp, "%7d/%-7d ", sample->pid, sample->tid);
806 printed += fprintf(fp, "%7d ", sample->pid);
808 printed += fprintf(fp, "%7d ", sample->tid);
812 printed += fprintf(fp, "%3d ", sample->cpu);
814 printed += fprintf(fp, "[%03d] ", sample->cpu);
821 (sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
848 if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
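The flag test above uses the "(misc & FLAG) == FLAG" form rather than a plain bitwise AND because some PERF_RECORD_MISC values are multi-bit, so every bit of the constant must be present for a match. A minimal sketch of that token-pasting macro pattern, with invented constants rather than perf's real ones:

    #include <stdbool.h>
    #include <stdint.h>

    /* Invented values for illustration only; perf's PERF_RECORD_MISC_*
     * constants live in include/uapi/linux/perf_event.h. GUEST_USER here
     * deliberately shares a bit with GUEST_KERNEL. */
    #define DEMO_MISC_GUEST_KERNEL  0x4
    #define DEMO_MISC_GUEST_USER    0x6

    /* Mirrors the "(misc & FLAG) == FLAG" line above: all bits of the
     * constant must be set, so misc == 0x4 does not match GUEST_USER even
     * though a plain AND would be nonzero. */
    #define demo_misc_has(misc, m) \
        (((misc) & DEMO_MISC_##m) == DEMO_MISC_##m)

    static bool sample_is_guest_user(uint16_t misc)
    {
        return demo_misc_has(misc, GUEST_USER);
    }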
862 tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
867 u64 t = sample->time;
870 initial_time = sample->time;
871 t = sample->time - initial_time;
874 t = sample->time - previous_time;
878 previous_time = sample->time;
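The time handling above keeps two pieces of state so timestamps can be printed as absolute values, relative to the first sample, or as deltas from the previous one. A small sketch of that bookkeeping, assuming invented mode flags in place of perf's command-line options:

    #include <stdbool.h>
    #include <stdint.h>

    /* Time of the first and of the previous sample seen by the printer. */
    static uint64_t initial_time;
    static uint64_t previous_time;

    /* Return the timestamp to print. "reltime" and "deltatime" are just
     * illustrative names for the two alternate modes. */
    static uint64_t adjust_time(uint64_t sample_time, bool reltime, bool deltatime)
    {
        uint64_t t = sample_time;

        if (reltime) {
            if (!initial_time)
                initial_time = sample_time;
            t = sample_time - initial_time;
        } else if (deltatime) {
            /* For the very first sample this is simply the absolute time. */
            t = sample_time - previous_time;
        }
        previous_time = sample_time;
        return t;
    }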
916 static int perf_sample__fprintf_brstack(struct perf_sample *sample,
920 struct branch_stack *br = sample->branch_stack;
921 struct branch_entry *entries = perf_sample__branch_entries(sample);
938 thread__find_map_fb(thread, sample->cpumode, from, &alf);
939 thread__find_map_fb(thread, sample->cpumode, to, &alt);
955 static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
959 struct branch_stack *br = sample->branch_stack;
960 struct branch_entry *entries = perf_sample__branch_entries(sample);
975 thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
976 thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
993 static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
997 struct branch_stack *br = sample->branch_stack;
998 struct branch_entry *entries = perf_sample__branch_entries(sample);
1013 if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
1017 if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
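The three brstack printers above walk the same structure: sample->branch_stack holds the entry count, perf_sample__branch_entries() yields the array of from/to pairs, and each address is resolved through thread__find_map_fb() or thread__find_symbol_fb(). A stripped-down sketch of that iteration, with simplified local structs instead of perf's definitions and no symbol resolution:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the branch stack carried in a sample. */
    struct demo_branch_entry {
        uint64_t from;
        uint64_t to;
    };

    struct demo_branch_stack {
        uint64_t nr;                          /* number of entries */
        struct demo_branch_entry entries[];   /* from/to address pairs */
    };

    /* Emit one from->to pair per recorded branch, the way the brstack
     * printers iterate the sample's branch stack. */
    static int print_branch_stack(const struct demo_branch_stack *br, FILE *fp)
    {
        int printed = 0;

        for (uint64_t i = 0; i < br->nr; i++)
            printed += fprintf(fp, " %016" PRIx64 "->%016" PRIx64,
                               br->entries[i].from, br->entries[i].to);
        return printed;
    }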
1228 static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1233 struct branch_stack *br = sample->branch_stack;
1234 struct branch_entry *entries = perf_sample__branch_entries(sample);
1250 x.cpu = sample->cpu;
1320 if (entries[0].from == sample->ip)
1326 * Print final block up to sample
1330 * between final branch and sample. When this happens just
1334 end = sample->ip;
1343 len = grab_bb(buffer, sample->ip, sample->ip,
1348 printed += fprintf(fp, "\t%016" PRIx64 "\t%s", sample->ip,
1349 dump_insn(&x, sample->ip, buffer, len, &ilen));
1354 print_srccode(thread, x.cpumode, sample->ip);
1366 if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) {
1370 printed += fprintf(fp, "\t... not reaching sample ...\n");
1380 static int perf_sample__fprintf_addr(struct perf_sample *sample,
1385 int printed = fprintf(fp, "%16" PRIx64, sample->addr);
1391 thread__resolve(thread, &al, sample);
1408 static const char *resolve_branch_sym(struct perf_sample *sample,
1418 if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
1421 thread__resolve(thread, addr_al, sample);
1425 *ip = sample->addr;
1427 *ip = sample->addr;
1429 } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
1433 *ip = sample->ip;
1438 static int perf_sample__fprintf_callindent(struct perf_sample *sample,
1446 size_t depth = thread_stack__depth(thread, sample->cpu);
1457 if (thread__ts(thread) && sample->flags & PERF_IP_FLAG_RETURN)
1460 name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
1489 __weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
1495 void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
1498 if (sample->insn_len == 0 && native_arch)
1499 arch_fetch_insn(sample, thread, machine);
1502 static int perf_sample__fprintf_insn(struct perf_sample *sample,
1509 script_fetch_insn(sample, thread, machine);
1512 printed += fprintf(fp, " ilen: %d", sample->insn_len);
1513 if (PRINT_FIELD(INSN) && sample->insn_len) {
1517 for (i = 0; i < sample->insn_len; i++)
1518 printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
1521 printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
1526 static int perf_sample__fprintf_ipc(struct perf_sample *sample,
1531 if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
1534 ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;
1537 ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
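The IPC lines above work in fixed point: instruction and cycle counts are both carried in the sample, and scaling by 100 before the division gives two decimal places without floating point. A minimal standalone sketch of that arithmetic, with an illustrative struct instead of struct perf_sample:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the two sample fields involved. */
    struct demo_ipc_counts {
        uint64_t insn_cnt;    /* instructions retired over the region */
        uint64_t cyc_cnt;     /* cycles elapsed over the region */
    };

    /* Scale by 100 before dividing, then split the quotient into integer
     * and fractional parts, the same arithmetic as in the lines above. */
    static int print_ipc(const struct demo_ipc_counts *c, FILE *fp)
    {
        uint64_t ipc;

        if (!c->cyc_cnt || !c->insn_cnt)
            return 0;                 /* nothing meaningful to report */

        ipc = (c->insn_cnt * 100) / c->cyc_cnt;
        return fprintf(fp, " IPC: %" PRIu64 ".%02" PRIu64
                       " (%" PRIu64 "/%" PRIu64 ")",
                       ipc / 100, ipc % 100, c->insn_cnt, c->cyc_cnt);
    }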
1540 static int perf_sample__fprintf_bts(struct perf_sample *sample,
1553 printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
1560 if (symbol_conf.use_callchain && sample->callchain) {
1563 sample, NULL, NULL,
1576 printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
1585 printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
1588 printed += perf_sample__fprintf_ipc(sample, attr, fp);
1593 printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
1761 static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
1763 unsigned int nr_bytes = sample->raw_size;
1765 int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
1769 printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
1808 static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
1810 struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
1815 if (perf_sample__bad_synth_size(sample, *data))
1828 static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
1830 struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
1833 if (perf_sample__bad_synth_size(sample, *data))
1841 static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
1843 struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
1846 if (perf_sample__bad_synth_size(sample, *data))
1854 static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
1856 struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
1859 if (perf_sample__bad_synth_size(sample, *data))
1866 static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
1868 struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
1871 if (perf_sample__bad_synth_size(sample, *data))
1880 static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
1882 struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
1886 if (perf_sample__bad_synth_size(sample, *data))
1898 static int perf_sample__fprintf_synth_psb(struct perf_sample *sample, FILE *fp)
1900 struct perf_synth_intel_psb *data = perf_sample__synth_ptr(sample);
1903 if (perf_sample__bad_synth_size(sample, *data))
1911 static int perf_sample__fprintf_synth_evt(struct perf_sample *sample, FILE *fp)
1913 struct perf_synth_intel_evt *data = perf_sample__synth_ptr(sample);
1921 if (perf_sample__bad_synth_size(sample, *data))
1947 static int perf_sample__fprintf_synth_iflag_chg(struct perf_sample *sample, FILE *fp)
1949 struct perf_synth_intel_iflag_chg *data = perf_sample__synth_ptr(sample);
1952 if (perf_sample__bad_synth_size(sample, *data))
1960 static int perf_sample__fprintf_synth(struct perf_sample *sample,
1965 return perf_sample__fprintf_synth_ptwrite(sample, fp);
1967 return perf_sample__fprintf_synth_mwait(sample, fp);
1969 return perf_sample__fprintf_synth_pwre(sample, fp);
1971 return perf_sample__fprintf_synth_exstop(sample, fp);
1973 return perf_sample__fprintf_synth_pwrx(sample, fp);
1975 return perf_sample__fprintf_synth_cbr(sample, fp);
1977 return perf_sample__fprintf_synth_psb(sample, fp);
1979 return perf_sample__fprintf_synth_evt(sample, fp);
1981 return perf_sample__fprintf_synth_iflag_chg(sample, fp);
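Every synth printer above begins the same way: perf_sample__synth_ptr() points into the sample's raw payload and perf_sample__bad_synth_size() rejects the sample when that payload is too small for the structure about to be read. A generic sketch of that validate-then-cast pattern, with an invented payload layout and helper rather than perf's own:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented payload layout for illustration. */
    struct demo_synth_ptwrite {
        uint64_t payload;
    };

    /* Refuse to interpret a raw buffer that cannot hold the structure we
     * are about to read from it. */
    static bool demo_bad_synth_size(size_t raw_size, size_t need)
    {
        return raw_size < need;
    }

    static int demo_print_synth_ptwrite(const void *raw_data, size_t raw_size,
                                        FILE *fp)
    {
        const struct demo_synth_ptwrite *data = raw_data;

        if (!data || demo_bad_synth_size(raw_size, sizeof(*data)))
            return 0;
        return fprintf(fp, " payload: %#" PRIx64 "\n", data->payload);
    }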
2021 struct perf_sample *sample;
2036 perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
2051 perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
2059 struct perf_sample *sample,
2067 .sample = sample,
2081 val = sample->period * evsel->scale;
2087 sample->cpu,
2095 static bool show_event(struct perf_sample *sample,
2101 int depth = thread_stack__depth(thread, sample->cpu);
2115 const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
2138 struct perf_sample *sample, struct evsel *evsel,
2156 perf_sample__fprintf_start(script, sample, thread, evsel,
2160 fprintf(fp, "%10" PRIu64 " ", sample->period);
2172 perf_sample__fprintf_flags(sample->flags, fp);
2175 perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
2179 if (PRINT_FIELD(TRACE) && sample->raw_data) {
2180 event_format__fprintf(evsel->tp_format, sample->cpu,
2181 sample->raw_data, sample->raw_size, fp);
2185 perf_sample__fprintf_synth(sample, evsel, fp);
2188 perf_sample__fprintf_addr(sample, thread, attr, fp);
2191 data_src__fprintf(sample->data_src, fp);
2194 fprintf(fp, "%16" PRIu64, sample->weight);
2197 fprintf(fp, "%16" PRIu16, sample->ins_lat);
2200 fprintf(fp, "%16" PRIu16, sample->retire_lat);
2205 sample->cgroup);
2219 if (symbol_conf.use_callchain && sample->callchain) {
2222 sample, NULL, NULL,
2227 sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor,
2232 perf_sample__fprintf_iregs(sample, attr, arch, fp);
2235 perf_sample__fprintf_uregs(sample, attr, arch, fp);
2238 perf_sample__fprintf_brstack(sample, thread, attr, fp);
2240 perf_sample__fprintf_brstacksym(sample, thread, attr, fp);
2242 perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
2245 perf_sample__fprintf_bpf_output(sample, fp);
2246 perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
2249 fprintf(fp, "%16" PRIx64, sample->phys_addr);
2252 fprintf(fp, " %s", get_page_size_name(sample->data_page_size, str));
2255 fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
2257 perf_sample__fprintf_ipc(sample, attr, fp);
2268 perf_sample__fprint_metric(script, thread, evsel, sample, fp);
2341 static bool filter_cpu(struct perf_sample *sample)
2343 if (cpu_list && sample->cpu != (u32)-1)
2344 return !test_bit(sample->cpu, cpu_bitmap);
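filter_cpu() above drops a sample when a CPU filter is active, the sample carries a valid CPU, and that CPU is not set in the filter bitmap. A small sketch of that check with a hand-rolled bitmap in place of the kernel's test_bit() helper:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_MAX_NR_CPUS    4096
    #define BITS_PER_WORD       64

    /* Stand-ins for the CPU list string and bitmap that perf builds when
     * a CPU filter is requested on the command line. */
    static const char *cpu_list;   /* non-NULL once a filter is set */
    static uint64_t cpu_bitmap[DEMO_MAX_NR_CPUS / BITS_PER_WORD];

    static bool demo_test_bit(unsigned int nr, const uint64_t *addr)
    {
        return (addr[nr / BITS_PER_WORD] >> (nr % BITS_PER_WORD)) & 1;
    }

    /* Drop the sample when a filter is active, the CPU is valid (not the
     * (u32)-1 "unknown" marker) and it is not present in the bitmap. */
    static bool demo_filter_cpu(uint32_t sample_cpu)
    {
        if (cpu_list && sample_cpu != (uint32_t)-1)
            return !demo_test_bit(sample_cpu, cpu_bitmap);
        return false;
    }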
2350 struct perf_sample *sample,
2363 ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
2371 sample->time)) {
2376 if (sample->time < last_timestamp) {
2379 sample->time);
2382 last_timestamp = sample->time;
2386 if (filter_cpu(sample))
2389 if (!al.thread && machine__resolve(machine, &al, sample) < 0) {
2399 if (!show_event(sample, evsel, al.thread, &al, &addr_al))
2405 ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
2418 thread__resolve(al.thread, &addr_al, sample);
2421 scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
2423 process_event(scr, sample, evsel, &al, &addr_al, machine);
2503 struct perf_sample *sample,
2509 struct evsel *evsel = evlist__id2evsel(session->evlist, sample->id);
2513 sample->cpu = 0;
2514 sample->time = timestamp;
2515 sample->pid = pid;
2516 sample->tid = tid;
2519 if (filter_cpu(sample))
2526 perf_sample__fprintf_start(script, sample, thread, evsel,
2538 struct perf_sample *sample, struct machine *machine,
2541 return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
2546 struct perf_sample *sample,
2549 if (perf_event__process_comm(tool, event, sample, machine) < 0)
2552 return print_event(tool, event, sample, machine, event->comm.pid,
2558 struct perf_sample *sample,
2561 if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
2564 return print_event(tool, event, sample, machine, event->namespaces.pid,
2570 struct perf_sample *sample,
2573 if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
2576 return print_event(tool, event, sample, machine, sample->pid,
2577 sample->tid);
2582 struct perf_sample *sample,
2585 if (perf_event__process_fork(tool, event, sample, machine) < 0)
2588 return print_event_with_time(tool, event, sample, machine,
2594 struct perf_sample *sample,
2598 if (print_event_with_time(tool, event, sample, machine, event->fork.pid,
2602 return perf_event__process_exit(tool, event, sample, machine);
2607 struct perf_sample *sample,
2610 if (perf_event__process_mmap(tool, event, sample, machine) < 0)
2613 return print_event(tool, event, sample, machine, event->mmap.pid,
2619 struct perf_sample *sample,
2622 if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
2625 return print_event(tool, event, sample, machine, event->mmap2.pid,
2631 struct perf_sample *sample,
2636 if (perf_event__process_switch(tool, event, sample, machine) < 0)
2639 if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
2640 scripting_ops->process_switch(event, sample, machine);
2645 return print_event(tool, event, sample, machine, sample->pid,
2646 sample->tid);
2663 struct perf_sample *sample,
2666 return print_event(tool, event, sample, machine, sample->pid,
2667 sample->tid);
2673 struct perf_sample *sample,
2677 scripting_ops->process_throttle(event, sample, machine);
2694 struct perf_sample *sample,
2697 if (machine__process_ksymbol(machine, event, sample) < 0)
2700 return print_event(tool, event, sample, machine, sample->pid,
2701 sample->tid);
2706 struct perf_sample *sample,
2709 if (perf_event__process_text_poke(tool, event, sample, machine) < 0)
2712 return print_event(tool, event, sample, machine, sample->pid,
2713 sample->tid);
3830 .sample = process_sample_event,
4023 * Enable guest sample processing.