Lines Matching defs:sample

289 struct perf_sample *sample)
295 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
1234 struct perf_sample *sample)
1237 sample->pid,
1238 sample->tid);
1240 thread_stack__sample_late(thread, sample->cpu, pt->chain,
1241 pt->synth_opts.callchain_sz + 1, sample->ip,
1244 sample->callchain = pt->chain;
1272 struct perf_sample *sample)
1275 sample->pid,
1276 sample->tid);
1278 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
1279 pt->br_stack_sz, sample->ip,
1282 sample->branch_stack = pt->br_stack;
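
The two helpers above (source lines 1234-1282) decorate already-synthesized samples late: a chain / branch-stack buffer kept on struct intel_pt (pt->chain, pt->br_stack) is filled from the per-thread stack and the sample is simply pointed at it. A toy, stand-alone model of that reuse of one shared buffer, with made-up types and addresses (nothing here is the real perf API):

#include <stdio.h>

#define MAX_CHAIN 4

/* One preallocated chain, reused for every sample (models pt->chain). */
struct callchain {
        int nr;
        unsigned long long ips[MAX_CHAIN];
};

struct fake_sample {
        struct callchain *callchain;
};

static struct callchain chain;          /* models the buffer hung off struct intel_pt */

/* Fill the shared buffer for this sample and point the sample at it. */
static void add_callchain(struct fake_sample *s, unsigned long long ip)
{
        if (s->callchain)               /* only decorate samples that have none yet */
                return;
        chain.nr = 2;
        chain.ips[0] = ip;
        chain.ips[1] = 0xffffffff81000000ULL;   /* hypothetical caller address */
        s->callchain = &chain;
}

int main(void)
{
        struct fake_sample sample = { 0 };

        add_callchain(&sample, 0x401000);
        printf("chain of %d entries, leaf %#llx\n", sample.callchain->nr,
               sample.callchain->ips[0]);
        return 0;
}

The guard mirrors the add_callchain/add_last_branch checks further down (source lines 3495-3498), which only decorate samples that do not already carry a callchain or branch stack.
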
1685 struct perf_sample *sample)
1687 event->sample.header.type = PERF_RECORD_SAMPLE;
1688 event->sample.header.size = sizeof(struct perf_event_header);
1690 sample->pid = ptq->pid;
1691 sample->tid = ptq->tid;
1696 sample->pid = ptq->guest_pid;
1697 sample->tid = ptq->guest_tid;
1698 sample->machine_pid = ptq->guest_machine_pid;
1699 sample->vcpu = ptq->vcpu;
1703 sample->cpu = ptq->cpu;
1704 sample->insn_len = ptq->insn_len;
1705 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1711 struct perf_sample *sample)
1713 intel_pt_prep_a_sample(ptq, event, sample);
1716 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1718 sample->ip = ptq->state->from_ip;
1719 sample->addr = ptq->state->to_ip;
1720 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1721 sample->period = 1;
1722 sample->flags = ptq->flags;
1724 event->sample.header.misc = sample->cpumode;
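
intel_pt_prep_b_sample() above stamps each sample with tsc_to_perf_time(ptq->timestamp, &pt->tc) (source line 1716). A self-contained sketch of that style of conversion, using the usual mult/shift/zero parameters perf's time conversion carries; the struct and field names here are simplified stand-ins, not the real tools/perf definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for perf's TSC conversion parameters. */
struct tsc_conv {
        uint16_t time_shift;
        uint32_t time_mult;
        uint64_t time_zero;
};

/* TSC cycles -> perf time (ns), using the mult/shift/zero scheme. */
static uint64_t tsc_to_ns(uint64_t cyc, const struct tsc_conv *tc)
{
        uint64_t quot = cyc >> tc->time_shift;
        uint64_t rem  = cyc & (((uint64_t)1 << tc->time_shift) - 1);

        return tc->time_zero + quot * tc->time_mult +
               ((rem * tc->time_mult) >> tc->time_shift);
}

int main(void)
{
        /* Illustrative values for a 3 GHz TSC: 3e9 cycles is about one second. */
        struct tsc_conv tc = { .time_shift = 31, .time_mult = 715827883, .time_zero = 0 };

        printf("%llu ns\n", (unsigned long long)tsc_to_ns(3000000000ULL, &tc));
        return 0;
}
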
1728 struct perf_sample *sample, u64 type)
1730 event->header.size = perf_event__sample_event_size(sample, type, 0);
1731 return perf_event__synthesize_sample(event, type, 0, sample);
1736 struct perf_sample *sample, u64 type)
1741 return intel_pt_inject_event(event, sample, type);
1746 struct perf_sample *sample, u64 type)
1750 ret = intel_pt_opt_inject(pt, event, sample, type);
1754 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1765 struct perf_sample sample = { .ip = 0, };
1778 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1780 sample.id = ptq->pt->branches_id;
1781 sample.stream_id = ptq->pt->branches_id;
1792 .from = sample.ip,
1793 .to = sample.addr,
1796 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1800 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1801 if (sample.cyc_cnt) {
1802 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1807 return intel_pt_deliver_synth_event(pt, event, &sample,
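
The branch-sample code above (source lines 1765-1807) shows the shape every synthesized sample in this listing follows: zero-initialize a perf_sample on the stack, let a prep helper fill the common fields, set id/stream_id for the synthesized event, then hand everything to intel_pt_deliver_synth_event(). A compressed stand-alone model of that flow, with simplified stand-in types instead of perf's real structures:

#include <stdio.h>
#include <string.h>

/* Stand-ins only; the real types are struct perf_sample and union perf_event. */
struct fake_sample {
        unsigned long long ip, addr, id, stream_id, period;
};

/* Models intel_pt_prep_b_sample(): common fields shared by the samples. */
static void prep_sample(struct fake_sample *s, unsigned long long from,
                        unsigned long long to)
{
        memset(s, 0, sizeof(*s));
        s->ip = from;
        s->addr = to;
        s->period = 1;
}

/* Models intel_pt_deliver_synth_event(): here it just prints the sample. */
static int deliver(const struct fake_sample *s)
{
        printf("branch %#llx -> %#llx (id %llu)\n", s->ip, s->addr, s->id);
        return 0;
}

int main(void)
{
        struct fake_sample sample = { .ip = 0 };   /* mirrors 'sample = { .ip = 0, }' above */

        prep_sample(&sample, 0x401000, 0x401020);  /* hypothetical from/to addresses */
        sample.id = 42;                            /* the real code uses ptq->pt->branches_id */
        sample.stream_id = 42;
        return deliver(&sample);
}

In the real code the event may also be re-packed by the inject path (source lines 1728-1741) before perf_session__deliver_synth_event() delivers it.
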
1814 struct perf_sample *sample)
1816 intel_pt_prep_b_sample(pt, ptq, event, sample);
1821 sample->ip, pt->kernel_start);
1822 sample->callchain = ptq->chain;
1828 sample->branch_stack = ptq->last_branch;
1836 struct perf_sample sample = { .ip = 0, };
1841 intel_pt_prep_sample(pt, ptq, event, &sample);
1843 sample.id = ptq->pt->instructions_id;
1844 sample.stream_id = ptq->pt->instructions_id;
1846 sample.period = 1;
1848 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1851 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1852 if (sample.cyc_cnt) {
1853 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1860 return intel_pt_deliver_synth_event(pt, event, &sample,
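
The instruction samples above carry IPC data as deltas of running counters (source lines 1846-1853): cycle and instruction counts accumulated since the previous sample, attached only when the cycle count actually advanced. A small worked example of that delta bookkeeping with hypothetical counter values:

#include <stdio.h>

/* Running totals kept per queue (models ptq->ipc_insn_cnt / ptq->ipc_cyc_cnt). */
struct counters {
        unsigned long long insn_cnt, cyc_cnt;
        unsigned long long last_insn_cnt, last_cyc_cnt;
};

/* Emit per-sample deltas and remember the new baseline, as the listing does. */
static void emit_sample(struct counters *c)
{
        unsigned long long cyc  = c->cyc_cnt - c->last_cyc_cnt;
        unsigned long long insn = c->insn_cnt - c->last_insn_cnt;

        if (cyc)        /* only attach IPC data when cycles advanced */
                printf("insn=%llu cyc=%llu IPC=%.2f\n", insn, cyc, (double)insn / cyc);

        c->last_cyc_cnt = c->cyc_cnt;
        c->last_insn_cnt = c->insn_cnt;
}

int main(void)
{
        struct counters c = { 0 };

        c.insn_cnt = 1200; c.cyc_cnt = 800;  emit_sample(&c);  /* IPC 1.50 */
        c.insn_cnt = 1800; c.cyc_cnt = 1400; emit_sample(&c);  /* 600/600 -> IPC 1.00 */
        return 0;
}
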
1868 struct perf_sample sample = { .ip = 0, };
1877 intel_pt_prep_sample(pt, ptq, event, &sample);
1879 sample.id = ptq->pt->cycles_id;
1880 sample.stream_id = ptq->pt->cycles_id;
1881 sample.period = period;
1883 sample.cyc_cnt = period;
1884 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
1888 return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
1895 struct perf_sample sample = { .ip = 0, };
1900 intel_pt_prep_sample(pt, ptq, event, &sample);
1902 sample.id = ptq->pt->transactions_id;
1903 sample.stream_id = ptq->pt->transactions_id;
1905 return intel_pt_deliver_synth_event(pt, event, &sample,
1912 struct perf_sample *sample)
1914 intel_pt_prep_sample(pt, ptq, event, sample);
1920 if (!sample->ip)
1921 sample->flags = 0;
1928 struct perf_sample sample = { .ip = 0, };
1934 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1936 sample.id = ptq->pt->ptwrites_id;
1937 sample.stream_id = ptq->pt->ptwrites_id;
1943 sample.raw_size = perf_synth__raw_size(raw);
1944 sample.raw_data = perf_synth__raw_data(&raw);
1946 return intel_pt_deliver_synth_event(pt, event, &sample,
1954 struct perf_sample sample = { .ip = 0, };
1963 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1965 sample.id = ptq->pt->cbr_id;
1966 sample.stream_id = ptq->pt->cbr_id;
1973 sample.raw_size = perf_synth__raw_size(raw);
1974 sample.raw_data = perf_synth__raw_data(&raw);
1976 return intel_pt_deliver_synth_event(pt, event, &sample,
1984 struct perf_sample sample = { .ip = 0, };
1990 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1992 sample.id = ptq->pt->psb_id;
1993 sample.stream_id = ptq->pt->psb_id;
1994 sample.flags = 0;
1999 sample.raw_size = perf_synth__raw_size(raw);
2000 sample.raw_data = perf_synth__raw_data(&raw);
2002 return intel_pt_deliver_synth_event(pt, event, &sample,
2010 struct perf_sample sample = { .ip = 0, };
2016 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2018 sample.id = ptq->pt->mwait_id;
2019 sample.stream_id = ptq->pt->mwait_id;
2024 sample.raw_size = perf_synth__raw_size(raw);
2025 sample.raw_data = perf_synth__raw_data(&raw);
2027 return intel_pt_deliver_synth_event(pt, event, &sample,
2035 struct perf_sample sample = { .ip = 0, };
2041 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2043 sample.id = ptq->pt->pwre_id;
2044 sample.stream_id = ptq->pt->pwre_id;
2049 sample.raw_size = perf_synth__raw_size(raw);
2050 sample.raw_data = perf_synth__raw_data(&raw);
2052 return intel_pt_deliver_synth_event(pt, event, &sample,
2060 struct perf_sample sample = { .ip = 0, };
2066 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2068 sample.id = ptq->pt->exstop_id;
2069 sample.stream_id = ptq->pt->exstop_id;
2074 sample.raw_size = perf_synth__raw_size(raw);
2075 sample.raw_data = perf_synth__raw_data(&raw);
2077 return intel_pt_deliver_synth_event(pt, event, &sample,
2085 struct perf_sample sample = { .ip = 0, };
2091 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2093 sample.id = ptq->pt->pwrx_id;
2094 sample.stream_id = ptq->pt->pwrx_id;
2099 sample.raw_size = perf_synth__raw_size(raw);
2100 sample.raw_data = perf_synth__raw_data(&raw);
2102 return intel_pt_deliver_synth_event(pt, event, &sample,
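
The ptwrite, cbr, psb, mwait, pwre, exstop and pwrx fragments above all finish the same way: a small synthesized payload is attached to the sample through raw_data/raw_size before delivery. A stand-alone sketch of attaching such a payload, using a made-up payload struct in place of perf's perf_synth__raw_data()/perf_synth__raw_size() helpers:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical payload; the real code builds a perf_synth_* record. */
struct raw_payload {
        uint64_t payload;       /* e.g. the PTWRITE operand */
};

struct fake_sample {
        const void *raw_data;
        uint32_t raw_size;
};

int main(void)
{
        struct raw_payload raw = { .payload = 0xdeadbeef };
        struct fake_sample sample = { 0 };

        /* Attach the payload by pointer and size, as the listed lines do. */
        sample.raw_data = &raw;
        sample.raw_size = sizeof(raw);

        printf("raw_size=%u payload=%#llx\n", (unsigned)sample.raw_size,
               (unsigned long long)((const struct raw_payload *)sample.raw_data)->payload);
        return 0;
}
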
2236 struct perf_sample sample = { .ip = 0, };
2241 u64 regs[8 * sizeof(sample.intr_regs.mask)];
2246 intel_pt_prep_a_sample(ptq, event, &sample);
2248 sample.id = id;
2249 sample.stream_id = id;
2252 sample.period = evsel->core.attr.sample_period;
2256 sample.ip = items->ip;
2258 sample.ip = items->rip;
2260 sample.ip = ptq->state->from_ip;
2262 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2264 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2266 sample.cpumode = cpumode;
2276 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2282 pt->synth_opts.callchain_sz, sample.ip,
2284 sample.callchain = ptq->chain;
2293 sample.intr_regs.abi = items->is_32_bit ?
2296 sample.intr_regs.regs = regs;
2298 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2300 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2315 sample.branch_stack = ptq->last_branch;
2319 sample.addr = items->mem_access_address;
2340 sample.weight = weight & 0xffff;
2341 sample.ins_lat = items->mem_access_latency & 0xffff;
2343 sample.weight = items->mem_access_latency;
2345 if (!sample.weight && items->has_tsx_aux_info) {
2347 sample.weight = (u32)items->tsx_aux_info;
2359 sample.transaction = txn;
2362 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
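
The PEBS fragments above (source lines 2236-2362) copy sampled registers into a dense array behind sample.intr_regs, with a mask saying which registers are present; intel_pt_add_gp_regs() and intel_pt_add_xmm() return the next free slot. A stand-alone sketch of that bitmask-driven packing (register names, mask and values are illustrative, not Intel PT's actual layout):

#include <stdio.h>
#include <stdint.h>

/* Copy only the registers whose bit is set in 'mask' into a dense array. */
static uint64_t *pack_regs(uint64_t *pos, const uint64_t *all_regs,
                           int nr_regs, uint64_t mask)
{
        for (int i = 0; i < nr_regs; i++) {
                if (mask & (1ULL << i))
                        *pos++ = all_regs[i];
        }
        return pos;     /* next free slot, like the 'pos' intel_pt_add_gp_regs() returns */
}

int main(void)
{
        uint64_t all_regs[4] = { 0x10, 0x20, 0x30, 0x40 };      /* hypothetical register values */
        uint64_t packed[4];
        uint64_t mask = 0x5;                                    /* bits 0 and 2 set */
        uint64_t *end = pack_regs(packed, all_regs, 4, mask);

        printf("%td regs packed: %#llx %#llx\n", end - packed,
               (unsigned long long)packed[0], (unsigned long long)packed[1]);
        return 0;
}
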
2408 struct perf_sample sample = { .ip = 0, };
2418 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2420 sample.id = ptq->pt->evt_id;
2421 sample.stream_id = ptq->pt->evt_id;
2435 sample.raw_size = perf_synth__raw_size(raw) +
2437 sample.raw_data = perf_synth__raw_data(&raw);
2439 return intel_pt_deliver_synth_event(pt, event, &sample,
2447 struct perf_sample sample = { .ip = 0, };
2453 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2455 sample.id = ptq->pt->iflag_chg_id;
2456 sample.stream_id = ptq->pt->iflag_chg_id;
2465 sample.addr = 0;
2467 sample.flags = ptq->flags;
2469 sample.raw_size = perf_synth__raw_size(raw);
2470 sample.raw_data = perf_synth__raw_data(&raw);
2472 return intel_pt_deliver_synth_event(pt, event, &sample,
3082 struct perf_sample *sample)
3086 ptq->pid = sample->pid;
3087 ptq->tid = sample->tid;
3109 struct perf_sample *sample)
3115 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3124 ptq->time = sample->time;
3125 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
3130 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
3132 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
3133 sample->pid, sample->tid, 0, sample->time,
3134 sample->machine_pid, sample->vcpu);
3212 struct perf_sample *sample)
3216 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
3221 tid = evsel__intval(evsel, sample, "next_pid");
3222 cpu = sample->cpu;
3225 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
3228 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3237 struct perf_sample *sample)
3239 pid_t pid = sample->pid;
3240 pid_t tid = sample->tid;
3241 int cpu = sample->cpu;
3276 struct perf_sample *sample)
3280 struct machine *machine = machines__find(machines, sample->machine_pid);
3298 return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
3302 struct perf_sample *sample)
3309 return intel_pt_guest_context_switch(pt, event, sample);
3311 cpu = sample->cpu;
3315 return intel_pt_context_switch_in(pt, sample);
3325 pid = sample->pid;
3326 tid = sample->tid;
3332 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3341 struct perf_sample *sample)
3347 sample->cpu, event->itrace_start.pid,
3348 event->itrace_start.tid, sample->time,
3349 perf_time_to_tsc(sample->time, &pt->tc));
3351 return machine__set_current_tid(pt->machine, sample->cpu,
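
The switch and itrace-start handlers above work out which task is running on each CPU and record it with machine__set_current_tid(). A minimal stand-alone model of that per-CPU tid bookkeeping (array size and values are arbitrary; this is not the real perf machinery):

#include <stdio.h>

#define NR_CPUS 8

static int current_tid[NR_CPUS];        /* models the per-CPU tid perf tracks per machine */

/* Models the effect of machine__set_current_tid() for this sketch. */
static void set_current_tid(int cpu, int tid)
{
        if (cpu >= 0 && cpu < NR_CPUS)
                current_tid[cpu] = tid;
}

int main(void)
{
        set_current_tid(2, 1234);       /* e.g. a context-switch event on CPU 2 */
        set_current_tid(2, 5678);       /* the next switch replaces it */
        printf("cpu 2 now runs tid %d\n", current_tid[2]);
        return 0;
}
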
3358 struct perf_sample *sample)
3365 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3366 evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
3375 ptq->pebs[hw_id].id = sample->id;
3449 struct perf_sample *sample,
3465 if (sample->time && sample->time != (u64)-1)
3466 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3478 if (sample->aux_sample.size)
3480 sample);
3484 sample->time);
3495 if (pt->synth_opts.add_callchain && !sample->callchain)
3496 intel_pt_add_callchain(pt, sample);
3497 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
3498 intel_pt_add_br_stack(pt, sample);
3504 err = intel_pt_lost(pt, sample);
3511 err = intel_pt_process_switch(pt, sample);
3515 err = intel_pt_process_itrace_start(pt, event, sample);
3517 err = intel_pt_process_aux_output_hw_id(pt, event, sample);
3520 err = intel_pt_context_switch(pt, event, sample);
3525 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3527 event->header.type, sample->cpu, sample->time, timestamp);
3639 struct perf_sample *sample,
3651 if (sample->time && sample->time != (u64)-1)
3652 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3656 return auxtrace_queues__add_sample(&pt->queues, session, sample,
3667 struct perf_sample *sample __maybe_unused,
3683 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3783 * We don't use the hardware index, but the sample generation
4435 * actual sample ip to where the sample time is recorded.