Lines Matching defs:ptq
352 struct intel_pt_queue *ptq = data;
353 struct intel_pt *pt = ptq->pt;
400 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
408 int fd = perf_data__fd(ptq->pt->session->data);
415 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
417 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
440 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
443 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
453 struct intel_pt_queue *ptq = data;
454 struct auxtrace_buffer *buffer = ptq->buffer;
455 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
459 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
468 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
473 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
476 intel_pt_lookahead_drop_buffer(ptq, buffer);
486 intel_pt_lookahead_drop_buffer(ptq, buffer);
487 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
498 struct intel_pt_queue *ptq = data;
499 struct auxtrace_buffer *buffer = ptq->buffer;
500 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
504 if (ptq->stop) {
509 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
519 ptq->buffer = buffer;
521 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
525 if (ptq->step_through_buffers)
526 ptq->stop = true;
531 ptq->old_buffer = buffer;
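
The fragments at 400-531 are the decoder's data callbacks: intel_pt_get_buffer() fetches and overlap-fixes an auxtrace buffer, the lookahead path (453-487) borrows buffers without consuming them, and intel_pt_get_trace() (498-531) steps the queue forward, keeping the previous buffer alive as old_buffer. A minimal sketch of that stepping pattern; every type and name below is a simplified stand-in, not the perf structures:

```c
/* Toy model of the ptq buffer-stepping in intel_pt_get_trace().
 * The real code fetches from the perf.data file and fixes overlaps. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf { const char *name; struct buf *next; };

struct queue_model {
	struct buf *head;        /* buffers still queued for decode */
	struct buf *buffer;      /* buffer currently being decoded */
	struct buf *old_buffer;  /* previous buffer, kept for overlap fixing */
	bool step_through_buffers;
	bool stop;
};

/* Advance to the next buffer; returns NULL at end of data or when
 * stepping one buffer per call (stop mirrors ptq->stop). */
static struct buf *get_trace(struct queue_model *q)
{
	if (q->stop || !q->head)
		return NULL;
	q->old_buffer = q->buffer;
	q->buffer = q->head;
	q->head = q->head->next;
	if (q->step_through_buffers)
		q->stop = true;
	return q->buffer;
}

int main(void)
{
	struct buf b2 = { "b2", NULL }, b1 = { "b1", &b2 };
	struct queue_model q = { .head = &b1 };
	struct buf *b;

	while ((b = get_trace(&q)))
		printf("decode %s (old=%s)\n", b->name,
		       q.old_buffer ? q.old_buffer->name : "none");
	return 0;
}
```
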
673 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
681 return ip >= ptq->pt->kernel_start ?
686 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
690 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
691 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
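
The two helpers at 673-691 derive the sample cpumode from the instruction pointer and the decoder's "nr" (non-root, i.e. guest) bit, using from_ip/from_nr when a branch source exists and to_ip/to_nr otherwise. A self-contained model of the selection; the PERF_RECORD_MISC_* values match include/uapi/linux/perf_event.h:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PERF_RECORD_MISC_KERNEL		(1 << 0)
#define PERF_RECORD_MISC_USER		(2 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL	(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER	(5 << 0)

/* kernel_start is the kernel text base; ip at or above it is kernel. */
static uint8_t nr_cpumode(uint64_t ip, uint64_t kernel_start, bool nr)
{
	if (nr)
		return ip >= kernel_start ? PERF_RECORD_MISC_GUEST_KERNEL :
					    PERF_RECORD_MISC_GUEST_USER;
	return ip >= kernel_start ? PERF_RECORD_MISC_KERNEL :
				    PERF_RECORD_MISC_USER;
}

int main(void)
{
	uint64_t kstart = 0xffff800000000000ULL;

	printf("%u\n", (unsigned)nr_cpumode(0x400000, kstart, false));     /* USER */
	printf("%u\n", (unsigned)nr_cpumode(kstart + 0x10, kstart, true)); /* GUEST_KERNEL */
	return 0;
}
```
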
694 static int intel_pt_get_guest(struct intel_pt_queue *ptq)
696 struct machines *machines = &ptq->pt->session->machines;
698 pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
700 if (ptq->guest_machine && pid == ptq->guest_machine->pid)
703 ptq->guest_machine = NULL;
704 thread__zput(ptq->unknown_guest_thread);
707 thread__zput(ptq->guest_thread);
708 ptq->guest_thread = machines__findnew_guest_code(machines, pid);
715 ptq->unknown_guest_thread = machine__idle_thread(machine);
716 if (!ptq->unknown_guest_thread)
719 ptq->guest_machine = machine;
751 struct intel_pt_queue *ptq = data;
752 struct machine *machine = ptq->pt->machine;
771 nr = ptq->state->to_nr;
772 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
775 if (ptq->pt->have_guest_sideband) {
776 if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
782 intel_pt_get_guest(ptq)) {
787 machine = ptq->guest_machine;
788 thread = ptq->guest_thread;
795 thread = ptq->unknown_guest_thread;
798 thread = ptq->thread;
805 thread = ptq->pt->unknown_thread;
983 struct intel_pt_queue *ptq = data;
990 if (ptq->state->to_nr) {
992 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
995 } else if (ip >= ptq->pt->kernel_start) {
996 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
1001 thread = ptq->thread;
1011 res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
1294 struct intel_pt_queue *ptq;
1296 ptq = zalloc(sizeof(struct intel_pt_queue));
1297 if (!ptq)
1301 ptq->chain = intel_pt_alloc_chain(pt);
1302 if (!ptq->chain)
1309 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
1310 if (!ptq->last_branch)
1314 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
1315 if (!ptq->event_buf)
1318 ptq->pt = pt;
1319 ptq->queue_nr = queue_nr;
1320 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1321 ptq->pid = -1;
1322 ptq->tid = -1;
1323 ptq->cpu = -1;
1324 ptq->next_tid = -1;
1330 params.data = ptq;
1382 ptq->decoder = intel_pt_decoder_new(&params);
1383 if (!ptq->decoder)
1386 return ptq;
1389 zfree(&ptq->event_buf);
1390 zfree(&ptq->last_branch);
1391 zfree(&ptq->chain);
1392 free(ptq);
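
intel_pt_alloc_queue() (1294-1392) uses the kernel-style unwind idiom: zalloc() zeroes the struct up front, so a single error label can zfree() every member in reverse acquisition order, with not-yet-allocated members still NULL. A reduced, runnable version of the same shape, with zalloc/zfree defined locally to stand in for the tools/lib helpers:

```c
#include <stdlib.h>

#define zalloc(sz) calloc(1, (sz))
#define zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

struct queue_model {
	void *chain;
	void *last_branch;
	void *event_buf;
};

static struct queue_model *alloc_queue(size_t chain_sz, size_t br_sz,
				       size_t buf_sz)
{
	struct queue_model *q = zalloc(sizeof(*q));

	if (!q)
		return NULL;
	q->chain = zalloc(chain_sz);
	if (!q->chain)
		goto out_free;
	q->last_branch = zalloc(br_sz);
	if (!q->last_branch)
		goto out_free;
	q->event_buf = malloc(buf_sz);
	if (!q->event_buf)
		goto out_free;
	return q;

out_free:
	/* reverse order; free(NULL) is harmless for unreached members */
	zfree(&q->event_buf);
	zfree(&q->last_branch);
	zfree(&q->chain);
	free(q);
	return NULL;
}

int main(void)
{
	struct queue_model *q = alloc_queue(64, 64, 4096);

	if (q) {
		zfree(&q->event_buf);
		zfree(&q->last_branch);
		zfree(&q->chain);
		free(q);
	}
	return 0;
}
```
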
1398 struct intel_pt_queue *ptq = priv;
1400 if (!ptq)
1402 thread__zput(ptq->thread);
1403 thread__zput(ptq->guest_thread);
1404 thread__zput(ptq->unknown_guest_thread);
1405 intel_pt_decoder_free(ptq->decoder);
1406 zfree(&ptq->event_buf);
1407 zfree(&ptq->last_branch);
1408 zfree(&ptq->chain);
1409 free(ptq);
1420 struct intel_pt_queue *ptq = queue->priv;
1422 if (ptq && ptq->decoder)
1423 intel_pt_set_first_timestamp(ptq->decoder, timestamp);
1427 static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
1429 struct machines *machines = &ptq->pt->session->machines;
1431 pid_t machine_pid = ptq->pid;
1442 if (ptq->guest_machine != machine) {
1443 ptq->guest_machine = NULL;
1444 thread__zput(ptq->guest_thread);
1445 thread__zput(ptq->unknown_guest_thread);
1447 ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
1448 if (!ptq->unknown_guest_thread)
1450 ptq->guest_machine = machine;
1453 vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
1459 if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
1460 thread__zput(ptq->guest_thread);
1462 if (!ptq->guest_thread) {
1463 ptq->guest_thread = machine__find_thread(machine, -1, tid);
1464 if (!ptq->guest_thread)
1468 ptq->guest_machine_pid = machine_pid;
1469 ptq->guest_pid = thread__pid(ptq->guest_thread);
1470 ptq->guest_tid = tid;
1471 ptq->vcpu = vcpu;
1479 struct intel_pt_queue *ptq = queue->priv;
1482 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1483 if (ptq->tid == -1)
1484 ptq->pid = -1;
1485 thread__zput(ptq->thread);
1488 if (!ptq->thread && ptq->tid != -1)
1489 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1491 if (ptq->thread) {
1492 ptq->pid = thread__pid(ptq->thread);
1494 ptq->cpu = thread__cpu(ptq->thread);
1497 if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
1498 ptq->guest_machine_pid = 0;
1499 ptq->guest_pid = -1;
1500 ptq->guest_tid = -1;
1501 ptq->vcpu = -1;
1505 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1507 struct intel_pt *pt = ptq->pt;
1509 ptq->insn_len = 0;
1510 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1511 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1512 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1513 if (!ptq->state->to_ip)
1514 ptq->flags = PERF_IP_FLAG_BRANCH |
1517 else if (ptq->state->from_nr && !ptq->state->to_nr)
1518 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1522 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1526 if (ptq->state->from_ip)
1527 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1529 ptq->flags = PERF_IP_FLAG_BRANCH |
1531 if (ptq->state->flags & INTEL_PT_IN_TX)
1532 ptq->flags |= PERF_IP_FLAG_IN_TX;
1533 ptq->insn_len = ptq->state->insn_len;
1534 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1537 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1538 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1539 if (ptq->state->type & INTEL_PT_TRACE_END)
1540 ptq->flags |= PERF_IP_FLAG_TRACE_END;
1543 if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
1544 if (!ptq->state->from_iflag)
1545 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
1546 if (ptq->state->from_iflag != ptq->state->to_iflag)
1547 ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
1548 } else if (!ptq->state->to_iflag) {
1549 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
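
intel_pt_sample_flags() (1505-1549) folds the decoder state into a single ptq->flags bitmask of PERF_IP_FLAG_* bits that every later sample reuses. A compact model of just the interrupt-flag portion (1543-1549); the flag values below are illustrative stand-ins, not the uapi constants:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IP_FLAG_INTR_DISABLE (1u << 0)
#define IP_FLAG_INTR_TOGGLE  (1u << 1)

struct state_model {
	bool iflag_chg;   /* INTEL_PT_IFLAG_CHG set in state->type */
	bool from_iflag;  /* interrupts enabled before the event */
	bool to_iflag;    /* interrupts enabled after the event */
};

static uint32_t iflag_bits(const struct state_model *s)
{
	uint32_t flags = 0;

	if (s->iflag_chg) {
		if (!s->from_iflag)
			flags |= IP_FLAG_INTR_DISABLE;
		if (s->from_iflag != s->to_iflag)
			flags |= IP_FLAG_INTR_TOGGLE;
	} else if (!s->to_iflag) {
		flags |= IP_FLAG_INTR_DISABLE;
	}
	return flags;
}

int main(void)
{
	struct state_model cli = { .iflag_chg = true, .from_iflag = true,
				   .to_iflag = false };

	printf("flags=%#x\n", iflag_bits(&cli)); /* 0x2: interrupts toggled */
	return 0;
}
```
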
1555 struct intel_pt_queue *ptq)
1560 ptq->sel_timestamp = pt->time_ranges[0].start;
1561 ptq->sel_idx = 0;
1563 if (ptq->sel_timestamp) {
1564 ptq->sel_start = true;
1566 ptq->sel_timestamp = pt->time_ranges[0].end;
1567 ptq->sel_start = false;
1575 struct intel_pt_queue *ptq = queue->priv;
1580 if (!ptq) {
1581 ptq = intel_pt_alloc_queue(pt, queue_nr);
1582 if (!ptq)
1584 queue->priv = ptq;
1587 ptq->cpu = queue->cpu;
1588 ptq->tid = queue->tid;
1590 ptq->cbr_seen = UINT_MAX;
1594 ptq->step_through_buffers = true;
1596 ptq->sync_switch = pt->sync_switch;
1598 intel_pt_setup_time_range(pt, ptq);
1601 if (!ptq->on_heap &&
1602 (!ptq->sync_switch ||
1603 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1612 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1614 if (ptq->sel_start && ptq->sel_timestamp) {
1615 ret = intel_pt_fast_forward(ptq->decoder,
1616 ptq->sel_timestamp);
1622 state = intel_pt_decode(ptq->decoder);
1635 ptq->timestamp = state->timestamp;
1637 queue_nr, ptq->timestamp);
1638 ptq->state = state;
1639 ptq->have_sample = true;
1640 if (ptq->sel_start && ptq->sel_timestamp &&
1641 ptq->timestamp < ptq->sel_timestamp)
1642 ptq->have_sample = false;
1643 intel_pt_sample_flags(ptq);
1644 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1647 ptq->on_heap = true;
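
intel_pt_setup_queue() (1575-1647) ends by decoding forward to the first usable timestamp and pushing the queue onto pt->heap, which keeps all queues ordered by timestamp so processing always services the oldest pending data first. A toy model of that ordering discipline; pop_oldest() is a linear-scan stand-in for auxtrace_heap__pop():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct q_model {
	const char *name;
	uint64_t timestamp;  /* timestamp of the queue's next data */
	bool on_heap;
};

/* Pick the live queue with the smallest timestamp. */
static struct q_model *pop_oldest(struct q_model *qs, int n)
{
	struct q_model *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (qs[i].on_heap &&
		    (!best || qs[i].timestamp < best->timestamp))
			best = &qs[i];
	}
	return best;
}

int main(void)
{
	struct q_model qs[] = {
		{ "cpu0", 300, true },
		{ "cpu1", 100, true },
		{ "cpu2", 200, true },
	};
	struct q_model *q;

	while ((q = pop_oldest(qs, 3))) {
		printf("service %s @%llu\n", q->name,
		       (unsigned long long)q->timestamp);
		q->on_heap = false; /* real code re-adds after decoding more */
	}
	return 0;
}
```
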
1683 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1690 sample->pid = ptq->pid;
1691 sample->tid = ptq->tid;
1693 if (ptq->pt->have_guest_sideband) {
1694 if ((ptq->state->from_ip && ptq->state->from_nr) ||
1695 (ptq->state->to_ip && ptq->state->to_nr)) {
1696 sample->pid = ptq->guest_pid;
1697 sample->tid = ptq->guest_tid;
1698 sample->machine_pid = ptq->guest_machine_pid;
1699 sample->vcpu = ptq->vcpu;
1703 sample->cpu = ptq->cpu;
1704 sample->insn_len = ptq->insn_len;
1705 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1709 struct intel_pt_queue *ptq,
1713 intel_pt_prep_a_sample(ptq, event, sample);
1716 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1718 sample->ip = ptq->state->from_ip;
1719 sample->addr = ptq->state->to_ip;
1720 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1722 sample->flags = ptq->flags;
1761 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1763 struct intel_pt *pt = ptq->pt;
1764 union perf_event *event = ptq->event_buf;
1772 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1778 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1780 sample.id = ptq->pt->branches_id;
1781 sample.stream_id = ptq->pt->branches_id;
1799 if (ptq->sample_ipc)
1800 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1802 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1803 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1804 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
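
The cyc_cnt/insn_cnt fields at 1799-1804 use a running-counter pattern repeated by the instruction (1850-1855) and cycle (1871-1886) samplers below: the decoder exposes monotonically growing totals, and each sample type keeps its own last_* snapshot so every sample carries only the delta since the previous sample of that type. A minimal sketch with made-up numbers:

```c
#include <stdint.h>
#include <stdio.h>

struct ipc_model {
	uint64_t ipc_insn_cnt, ipc_cyc_cnt;   /* running totals from decode */
	uint64_t last_insn_cnt, last_cyc_cnt; /* snapshot per sample type */
};

static void emit_sample(struct ipc_model *m)
{
	printf("sample: %llu insns, %llu cycles\n",
	       (unsigned long long)(m->ipc_insn_cnt - m->last_insn_cnt),
	       (unsigned long long)(m->ipc_cyc_cnt - m->last_cyc_cnt));
	/* snapshot so the next sample reports only new work */
	m->last_insn_cnt = m->ipc_insn_cnt;
	m->last_cyc_cnt = m->ipc_cyc_cnt;
}

int main(void)
{
	struct ipc_model m = { .ipc_insn_cnt = 1000, .ipc_cyc_cnt = 800 };

	emit_sample(&m);              /* 1000 insns, 800 cycles */
	m.ipc_insn_cnt = 1500;
	m.ipc_cyc_cnt = 1300;
	emit_sample(&m);              /* 500 insns, 500 cycles */
	return 0;
}
```
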
1812 struct intel_pt_queue *ptq,
1816 intel_pt_prep_b_sample(pt, ptq, event, sample);
1819 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1822 sample->callchain = ptq->chain;
1826 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1828 sample->branch_stack = ptq->last_branch;
1832 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1834 struct intel_pt *pt = ptq->pt;
1835 union perf_event *event = ptq->event_buf;
1841 intel_pt_prep_sample(pt, ptq, event, &sample);
1843 sample.id = ptq->pt->instructions_id;
1844 sample.stream_id = ptq->pt->instructions_id;
1848 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1850 if (ptq->sample_ipc)
1851 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1853 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1854 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1855 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1858 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1864 static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
1866 struct intel_pt *pt = ptq->pt;
1867 union perf_event *event = ptq->event_buf;
1871 if (ptq->sample_ipc)
1872 period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
1877 intel_pt_prep_sample(pt, ptq, event, &sample);
1879 sample.id = ptq->pt->cycles_id;
1880 sample.stream_id = ptq->pt->cycles_id;
1884 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
1885 ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
1886 ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
1891 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1893 struct intel_pt *pt = ptq->pt;
1894 union perf_event *event = ptq->event_buf;
1900 intel_pt_prep_sample(pt, ptq, event, &sample);
1902 sample.id = ptq->pt->transactions_id;
1903 sample.stream_id = ptq->pt->transactions_id;
1910 struct intel_pt_queue *ptq,
1914 intel_pt_prep_sample(pt, ptq, event, sample);
1924 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1926 struct intel_pt *pt = ptq->pt;
1927 union perf_event *event = ptq->event_buf;
1934 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1936 sample.id = ptq->pt->ptwrites_id;
1937 sample.stream_id = ptq->pt->ptwrites_id;
1940 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1941 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1950 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1952 struct intel_pt *pt = ptq->pt;
1953 union perf_event *event = ptq->event_buf;
1961 ptq->cbr_seen = ptq->state->cbr;
1963 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1965 sample.id = ptq->pt->cbr_id;
1966 sample.stream_id = ptq->pt->cbr_id;
1968 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1980 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1982 struct intel_pt *pt = ptq->pt;
1983 union perf_event *event = ptq->event_buf;
1990 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1992 sample.id = ptq->pt->psb_id;
1993 sample.stream_id = ptq->pt->psb_id;
1997 raw.offset = ptq->state->psb_offset;
2006 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
2008 struct intel_pt *pt = ptq->pt;
2009 union perf_event *event = ptq->event_buf;
2016 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2018 sample.id = ptq->pt->mwait_id;
2019 sample.stream_id = ptq->pt->mwait_id;
2022 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
2031 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
2033 struct intel_pt *pt = ptq->pt;
2034 union perf_event *event = ptq->event_buf;
2041 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2043 sample.id = ptq->pt->pwre_id;
2044 sample.stream_id = ptq->pt->pwre_id;
2047 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
2056 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
2058 struct intel_pt *pt = ptq->pt;
2059 union perf_event *event = ptq->event_buf;
2066 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2068 sample.id = ptq->pt->exstop_id;
2069 sample.stream_id = ptq->pt->exstop_id;
2072 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2081 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
2083 struct intel_pt *pt = ptq->pt;
2084 union perf_event *event = ptq->event_buf;
2091 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2093 sample.id = ptq->pt->pwrx_id;
2094 sample.stream_id = ptq->pt->pwrx_id;
2097 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
2233 static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
2235 const struct intel_pt_blk_items *items = &ptq->state->items;
2237 union perf_event *event = ptq->event_buf;
2238 struct intel_pt *pt = ptq->pt;
2246 intel_pt_prep_a_sample(ptq, event, &sample);
2260 sample.ip = ptq->state->from_ip;
2262 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2274 timestamp = ptq->timestamp;
2281 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2284 sample.callchain = ptq->chain;
2307 intel_pt_add_lbrs(ptq->last_branch, items);
2309 thread_stack__br_sample(ptq->thread, ptq->cpu,
2310 ptq->last_branch,
2313 ptq->last_branch->nr = 0;
2315 sample.branch_stack = ptq->last_branch;
2365 static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2367 struct intel_pt *pt = ptq->pt;
2371 return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
2374 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2376 const struct intel_pt_blk_items *items = &ptq->state->items;
2378 struct intel_pt *pt = ptq->pt;
2385 return intel_pt_synth_single_pebs_sample(ptq);
2389 pe = &ptq->pebs[hw_id];
2394 return intel_pt_synth_single_pebs_sample(ptq);
2396 err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2404 static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
2406 struct intel_pt *pt = ptq->pt;
2407 union perf_event *event = ptq->event_buf;
2418 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2420 sample.id = ptq->pt->evt_id;
2421 sample.stream_id = ptq->pt->evt_id;
2423 raw.cfe.type = ptq->state->cfe_type;
2425 raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2426 raw.cfe.vector = ptq->state->cfe_vector;
2427 raw.cfe.evd_cnt = ptq->state->evd_cnt;
2429 for (i = 0; i < ptq->state->evd_cnt; i++) {
2431 raw.evd[i].evd_type = ptq->state->evd[i].type;
2432 raw.evd[i].payload = ptq->state->evd[i].payload;
2436 ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
2443 static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
2445 struct intel_pt *pt = ptq->pt;
2446 union perf_event *event = ptq->event_buf;
2453 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2455 sample.id = ptq->pt->iflag_chg_id;
2456 sample.stream_id = ptq->pt->iflag_chg_id;
2459 raw.iflag = ptq->state->to_iflag;
2461 if (ptq->state->type & INTEL_PT_BRANCH) {
2463 raw.branch_ip = ptq->state->to_ip;
2467 sample.flags = ptq->flags;
2519 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2522 struct intel_pt *pt = ptq->pt;
2523 u64 tm = ptq->timestamp;
2525 pid_t pid = ptq->pid;
2526 pid_t tid = ptq->tid;
2532 machine_pid = ptq->guest_machine_pid;
2533 vcpu = ptq->vcpu;
2534 pid = ptq->guest_pid;
2535 tid = ptq->guest_tid;
2538 return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
2542 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2545 pid_t tid = ptq->next_tid;
2551 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2553 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2555 queue = &pt->queues.queue_array[ptq->queue_nr];
2558 ptq->next_tid = -1;
2563 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2565 struct intel_pt *pt = ptq->pt;
2568 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2569 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2576 static int intel_pt_sample(struct intel_pt_queue *ptq)
2578 const struct intel_pt_state *state = ptq->state;
2579 struct intel_pt *pt = ptq->pt;
2582 if (!ptq->have_sample)
2585 ptq->have_sample = false;
2588 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2589 ptq->ipc_cyc_cnt = ptq->state->cycles;
2590 ptq->sample_ipc = true;
2592 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2593 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2594 ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2599 intel_pt_get_guest(ptq);
2606 err = intel_pt_synth_pebs_sample(ptq);
2613 err = intel_pt_synth_events_sample(ptq);
2618 err = intel_pt_synth_iflag_chg_sample(ptq);
2626 err = intel_pt_synth_psb_sample(ptq);
2630 if (ptq->state->cbr != ptq->cbr_seen) {
2631 err = intel_pt_synth_cbr_sample(ptq);
2637 err = intel_pt_synth_mwait_sample(ptq);
2642 err = intel_pt_synth_pwre_sample(ptq);
2647 err = intel_pt_synth_exstop_sample(ptq);
2652 err = intel_pt_synth_pwrx_sample(ptq);
2661 err = intel_pt_synth_instruction_sample(ptq);
2666 err = intel_pt_synth_cycle_sample(ptq);
2673 err = intel_pt_synth_transaction_sample(ptq);
2679 err = intel_pt_synth_ptwrite_sample(ptq);
2688 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2689 state->from_ip, state->to_ip, ptq->insn_len,
2694 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2709 err = intel_pt_synth_branch_sample(ptq);
2714 err = intel_pt_synth_branch_sample(ptq);
2717 err = intel_pt_synth_branch_sample(ptq);
2723 if (!ptq->sync_switch)
2726 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2727 switch (ptq->switch_state) {
2731 err = intel_pt_next_tid(pt, ptq);
2734 ptq->switch_state = INTEL_PT_SS_TRACING;
2737 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2741 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2742 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2743 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2744 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2746 (ptq->flags & PERF_IP_FLAG_CALL)) {
2747 ptq->switch_state = INTEL_PT_SS_TRACING;
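
Lines 2723-2747 drive the sync_switch state machine: when a branch lands on the known context-switch IP, a queue that was expecting it performs the deferred tid switch and resumes tracing; otherwise it arms itself to expect the sideband switch event. A reduced model of the transitions (intel_pt_next_tid() is elided to a comment):

```c
#include <stdbool.h>
#include <stdio.h>

enum ss {
	SS_NOT_TRACING,
	SS_UNKNOWN,
	SS_TRACING,
	SS_EXPECTING_SWITCH_EVENT,
	SS_EXPECTING_SWITCH_IP,
};

static enum ss step(enum ss st, bool at_switch_ip, bool to_ip_zero,
		    bool call_to_ptss_ip)
{
	if (at_switch_ip) {
		switch (st) {
		case SS_NOT_TRACING:
		case SS_UNKNOWN:
		case SS_EXPECTING_SWITCH_IP:
			/* real code does intel_pt_next_tid() here */
			return SS_TRACING;
		default:
			return SS_EXPECTING_SWITCH_EVENT;
		}
	}
	if (to_ip_zero)
		return SS_NOT_TRACING;
	if (st == SS_NOT_TRACING)
		return SS_UNKNOWN;
	if (st == SS_UNKNOWN && call_to_ptss_ip)
		return SS_TRACING;
	return st;
}

int main(void)
{
	enum ss st = SS_EXPECTING_SWITCH_IP;

	st = step(st, true, false, false); /* switch ip hit: resume tracing */
	printf("state=%d (SS_TRACING=%d)\n", st, SS_TRACING);
	return 0;
}
```
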
2816 struct intel_pt_queue *ptq = queue->priv;
2818 if (ptq)
2819 ptq->sync_switch = true;
2831 struct intel_pt_queue *ptq = queue->priv;
2833 if (ptq) {
2834 ptq->sync_switch = false;
2835 intel_pt_next_tid(pt, ptq);
2844 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2846 struct intel_pt *pt = ptq->pt;
2848 if (ptq->sel_start) {
2850 ptq->sel_start = false;
2851 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2853 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2855 ptq->sel_start = true;
2856 ptq->sel_idx += 1;
2857 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2865 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2870 if (ptq->sel_start) {
2871 if (ptq->timestamp >= ptq->sel_timestamp) {
2873 intel_pt_next_time(ptq);
2874 if (!ptq->sel_timestamp) {
2882 ptq->have_sample = false;
2883 if (ptq->sel_timestamp > *ff_timestamp) {
2884 if (ptq->sync_switch) {
2885 intel_pt_next_tid(ptq->pt, ptq);
2886 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2888 *ff_timestamp = ptq->sel_timestamp;
2889 err = intel_pt_fast_forward(ptq->decoder,
2890 ptq->sel_timestamp);
2895 } else if (ptq->timestamp > ptq->sel_timestamp) {
2897 if (!intel_pt_next_time(ptq)) {
2899 ptq->have_sample = false;
2900 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
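
intel_pt_next_time() (2844-2857) and intel_pt_time_filter() (2865-2900) walk an ordered array of {start, end} time ranges: sel_start says whether the next boundary enters or leaves a range, sel_idx indexes the range, and sel_timestamp holds the boundary itself. A self-contained model of the boundary walker:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

struct tf_model {
	const struct range *ranges;
	int range_cnt;
	int sel_idx;
	bool sel_start;          /* true: next boundary is a range start */
	uint64_t sel_timestamp;  /* the next boundary */
};

/* Mirrors intel_pt_next_time(): advance one boundary; false when done. */
static bool next_time(struct tf_model *m)
{
	if (m->sel_start) {
		m->sel_start = false;
		m->sel_timestamp = m->ranges[m->sel_idx].end;
		return true;
	}
	if (m->sel_idx + 1 < m->range_cnt) {
		m->sel_start = true;
		m->sel_idx += 1;
		m->sel_timestamp = m->ranges[m->sel_idx].start;
		return true;
	}
	return false;
}

int main(void)
{
	const struct range r[] = { { 100, 200 }, { 500, 600 } };
	struct tf_model m = { r, 2, 0, true, r[0].start };

	do {
		printf("boundary %llu (%s)\n",
		       (unsigned long long)m.sel_timestamp,
		       m.sel_start ? "start" : "end");
	} while (next_time(&m));
	return 0;
}
```
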
2912 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2914 const struct intel_pt_state *state = ptq->state;
2915 struct intel_pt *pt = ptq->pt;
2935 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2937 err = intel_pt_sample(ptq);
2941 state = intel_pt_decode(ptq->decoder);
2945 if (ptq->sync_switch &&
2947 ptq->sync_switch = false;
2948 intel_pt_next_tid(pt, ptq);
2950 ptq->timestamp = state->est_timestamp;
2952 err = intel_ptq_synth_error(ptq, state);
2959 ptq->state = state;
2960 ptq->have_sample = true;
2961 intel_pt_sample_flags(ptq);
2969 ptq->timestamp = state->est_timestamp;
2971 } else if (ptq->sync_switch &&
2972 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2973 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2974 ptq->next_tid == -1) {
2977 ptq->timestamp = state->est_timestamp;
2978 } else if (state->timestamp > ptq->timestamp) {
2979 ptq->timestamp = state->timestamp;
2982 if (ptq->sel_timestamp) {
2983 err = intel_pt_time_filter(ptq, &ff_timestamp);
2988 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2989 *timestamp = ptq->timestamp;
3013 struct intel_pt_queue *ptq;
3023 ptq = queue->priv;
3041 ret = intel_pt_run_decoder(ptq, &ts);
3053 ptq->on_heap = false;
3069 struct intel_pt_queue *ptq = queue->priv;
3071 if (ptq && (tid == -1 || ptq->tid == tid)) {
3072 ptq->time = time_;
3074 intel_pt_run_decoder(ptq, &ts);
3080 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
3084 struct machine *m = ptq->pt->machine;
3086 ptq->pid = sample->pid;
3087 ptq->tid = sample->tid;
3088 ptq->cpu = queue->cpu;
3091 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
3093 thread__zput(ptq->thread);
3095 if (ptq->tid == -1)
3098 if (ptq->pid == -1) {
3099 ptq->thread = machine__find_thread(m, -1, ptq->tid);
3100 if (ptq->thread)
3101 ptq->pid = thread__pid(ptq->thread);
3105 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
3112 struct intel_pt_queue *ptq;
3119 ptq = queue->priv;
3120 if (!ptq)
3123 ptq->stop = false;
3124 ptq->time = sample->time;
3125 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
3126 intel_pt_run_decoder(ptq, &ts);
3168 struct intel_pt_queue *ptq;
3174 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3175 if (!ptq || !ptq->sync_switch)
3178 switch (ptq->switch_state) {
3183 ptq->next_tid = tid;
3184 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
3187 if (!ptq->on_heap) {
3188 ptq->timestamp = perf_time_to_tsc(timestamp,
3190 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
3191 ptq->timestamp);
3194 ptq->on_heap = true;
3196 ptq->switch_state = INTEL_PT_SS_TRACING;
3205 ptq->next_tid = -1;
3244 struct intel_pt_queue *ptq;
3246 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3247 if (ptq && ptq->sync_switch) {
3248 ptq->next_tid = -1;
3249 switch (ptq->switch_state) {
3256 ptq->switch_state = INTEL_PT_SS_TRACING;
3362 struct intel_pt_queue *ptq;
3372 ptq = queue->priv;
3374 ptq->pebs[hw_id].evsel = evsel;
3375 ptq->pebs[hw_id].id = sample->id;
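
The final fragment (3362-3375) records PERF_RECORD_AUX_OUTPUT_HW_ID sideband: each queue keeps a small table mapping a hardware PEBS counter index to the evsel and sample id to synthesize against, which intel_pt_synth_pebs_sample() (2374-2396) later looks up by the hw_id decoded from the PEBS block. A sketch of that binding; the table size and the string standing in for struct evsel * are illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_PEBS 4  /* illustrative; see INTEL_PT_MAX_PEBS in intel-pt.c */

struct pebs_slot {
	const char *evsel_name;  /* stand-in for struct evsel * */
	uint64_t id;             /* perf sample id */
};

struct queue_model {
	struct pebs_slot pebs[MAX_PEBS];
};

/* On AUX_OUTPUT_HW_ID: bind hardware counter index -> event. */
static void bind_hw_id(struct queue_model *q, unsigned int hw_id,
		       const char *evsel_name, uint64_t id)
{
	if (hw_id < MAX_PEBS) {
		q->pebs[hw_id].evsel_name = evsel_name;
		q->pebs[hw_id].id = id;
	}
}

int main(void)
{
	struct queue_model q = { 0 };
	unsigned int hw_id = 1;  /* decoded from the PEBS block */

	bind_hw_id(&q, hw_id, "cycles:ppp", 42);
	if (q.pebs[hw_id].evsel_name)
		printf("synthesize for %s (id %llu)\n",
		       q.pebs[hw_id].evsel_name,
		       (unsigned long long)q.pebs[hw_id].id);
	return 0;
}
```
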