Lines Matching defs:etm (the number at the start of each matched line is its source line number in perf's util/cs-etm.c)

22 #include "cs-etm.h"
23 #include "cs-etm-decoder/cs-etm-decoder.h"
101 struct cs_etm_auxtrace *etm;
117 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
118 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
129 * encode the etm queue number as the upper 16 bit and the channel as
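
    A minimal sketch of the packed heap key this comment describes
    (macro names are illustrative assumptions, not taken from the
    matched lines):

        /* Pack an etm queue number and trace channel into one key:
         * upper 16 bits = queue number, lower 16 bits = channel. */
        #define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
                (((u64)(queue_nr) << 16) | (trace_chan_id))
        #define TO_QUEUE_NR(cs_queue_nr)      ((cs_queue_nr) >> 16)
        #define TO_TRACE_CHAN_ID(cs_queue_nr) ((cs_queue_nr) & 0xffff)
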
188 * The result is cached in etm->pid_fmt so this function only needs to be called
215 return etmq->etm->pid_fmt;
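
    Lines 188 and 215 describe a compute-once accessor; a sketch of
    that caching shape, assuming pid_fmt is filled in during setup
    (line 3303 below) and the function name is only suggested by the
    matched return statement:

        /* pid_fmt is derived from the trace configuration once at
         * setup, so per-packet lookups are a plain field read. */
        static enum cs_etm_pid_fmt cs_etm__get_pid_fmt(struct cs_etm_queue *etmq)
        {
                return etmq->etm->pid_fmt;
        }
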
289 static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
294 for (i = 0; i < etm->num_cpu; i++) {
295 if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
296 metadata = etm->metadata[i];
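
    Lines 289-296 outline a linear scan keyed on CS_ETM_CPU; a sketch
    of the complete lookup reconstructed from them (the break and the
    NULL default are assumptions):

        static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
        {
                int i;
                u64 *metadata = NULL;

                /* Metadata blocks are per traced CPU; return the one
                 * recorded for this CPU, or NULL if it never traced. */
                for (i = 0; i < etm->num_cpu; i++) {
                        if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
                                metadata = etm->metadata[i];
                                break;
                        }
                }
                return metadata;
        }
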
314 struct cs_etm_auxtrace *etm;
332 /* get access to the etm metadata */
333 etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
334 if (!etm || !etm->metadata)
373 cpu_data = get_cpu_data(etm, cpu);
470 struct cs_etm_auxtrace *etm = etmq->etm;
474 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
477 tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
479 tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host);
489 if (etm->synth_opts.last_branch) {
492 sz += etm->synth_opts.last_branch_sz *
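
    Lines 489-492 size the last-branch buffers; a sketch of the
    allocation, assuming the usual struct branch_stack layout with a
    flexible array of struct branch_entry at its tail:

        if (etm->synth_opts.last_branch) {
                /* Header plus last_branch_sz entries, one allocation;
                 * error handling omitted in this sketch. */
                size_t sz = sizeof(struct branch_stack) +
                            etm->synth_opts.last_branch_sz *
                            sizeof(struct branch_entry);

                tidq->last_branch = zalloc(sz);    /* linearised copy */
                tidq->last_branch_rb = zalloc(sz); /* decode-time ring */
        }
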
524 struct cs_etm_auxtrace *etm = etmq->etm;
526 if (etm->per_thread_decoding)
602 static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
607 if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
608 etm->synth_opts.instructions) {
644 struct cs_etm_auxtrace *etm, int idx,
647 u64 **metadata = etm->metadata;
655 struct cs_etm_auxtrace *etm, int idx)
657 u64 **metadata = etm->metadata;
669 struct cs_etm_auxtrace *etm, int idx)
671 u64 **metadata = etm->metadata;
684 struct cs_etm_auxtrace *etm,
692 architecture = etm->metadata[i][CS_ETM_MAGIC];
696 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
697 cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
700 cs_etm__set_trace_param_etmv4(t_params, etm, i);
703 cs_etm__set_trace_param_ete(t_params, etm, i);
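
    Lines 684-703 select decoder parameters from each CPU's magic
    word; a sketch of that dispatch (the __perf_cs_*_magic constants
    come from cs-etm.h; the error value is an assumption):

        switch (etm->metadata[i][CS_ETM_MAGIC]) {
        case __perf_cs_etmv3_magic:
                /* ETMv3/PTM additionally needs the ETMIDR register */
                etmidr = etm->metadata[i][CS_ETM_ETMIDR];
                cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
                break;
        case __perf_cs_etmv4_magic:
                cs_etm__set_trace_param_etmv4(t_params, etm, i);
                break;
        case __perf_cs_ete_magic:
                cs_etm__set_trace_param_ete(t_params, etm, i);
                break;
        default:
                return -EINVAL; /* unrecognised trace source */
        }
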
767 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
776 if (etm->timeless_decoding) {
781 return cs_etm__process_timeless_queues(etm, -1);
784 return cs_etm__process_timestamped_queues(etm);
896 return &etmq->etm->session->machines.host;
908 return machines__find_guest(&etmq->etm->session->machines,
915 return &etmq->etm->session->machines.host;
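
    Lines 896-915 pick the machine a decoded address belongs to; a
    sketch of the decision, assuming it keys off the decoder's
    exception level (guest_pid stands in for the second argument,
    which is truncated in the matched line):

        /* EL1 code may be a guest kernel: look the machine up among
         * the session's guests; everything else maps to the host. */
        if (el == ocsd_EL1)
                return machines__find_guest(&etmq->etm->session->machines,
                                            guest_pid);

        return &etmq->etm->session->machines.host;
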
1018 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
1028 int decoders = formatted ? etm->num_cpu : 1;
1044 if (cs_etm__init_trace_params(t_params, etm, decoders))
1081 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
1091 etmq = cs_etm__alloc_queue(etm, formatted);
1097 etmq->etm = etm;
1104 static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
1170 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
1200 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
1212 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
1293 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1308 if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
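
    Lines 1200 and 1212 are the two halves of linearising the branch
    ring buffer; a sketch of the copy, assuming last_branch_pos is the
    current write position:

        /* Chunk 1: from the write position to the end of the ring
         * (the most recent entries). */
        nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
        memcpy(&bs_dst->entries[0],
               &bs_src->entries[tidq->last_branch_pos],
               sizeof(struct branch_entry) * nr);

        /* Chunk 2: the wrapped part, only present once the ring has
         * filled at least once. */
        if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz)
                memcpy(&bs_dst->entries[nr], &bs_src->entries[0],
                       sizeof(struct branch_entry) * tidq->last_branch_pos);
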
1327 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
1344 int fd = perf_data__fd(etmq->etm->session->data);
1395 return !!etmq->etm->timeless_decoding;
1429 struct cs_etm_auxtrace *etm = etmq->etm;
1431 if (etm->has_virtual_ts)
1432 return tsc_to_perf_time(cs_timestamp, &etm->tc);
1440 struct cs_etm_auxtrace *etm = etmq->etm;
1443 if (!etm->timeless_decoding && etm->has_virtual_ts)
1446 return etm->latest_kernel_timestamp;
1454 struct cs_etm_auxtrace *etm = etmq->etm;
1462 /* Set time field based on etm auxtrace config. */
1468 sample.id = etmq->etm->instructions_id;
1469 sample.stream_id = etmq->etm->instructions_id;
1477 if (etm->synth_opts.last_branch)
1480 if (etm->synth_opts.inject) {
1482 etm->instructions_sample_type);
1487 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1498 * The cs etm packet encodes an instruction range between a branch target
1505 struct cs_etm_auxtrace *etm = etmq->etm;
1522 /* Set time field based on etm auxtrace config. */
1529 sample.id = etmq->etm->branches_id;
1530 sample.stream_id = etmq->etm->branches_id;
1542 if (etm->synth_opts.last_branch) {
1554 if (etm->synth_opts.inject) {
1556 etm->branches_sample_type);
1561 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1600 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
1611 if (evsel->core.attr.type == etm->pmu_type) {
1628 if (etm->timeless_decoding)
1647 if (etm->synth_opts.branches) {
1654 etm->branches_sample_type = attr.sample_type;
1655 etm->branches_id = id;
1660 if (etm->synth_opts.last_branch) {
1670 if (etm->synth_opts.instructions) {
1672 attr.sample_period = etm->synth_opts.period;
1673 etm->instructions_sample_period = attr.sample_period;
1677 etm->instructions_sample_type = attr.sample_type;
1678 etm->instructions_id = id;
1688 struct cs_etm_auxtrace *etm = etmq->etm;
1702 if (etm->synth_opts.last_branch &&
1707 if (etm->synth_opts.instructions &&
1708 tidq->period_instructions >= etm->instructions_sample_period) {
1734 * every etm->instructions_sample_period instructions - as
1736 * last sample before the current etm packet, n+1 to n+3
1737 * samples are generated from the current etm packet.
1740 * instructions in the current etm packet.
1744 * previous etm packet. This will always be less than
1745 * etm->instructions_sample_period.
1759 * etm->instructions_sample_period.
1761 u64 offset = etm->instructions_sample_period - instrs_prev;
1765 if (etm->synth_opts.last_branch)
1769 etm->instructions_sample_period) {
1780 etm->instructions_sample_period);
1784 offset += etm->instructions_sample_period;
1786 etm->instructions_sample_period;
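
    Lines 1734-1786 spread fixed-period samples across packet
    boundaries; a worked example of that arithmetic with assumed
    numbers (period = 1000, 300 instructions carried over, 2500
    executed in the current packet):

        /* instrs_prev = 300, so the first sample of this packet lands
         * period - instrs_prev = 700 instructions in:
         *   sample 1 at offset  700 (period_instructions 2800 -> 1800)
         *   sample 2 at offset 1700 (period_instructions 1800 ->  800)
         * 800 < period, so the remainder carries into the next packet. */
        u64 offset = etm->instructions_sample_period - instrs_prev;

        while (tidq->period_instructions >= etm->instructions_sample_period) {
                emit_sample_at(offset - 1); /* hypothetical helper; -1
                                             * because the sampled
                                             * instruction has already
                                             * executed */
                offset += etm->instructions_sample_period;
                tidq->period_instructions -= etm->instructions_sample_period;
        }
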
1790 if (etm->synth_opts.branches) {
1809 cs_etm__packet_swap(etm, tidq);
1837 struct cs_etm_auxtrace *etm = etmq->etm;
1843 if (etmq->etm->synth_opts.last_branch &&
1844 etmq->etm->synth_opts.instructions &&
1870 if (etm->synth_opts.branches &&
1878 cs_etm__packet_swap(etm, tidq);
1881 if (etm->synth_opts.last_branch)
1901 if (etmq->etm->synth_opts.last_branch &&
1902 etmq->etm->synth_opts.instructions &&
2507 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2511 struct auxtrace_queues *queues = &etm->queues;
2514 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2521 if (etm->per_thread_decoding) {
2537 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
2551 for (i = 0; i < etm->queues.nr_queues; i++) {
2552 etmq = etm->queues.queue_array[i].priv;
2556 ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2562 if (!etm->heap.heap_cnt)
2566 cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2569 queue = &etm->queues.queue_array[queue_nr];
2576 auxtrace_heap__pop(&etm->heap);
2643 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
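
    Lines 2537-2643 drive decode through a min-heap keyed by the
    packed queue number from line 129; a sketch of the loop's shape
    (the decode step itself is elided):

        while (etm->heap.heap_cnt) {
                /* The queue holding the oldest pending timestamp wins */
                cs_queue_nr = etm->heap.heap_array[0].queue_nr;
                queue = &etm->queues.queue_array[TO_QUEUE_NR(cs_queue_nr)];
                auxtrace_heap__pop(&etm->heap);

                /* decode this queue's trace up to its next timestamp,
                 * then re-queue it so queues interleave in time order */
                ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
                if (ret)
                        return ret;
        }
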
2650 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2655 if (etm->timeless_decoding)
2664 th = machine__findnew_thread(&etm->session->machines.host,
2675 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2685 if (etm->timeless_decoding)
2702 th = machine__findnew_thread(&etm->session->machines.host,
2718 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2740 if (etm->per_thread_decoding && etm->timeless_decoding)
2741 return cs_etm__process_timeless_queues(etm,
2746 return cs_etm__process_itrace_start(etm, event);
2749 return cs_etm__process_switch_cpu_wide(etm, event);
2758 etm->latest_kernel_timestamp = sample->time;
2768 static void dump_queued_data(struct cs_etm_auxtrace *etm,
2778 for (i = 0; i < etm->queues.nr_queues; ++i)
2779 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2781 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2788 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2791 if (!etm->data_queued) {
2807 err = auxtrace_queues__add_event(&etm->queues, session,
2818 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2825 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
2829 dump_queued_data(etm, &event->auxtrace);
2834 static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
2837 struct evlist *evlist = etm->session->evlist;
2840 if (etm->synth_opts.timeless_decoding) {
2841 etm->timeless_decoding = true;
2849 if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
2850 etm->timeless_decoding =
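
    Lines 2834-2850 choose between the timeless and timestamped
    paths; a sketch of the detection, assuming the timestamp option
    is a bit in the recorded event's config word (ETM_OPT_TS and the
    --itrace=Z reading are my assumptions):

        /* An explicit --itrace request forces timeless decode */
        if (etm->synth_opts.timeless_decoding) {
                etm->timeless_decoding = true;
                return 0;
        }

        /* Otherwise decode timeless iff the ETM event was recorded
         * without timestamps */
        evlist__for_each_entry(evlist, evsel) {
                if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
                        etm->timeless_decoding =
                                !(evsel->core.attr.config & BIT(ETM_OPT_TS));
                        return 0;
                }
        }
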
2942 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2971 etm->per_thread_decoding = true;
2975 if (etm->per_thread_decoding) {
3017 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
3024 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
3113 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
3213 struct cs_etm_auxtrace *etm = NULL;
3291 etm = zalloc(sizeof(*etm));
3293 if (!etm) {
3303 etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]);
3305 err = auxtrace_queues__init(&etm->queues);
3310 etm->synth_opts = *session->itrace_synth_opts;
3312 itrace_synth_opts__set_default(&etm->synth_opts,
3314 etm->synth_opts.callchain = false;
3317 etm->session = session;
3319 etm->num_cpu = num_cpu;
3320 etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
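
    Line 3320 unpacks the upper half of a packed header word; a
    sketch of the full layout implied by the shift and mask (the
    lower-half meaning is an assumption based on the name
    CS_PMU_TYPE_CPUS):

        /* One u64 carries two values:
         *   bits 63..32  pmu_type of the cs_etm event
         *   bits 31..0   number of CPUs described by the metadata
         */
        etm->pmu_type = (unsigned int)((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
        num_cpu       = (int)(ptr[CS_PMU_TYPE_CPUS] & 0xffffffff);
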
3321 etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
3322 etm->metadata = metadata;
3323 etm->auxtrace_type = auxtrace_info->type;
3326 etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
3328 if (!etm->has_virtual_ts)
3332 etm->auxtrace.process_event = cs_etm__process_event;
3333 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
3334 etm->auxtrace.flush_events = cs_etm__flush_events;
3335 etm->auxtrace.free_events = cs_etm__free_events;
3336 etm->auxtrace.free = cs_etm__free;
3337 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
3338 session->auxtrace = &etm->auxtrace;
3340 err = cs_etm__setup_timeless_decoding(etm);
3344 etm->tc.time_shift = tc->time_shift;
3345 etm->tc.time_mult = tc->time_mult;
3346 etm->tc.time_zero = tc->time_zero;
3348 etm->tc.time_cycles = tc->time_cycles;
3349 etm->tc.time_mask = tc->time_mask;
3350 etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
3351 etm->tc.cap_user_time_short = tc->cap_user_time_short;
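
    Lines 3344-3351 copy the session's TSC conversion parameters so
    that line 1432 can map raw trace timestamps onto perf time; a
    sketch of the standard perf conversion those fields feed (mirrors
    tsc_to_perf_time() in perf's tsc.c):

        /* Short (wrapping) counters are first extended using
         * time_cycles/time_mask, then scaled by time_mult with a
         * time_shift fixed-point divide. */
        if (tc->cap_user_time_short)
                cyc = tc->time_cycles +
                      ((cyc - tc->time_cycles) & tc->time_mask);

        quot = cyc >> tc->time_shift;
        rem  = cyc & (((u64)1 << tc->time_shift) - 1);
        return tc->time_zero + quot * tc->time_mult +
               ((rem * tc->time_mult) >> tc->time_shift);
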
3353 err = cs_etm__synth_events(etm, session);
3401 etm->data_queued = etm->queues.populated;
3405 auxtrace_queues__free(&etm->queues);
3408 zfree(&etm);