Lines matching refs:event (Intel PEBS/BTS handling, arch/x86/events/intel/ds.c)

213 static u64 precise_datala_hsw(struct perf_event *event, u64 status)
219 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
221 else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
232 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
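The four matches above are the body of precise_datala_hsw(): the event's hw.flags decide whether the sample is decoded as a load or a store, and for the store events the low status bit reports an L1 hit or miss. A minimal userspace sketch of that dispatch; the EVT_* flag values and the data_src layout are stand-ins for this sketch, not the kernel's PERF_X86_EVENT_* or perf_mem_data_src definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in flag bits; the kernel's PERF_X86_EVENT_* values differ. */
    #define EVT_PEBS_LD_HSW (1u << 0)
    #define EVT_PEBS_ST_HSW (1u << 1)

    enum mem_op  { OP_NA, OP_LOAD, OP_STORE };
    enum mem_lvl { LVL_NA, LVL_L1_HIT, LVL_L1_MISS };

    struct data_src { enum mem_op op; enum mem_lvl lvl; };

    static struct data_src decode_datala_hsw(unsigned int flags, uint64_t status)
    {
        struct data_src dse = { OP_NA, LVL_NA };

        if (flags & EVT_PEBS_ST_HSW)
            dse.op = OP_STORE;
        else if (flags & EVT_PEBS_LD_HSW)
            dse.op = OP_LOAD;

        /* For the store events only, status bit 0 encodes L1 hit vs. miss. */
        if (flags & EVT_PEBS_ST_HSW)
            dse.lvl = (status & 1) ? LVL_L1_HIT : LVL_L1_MISS;

        return dse;
    }

    int main(void)
    {
        struct data_src d = decode_datala_hsw(EVT_PEBS_ST_HSW, 0x1);

        printf("op=%d lvl=%d\n", d.op, d.lvl);
        return 0;
    }
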
259 static u64 __adl_latency_data_small(struct perf_event *event, u64 status,
264 WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big);
267 val = hybrid_var(event->pmu, pebs_data_source)[dse];
279 u64 adl_latency_data_small(struct perf_event *event, u64 status)
285 return __adl_latency_data_small(event, status, dse.ld_dse,
291 u64 mtl_latency_data_small(struct perf_event *event, u64 status)
297 return __adl_latency_data_small(event, status, dse.mtl_dse,
302 static u64 load_latency_data(struct perf_event *event, u64 status)
312 val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
351 static u64 store_latency_data(struct perf_event *event, u64 status)
362 val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse];
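load_latency_data(), store_latency_data() and the hybrid *_latency_data_small() variants all reduce to the step visible at lines 267, 312 and 362: extract the DSE field from the PEBS status word and translate it through a per-PMU pebs_data_source[] table. A sketch of that lookup; the 4-bit field width and the table strings are invented for illustration (the kernel stores perf_mem_data_src encodings):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy translation table; the kernel fills pebs_data_source[] with
     * per-microarchitecture memory data-source encodings. */
    static const char *pebs_data_source[16] = {
        [0x0] = "unknown",
        [0x1] = "L1 hit",
        [0x2] = "line fill buffer hit",
        [0x3] = "L2 hit",
        /* remaining entries default to NULL in this sketch */
    };

    static const char *latency_data(uint64_t status)
    {
        unsigned int dse = status & 0xf;    /* assumed 4-bit DSE field */
        const char *src = pebs_data_source[dse];

        return src ? src : "unknown";
    }

    int main(void)
    {
        printf("%s\n", latency_data(0x3));
        return 0;
    }
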
764 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
772 if (!event)
788 perf_sample_data_init(&data, 0, event->hw.last_period);
806 if (event->attr.exclude_kernel &&
817 perf_prepare_sample(&data, event, &regs);
818 perf_prepare_header(&header, &data, event, &regs);
820 if (perf_output_begin(&handle, &data, event,
826 if (event->attr.exclude_kernel &&
833 perf_output_sample(&handle, &header, &data, event);
839 event->hw.interrupts++;
840 event->pending_kill = POLL_IN;
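The matches from lines 764 to 840 trace the BTS drain path: initialize sample data, drop kernel branches when attr.exclude_kernel is set, prepare sample and header, open the output handle, emit one sample per BTS record, then bump hw.interrupts and signal POLL_IN. A stubbed model of the filtering and ordering; the record layout and is_kernel_ip() address cutoff are assumptions of this sketch, and emit_sample() stands in for the whole perf_output_*() sequence:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in BTS record; the hardware writes {from, to, flags} triples. */
    struct bts_record { unsigned long long from, to, flags; };

    /* Assumed kernel/user address split; the kernel uses kernel_ip(). */
    static bool is_kernel_ip(unsigned long long ip)
    {
        return ip >= 0xffff800000000000ULL;
    }

    /* Placeholder for the perf_output_*() sequence: just count samples. */
    static void emit_sample(const struct bts_record *br, int *emitted)
    {
        (void)br;
        (*emitted)++;
    }

    static int drain_bts(const struct bts_record *base, size_t n,
                         bool exclude_kernel)
    {
        int emitted = 0;

        for (size_t i = 0; i < n; i++) {
            /* Mirror the exclude_kernel tests at lines 806/826: skip
             * records whose branch source is a kernel address. */
            if (exclude_kernel && is_kernel_ip(base[i].from))
                continue;
            emit_sample(&base[i], &emitted);
        }
        return emitted; /* caller then bumps hw.interrupts, sets POLL_IN */
    }

    int main(void)
    {
        struct bts_record buf[2] = {
            { 0xffff800000001000ULL, 0x401000, 0 },
            { 0x400000, 0x401000, 0 },
        };

        printf("emitted %d\n", drain_bts(buf, 2, true));
        return 0;
    }
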
1088 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
1090 struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
1093 if (!event->attr.precise_ip)
1098 if (constraint_match(c, event->hw.config)) {
1099 event->hw.flags |= c->flags;
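intel_pebs_constraints() only applies to precise events (line 1093 bails when attr.precise_ip is zero) and otherwise scans the per-PMU table for the first constraint whose mask matches hw.config, merging that constraint's flags into the event (line 1099). A table-scan sketch under those rules; the code/cmask values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    struct event_constraint {
        uint64_t code;
        uint64_t cmask;
        unsigned int flags;
    };

    /* Hypothetical table; real tables are per-microarchitecture and end
     * with an empty sentinel entry. */
    static const struct event_constraint pebs_constraints[] = {
        { 0x01c2, 0xffff, 0x1 },
        { 0, 0, 0 },
    };

    static const struct event_constraint *find_pebs_constraint(uint64_t config,
                                                               int precise_ip)
    {
        if (!precise_ip)
            return NULL; /* PEBS constraints only matter for precise events */

        for (const struct event_constraint *c = pebs_constraints; c->cmask; c++) {
            if ((config & c->cmask) == c->code)
                return c; /* caller merges c->flags into event->hw.flags */
        }
        return NULL;
    }

    int main(void)
    {
        const struct event_constraint *c = find_pebs_constraint(0x01c2, 2);

        printf("%s\n", c ? "matched" : "no match");
        return 0;
    }
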
1186 static u64 pebs_update_adaptive_cfg(struct perf_event *event)
1188 struct perf_event_attr *attr = &event->attr;
1203 * + precise_ip < 2 for the non event IP
1234 struct perf_event *event, bool add)
1236 struct pmu *pmu = event->pmu;
1240 * event. It will trigger also during removal, but
1262 pebs_data_cfg = pebs_update_adaptive_cfg(event);
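pebs_update_adaptive_cfg() derives the adaptive PEBS record layout from what the event asked to sample: memory info for data-source or address sampling, GPRs when registers were requested or when precise_ip < 2 (the comment at line 1203, since the non-event IP must then be reconstructed), LBR entries for branch stacks. A sketch of that bit derivation; all SAMPLE_* and DATACFG_* values here are illustrative, not the kernel's PERF_SAMPLE_* or PEBS_DATACFG_* constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sample_type and data-config bits for this sketch. */
    #define SAMPLE_ADDR     (1u << 0)
    #define SAMPLE_DATA_SRC (1u << 1)
    #define SAMPLE_REGS     (1u << 2)
    #define SAMPLE_BRANCHES (1u << 3)

    #define DATACFG_MEMINFO (1u << 0)
    #define DATACFG_GP      (1u << 1)
    #define DATACFG_LBRS    (1u << 2)

    static uint64_t update_adaptive_cfg(unsigned int sample_type, int precise_ip)
    {
        uint64_t cfg = 0;

        if (sample_type & (SAMPLE_ADDR | SAMPLE_DATA_SRC))
            cfg |= DATACFG_MEMINFO;

        /* GPRs are needed when requested, and also when precise_ip < 2,
         * since the non-event IP must be reconstructed from them. */
        if ((sample_type & SAMPLE_REGS) || precise_ip < 2)
            cfg |= DATACFG_GP;

        if (sample_type & SAMPLE_BRANCHES)
            cfg |= DATACFG_LBRS;

        return cfg;
    }

    int main(void)
    {
        printf("cfg=%#llx\n",
               (unsigned long long)update_adaptive_cfg(SAMPLE_DATA_SRC, 1));
        return 0;
    }
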
1271 void intel_pmu_pebs_add(struct perf_event *event)
1274 struct hw_perf_event *hwc = &event->hw;
1283 pebs_update_state(needed_cb, cpuc, event, true);
1286 static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
1290 if (!is_pebs_pt(event))
1297 static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
1300 struct hw_perf_event *hwc = &event->hw;
1306 if (!is_pebs_pt(event))
1309 if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
1332 void intel_pmu_pebs_enable(struct perf_event *event)
1336 struct hw_perf_event *hwc = &event->hw;
1344 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
1346 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1386 intel_pmu_pebs_via_pt_enable(event);
1389 void intel_pmu_pebs_del(struct perf_event *event)
1392 struct hw_perf_event *hwc = &event->hw;
1401 pebs_update_state(needed_cb, cpuc, event, false);
1404 void intel_pmu_pebs_disable(struct perf_event *event)
1407 struct hw_perf_event *hwc = &event->hw;
1413 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
1416 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1419 intel_pmu_pebs_via_pt_disable(event);
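intel_pmu_pebs_enable()/intel_pmu_pebs_disable() maintain a per-CPU PEBS enable bitmask keyed off the same hw.flags tests matched above: the counter's own bit, a load-latency companion bit on pre-version-5 PMUs, and a precise-store bit. A sketch assuming the classic MSR_IA32_PEBS_ENABLE layout (bit idx, bit idx + 32, bit 63); treat that layout as this sketch's assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in event flags for the sketch. */
    #define EVT_PEBS_LDLAT (1u << 0)
    #define EVT_PEBS_ST    (1u << 1)

    /* Assumed layout: bit idx arms PEBS on counter idx, bit (idx + 32)
     * the load-latency facility, bit 63 precise stores. */
    static uint64_t pebs_enable(uint64_t mask, int idx, unsigned int flags,
                                int pmu_version)
    {
        mask |= 1ULL << idx;

        if ((flags & EVT_PEBS_LDLAT) && pmu_version < 5)
            mask |= 1ULL << (idx + 32);
        else if (flags & EVT_PEBS_ST)
            mask |= 1ULL << 63;

        return mask;
    }

    static uint64_t pebs_disable(uint64_t mask, int idx, unsigned int flags,
                                 int pmu_version)
    {
        mask &= ~(1ULL << idx);

        if ((flags & EVT_PEBS_LDLAT) && pmu_version < 5)
            mask &= ~(1ULL << (idx + 32));
        else if (flags & EVT_PEBS_ST)
            mask &= ~(1ULL << 63);

        return mask;
    }

    int main(void)
    {
        uint64_t m = pebs_enable(0, 1, EVT_PEBS_LDLAT, 4);

        printf("enabled:  %#llx\n", (unsigned long long)m);
        m = pebs_disable(m, 1, EVT_PEBS_LDLAT, 4);
        printf("disabled: %#llx\n", (unsigned long long)m);
        return 0;
    }
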
1567 static u64 get_data_src(struct perf_event *event, u64 aux)
1570 int fl = event->hw.flags;
1574 val = load_latency_data(event, aux);
1576 val = store_latency_data(event, aux);
1578 val = x86_pmu.pebs_latency_data(event, aux);
1580 val = precise_datala_hsw(event, aux);
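get_data_src() is a pure flag dispatch, visible in lines 1574 to 1580: load-latency events decode the auxiliary word one way, store-latency events another, hybrid PMUs go through the pebs_latency_data callback, and Haswell data-LA events fall back to precise_datala_hsw(). A compact model of that ordering with dummy decoders (the hybrid callback branch is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    #define FL_LDLAT      (1u << 0)
    #define FL_ST         (1u << 1)
    #define FL_HSW_DATALA (1u << 2)

    /* Dummy decoders; the real ones build memory data-source values. */
    static uint64_t load_latency_data(uint64_t aux)  { return aux & 0x0f; }
    static uint64_t store_latency_data(uint64_t aux) { return aux & 0xf0; }
    static uint64_t datala_hsw(uint64_t aux)         { return aux & 0x01; }

    /* First matching capability wins, mirroring the order at 1574-1580. */
    static uint64_t get_data_src(unsigned int flags, uint64_t aux)
    {
        if (flags & FL_LDLAT)
            return load_latency_data(aux);
        if (flags & FL_ST)
            return store_latency_data(aux);
        if (flags & FL_HSW_DATALA)
            return datala_hsw(aux);
        return 0; /* "not available" (PERF_MEM_NA in the kernel) */
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)get_data_src(FL_ST, 0xab));
        return 0;
    }
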
1586 static void setup_pebs_time(struct perf_event *event,
1591 if (event->attr.use_clockid != 0)
1611 static void setup_pebs_fixed_sample_data(struct perf_event *event,
1628 sample_type = event->attr.sample_type;
1629 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
1631 perf_sample_data_init(data, 0, event->hw.last_period);
1633 data->period = event->hw.last_period;
1647 data->data_src.val = get_data_src(event, pebs->dse);
1658 perf_sample_save_callchain(data, event, iregs);
1699 if (event->attr.precise_ip > 1) {
1755 setup_pebs_time(event, data, pebs->tsc);
1757 if (has_branch_stack(event))
1758 perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1792 static void setup_pebs_adaptive_sample_data(struct perf_event *event,
1812 sample_type = event->attr.sample_type;
1814 perf_sample_data_init(data, 0, event->hw.last_period);
1815 data->period = event->hw.last_period;
1817 setup_pebs_time(event, data, basic->tsc);
1826 perf_sample_save_callchain(data, event, iregs);
1850 if (event->attr.precise_ip < 2) {
1884 data->data_src.val = get_data_src(event, meminfo->aux);
1913 if (has_branch_stack(event)) {
1915 perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1964 void intel_pmu_auto_reload_read(struct perf_event *event)
1966 WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
1968 perf_pmu_disable(event->pmu);
1970 perf_pmu_enable(event->pmu);
1977 intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1979 struct hw_perf_event *hwc = &event->hw;
2025 local64_add(new - old + count * period, &event->count);
2029 perf_event_update_userpage(event);
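Line 2025 is the heart of auto-reload accounting: the counter reloads to -period at every overflow, so after draining count records the event advanced by count * period plus the partial distance new - old since the last reload, with both raw values sign-extended from the counter width. A self-contained worked example of that arithmetic (48-bit counter width assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend a raw counter value of 'bits' width, the same
     * (value << shift) >> shift idiom the kernel uses. */
    static int64_t sext(uint64_t raw, int bits)
    {
        int shift = 64 - bits;

        return ((int64_t)(raw << shift)) >> shift;
    }

    /* "new - old + count * period": each of the count drained records
     * accounts for one full auto-reload period, and new - old covers
     * the partial period since the last reload. */
    static int64_t reload_delta(uint64_t old_raw, uint64_t new_raw,
                                int count, int64_t period, int cntval_bits)
    {
        int64_t old = sext(old_raw, cntval_bits);
        int64_t new = sext(new_raw, cntval_bits);

        return new - old + (int64_t)count * period;
    }

    int main(void)
    {
        /* 48-bit counter, period 1000: start at -1000, overflow once
         * (reload to -1000), then count 300 more events to reach -700.
         * The delta is 1000 + 300 = 1300. */
        uint64_t old_raw = (uint64_t)-1000 & ((1ULL << 48) - 1);
        uint64_t new_raw = (uint64_t)-700 & ((1ULL << 48) - 1);

        printf("delta=%lld\n",
               (long long)reload_delta(old_raw, new_raw, 1, 1000, 48));
        return 0;
    }
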
2035 __intel_pmu_pebs_event(struct perf_event *event,
2047 struct hw_perf_event *hwc = &event->hw;
2060 intel_pmu_save_and_restart_reload(event, count);
2061 } else if (!intel_pmu_save_and_restart(event))
2068 setup_sample(event, iregs, at, data, regs);
2069 perf_event_output(event, data, regs);
2075 setup_sample(event, iregs, at, data, regs);
2083 perf_event_output(event, data, regs);
2089 if (perf_event_overflow(event, data, regs))
2090 x86_pmu_stop(event, 0);
2098 struct perf_event *event = cpuc->events[0]; /* PMC0 only */
2116 WARN_ON_ONCE(!event);
2118 if (!event->attr.precise_ip)
2123 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2124 intel_pmu_save_and_restart_reload(event, 0);
2128 __intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
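__intel_pmu_pebs_event() (lines 2035 to 2090) treats the last drained record specially: every earlier record is emitted straight through perf_event_output(), and only the final one runs the perf_event_overflow() path, so throttling and period updates happen once per drain rather than once per record; if overflow says stop, the event is stopped via x86_pmu_stop(). A sketch of that loop shape with stand-in emit/overflow functions:

    #include <stdbool.h>
    #include <stdio.h>

    struct pebs_record { int dummy; };

    /* Stand-in for setup_sample() + perf_event_output(). */
    static void emit(const struct pebs_record *r)
    {
        (void)r;
        puts("sample");
    }

    /* Stand-in for perf_event_overflow(); true means "stop the event". */
    static bool overflow(const struct pebs_record *r)
    {
        (void)r;
        puts("overflow");
        return false;
    }

    /* All records but the last are emitted directly; only the final one
     * takes the overflow path. */
    static void pebs_event(const struct pebs_record *base, int n)
    {
        for (int i = 0; i < n - 1; i++)
            emit(&base[i]);

        if (n > 0 && overflow(&base[n - 1])) {
            /* the kernel would x86_pmu_stop() the event here */
        }
    }

    int main(void)
    {
        struct pebs_record recs[3] = { {0}, {0}, {0} };

        pebs_event(recs, 3);
        return 0;
    }
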
2134 struct perf_event *event;
2139 * for auto-reload event in pmu::read(). There are no
2142 * update the event->count for this case.
2145 event = cpuc->events[bit];
2146 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2147 intel_pmu_save_and_restart_reload(event, 0);
2155 struct perf_event *event;
2202 * case when there is only a single active PEBS event
2203 * we can assume it's for that event.
2242 event = cpuc->events[bit];
2243 if (WARN_ON_ONCE(!event))
2246 if (WARN_ON_ONCE(!event->attr.precise_ip))
2251 perf_log_lost_samples(event, error[bit]);
2253 if (iregs && perf_event_account_interrupt(event))
2254 x86_pmu_stop(event, 0);
2258 __intel_pmu_pebs_event(event, iregs, data, base,
2272 struct perf_event *event;
2308 event = cpuc->events[bit];
2309 if (WARN_ON_ONCE(!event))
2312 if (WARN_ON_ONCE(!event->attr.precise_ip))
2315 __intel_pmu_pebs_event(event, iregs, data, base,
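The Ice Lake drain (the matches from line 2272 on), like the Nehalem one before it, resolves each record back to its owning events through a counter bitmask and then hands every active event its record count. A sketch of that tally step; the applicable_counters field name follows the adaptive PEBS basic record, but the struct here is reduced to just that mask:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PEBS_EVENTS 8

    /* Reduced record: only the counter bitmask from the basic group. */
    struct pebs_rec { uint64_t applicable_counters; };

    /* Tally how many records belong to each counter, then report; the
     * kernel instead calls __intel_pmu_pebs_event() per active counter. */
    static void drain(const struct pebs_rec *base, int n)
    {
        int counts[MAX_PEBS_EVENTS] = { 0 };

        for (int i = 0; i < n; i++) {
            uint64_t mask = base[i].applicable_counters;

            for (int bit = 0; bit < MAX_PEBS_EVENTS; bit++)
                if (mask & (1ULL << bit))
                    counts[bit]++;
        }

        for (int bit = 0; bit < MAX_PEBS_EVENTS; bit++)
            if (counts[bit])
                printf("counter %d: %d records\n", bit, counts[bit]);
    }

    int main(void)
    {
        struct pebs_rec recs[3] = { { 0x1 }, { 0x3 }, { 0x2 } };

        drain(recs, 3);
        return 0;
    }
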