Lines Matching refs:event

57 * EN bit and clearing STARTED, still see STARTED set and process the event.
58 * If this event will have the VALID bit clear, we bail properly, but this
79 struct perf_event *event;
144 perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
146 struct hw_perf_event *hwc = &event->hw;
152 * Careful: an NMI might modify the previous event value.
156 * count to the generic event atomically:
166 * (event-)time and add that to the generic event.
174 local64_add(delta, &event->count);
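
The perf_event_try_update() fragment above (144-174) is the usual NMI-safe counter update: read the previous value, atomically swap in the new raw count, and only the winner of that race folds the delta into the generic event. A minimal sketch of the pattern, assuming the standard local64_* helpers and the hw_perf_event fields shown above (everything else is illustrative, not quoted from the file):

    /* Sketch only: the lock-free update the comments above describe. */
    static int try_update_sketch(struct perf_event *event, u64 new_raw_count, int width)
    {
            struct hw_perf_event *hwc = &event->hw;
            int shift = 64 - width;
            u64 prev_raw_count, delta;

            /* An NMI may race with us; only the cmpxchg winner accounts the delta. */
            prev_raw_count = local64_read(&hwc->prev_count);
            if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count)
                    return 0;       /* lost the race: caller re-reads the MSR and retries */

            /* Sign-extend through the counter width before taking the difference. */
            delta = (new_raw_count << shift) - (prev_raw_count << shift);
            delta >>= shift;

            local64_add(delta, &event->count);
            return 1;
    }

The width argument exists because not all hardware sign-extends above the physical width of the counter; shifting both values up and the delta back down normalises that.
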
203 static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
205 switch (event->attr.type) {
207 switch (event->attr.config) {
214 switch (event->attr.config) {
236 int forward_event_to_ibs(struct perf_event *event)
240 if (!event->attr.precise_ip || event->attr.precise_ip > 2)
243 if (!core_pmu_ibs_config(event, &config)) {
244 event->attr.type = perf_ibs_op.pmu.type;
245 event->attr.config = config;
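
forward_event_to_ibs() (the 236-245 block) is the hook that lets a core-PMU event with precise_ip 1 or 2 be serviced by IBS instead: core_pmu_ibs_config() maps the core event onto an equivalent IBS op config, and the event's attr is rewritten to the IBS op PMU type. Filling in the surrounding lines the search omits, with the return codes treated as assumptions based on the visible control flow:

    /* Sketch: forward a precise core event to the IBS op PMU. */
    int forward_event_to_ibs(struct perf_event *event)
    {
            u64 config = 0;

            /* IBS supports precise levels 1 and 2 only. */
            if (!event->attr.precise_ip || event->attr.precise_ip > 2)
                    return -EOPNOTSUPP;

            /* If the core event maps onto an IBS op config, rewrite the attr. */
            if (!core_pmu_ibs_config(event, &config)) {
                    event->attr.type = perf_ibs_op.pmu.type;
                    event->attr.config = config;
            }
            /* -ENOENT lets the core retry event init against the rewritten type. */
            return -ENOENT;
    }
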
252 * one event active at any point in time.
254 static int validate_group(struct perf_event *event)
258 if (event->group_leader == event)
261 if (event->group_leader->pmu == event->pmu)
264 for_each_sibling_event(sibling, event->group_leader) {
265 if (sibling->pmu == event->pmu)
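
validate_group() (252-265) enforces the constraint stated in the comment above it: each IBS PMU can have only one event active at a time, so a group may contain at most one event from a given IBS PMU. Reconstructed from the visible checks, with the 0/-EINVAL return values assumed:

    /* Sketch: reject groups with more than one event of this IBS PMU. */
    static int validate_group(struct perf_event *event)
    {
            struct perf_event *sibling;

            if (event->group_leader == event)
                    return 0;       /* event is its own group leader, nothing to check */

            if (event->group_leader->pmu == event->pmu)
                    return -EINVAL; /* leader already occupies this IBS PMU */

            for_each_sibling_event(sibling, event->group_leader) {
                    if (sibling->pmu == event->pmu)
                            return -EINVAL;
            }
            return 0;
    }
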
271 static int perf_ibs_init(struct perf_event *event)
273 struct hw_perf_event *hwc = &event->hw;
278 perf_ibs = get_ibs_pmu(event->attr.type);
282 config = event->attr.config;
284 if (event->pmu != &perf_ibs->pmu)
290 ret = validate_group(event);
298 if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
311 event->attr.sample_period = max_cnt << 4;
312 hwc->sample_period = event->attr.sample_period;
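
The sample_period handling in perf_ibs_init() (298-312) reflects how IBS encodes its period: the hardware max-count field counts in units of 16, so the low 4 bits of an explicitly requested period are not representable, and a count taken from the raw config has to be scaled up by 16. A sketch of just that branch; cnt_mask (the max-count field mask) and the error return are assumptions, only the lines quoted above come from the file:

    /* Sketch: map attr.sample_period onto the 16-granular IBS max-count field. */
    if (hwc->sample_period) {
            /* An explicit period must honour the hardware granularity of 16. */
            if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
                    return -EINVAL;
    } else {
            /* Period supplied via the raw config: max_cnt is in units of 16. */
            max_cnt = config & perf_ibs->cnt_mask;
            config &= ~perf_ibs->cnt_mask;
            event->attr.sample_period = max_cnt << 4;
            hwc->sample_period = event->attr.sample_period;
    }
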
372 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
382 while (!perf_event_try_update(event, count, 64)) {
383 rdmsrl(event->hw.config_base, *config);
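
perf_ibs_event_update() (372-383) wraps the try-update in a retry loop: if an NMI won the race, the control MSR is re-read and the count extracted again. A minimal sketch, assuming a perf_ibs->get_count() callback that pulls the current count out of the control register value:

    /* Sketch: retry until the count update sticks. */
    u64 count = perf_ibs->get_count(*config);

    while (!perf_event_try_update(event, count, 64)) {
            rdmsrl(event->hw.config_base, *config);
            count = perf_ibs->get_count(*config);
    }
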
418 * the event while stopping it and then reset the state when starting
422 static void perf_ibs_start(struct perf_event *event, int flags)
424 struct hw_perf_event *hwc = &event->hw;
425 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
450 perf_event_update_userpage(event);
453 static void perf_ibs_stop(struct perf_event *event, int flags)
455 struct hw_perf_event *hwc = &event->hw;
456 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
503 perf_ibs_event_update(perf_ibs, event, &config);
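
The start/stop pair (422-503) follows the comment at 418: IBS state cannot be saved and restored, so stop() always folds the hardware count into the generic event and start() always reprograms the period from scratch. A rough sketch of the stop side's state-flag handling, with the IBS MSR programming reduced to comments:

    /* Sketch of the pmu->stop() contract; hardware details elided. */
    static void ibs_stop_sketch(struct perf_event *event, int flags)
    {
            struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
            struct hw_perf_event *hwc = &event->hw;
            u64 config;

            if (!(hwc->state & PERF_HES_STOPPED)) {
                    /* ... clear the enable bit in the IBS control MSR ... */
                    hwc->state |= PERF_HES_STOPPED;
            }
            if (hwc->state & PERF_HES_UPTODATE)
                    return;

            /* Always fold the final hardware count back (PERF_EF_UPDATE semantics). */
            rdmsrl(hwc->config_base, config);
            perf_ibs_event_update(perf_ibs, event, &config);
            hwc->state |= PERF_HES_UPTODATE;
    }
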
507 static int perf_ibs_add(struct perf_event *event, int flags)
509 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
515 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
517 pcpu->event = event;
520 perf_ibs_start(event, PERF_EF_RELOAD);
525 static void perf_ibs_del(struct perf_event *event, int flags)
527 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
533 perf_ibs_stop(event, PERF_EF_UPDATE);
535 pcpu->event = NULL;
537 perf_event_update_userpage(event);
540 static void perf_ibs_read(struct perf_event *event) { }
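
perf_ibs_add()/perf_ibs_del() (507-537) attach and detach the single per-CPU IBS event: add() marks the event stopped and up to date, records it in the per-CPU slot and optionally starts it; del() stops it with PERF_EF_UPDATE so the final count lands in the generic event before the slot is cleared. A sketch of that flow; struct and field names beyond those quoted in the listing (cpu_perf_ibs, perf_ibs->pcpu) are taken on trust, and the per-CPU enable bitmask bookkeeping is paraphrased:

    static int ibs_add_sketch(struct perf_event *event, int flags)
    {
            struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
            struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

            event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
            pcpu->event = event;    /* only one IBS event per CPU and PMU */

            if (flags & PERF_EF_START)
                    perf_ibs_start(event, PERF_EF_RELOAD);
            return 0;
    }

    static void ibs_del_sketch(struct perf_event *event, int flags)
    {
            struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
            struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

            perf_ibs_stop(event, PERF_EF_UPDATE);   /* fold in the final count */
            pcpu->event = NULL;
            perf_event_update_userpage(event);
    }
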
1022 struct perf_event *event = pcpu->event;
1046 if (WARN_ON_ONCE(!event))
1049 hwc = &event->hw;
1057 perf_ibs_event_update(perf_ibs, event, config);
1067 offset_max = perf_ibs_get_offset_max(perf_ibs, event->attr.sample_type, check_rip);
1081 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
1111 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
1122 perf_ibs_parse_ld_st_data(event->attr.sample_type, &ibs_data, &data);
1129 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
1130 perf_sample_save_callchain(&data, event, iregs);
1132 throttle = perf_event_overflow(event, &data, &regs);
1135 perf_ibs_stop(event, 0);
1150 perf_event_update_userpage(event);
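
The NMI handler fragments (1022-1150) outline the per-sample path: pick up the per-CPU event, fold the count in, gather the IBS MSR window only when PERF_SAMPLE_RAW asks for it, attach load/store and callchain data, and hand the sample to perf_event_overflow(); a throttled event is stopped immediately. Compressed into a hedged outline, with the MSR window read and RIP fixup left as comments and the validity checks of the real handler omitted:

    /* Sketch: the sampling path inside the IBS NMI handler. */
    static int ibs_handle_irq_sketch(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
    {
            struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
            struct perf_event *event = pcpu->event;
            struct perf_sample_data data;
            struct pt_regs regs;
            u64 config;
            int throttle;

            if (WARN_ON_ONCE(!event))
                    return 0;

            rdmsrl(event->hw.config_base, config);
            perf_ibs_event_update(perf_ibs, event, &config);

            perf_sample_data_init(&data, 0, event->hw.last_period);
            regs = *iregs;
            /* ... copy the IBS MSR window into a raw sample when PERF_SAMPLE_RAW is set ... */
            /* ... take the sample RIP from the IBS-recorded RIP when it is valid ... */

            if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
                    perf_sample_save_callchain(&data, event, iregs);

            throttle = perf_event_overflow(event, &data, &regs);
            if (throttle)
                    perf_ibs_stop(event, 0);

            perf_event_update_userpage(event);
            return 1;
    }
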