Lines Matching refs:event

3  * Performance event support for s390x - CPU-measurement Counter Facility
19 static enum cpumf_ctr_set get_counter_set(u64 event)
23 if (event < 32)
25 else if (event < 64)
27 else if (event < 128)
29 else if (event < 288)
31 else if (event >= 448 && event < 496)
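Read together, these matches are the whole dispatch: a raw counter number selects its counter set purely by range. Below is a stand-alone C sketch that fills in the branches the matcher elided; the CPUMF_CTR_SET_* names mirror the kernel enum, and the CPUMF_CTR_SET_MAX fall-through for unmatched numbers (288..447 and >= 496) is inferred from how the function is used, not quoted.

#include <stdint.h>
#include <stdio.h>

enum cpumf_ctr_set {
	CPUMF_CTR_SET_BASIC,	/* counters   0..31 */
	CPUMF_CTR_SET_USER,	/* counters  32..63, problem state */
	CPUMF_CTR_SET_CRYPTO,	/* counters  64..127 */
	CPUMF_CTR_SET_EXT,	/* counters 128..287, extended */
	CPUMF_CTR_SET_MT_DIAG,	/* counters 448..495, MT diagnostic */
	CPUMF_CTR_SET_MAX,	/* not a set: marks "no match" */
};

static enum cpumf_ctr_set get_counter_set(uint64_t event)
{
	enum cpumf_ctr_set set = CPUMF_CTR_SET_MAX;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 288)
		set = CPUMF_CTR_SET_EXT;
	else if (event >= 448 && event < 496)
		set = CPUMF_CTR_SET_MT_DIAG;

	return set;	/* 288..447 and >= 496 fall through to MAX */
}

int main(void)
{
	printf("counter   1 -> set %d\n", get_counter_set(1));
	printf("counter  33 -> set %d\n", get_counter_set(33));
	printf("counter 300 -> set %d\n", get_counter_set(300));
	return 0;
}
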
106  * PMUs that might satisfy the event request.
170 /* Release the PMU if event is the last perf event */
171 static void hw_perf_event_destroy(struct perf_event *event)
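The destroy callback is the release half of a reference count on the shared counter hardware. A minimal user-space sketch with C11 atomics follows; the kernel additionally serializes the last-reference path with a mutex, and reserve_counter_facility()/release_counter_facility() are hypothetical stand-ins for that reserve/release machinery.

#include <stdatomic.h>
#include <stdio.h>

/* Count of live counter events; the facility is held while > 0. */
static atomic_int num_events;

/* Hypothetical stand-ins for claiming/releasing the counter hardware. */
static void reserve_counter_facility(void) { puts("facility reserved"); }
static void release_counter_facility(void) { puts("facility released"); }

static void hw_perf_event_create(void)
{
	if (atomic_fetch_add(&num_events, 1) == 0)
		reserve_counter_facility();
}

/* Modeled on hw_perf_event_destroy(): drop one reference and release
 * the hardware only when the last perf event goes away. */
static void hw_perf_event_destroy(void)
{
	if (atomic_fetch_sub(&num_events, 1) == 1)
		release_counter_facility();
}

int main(void)
{
	hw_perf_event_create();
	hw_perf_event_create();
	hw_perf_event_destroy();	/* one user left: facility kept */
	hw_perf_event_destroy();	/* last event: facility released */
	return 0;
}
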
181 /* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
191 /* CPUMF <-> perf event mappings for userspace (problem-state set) */
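These two tables translate generic perf hardware event IDs into CPUMF counter numbers, once for kernel+user counting (basic set) and once for problem-state-only counting. A sketch of their shape follows; the concrete numbers (0/1 and 32/33 for cycles/instructions) reflect the usual s390 CPU-MF counter layout but should be treated as illustrative here.

#include <stdio.h>

/* Subset of the generic perf hardware event IDs (linux/perf_event.h). */
enum { HW_CPU_CYCLES, HW_INSTRUCTIONS, HW_EVENT_MAX };

/* Kernel+user counting resolves to the basic set; user-only counting
 * resolves to the problem-state set. */
static const int generic_events_basic[HW_EVENT_MAX] = {
	[HW_CPU_CYCLES]   = 0,	/* basic set: cycles */
	[HW_INSTRUCTIONS] = 1,	/* basic set: instructions */
};
static const int generic_events_user[HW_EVENT_MAX] = {
	[HW_CPU_CYCLES]   = 32,	/* problem-state cycles */
	[HW_INSTRUCTIONS] = 33,	/* problem-state instructions */
};

int main(void)
{
	printf("cycles: basic=%d user=%d\n",
	       generic_events_basic[HW_CPU_CYCLES],
	       generic_events_user[HW_CPU_CYCLES]);
	return 0;
}
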
202 static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
204 struct perf_event_attr *attr = &event->attr;
205 struct hw_perf_event *hwc = &event->hw;
221 if (is_sampling_event(event)) /* No sampling support */
261 * Use the hardware perf event structure to store the
285 event->destroy = hw_perf_event_destroy;
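Initialization validates the request and then, as the comment at 261 says, uses the hardware perf event structure to cache what the hot paths need: the counter number in hw.config and its set in hw.config_base. A compressed, compilable sketch; the struct layouts and error choices are modeled on the fragments, not quoted.

#include <errno.h>
#include <stdint.h>

enum ctr_set { SET_BASIC, SET_USER, SET_MAX };

struct perf_event_attr {
	uint64_t config;	/* raw counter number */
	uint64_t sample_period;	/* nonzero would mean a sampling event */
};

struct hw_perf_event {
	uint64_t config;	/* counter number to extract later */
	enum ctr_set config_base; /* counter set the number belongs to */
};

struct perf_event {
	struct perf_event_attr attr;
	struct hw_perf_event hw;
	void (*destroy)(struct perf_event *);
};

static void hw_perf_event_destroy(struct perf_event *event) { (void)event; }

static enum ctr_set get_counter_set(uint64_t ev)
{
	return ev < 32 ? SET_BASIC : ev < 64 ? SET_USER : SET_MAX;
}

static int hw_perf_event_init(struct perf_event *event)
{
	uint64_t ev = event->attr.config;
	enum ctr_set set = get_counter_set(ev);

	if (event->attr.sample_period)
		return -ENOENT;	/* no sampling: let other PMUs try */
	if (set == SET_MAX)
		return -EINVAL;	/* unknown counter number */

	/* Use the hardware perf event structure to store counter + set. */
	event->hw.config = ev;
	event->hw.config_base = set;
	event->destroy = hw_perf_event_destroy;
	return 0;
}

int main(void)
{
	struct perf_event e = { .attr = { .config = 33 } };

	return hw_perf_event_init(&e);	/* 0: counter 33, user set */
}
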
300 * The result is different when event modifiers exclude_kernel and/or
303 static int cpumf_pmu_event_type(struct perf_event *event)
305 u64 ev = event->attr.config;
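The modifier dependence flagged in the comment comes from the table pair above: the same generic event resolves to a basic-set counter for kernel+user counting but to a problem-state counter when exclude_kernel is set, and kernel-only counting is simply not supported by the facility. A sketch of that selection; the function name and error values are modeled on the kernel's behavior, not quoted from it.

#include <errno.h>
#include <stdio.h>

enum { HW_CPU_CYCLES, HW_INSTRUCTIONS, HW_EVENT_MAX };

static const int events_basic[HW_EVENT_MAX] = { 0, 1 };   /* kernel+user */
static const int events_user[HW_EVENT_MAX]  = { 32, 33 }; /* problem state */

static int map_generic_event(unsigned int config,
			     int exclude_kernel, int exclude_user)
{
	if (config >= HW_EVENT_MAX)
		return -EINVAL;
	if (exclude_kernel && !exclude_user)
		return events_user[config];	/* problem state only */
	if (!exclude_kernel && exclude_user)
		return -EOPNOTSUPP;		/* kernel-only: no counter */
	return events_basic[config];		/* kernel + user */
}

int main(void)
{
	printf("cycles, kernel+user -> counter %d\n",
	       map_generic_event(HW_CPU_CYCLES, 0, 0));
	printf("cycles, user only   -> counter %d\n",
	       map_generic_event(HW_CPU_CYCLES, 1, 0));
	printf("cycles, kernel only -> %d\n",
	       map_generic_event(HW_CPU_CYCLES, 0, 1));
	return 0;
}
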
315 static int cpumf_pmu_event_init(struct perf_event *event)
317 unsigned int type = event->attr.type;
321 err = __hw_perf_event_init(event, type);
322 else if (event->pmu->type == type)
324 err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
328 if (unlikely(err) && event->destroy)
329 event->destroy(event);
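event_init is mostly dispatch plus cleanup: accept PERF_TYPE_HARDWARE and PERF_TYPE_RAW outright, accept the PMU's own dynamically assigned type (re-deriving the effective type, as the call through cpumf_pmu_event_type() at 324 shows), and if the deeper init failed after taking references, run the destroy callback before propagating the error. In the sketch below, the hard-coded failure in hw_event_init() and the dynamic type value 42 exist only to exercise that cleanup path.

#include <errno.h>

enum { TYPE_HARDWARE, TYPE_RAW };

struct perf_event {
	unsigned int type;	/* type requested via the attr */
	int inited;
	void (*destroy)(struct perf_event *);
};

static void event_destroy(struct perf_event *event) { event->inited = 0; }

static int hw_event_init(struct perf_event *event, unsigned int type)
{
	(void)type;
	event->inited = 1;
	event->destroy = event_destroy;
	return -EINVAL;		/* pretend a late validation step failed */
}

static int pmu_event_init(struct perf_event *event, unsigned int pmu_type)
{
	int err;

	if (event->type == TYPE_HARDWARE || event->type == TYPE_RAW)
		err = hw_event_init(event, event->type);
	else if (event->type == pmu_type)
		err = hw_event_init(event, TYPE_RAW); /* re-derived type */
	else
		return -ENOENT;	/* not ours: the core tries other PMUs */

	if (err && event->destroy)
		event->destroy(event);	/* undo partial setup on failure */
	return err;
}

int main(void)
{
	struct perf_event e = { .type = TYPE_RAW };

	(void)pmu_event_init(&e, 42);
	return e.inited;	/* 0: destroy ran after the failed init */
}
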
334 static int hw_perf_event_reset(struct perf_event *event)
340 prev = local64_read(&event->hw.prev_count);
341 err = ecctr(event->hw.config, &new);
352 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
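reset re-baselines prev_count from the hardware. The loop shape is the classic read/compare-and-swap retry: if another context moved prev_count between the read and the store, try again. A user-space rendering with C11 atomics; ecctr() here is a hypothetical stand-in for the s390 extract-CPU-counter wrapper, and the kernel's special handling of the "counter not yet available" error code is omitted.

#include <stdatomic.h>
#include <stdint.h>

/* Stand-in for the ECCTR instruction wrapper: read counter `ctr` into
 * *val; a nonzero return means the counter is not readable. */
static int ecctr(uint64_t ctr, uint64_t *val)
{
	(void)ctr;
	*val = 12345;	/* pretend hardware reading */
	return 0;
}

/* Modeled on hw_perf_event_reset(): publish a fresh hardware reading as
 * the baseline; retry if a concurrent update slipped in between. */
static int event_reset(_Atomic uint64_t *prev_count, uint64_t ctr)
{
	uint64_t prev, cur;
	int err;

	do {
		prev = atomic_load(prev_count);
		err = ecctr(ctr, &cur);
		if (err)
			break;
	} while (!atomic_compare_exchange_strong(prev_count, &prev, cur));

	return err;
}

int main(void)
{
	_Atomic uint64_t prev_count = 0;

	return event_reset(&prev_count, 0);
}
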
357 static void hw_perf_event_update(struct perf_event *event)
363 prev = local64_read(&event->hw.prev_count);
364 err = ecctr(event->hw.config, &new);
367 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
371 local64_add(delta, &event->count);
374 static void cpumf_pmu_read(struct perf_event *event)
376 if (event->hw.state & PERF_HES_STOPPED)
379 hw_perf_event_update(event);
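update is the same CAS loop plus accounting: whoever wins the swap credits the delta between old and new baselines, with the second formula handling a counter that wrapped between the two readings; read simply skips events already stopped, since the stop path folded their final delta in. A combined sketch; model_event and the fake free-running hwval are scaffolding.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HES_STOPPED 0x1	/* model of PERF_HES_STOPPED */

struct model_event {
	_Atomic uint64_t prev_count;	/* baseline hardware reading */
	_Atomic uint64_t count;		/* accumulated total */
	unsigned int state;
	uint64_t hwval;			/* fake free-running counter */
};

/* Stand-in for the real counter-extraction instruction. */
static int ecctr(struct model_event *e, uint64_t *val)
{
	*val = e->hwval;
	return 0;
}

/* Modeled on hw_perf_event_update(): atomically move the baseline to the
 * newest reading, then credit the delta; the second delta formula covers
 * a counter that wrapped around between the two readings. */
static void event_update(struct model_event *e)
{
	uint64_t prev, cur, delta;

	do {
		prev = atomic_load(&e->prev_count);
		if (ecctr(e, &cur))
			return;
	} while (!atomic_compare_exchange_strong(&e->prev_count, &prev, cur));

	delta = (prev <= cur) ? cur - prev : (-1ULL - prev) + cur + 1;
	atomic_fetch_add(&e->count, delta);
}

/* Modeled on cpumf_pmu_read(): a stopped event was already brought up to
 * date by the stop path, so there is nothing new to fold in. */
static void event_read(struct model_event *e)
{
	if (e->state & HES_STOPPED)
		return;
	event_update(e);
}

int main(void)
{
	struct model_event e = { .hwval = 100 };

	event_read(&e);		/* credits 100 */
	e.hwval = 250;
	event_read(&e);		/* credits 150 more */
	printf("count = %" PRIu64 "\n", atomic_load(&e.count));
	return 0;
}
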
382 static void cpumf_pmu_start(struct perf_event *event, int flags)
385 struct hw_perf_event *hwc = &event->hw;
403 * Because all counters in a set are active, the event->hw.prev_count
407 hw_perf_event_reset(event);
413 static void cpumf_pmu_stop(struct perf_event *event, int flags)
416 struct hw_perf_event *hwc = &event->hw;
425 event->hw.state |= PERF_HES_STOPPED;
429 hw_perf_event_update(event);
430 event->hw.state |= PERF_HES_UPTODATE;
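start and stop manage the two perf state bits. Because every counter in an active set keeps counting (the point of the comment at 403), start must resynchronize the baseline, or ticks from the stopped gap would be billed to the event; stop marks the event stopped first and folds the outstanding delta in exactly once under PERF_EF_UPDATE. A sequential sketch without the atomics or the per-set controls, to show just that bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define HES_STOPPED	0x1
#define HES_UPTODATE	0x2
#define EF_RELOAD	0x1
#define EF_UPDATE	0x2

struct model_event {
	unsigned int state;
	uint64_t prev_count, count, hwval;
};

static void event_reset(struct model_event *e)
{
	e->prev_count = e->hwval;	/* new baseline, gap ticks dropped */
}

static void event_update(struct model_event *e)
{
	e->count += e->hwval - e->prev_count;
	e->prev_count = e->hwval;
}

static void pmu_start(struct model_event *e, int flags)
{
	(void)flags;		/* EF_RELOAD is implied in this model */
	e->state = 0;
	event_reset(e);		/* the set may have kept counting */
}

static void pmu_stop(struct model_event *e, int flags)
{
	if (!(e->state & HES_STOPPED))
		e->state |= HES_STOPPED;
	if ((flags & EF_UPDATE) && !(e->state & HES_UPTODATE)) {
		event_update(e);
		e->state |= HES_UPTODATE;
	}
}

int main(void)
{
	struct model_event e = { .state = HES_STOPPED | HES_UPTODATE };

	pmu_start(&e, EF_RELOAD);
	e.hwval += 100;		/* counted while running */
	pmu_stop(&e, EF_UPDATE);
	e.hwval += 999;		/* set keeps running; event is stopped */
	pmu_start(&e, EF_RELOAD);
	e.hwval += 50;
	pmu_stop(&e, EF_UPDATE);
	printf("count = %llu\n", (unsigned long long)e.count);
	return 0;
}

Running it prints count = 150: the 999 ticks accumulated while the event was stopped are never credited.
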
434 static int cpumf_pmu_add(struct perf_event *event, int flags)
444 if (validate_ctr_auth(&event->hw))
447 ctr_set_enable(&cpuhw->state, event->hw.config_base);
448 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
451 cpumf_pmu_start(event, PERF_EF_RELOAD);
453 perf_event_update_userpage(event);
458 static void cpumf_pmu_del(struct perf_event *event, int flags)
462 cpumf_pmu_stop(event, PERF_EF_UPDATE);
468 * When a new perf event has been added but not yet started, this can
472 if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
473 ctr_set_disable(&cpuhw->state, event->hw.config_base);
475 perf_event_update_userpage(event);
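add and del bracket the scheduling of an event onto the PMU: add checks the counter-set authorization (returning -ENOENT so another PMU may claim the event), enables the set, and parks the event stopped and up to date until the core asks for a start; del stops it and, if the per-set refcount shows no remaining user, disables the whole set, which also clears its counters, which is exactly why the comment above notes that start always has to re-enable a set. A sketch with the refcount made explicit; cpu_hw_state is a model of the per-CPU cpu_hw_events.

#include <errno.h>
#include <stdio.h>

enum { SET_BASIC, SET_USER, NUM_SETS };

#define HES_STOPPED	0x1
#define HES_UPTODATE	0x2
#define EF_START	0x1

struct cpu_hw_state {
	unsigned int authorized;	/* sets this machine may use */
	unsigned int enabled;		/* bit per enabled counter set */
	int refcount[NUM_SETS];		/* running events per set */
};

struct model_event {
	int set;
	unsigned int state;
};

static void pmu_start(struct cpu_hw_state *cpuhw, struct model_event *e)
{
	e->state = 0;
	cpuhw->enabled |= 1U << e->set;	/* del may have disabled the set */
	cpuhw->refcount[e->set]++;
}

static void pmu_stop(struct cpu_hw_state *cpuhw, struct model_event *e)
{
	if (!(e->state & HES_STOPPED)) {
		cpuhw->refcount[e->set]--;
		e->state |= HES_STOPPED | HES_UPTODATE;
	}
}

/* Modeled on cpumf_pmu_add(): refuse unauthorized sets, enable the set,
 * and leave the event stopped + up to date unless a start is requested. */
static int pmu_add(struct cpu_hw_state *cpuhw, struct model_event *e,
		   int flags)
{
	if (!(cpuhw->authorized & (1U << e->set)))
		return -ENOENT;

	cpuhw->enabled |= 1U << e->set;
	e->state = HES_STOPPED | HES_UPTODATE;
	if (flags & EF_START)
		pmu_start(cpuhw, e);
	return 0;
}

/* Modeled on cpumf_pmu_del(): stop the event; if it was the last user of
 * its counter set, disable the set (clearing its counters). */
static void pmu_del(struct cpu_hw_state *cpuhw, struct model_event *e)
{
	pmu_stop(cpuhw, e);
	if (!cpuhw->refcount[e->set])
		cpuhw->enabled &= ~(1U << e->set);
}

int main(void)
{
	struct cpu_hw_state cpuhw = { .authorized = 1U << SET_BASIC };
	struct model_event e = { .set = SET_BASIC };

	if (pmu_add(&cpuhw, &e, EF_START))
		return 1;
	pmu_del(&cpuhw, &e);
	printf("set enabled after del: %u\n", cpuhw.enabled & 1U);
	return 0;
}
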