Lines Matching refs:event
225 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
272 PMU_FORMAT_ATTR(event, "config:0-7");
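The two hits above define the userspace-facing interface: each named event exports an "event=0xNN" string through sysfs, and the "event" format field maps onto bits 0-7 of perf_event_attr.config. As a rough, hedged illustration of how that interface is consumed (the PMU name imx8_ddr0, the choice of 0x41 as one of the two AXI-ID-filtered events seen at line 299, and the CPU binding are assumptions, not confirmed by this listing):

/* Userspace sketch: open one DDR PMU counter via perf_event_open().
 * The imx8_ddr0 name and config values are assumptions; adjust for the target. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t count;
	FILE *f;
	int fd;

	/* The dynamically assigned PMU type is published in sysfs. */
	f = fopen("/sys/bus/event_source/devices/imx8_ddr0/type", "r");
	if (!f || fscanf(f, "%u", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x41;   /* one of the filtered events, per ddr_perf_is_filtered() */
	attr.config1 = 0x0;   /* AXI ID filter value, per ddr_perf_filter_val() */

	/* Uncore PMU: count system-wide (pid = -1) on one chosen CPU. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload of interest ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

The perf tool reaches the same attributes through the format strings exported above, so numeric config values do not need to be hard-coded there.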
297 static bool ddr_perf_is_filtered(struct perf_event *event)
299 return event->attr.config == 0x41 || event->attr.config == 0x42;
302 static u32 ddr_perf_filter_val(struct perf_event *event)
304 return event->attr.config1;
317 static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
320 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
324 ddr_perf_is_filtered(event);
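The filter helpers above appear one line at a time in this listing. A minimal sketch of how they seem to fit together, for context; ddr_perf_filters_compatible() (referenced at lines 398-401) and the DDR_CAP_AXI_ID_FILTER_ENHANCED quirk flag are reconstructed assumptions, not verbatim source:

static bool ddr_perf_is_filtered(struct perf_event *event)
{
	/* Only the axid-read/axid-write events (0x41/0x42) take an AXI ID filter. */
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	/* The AXI ID filter value travels in config1. */
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	/* Unfiltered events never conflict; filtered events must agree on config1. */
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	/* Enhanced filtering applies only on SoCs with the matching quirk. */
	return (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
	       ddr_perf_is_filtered(event);
}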
327 static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
332 * Always map the cycle event to counter 0
333 * The cycles counter is dedicated to the cycle event
336 if (event == EVENT_CYCLES_ID) {
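Counter 0 is reserved for the cycle event; every other event takes the first free slot. A sketch of the allocator around these fragments (NUM_COUNTERS, EVENT_CYCLES_COUNTER and the -ENOENT convention on exhaustion are assumptions):

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/* Counter 0 is dedicated to the cycle event and nothing else. */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
	}

	/* All other events take the first free counter, starting at 1. */
	for (i = 1; i < NUM_COUNTERS; i++)
		if (pmu->events[i] == NULL)
			return i;

	return -ENOENT;
}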
358 struct perf_event *event = pmu->events[counter];
363 * axid-read and axid-write events if the PMU core supports enhanced
366 base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
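The hit at line 366 selects a different register base for enhanced-filtered events so that, per the comment at line 363, they report bytes rather than bursts. A sketch of the surrounding read helper (the ddr_perf_read_counter() name, COUNTER_READ and the readl_relaxed() access are assumptions consistent with the identifiers shown):

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/* Enhanced-filtered events are read from the DPCR1 block instead of
	 * the normal counter-read block. */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;

	return readl_relaxed(base + counter * 4);
}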
371 static int ddr_perf_event_init(struct perf_event *event)
373 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
374 struct hw_perf_event *hwc = &event->hw;
377 if (event->attr.type != event->pmu->type)
380 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
383 if (event->cpu < 0) {
393 if (event->group_leader->pmu != event->pmu &&
394 !is_software_event(event->group_leader))
398 if (!ddr_perf_filters_compatible(event, event->group_leader))
400 for_each_sibling_event(sibling, event->group_leader) {
401 if (!ddr_perf_filters_compatible(event, sibling))
406 for_each_sibling_event(sibling, event->group_leader) {
407 if (sibling->pmu != event->pmu &&
412 event->cpu = pmu->cpu;
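Lines 371-412 are the usual uncore-PMU validation in ->event_init(): reject wrong-type, sampling and per-task events, require a CPU binding, and refuse groups that mix other hardware PMUs or incompatible AXI ID filters. A condensed, hedged sketch of that path (error codes and the DDR_CAP_AXI_ID_FILTER gate are assumptions):

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Uncore counters: no sampling and no per-task counting. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EOPNOTSUPP;

	/* The group leader must sit on this PMU or be a software event. */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	/* Filtered events in one group must share the same AXI ID filter. */
	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	/* Siblings must also sit on this PMU or be software events. */
	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	/* Count on the single CPU this uncore PMU is currently bound to. */
	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}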
475 static void ddr_perf_event_update(struct perf_event *event)
477 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
478 struct hw_perf_event *hwc = &event->hw;
490 local64_add(new_raw_count, &event->count);
493 * For legacy SoCs: the event counter keeps counting on overflow,
495 * For new SoCs: the event counter stops counting on overflow and needs
502 event->attr.config);
505 /* clear the counter every time, for both the cycle counter and the event counters */
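The comments at lines 493-505 capture the key generational difference: legacy SoCs keep counting through an overflow, newer SoCs stop. A hedged sketch of the update path these lines sit in (ddr_perf_counter_overflow(), ddr_perf_counter_clear() and the warning text are assumptions inferred from the fragments):

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_raw_count;
	int counter = hwc->idx;
	int ret;

	/* The counter is cleared on every update, so the raw value is the delta. */
	new_raw_count = ddr_perf_read_counter(pmu, counter);
	local64_add(new_raw_count, &event->count);

	/* On newer SoCs an overflowed event counter has stopped: report the loss. */
	if (counter != EVENT_CYCLES_COUNTER) {
		ret = ddr_perf_counter_overflow(pmu, counter);
		if (ret)
			dev_warn_ratelimited(&pmu->pdev->dev,
					     "events lost due to counter overflow (config 0x%llx)\n",
					     event->attr.config);
	}

	/* Clear the counter every time so it restarts from zero. */
	ddr_perf_counter_clear(pmu, counter);
}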
509 static void ddr_perf_event_start(struct perf_event *event, int flags)
511 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
512 struct hw_perf_event *hwc = &event->hw;
517 ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
526 static int ddr_perf_event_add(struct perf_event *event, int flags)
528 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
529 struct hw_perf_event *hwc = &event->hw;
531 int cfg = event->attr.config;
532 int cfg1 = event->attr.config1;
539 !ddr_perf_filters_compatible(event, pmu->events[i]))
543 if (ddr_perf_is_filtered(event)) {
556 pmu->events[counter] = event;
562 ddr_perf_event_start(event, flags);
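Lines 526-562 are ->add(): check the new event's AXI ID filter against every installed event, program the filter, claim a counter, and optionally start it. A sketch of that flow (AXI_MASKING_REVERT, the writel() to COUNTER_DPCR1, and the PERF_HES_STOPPED bookkeeping are assumptions following the usual perf driver pattern):

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		/* A new filtered event must not conflict with installed ones. */
		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		/* Program the AXI ID filter value into the filter register. */
		if (ddr_perf_is_filtered(event)) {
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0)
		return -EOPNOTSUPP;

	pmu->events[counter] = event;
	hwc->idx = counter;
	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}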
567 static void ddr_perf_event_stop(struct perf_event *event, int flags)
569 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
570 struct hw_perf_event *hwc = &event->hw;
573 ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
574 ddr_perf_event_update(event);
583 static void ddr_perf_event_del(struct perf_event *event, int flags)
585 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
586 struct hw_perf_event *hwc = &event->hw;
589 ddr_perf_event_stop(event, PERF_EF_UPDATE);
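The start/stop/del fragments at lines 509-517, 567-574 and 583-589 follow the standard perf lifecycle: start enables the counter, stop disables it and folds the final value in via ddr_perf_event_update(), and del stops the event and releases its counter slot. A hedged sketch of the three callbacks together (prev_count handling and the PERF_HES_STOPPED state bits are assumptions from the common pattern):

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	/* Disable first, then fold the final hardware value into event->count. */
	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	/* Release the counter slot for the next event. */
	pmu->events[counter] = NULL;
	hwc->idx = -1;
}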
633 struct perf_event *event;
644 * such as i.MX8MP, the event counter stops counting on overflow, so
645 * we need to use the cycle counter to handle overflow of the event counters.
657 event = pmu->events[i];
659 ddr_perf_event_update(event);
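Lines 633-659 come from the overflow interrupt handler. Per the comment at 644-645, the dedicated cycle counter acts as the overflow "clock": its interrupt is the point at which every installed event is read back and cleared, which also restarts event counters that stop on overflow on newer SoCs. A hedged sketch of the handler (the disable/re-enable of the cycle counter and the IRQ_HANDLED return are assumptions consistent with the comments):

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	struct ddr_pmu *pmu = (struct ddr_pmu *)p;
	struct perf_event *event;
	int i;

	/* Disabling the cycle counter stops all counters while we read them. */
	ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER,
				false);

	/* Fold every installed event's hardware count into event->count;
	 * ddr_perf_event_update() also clears the counter so it counts again. */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);
	}

	/* Re-arm the cycle counter so the next overflow interrupt arrives. */
	ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER,
				true);

	return IRQ_HANDLED;
}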