Lines matching refs: pmu

125 	struct pmu pmu;
135 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
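
The matches at lines 125 and 135 show the driver embedding a struct pmu inside its own state and recovering the outer structure with container_of(). A minimal sketch of that layout, with the remaining members inferred from the matches further down; the counter counts and the hlist member name are assumptions:

#include <linux/bits.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>

#define DDRC_PERF_NUM_GEN_COUNTERS  4   /* assumed: programmable counters */
#define DDRC_PERF_NUM_FIX_COUNTERS  2   /* free-running read + write counters */
#define DDRC_PERF_NUM_COUNTERS      (DDRC_PERF_NUM_GEN_COUNTERS + \
                                     DDRC_PERF_NUM_FIX_COUNTERS)

struct cn10k_ddr_pmu {
        struct pmu pmu;                 /* embedded core object handed to perf (line 125) */
        void __iomem *base;             /* DDRC PMU register window */
        unsigned int cpu;               /* CPU all events are bound to */
        struct device *dev;
        int active_events;              /* drives the polling hrtimer, lines 451/508 */
        struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
        struct hrtimer hrtimer;         /* emulates overflow interrupts */
        struct hlist_node node;         /* cpuhp instance (member name assumed) */
};

/* perf hands back the embedded struct pmu; recover the driver context (line 135). */
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)

The later sketches below build on these includes, the struct, and the macros.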
233 struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
235 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
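
Lines 233-235 expose that CPU through a sysfs cpumask attribute; a short sketch of the show routine, assuming it is wired up as a DEVICE_ATTR_RO in the PMU's attribute groups:

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);

        /* Report the single CPU events for this uncore PMU must run on. */
        return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);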
289 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
297 pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
303 pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
309 if (pmu->events[i] == NULL) {
310 pmu->events[i] = event;
318 static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
320 pmu->events[counter] = NULL;
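
The matches at 289-320 show the counter allocator: the read and write ops get dedicated free-running slots, everything else takes the first free programmable counter. A sketch building on the struct above; the event encodings and the exact slot layout are assumptions:

#define EVENT_OP_IS_READ                0x3d    /* assumed event encodings */
#define EVENT_OP_IS_WRITE               0x3e
#define DDRC_PERF_READ_COUNTER_IDX      DDRC_PERF_NUM_GEN_COUNTERS
#define DDRC_PERF_WRITE_COUNTER_IDX     (DDRC_PERF_NUM_GEN_COUNTERS + 1)

static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
                                        struct perf_event *event)
{
        u8 config = event->attr.config;
        int i;

        /* The read/write ops map onto the fixed, free-running counters. */
        if (config == EVENT_OP_IS_READ) {
                pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
                return DDRC_PERF_READ_COUNTER_IDX;
        }

        if (config == EVENT_OP_IS_WRITE) {
                pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
                return DDRC_PERF_WRITE_COUNTER_IDX;
        }

        /* Everything else takes the first free programmable counter. */
        for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
                if (pmu->events[i] == NULL) {
                        pmu->events[i] = event;
                        return i;
                }
        }

        return -ENOSPC;
}

static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
        pmu->events[counter] = NULL;
}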
325 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
328 if (event->attr.type != event->pmu->type)
332 dev_info(pmu->dev, "Sampling not supported!\n");
337 dev_warn(pmu->dev, "Can't provide per-task data!\n");
342 if (event->group_leader->pmu != event->pmu &&
349 event->cpu = pmu->cpu;
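
Lines 325-349 outline event_init: wrong-type, sampling, and per-task events are rejected, and the event is forced onto the PMU's designated CPU. The second half of the group check at line 342 is not visible in the matches; the sketch uses the common form that tolerates software-event leaders, which is an assumption, as is clearing hwc->idx:

static int cn10k_ddr_perf_event_init(struct perf_event *event)
{
        struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event)) {
                dev_info(pmu->dev, "Sampling not supported!\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_warn(pmu->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }

        /* Uncore counters cannot be grouped with events from other PMUs. */
        if (event->group_leader->pmu != event->pmu &&
            !is_software_event(event->group_leader))
                return -EINVAL;

        /* All events of this PMU run on one designated CPU. */
        event->cpu = pmu->cpu;
        hwc->idx = -1;

        return 0;
}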
354 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
367 val = readq_relaxed(pmu->base + reg);
374 writeq_relaxed(val, pmu->base + reg);
376 val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
388 writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
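
The matches at 354-388 show counter enable/disable as a read-modify-write of a per-counter config register, with the two fixed counters controlled through a separate DDRC_PERF_CNT_FREERUN_EN register. In this sketch the register offsets and bit positions are assumptions; only the access pattern comes from the listing:

#define DDRC_PERF_CFG(n)                (0x8000 + 8 * (n))      /* assumed offsets */
#define DDRC_PERF_CFG_EN                BIT(0)                  /* assumed enable bit */
#define DDRC_PERF_CNT_FREERUN_EN        0x80c0                  /* assumed offset */
#define DDRC_PERF_FREERUN_READ_EN       BIT(0)
#define DDRC_PERF_FREERUN_WRITE_EN      BIT(1)

static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
                                          int counter, bool enable)
{
        u64 val;

        if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
                /* Programmable counter: toggle the enable bit in its config. */
                u32 reg = DDRC_PERF_CFG(counter);

                val = readq_relaxed(pmu->base + reg);
                if (enable)
                        val |= DDRC_PERF_CFG_EN;
                else
                        val &= ~DDRC_PERF_CFG_EN;
                writeq_relaxed(val, pmu->base + reg);
        } else {
                /* Fixed read/write counters share one free-run enable register. */
                u64 bit = (counter == DDRC_PERF_READ_COUNTER_IDX) ?
                          DDRC_PERF_FREERUN_READ_EN : DDRC_PERF_FREERUN_WRITE_EN;

                val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
                if (enable)
                        val |= bit;
                else
                        val &= ~bit;
                writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
        }
}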
392 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
397 return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
400 return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
402 val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
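
Lines 392-402 show that the two fixed counters have dedicated value registers while programmable counters are read through an indexed DDRC_PERF_CNT_VALUE(n) window. The offsets below are assumptions; the split by counter index comes from the listing:

#define DDRC_PERF_CNT_VALUE_RD_OP       0x80d0                  /* assumed offsets */
#define DDRC_PERF_CNT_VALUE_WR_OP       0x80d8
#define DDRC_PERF_CNT_VALUE(n)          (0x8080 + 8 * (n))

static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
        u64 val;

        if (counter == DDRC_PERF_READ_COUNTER_IDX)
                return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);

        if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
                return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);

        val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
        return val;
}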
408 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
414 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
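
Lines 408-414 are event_update, which rereads the hardware counter and folds the delta into the perf event. The sketch uses the standard local64_cmpxchg idiom for this; the 48-bit counter width is an assumption:

static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
        struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_count, new_count, mask;

        do {
                prev_count = local64_read(&hwc->prev_count);
                new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
        } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
                 prev_count);

        /* Fold the (possibly wrapped) delta into the event count. */
        mask = GENMASK_ULL(47, 0);                      /* assumed counter width */
        local64_add((new_count - prev_count) & mask, &event->count);
}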
424 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
430 cn10k_ddr_perf_counter_enable(pmu, counter, true);
437 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
444 counter = cn10k_ddr_perf_alloc_counter(pmu, event);
448 pmu->active_events++;
451 if (pmu->active_events == 1)
452 hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
462 writeq_relaxed(val, pmu->base + reg_offset);
470 writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
483 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
487 cn10k_ddr_perf_counter_enable(pmu, counter, false);
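
Lines 424-430 and 483-487 are the start/stop callbacks, which reduce to enabling or disabling the event's counter around a final readout. In this sketch, resetting prev_count on start and the PERF_HES_STOPPED bookkeeping follow the usual perf driver pattern rather than anything visible in the matches:

static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
{
        struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        /* Start the delta from zero, then turn the counter on. */
        local64_set(&hwc->prev_count, 0);
        cn10k_ddr_perf_counter_enable(pmu, counter, true);
        hwc->state = 0;
}

static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
{
        struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        cn10k_ddr_perf_counter_enable(pmu, counter, false);

        if (flags & PERF_EF_UPDATE)
                cn10k_ddr_perf_event_update(event);

        hwc->state |= PERF_HES_STOPPED;
}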
497 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
503 cn10k_ddr_perf_free_counter(pmu, counter);
504 pmu->active_events--;
508 if (pmu->active_events == 0)
509 hrtimer_cancel(&pmu->hrtimer);
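
Lines 437-470 and 497-509 are add/del: add allocates a counter, programs its event selector (the writeq_relaxed() calls at 462 and 470), and starts the polling hrtimer when the first event arrives; del releases the counter and cancels the timer when the last one goes. In the sketch the selector programming is reduced to a comment, cn10k_ddr_pmu_timer_period() is taken as given from line 452, and the hrtimer mode and the stop-on-del call are assumptions:

static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
        struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter;

        counter = cn10k_ddr_perf_alloc_counter(pmu, event);
        if (counter < 0)
                return counter;

        pmu->active_events++;
        hwc->idx = counter;

        /* First event: start the hrtimer that stands in for an overflow IRQ. */
        if (pmu->active_events == 1)
                hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
                              HRTIMER_MODE_REL_PINNED);

        /*
         * For programmable counters, write the event selector here
         * (the writeq_relaxed() calls at lines 462 and 470 of the listing).
         */

        if (flags & PERF_EF_START)
                cn10k_ddr_perf_event_start(event, flags);

        return 0;
}

static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
{
        struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);

        cn10k_ddr_perf_free_counter(pmu, counter);
        pmu->active_events--;
        hwc->idx = -1;

        /* Last event gone: stop polling the hardware. */
        if (pmu->active_events == 0)
                hrtimer_cancel(&pmu->hrtimer);
}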
512 static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
514 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
520 static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
522 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
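
Lines 512-522 show pmu_enable/pmu_disable converting the core struct pmu pointer back to the driver context; what they actually write is not visible in the matches. Purely as a placeholder, the sketch assumes a pair of global start/stop control registers (all names and values below are assumptions):

#define DDRC_PERF_CNT_START_OP_CTRL     0x8028  /* assumed */
#define DDRC_PERF_CNT_END_OP_CTRL       0x8030  /* assumed */
#define OP_MODE_CTRL_VAL_START          0x1     /* assumed */
#define OP_MODE_CTRL_VAL_END            0x1     /* assumed */

static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
        struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);

        /* Assumed: one global write kicks all enabled counters. */
        writeq_relaxed(OP_MODE_CTRL_VAL_START,
                       ddr_pmu->base + DDRC_PERF_CNT_START_OP_CTRL);
}

static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
        struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);

        writeq_relaxed(OP_MODE_CTRL_VAL_END,
                       ddr_pmu->base + DDRC_PERF_CNT_END_OP_CTRL);
}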
528 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
534 if (pmu->events[i] == NULL)
537 cn10k_ddr_perf_event_update(pmu->events[i]);
542 if (pmu->events[i] == NULL)
545 hwc = &pmu->events[i]->hw;
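
Lines 528-545 are event_update_all: one pass folds every active generic counter into its event, a second pass resets the software prev_count copies (the local64_set() in the second loop is inferred, since only the hwc lookup at line 545 is visible):

static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
{
        struct hw_perf_event *hwc;
        int i;

        for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
                if (pmu->events[i] == NULL)
                        continue;

                cn10k_ddr_perf_event_update(pmu->events[i]);
        }

        /* The caller restarts the counters, so resync the snapshots too. */
        for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
                if (pmu->events[i] == NULL)
                        continue;

                hwc = &pmu->events[i]->hw;
                local64_set(&hwc->prev_count, 0);
        }
}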
550 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
558 event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
562 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
571 event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
575 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
585 if (pmu->events[i] == NULL)
588 value = cn10k_ddr_perf_read_counter(pmu, i);
591 cn10k_ddr_perf_event_update_all(pmu);
592 cn10k_ddr_perf_pmu_disable(&pmu->pmu);
593 cn10k_ddr_perf_pmu_enable(&pmu->pmu);
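
Lines 550-593 are the overflow handler driven by the hrtimer: the two free-running counters are folded in when the hardware value has wrapped past the previous snapshot, and if any generic counter has saturated, everything is updated and the PMU is bounced through disable/enable to restart the counters. The wrap tests and DDRC_PERF_CNT_MAX_VALUE are assumptions:

#define DDRC_PERF_CNT_MAX_VALUE         GENMASK_ULL(47, 0)      /* assumed */

static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
        struct perf_event *event;
        struct hw_perf_event *hwc;
        u64 prev_count, new_count, value;
        int i;

        /* Fixed free-running counters: fold in the delta once they wrap. */
        event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
        if (event) {
                hwc = &event->hw;
                prev_count = local64_read(&hwc->prev_count);
                new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
                if (new_count < prev_count)
                        cn10k_ddr_perf_event_update(event);
        }

        event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
        if (event) {
                hwc = &event->hw;
                prev_count = local64_read(&hwc->prev_count);
                new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
                if (new_count < prev_count)
                        cn10k_ddr_perf_event_update(event);
        }

        /* Generic counters: on saturation, resync and restart everything. */
        for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
                if (pmu->events[i] == NULL)
                        continue;

                value = cn10k_ddr_perf_read_counter(pmu, i);
                if (value == DDRC_PERF_CNT_MAX_VALUE) {
                        cn10k_ddr_perf_event_update_all(pmu);
                        cn10k_ddr_perf_pmu_disable(&pmu->pmu);
                        cn10k_ddr_perf_pmu_enable(&pmu->pmu);
                }
        }

        return IRQ_HANDLED;
}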
602 struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
607 cn10k_ddr_pmu_overflow_handler(pmu);
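
Lines 602-607 are the hrtimer callback that stands in for an overflow interrupt. Masking local interrupts around the handler and re-arming with hrtimer_forward_now() are assumptions; only the container_of() recovery and the overflow call are in the matches:

static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
{
        struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
                                                 hrtimer);
        unsigned long flags;

        local_irq_save(flags);
        cn10k_ddr_pmu_overflow_handler(pmu);
        local_irq_restore(flags);

        /* Re-arm so the counters are polled again before they can wrap. */
        hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
        return HRTIMER_RESTART;
}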
616 struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
620 if (cpu != pmu->cpu)
627 perf_pmu_migrate_context(&pmu->pmu, cpu, target);
628 pmu->cpu = target;
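
Lines 616-628 are the CPU hotplug callback: if the CPU going offline is the one the PMU is bound to, pick any other online CPU, migrate the perf context, and rebind. Only the target selection around cpumask_any_but() is filled in by assumption:

static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
                                                     node);
        unsigned int target;

        if (cpu != pmu->cpu)
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        /* Hand the active events and future ownership to the new CPU. */
        perf_pmu_migrate_context(&pmu->pmu, cpu, target);
        pmu->cpu = target;

        return 0;
}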
657 ddr_pmu->pmu = (struct pmu) {
687 ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
708 perf_pmu_unregister(&ddr_pmu->pmu);
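
Lines 657, 687 and 708 show probe filling in the embedded struct pmu and registering it, and remove unregistering it. The sketch below is a condensed wrapper, not the driver's actual probe/remove: the callback list mirrors the functions sketched above, while the capability flags, context type, and the attribute groups (omitted here) are assumptions:

static int cn10k_ddr_perf_register_sketch(struct cn10k_ddr_pmu *ddr_pmu,
                                          const char *name)
{
        ddr_pmu->pmu = (struct pmu) {
                .module         = THIS_MODULE,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,      /* assumed */
                .task_ctx_nr    = perf_invalid_context,         /* uncore PMU */
                .event_init     = cn10k_ddr_perf_event_init,
                .add            = cn10k_ddr_perf_event_add,
                .del            = cn10k_ddr_perf_event_del,
                .start          = cn10k_ddr_perf_event_start,
                .stop           = cn10k_ddr_perf_event_stop,
                .read           = cn10k_ddr_perf_event_update,
                .pmu_enable     = cn10k_ddr_perf_pmu_enable,
                .pmu_disable    = cn10k_ddr_perf_pmu_disable,
        };

        /* type -1 asks perf to allocate a dynamic PMU type id. */
        return perf_pmu_register(&ddr_pmu->pmu, name, -1);
}

static void cn10k_ddr_perf_unregister_sketch(struct cn10k_ddr_pmu *ddr_pmu)
{
        perf_pmu_unregister(&ddr_pmu->pmu);
}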
714 { .compatible = "marvell,cn10k-ddr-pmu", },
730 .name = "cn10k-ddr-pmu",
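
Lines 714 and 730 place the driver: an of_device_id table matching "marvell,cn10k-ddr-pmu" and a platform driver named "cn10k-ddr-pmu". A sketch of that glue; the suppress_bind_attrs flag is an assumption and the real probe/remove hooks are left as a comment:

#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
        { .compatible = "marvell,cn10k-ddr-pmu", },
        { },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);

static struct platform_driver cn10k_ddr_pmu_driver = {
        .driver = {
                .name                   = "cn10k-ddr-pmu",
                .of_match_table         = cn10k_ddr_pmu_of_match,
                .suppress_bind_attrs    = true,         /* assumed */
        },
        /* .probe and .remove hook up the driver's real probe/remove paths. */
};
module_platform_driver(cn10k_ddr_pmu_driver);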