Lines matching refs:pmu (a minimal sketch of the container_of idiom these matches share follows the listing)
40 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
66 { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
67 { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
68 { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
74 struct pmu pmu;
94 static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
96 u32 quirks = pmu->devtype_data->quirks;
115 struct ddr_pmu *pmu = dev_get_drvdata(dev);
121 ddr_perf_filter_cap_get(pmu, cap));
146 struct ddr_pmu *pmu = dev_get_drvdata(dev);
148 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
267 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
269 filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
274 static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
284 if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
291 if (pmu->events[i] == NULL)
298 static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
300 pmu->events[counter] = NULL;
303 static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
305 struct perf_event *event = pmu->events[counter];
306 void __iomem *base = pmu->base;
320 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
324 if (event->attr.type != event->pmu->type)
331 dev_warn(pmu->dev, "Can't provide per-task data!\n");
340 if (event->group_leader->pmu != event->pmu &&
344 if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
354 if (sibling->pmu != event->pmu &&
359 event->cpu = pmu->cpu;
368 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
375 new_raw_count = ddr_perf_read_counter(pmu, counter);
384 static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
397 writel(0, pmu->base + reg);
400 writel(val, pmu->base + reg);
403 val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
404 writel(val, pmu->base + reg);
410 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
416 ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
418 if (!pmu->active_counter++)
419 ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
427 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
433 if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
437 if (pmu->events[i] &&
438 !ddr_perf_filters_compatible(event, pmu->events[i]))
445 writel(cfg1, pmu->base + COUNTER_DPCR1);
449 counter = ddr_perf_alloc_counter(pmu, cfg);
451 dev_dbg(pmu->dev, "There are not enough counters\n");
455 pmu->events[counter] = event;
456 pmu->active_events++;
469 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
473 ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
476 if (!--pmu->active_counter)
477 ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
485 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
491 ddr_perf_free_counter(pmu, counter);
492 pmu->active_events--;
496 static void ddr_perf_pmu_enable(struct pmu *pmu)
500 static void ddr_perf_pmu_disable(struct pmu *pmu)
504 static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
507 *pmu = (struct ddr_pmu) {
508 .pmu = (struct pmu) {
526 pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
527 return pmu->id;
533 struct ddr_pmu *pmu = (struct ddr_pmu *) p;
537 ddr_perf_counter_enable(pmu,
553 if (!pmu->events[i])
556 event = pmu->events[i];
564 ddr_perf_counter_enable(pmu,
576 struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
579 if (cpu != pmu->cpu)
586 perf_pmu_migrate_context(&pmu->pmu, cpu, target);
587 pmu->cpu = target;
589 WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
596 struct ddr_pmu *pmu;
610 pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
611 if (!pmu)
614 num = ddr_perf_init(pmu, base, &pdev->dev);
616 platform_set_drvdata(pdev, pmu);
625 pmu->devtype_data = of_device_get_match_data(&pdev->dev);
627 pmu->cpu = raw_smp_processor_id();
638 pmu->cpuhp_state = ret;
640 /* Register the pmu instance for cpu hotplug */
641 ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
659 pmu);
665 pmu->irq = irq;
666 ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
668 dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
672 ret = perf_pmu_register(&pmu->pmu, name, -1);
679 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
681 cpuhp_remove_multi_state(pmu->cpuhp_state);
683 ida_simple_remove(&ddr_ida, pmu->id);
690 struct ddr_pmu *pmu = platform_get_drvdata(pdev);
692 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
693 cpuhp_remove_multi_state(pmu->cpuhp_state);
694 irq_set_affinity_hint(pmu->irq, NULL);
696 perf_pmu_unregister(&pmu->pmu);
698 ida_simple_remove(&ddr_ida, pmu->id);
704 .name = "imx-ddr-pmu",
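All of the matches above revolve around one idiom: the driver embeds a generic `struct pmu` inside its private `struct ddr_pmu` (line 74), and `to_ddr_pmu()` (line 40) uses `container_of()` to recover that private state from the `struct pmu *` the perf core passes to every callback (see lines 267, 320, 368, and so on). Below is a minimal, userspace-compilable sketch of that pattern, not the driver itself; the fields other than the embedded `pmu`, the simplified `container_of` definition, and the `main()` scaffolding are illustrative assumptions.

```c
/*
 * Minimal userspace sketch of the container_of pattern behind
 * to_ddr_pmu(). Not the kernel driver: struct layouts here are
 * illustrative stand-ins, and this container_of is a simplified
 * version of the kernel macro (same shape, no type checking).
 */
#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's struct pmu; the perf core only sees this. */
struct pmu {
	const char *name;
};

/* Stand-in for the driver's private state wrapping the generic pmu. */
struct ddr_pmu {
	struct pmu pmu;   /* embedded generic PMU, as on line 74 */
	int cpu;          /* illustrative private field */
};

/* Recover the outer struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)

int main(void)
{
	struct ddr_pmu ddr = { .pmu = { .name = "imx8_ddr0" }, .cpu = 1 };
	struct pmu *core_view = &ddr.pmu;  /* what the perf core hands back */

	/* Every driver callback starts with this conversion. */
	struct ddr_pmu *priv = to_ddr_pmu(core_view);
	printf("%s runs on cpu %d\n", priv->pmu.name, priv->cpu);
	return 0;
}
```

Because the perf core only ever holds the embedded `struct pmu` pointer, every per-instance field referenced in the matches above (`pmu->base`, `pmu->events[]`, `pmu->cpu`, `pmu->irq`, ...) is reached through this one conversion.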