Lines Matching defs:dmc620_pmu (Arm DMC-620 memory controller PMU driver)

71  * dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
86 struct dmc620_pmu {
103 #define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
238 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(dev_get_drvdata(dev));
241 cpumask_of(dmc620_pmu->irq->cpu));
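The struct at line 86 embeds a struct pmu, and the to_dmc620_pmu() macro at line 103 recovers the enclosing driver instance with container_of(); the cpumask attribute handler around lines 238-241 uses it to report which CPU services the PMU's interrupt. A minimal sketch of that pattern, with an illustrative subset of the driver's fields (the real structures carry more state):

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>

/* Illustrative subset of the per-interrupt and per-instance state. */
struct dmc620_pmu_irq {
	unsigned int cpu;		/* CPU that services this interrupt */
};

struct dmc620_pmu {
	struct pmu pmu;			/* embedded core perf object */
	struct dmc620_pmu_irq *irq;	/* shared-interrupt bookkeeping */
};

/* Recover the driver instance from a pointer to the embedded struct pmu. */
#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))

static ssize_t dmc620_pmu_cpumask_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf,
				       cpumask_of(dmc620_pmu->irq->cpu));
}
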
264 u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
267 return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
271 void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
274 writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
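The helpers at lines 264-274 wrap readl()/writel() with a per-counter offset so callers address counter registers by index. A sketch of the same shape; the COUNTERS_BASE and stride values below are placeholders, not the DMC-620's documented layout:

#include <linux/io.h>
#include <linux/perf_event.h>

/* Placeholder layout: each counter owns a small block of registers. */
#define DMC620_PMU_COUNTERS_BASE	0x10		/* assumed */
#define DMC620_PMU_COUNTERn_OFFSET(n)	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))

struct dmc620_pmu {
	struct pmu pmu;
	void __iomem *base;		/* mapped PMU register window */
};

static u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
				unsigned int idx, unsigned int reg)
{
	return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}

static void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
				  unsigned int idx, unsigned int reg, u32 val)
{
	writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}
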
295 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
307 if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
318 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
320 return dmc620_pmu_creg_read(dmc620_pmu,
341 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
344 dmc620_pmu_creg_write(dmc620_pmu,
350 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
354 dmc620_pmu_creg_write(dmc620_pmu,
360 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
362 dmc620_pmu_creg_write(dmc620_pmu,
369 struct dmc620_pmu *dmc620_pmu;
373 list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
384 event = dmc620_pmu->events[idx];
390 status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
391 status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
396 event = dmc620_pmu->events[idx];
404 writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
408 writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
412 event = dmc620_pmu->events[idx];
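Lines 369-412 are from the overflow interrupt handler. Several DMC-620 instances can share one interrupt line, so the handler walks an RCU-protected list of instances hanging off the shared struct dmc620_pmu_irq and inspects each instance's overflow status in both clock domains. A simplified sketch of that iteration (the per-event update loop is elided, and the register offsets and CLKDIV2 counter count are assumed values):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/rculist.h>

/* Register names as in the listing; the numeric values are assumptions. */
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2	0x08
#define DMC620_PMU_OVERFLOW_STATUS_CLK		0x0c
#define DMC620_PMU_CLKDIV2_MAX_COUNTERS		8

struct dmc620_pmu_irq {
	struct list_head pmus_node;	/* instances sharing this interrupt */
};

struct dmc620_pmu {
	struct pmu pmu;
	void __iomem *base;
	struct list_head pmus_node;	/* link on dmc620_pmu_irq::pmus_node */
};

static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
{
	struct dmc620_pmu_irq *irq = data;
	struct dmc620_pmu *dmc620_pmu;
	irqreturn_t ret = IRQ_NONE;

	rcu_read_lock();
	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
		unsigned long status;

		/* Merge overflow flags from both clock domains into one mask. */
		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
			   DMC620_PMU_CLKDIV2_MAX_COUNTERS);
		if (!status)
			continue;

		/* ... update the events whose counters overflowed ... */

		/* Acknowledge the overflows in both domains. */
		writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
		writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
		ret = IRQ_HANDLED;
	}
	rcu_read_unlock();

	return ret;
}
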
471 static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
482 dmc620_pmu->irq = irq;
484 list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
490 static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
492 struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
495 list_del_rcu(&dmc620_pmu->pmus_node);
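Lines 471-495 show how an instance joins and leaves that shared-interrupt list: dmc620_pmu_get_irq() links it in with list_add_rcu() and dmc620_pmu_put_irq() unlinks it with list_del_rcu(), with mutations serialised by the dmc620_pmu_node_lock described at line 71. A reduced sketch of just the list manipulation, using hypothetical helper names and the illustrative structs from the handler sketch above (extended with an irq back-pointer); refcounting and interrupt request/free are omitted:

#include <linux/mutex.h>
#include <linux/rculist.h>

/* Serialises updates to every instance's pmus_node list (cf. line 71). */
static DEFINE_MUTEX(dmc620_pmu_node_lock);

static void dmc620_pmu_link_instance(struct dmc620_pmu *dmc620_pmu,
				     struct dmc620_pmu_irq *irq)
{
	dmc620_pmu->irq = irq;
	mutex_lock(&dmc620_pmu_node_lock);
	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
	mutex_unlock(&dmc620_pmu_node_lock);
}

static void dmc620_pmu_unlink_instance(struct dmc620_pmu *dmc620_pmu)
{
	mutex_lock(&dmc620_pmu_node_lock);
	list_del_rcu(&dmc620_pmu->pmus_node);
	mutex_unlock(&dmc620_pmu_node_lock);
	/* RCU readers (the interrupt handler) are flushed before the
	 * instance is freed; see the comment at line 733. */
}
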
514 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
527 dev_dbg(dmc620_pmu->pmu.dev,
541 event->cpu = dmc620_pmu->irq->cpu;
587 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
598 dmc620_pmu->events[idx] = event;
602 dmc620_pmu_creg_write(dmc620_pmu,
604 dmc620_pmu_creg_write(dmc620_pmu,
608 dmc620_pmu_creg_write(dmc620_pmu,
610 dmc620_pmu_creg_write(dmc620_pmu,
622 struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
627 dmc620_pmu->events[idx] = NULL;
628 clear_bit(idx, dmc620_pmu->used_mask);
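Lines 295-307 together with 587-628 outline the counter lifecycle on the event add/del path: a free index is claimed atomically from used_mask with test_and_set_bit(), the event is parked in events[idx] while it is scheduled in, and removal clears both again. A condensed sketch of that allocate/release pairing; DMC620_PMU_MAX_COUNTERS and the struct fields are illustrative, and the real index allocator also distinguishes the clk and clkdiv2 counter ranges:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <linux/types.h>

#define DMC620_PMU_MAX_COUNTERS		10	/* assumed total counter count */

struct dmc620_pmu {
	struct pmu pmu;
	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
};

/* Claim the first free hardware counter, or -EAGAIN if all are busy. */
static int dmc620_get_event_idx(struct dmc620_pmu *dmc620_pmu)
{
	int idx;

	for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
		if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
			return idx;
	}
	return -EAGAIN;
}

/* Release a counter once its event has been removed. */
static void dmc620_put_event_idx(struct dmc620_pmu *dmc620_pmu, int idx)
{
	dmc620_pmu->events[idx] = NULL;
	clear_bit(idx, dmc620_pmu->used_mask);
}
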
636 struct dmc620_pmu *dmc620_pmu;
649 list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
650 perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
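Lines 636-650 come from the CPU-hotplug teardown callback: when the CPU currently servicing the shared interrupt goes offline, the driver picks a new target and calls perf_pmu_migrate_context() for every PMU instance attached to that interrupt. A minimal sketch of the pattern; the struct layout is illustrative, and the IRQ-affinity update and list locking done by the real callback are omitted:

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/perf_event.h>

struct dmc620_pmu_irq {
	struct hlist_node node;		/* cpuhp multi-instance node */
	struct list_head pmus_node;	/* PMU instances on this interrupt */
	unsigned int cpu;		/* CPU currently handling the IRQ */
};

struct dmc620_pmu {
	struct pmu pmu;
	struct list_head pmus_node;
};

static int dmc620_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct dmc620_pmu_irq *irq;
	struct dmc620_pmu *dmc620_pmu;
	unsigned int target;

	irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
	if (cpu != irq->cpu)
		return 0;	/* another CPU owns this interrupt */

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;	/* no online CPU left to migrate to */

	/* Move every perf context bound to this interrupt's old CPU. */
	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);

	irq->cpu = target;
	return 0;
}
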
661 struct dmc620_pmu *dmc620_pmu;
667 dmc620_pmu = devm_kzalloc(&pdev->dev,
668 sizeof(struct dmc620_pmu), GFP_KERNEL);
669 if (!dmc620_pmu)
672 platform_set_drvdata(pdev, dmc620_pmu);
674 dmc620_pmu->pmu = (struct pmu) {
687 dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
688 if (IS_ERR(dmc620_pmu->base))
689 return PTR_ERR(dmc620_pmu->base);
693 dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
694 writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
695 writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
701 ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
715 ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
722 dmc620_pmu_put_irq(dmc620_pmu);
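Lines 661-722 trace the probe path: allocate the instance with devm_kzalloc(), publish it with platform_set_drvdata(), map the register window, quiesce the overflow status, attach to the (possibly shared) interrupt, then register with perf and unwind the IRQ attachment on failure. A skeletal sketch of that ordering, reusing the illustrative definitions from the earlier sketches and the driver's own dmc620_pmu_get_irq()/put_irq() helpers from lines 471-495; the struct pmu callback setup at line 674, the counter-control reset loop at line 693, and the per-instance PMU naming are omitted:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int dmc620_pmu_device_probe(struct platform_device *pdev)
{
	struct dmc620_pmu *dmc620_pmu;
	struct resource *res;
	int irq_num, ret;

	dmc620_pmu = devm_kzalloc(&pdev->dev, sizeof(*dmc620_pmu), GFP_KERNEL);
	if (!dmc620_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, dmc620_pmu);
	/* ... fill in dmc620_pmu->pmu with the perf callbacks (line 674) ... */

	dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dmc620_pmu->base))
		return PTR_ERR(dmc620_pmu->base);

	/* Start from a clean slate: no stale overflow flags. */
	writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
	writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);

	irq_num = platform_get_irq(pdev, 0);
	if (irq_num < 0)
		return irq_num;

	ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
	if (ret)
		return ret;

	/* A real instance needs a unique name; a constant is used here only
	 * to keep the sketch short. */
	ret = perf_pmu_register(&dmc620_pmu->pmu, "dmc620_pmu", -1);
	if (ret)
		dmc620_pmu_put_irq(dmc620_pmu);

	return ret;
}
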
729 struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
731 dmc620_pmu_put_irq(dmc620_pmu);
733 /* perf will synchronise RCU before devres can free dmc620_pmu */
734 perf_pmu_unregister(&dmc620_pmu->pmu);
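
The remove path at lines 729-734 detaches the instance from the shared-interrupt list first and only then unregisters the PMU: as the in-source comment notes, perf_pmu_unregister() synchronises RCU, so the interrupt handler can no longer be traversing this instance's pmus_node link by the time devres frees the devm-allocated structure. A matching sketch of that ordering (return type as in older platform drivers):

#include <linux/perf_event.h>
#include <linux/platform_device.h>

static int dmc620_pmu_device_remove(struct platform_device *pdev)
{
	struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);

	/* Drop off the shared-interrupt list before unregistering. */
	dmc620_pmu_put_irq(dmc620_pmu);

	/* perf will synchronise RCU before devres can free dmc620_pmu. */
	perf_pmu_unregister(&dmc620_pmu->pmu);

	return 0;
}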