Lines matching refs: cci_pmu

Cross-reference listing of every line that references the cci_pmu symbol in the ARM CCI PMU driver (drivers/perf/arm-cci.c in the Linux kernel). The leading number on each line is its line number in that source file; multi-line statements are truncated at the matching line.

38 #define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)
74 struct cci_pmu;
89 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
90 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
91 void (*write_counters)(struct cci_pmu *, unsigned long *);
96 struct cci_pmu {
112 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
114 static struct cci_pmu *g_cci_pmu;
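
The to_cci_pmu() macro at line 112 is the standard container_of idiom: the perf core hands callbacks a struct pmu pointer, and the driver recovers its enclosing cci_pmu from the embedded member. A minimal userspace demonstration, with simplified stand-in types rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { const char *name; };	/* stand-in for the perf core type */
struct cci_pmu {
	int num_cntrs;
	struct pmu pmu;			/* embedded, as in the driver */
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))

int main(void)
{
	struct cci_pmu cci = { .num_cntrs = 4, .pmu = { .name = "CCI_400" } };
	struct pmu *p = &cci.pmu;	/* what perf core callbacks receive */

	/* Recover the enclosing cci_pmu from the embedded member. */
	printf("%s has %d counters\n", p->name, to_cci_pmu(p)->num_cntrs);
	return 0;
}
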
128 static void pmu_write_counters(struct cci_pmu *cci_pmu,
309 static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
323 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
331 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
362 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
363 ev_code <= cci_pmu->model->event_ranges[if_type].max)
369 static int probe_cci400_revision(struct cci_pmu *cci_pmu)
372 rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
381 static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
384 return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
388 static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
538 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
573 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
574 ev_code <= cci_pmu->model->event_ranges[if_type].max)
589 static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
625 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
626 ev_code <= cci_pmu->model->event_ranges[if_type].max)
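
All three validate_hw_event() implementations end in the same per-model range check (lines 362-363, 573-574, 625-626): the raw event->attr.config is split into a source-interface type and an event code, and the code must fall inside that interface's [min, max] window from the model's event_ranges table. A sketch under assumed field positions and ranges; the real encodings differ per CCI model:

#include <stdio.h>

#define EV_CODE(cfg)	((cfg) & 0x1f)		/* assumed field width */
#define IF_TYPE(cfg)	(((cfg) >> 5) & 0x7)	/* assumed field width */

struct event_range { unsigned long min, max; };

/* Illustrative ranges; each CCI model carries its own table. */
static const struct event_range event_ranges[] = {
	{ 0x00, 0x13 },		/* interface type 0 (e.g. slave)  */
	{ 0x14, 0x1a },		/* interface type 1 (e.g. master) */
};

static int validate_hw_event(unsigned long hw_event)
{
	unsigned long ev_code = EV_CODE(hw_event);
	unsigned long if_type = IF_TYPE(hw_event);

	if (if_type >= sizeof(event_ranges) / sizeof(event_ranges[0]))
		return -1;

	/* the check mirrored at lines 362, 573 and 625 */
	if (ev_code >= event_ranges[if_type].min &&
	    ev_code <= event_ranges[if_type].max)
		return 0;
	return -1;
}

int main(void)
{
	printf("%d\n", validate_hw_event(0x05));		/* valid   */
	printf("%d\n", validate_hw_event((1ul << 5) | 0x1f));	/* invalid */
	return 0;
}
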
639 static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
642 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
646 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
661 pmu_write_counters(cci_pmu, mask);
664 /* Should be called with cci_pmu->hw_events->pmu_lock held */
665 static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
670 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
671 writel(val, cci_pmu->ctrl_base + CCI_PMCR);
674 /* Should be called with cci_pmu->hw_events->pmu_lock held */
675 static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
677 cci_pmu_sync_counters(cci_pmu);
678 __cci_pmu_enable_nosync(cci_pmu);
681 /* Should be called with cci_pmu->hw_events->pmu_lock held */
682 static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
687 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
688 writel(val, cci_pmu->ctrl_base + CCI_PMCR);
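
Global start/stop (lines 665-688) is a read-modify-write of the CEN bit in the control-port PMCR; the _sync variant first calls cci_pmu_sync_counters() so saved counter values are written back before counting resumes. A trivial model with the MMIO register stubbed by a plain variable (the bit position is assumed):

#include <stdint.h>
#include <stdio.h>

#define CCI_PMCR_CEN	(1u << 0)	/* counter-enable bit (assumed position) */

static uint32_t fake_pmcr;		/* stands in for *(ctrl_base + CCI_PMCR) */

static void cci_pmu_enable_nosync(void)
{
	/* read-modify-write, as at lines 670-671 */
	fake_pmcr |= CCI_PMCR_CEN;
}

static void cci_pmu_disable(void)
{
	/* as at lines 687-688 */
	fake_pmcr &= ~CCI_PMCR_CEN;
}

int main(void)
{
	cci_pmu_enable_nosync();
	printf("PMCR = %#x\n", fake_pmcr);	/* 0x1 */
	cci_pmu_disable();
	printf("PMCR = %#x\n", fake_pmcr);	/* 0x0 */
	return 0;
}
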
709 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
711 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
714 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
716 return readl_relaxed(cci_pmu->base +
717 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
720 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
723 writel_relaxed(value, cci_pmu->base +
724 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
727 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
729 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
732 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
734 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
738 pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
740 return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
743 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
745 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
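
pmu_read_register()/pmu_write_register() (lines 714-724) reach each counter through a fixed-size per-counter window: base + CCI_PMU_CNTR_BASE(model, idx) + offset, with the event-select, count, control and overflow registers at fixed offsets inside the window. A sketch of that address computation; the window size and offsets below are illustrative, not taken from a TRM:

#include <stdint.h>
#include <stdio.h>

#define CNTR_SIZE		0x1000	/* per-counter window size (assumed) */
#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

/* Mirrors CCI_PMU_CNTR_BASE(model, idx) + offset addressing. */
static uintptr_t cntr_reg(uintptr_t base, int idx, unsigned int offset)
{
	return base + (uintptr_t)idx * CNTR_SIZE + offset;
}

int main(void)
{
	uintptr_t base = 0x2c090000;	/* made-up MMIO base for the demo */

	printf("cntr 2 EVT_SEL   @ %#lx\n",
	       (unsigned long)cntr_reg(base, 2, CCI_PMU_EVT_SEL));
	printf("cntr 2 CNTR_CTRL @ %#lx\n",
	       (unsigned long)cntr_reg(base, 2, CCI_PMU_CNTR_CTRL));
	return 0;
}
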
761 pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
765 for (i = 0; i < cci_pmu->num_cntrs; i++) {
766 if (pmu_counter_is_enabled(cci_pmu, i)) {
768 pmu_disable_counter(cci_pmu, i);
778 pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
782 for_each_set_bit(i, mask, cci_pmu->num_cntrs)
783 pmu_enable_counter(cci_pmu, i);
790 static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
792 return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
798 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
802 if (cci_pmu->model->get_event_idx)
803 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
806 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
816 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
819 !cci_pmu->model->validate_hw_event)
822 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
825 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
828 struct platform_device *pmu_device = cci_pmu->plat_device;
833 if (cci_pmu->nr_irqs < 1) {
845 for (i = 0; i < cci_pmu->nr_irqs; i++) {
846 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
847 "arm-cci-pmu", cci_pmu);
850 cci_pmu->irqs[i]);
854 set_bit(i, &cci_pmu->active_irqs);
860 static void pmu_free_irq(struct cci_pmu *cci_pmu)
864 for (i = 0; i < cci_pmu->nr_irqs; i++) {
865 if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
868 free_irq(cci_pmu->irqs[i], cci_pmu);
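
pmu_request_irq()/pmu_free_irq() (lines 825-868) request each of the PMU's possibly-shared interrupt lines and record successes in the active_irqs bitmask, so cleanup, including the error path in cci_pmu_get_hw() at line 1069, frees exactly what was taken. A stubbed model of that bookkeeping:

#include <stdio.h>

static int request_irq_stub(int irq) { printf("request irq %d\n", irq); return 0; }
static void free_irq_stub(int irq)   { printf("free irq %d\n", irq); }

static unsigned long active_irqs;	/* bit i set => irqs[i] was requested */

static int pmu_request_irqs(const int *irqs, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (request_irq_stub(irqs[i]))
			return -1;	/* caller unwinds via pmu_free_irqs() */
		active_irqs |= 1ul << i;
	}
	return 0;
}

static void pmu_free_irqs(const int *irqs, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (!(active_irqs & (1ul << i)))
			continue;	/* never requested: nothing to free */
		active_irqs &= ~(1ul << i);
		free_irq_stub(irqs[i]);
	}
}

int main(void)
{
	int irqs[] = { 37, 38 };

	pmu_request_irqs(irqs, 2);
	pmu_free_irqs(irqs, 2);
	return 0;
}
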
874 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
879 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
880 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
883 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
888 static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
890 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
893 static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
896 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
898 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
903 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
907 static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
909 if (cci_pmu->model->write_counters)
910 cci_pmu->model->write_counters(cci_pmu, mask);
912 __pmu_write_counters(cci_pmu, mask);
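
pmu_write_counters() restores each claimed counter from event->hw.prev_count (line 903); the matching read side accumulates wrapped deltas into the 64-bit perf count. A sketch of that standard delta computation, assuming 32-bit wrapping counters as the u32 CCI_PMU_CNTR accesses suggest:

#include <stdint.h>
#include <stdio.h>

struct hw_event {
	uint32_t prev_count;	/* last raw hardware value */
	uint64_t count;		/* accumulated 64-bit event count */
};

static void event_update(struct hw_event *ev, uint32_t new_raw)
{
	uint32_t delta = new_raw - ev->prev_count; /* wraps correctly mod 2^32 */

	ev->prev_count = new_raw;
	ev->count += delta;
}

int main(void)
{
	struct hw_event ev = { .prev_count = 0xfffffff0u, .count = 0 };

	event_update(&ev, 0x10);	/* counter wrapped: delta is 0x20 */
	printf("count = %llu\n", (unsigned long long)ev.count);
	return 0;
}
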
946 static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
951 bitmap_zero(saved_mask, cci_pmu->num_cntrs);
952 pmu_save_counters(cci_pmu, saved_mask);
958 __cci_pmu_enable_nosync(cci_pmu);
960 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
961 struct perf_event *event = cci_pmu->hw_events.events[i];
966 pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
967 pmu_enable_counter(cci_pmu, i);
968 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
969 pmu_disable_counter(cci_pmu, i);
970 pmu_set_event(cci_pmu, i, event->hw.config_base);
973 __cci_pmu_disable(cci_pmu);
975 pmu_restore_counters(cci_pmu, saved_mask);
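
cci5xx_pmu_write_counters() (lines 946-975) is the model-specific quirk behind the write_counters hook: on CCI-500/550 a counter only latches a written value while it is enabled, so the driver saves and disables all counters, enables the PMU, then per counter programs CCI5xx_INVALID_EVENT so nothing real is counted, enables the counter, writes the saved value, disables it again, and restores the real event, before disabling the PMU and re-enabling what was running. A small simulation of the per-counter sequence (the register behaviour is modelled, not emulated):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CCI5xx_INVALID_EVENT	0xffffffffu	/* placeholder value */

struct cntr { uint32_t evt_sel, value; bool enabled; };

static void write_one_counter(struct cntr *c, uint32_t saved, uint32_t real_event)
{
	c->evt_sel = CCI5xx_INVALID_EVENT; /* count nothing real while... */
	c->enabled = true;		   /* ...the counter is enabled */
	c->value = saved;		   /* write only sticks when enabled */
	c->enabled = false;
	c->evt_sel = real_event;	   /* restore the programmed event */
}

int main(void)
{
	struct cntr c = { 0 };

	write_one_counter(&c, 12345, 0x1b);
	printf("value=%u evt_sel=%#x enabled=%d\n",
	       (unsigned)c.value, (unsigned)c.evt_sel, c.enabled);
	return 0;
}
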
1026 struct cci_pmu *cci_pmu = dev;
1027 struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
1033 __cci_pmu_disable(cci_pmu);
1039 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
1046 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
1050 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
1059 __cci_pmu_enable_sync(cci_pmu);
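
The interrupt handler (lines 1026-1059) brackets its work with __cci_pmu_disable()/__cci_pmu_enable_sync(): with counting stopped it scans every claimed counter, skips those whose OVRFLW register shows no flag, writes CCI_PMU_OVRFLW_FLAG back to clear it (line 1050), and folds the overflow into the event count. The shape of that loop, with hardware stubbed:

#include <stdint.h>
#include <stdio.h>

#define NUM_CNTRS	4
#define OVRFLW_FLAG	1u

/* Stubbed per-counter overflow registers (write-to-clear modelled). */
static uint32_t ovrflw[NUM_CNTRS];

static void handle_irq(void)
{
	int idx;

	/* __cci_pmu_disable(): stop counting while flags are handled */
	for (idx = 0; idx < NUM_CNTRS; idx++) {
		if (!(ovrflw[idx] & OVRFLW_FLAG))
			continue;	/* this counter did not overflow */
		ovrflw[idx] = 0;	/* write the flag back to clear it */
		printf("counter %d overflowed, folding into event count\n", idx);
	}
	/* __cci_pmu_enable_sync(): rewrite saved values, then re-enable */
}

int main(void)
{
	ovrflw[2] = OVRFLW_FLAG;
	handle_irq();
	return 0;
}
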
1065 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
1067 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
1069 pmu_free_irq(cci_pmu);
1075 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
1077 pmu_free_irq(cci_pmu);
1082 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1083 atomic_t *active_events = &cci_pmu->active_events;
1084 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
1087 cci_pmu_put_hw(cci_pmu);
1094 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1095 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1096 bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
1103 __cci_pmu_enable_sync(cci_pmu);
1110 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1111 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1115 __cci_pmu_disable(cci_pmu);
1124 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
1126 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
1131 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1132 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1146 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1147 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1154 if (!pmu_fixed_hw_idx(cci_pmu, idx))
1155 pmu_set_event(cci_pmu, idx, hwc->config_base);
1158 pmu_enable_counter(cci_pmu, idx);
1165 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1172 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1173 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1181 pmu_disable_counter(cci_pmu, idx);
1188 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1189 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1213 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1214 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1225 static int validate_event(struct pmu *cci_pmu,
1237 if (event->pmu != cci_pmu)
1252 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1261 bitmap_zero(mask, cci_pmu->num_cntrs);
1315 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1316 atomic_t *active_events = &cci_pmu->active_events;
1337 event->cpu = cci_pmu->cpu;
1341 mutex_lock(&cci_pmu->reserve_mutex);
1343 err = cci_pmu_get_hw(cci_pmu);
1346 mutex_unlock(&cci_pmu->reserve_mutex);
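
event_init (lines 1315-1346) and event_destroy (lines 1082-1087) bracket shared-hardware ownership: the first active event takes the IRQs through cci_pmu_get_hw() under reserve_mutex, and the last one frees them in cci_pmu_put_hw(), with active_events as the refcount. A userspace sketch of the first-user-acquires/last-user-releases pattern; the kernel code uses atomics for the fast path, while this model keeps everything under the mutex (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;
static int active_events;

static void acquire_hw(void) { puts("request IRQs"); }	/* ~pmu_request_irq */
static void release_hw(void) { puts("free IRQs"); }	/* ~pmu_free_irq */

static void event_init(void)
{
	pthread_mutex_lock(&reserve_mutex);
	if (active_events++ == 0)	/* first event takes the hardware */
		acquire_hw();
	pthread_mutex_unlock(&reserve_mutex);
}

static void event_destroy(void)
{
	pthread_mutex_lock(&reserve_mutex);
	if (--active_events == 0)	/* last event releases it */
		release_hw();
	pthread_mutex_unlock(&reserve_mutex);
}

int main(void)
{
	event_init();
	event_init();		/* second event: IRQs already held */
	event_destroy();
	event_destroy();	/* last event: IRQs freed */
	return 0;
}
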
1362 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1364 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
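
The cpumask attribute (lines 1362-1364) exports the single CPU on which this uncore PMU's events run; userspace must open counters system-wide on that CPU. A hedged userspace sketch that reads the PMU's dynamic type and cpumask from sysfs and opens one raw counter; the PMU name "CCI_500" and the config value are placeholders to adapt to the target system:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long read_sysfs_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long type = read_sysfs_long("/sys/bus/event_source/devices/CCI_500/type");
	long cpu = read_sysfs_long("/sys/bus/event_source/devices/CCI_500/cpumask");
	struct perf_event_attr attr;
	uint64_t count;
	ssize_t n;
	int fd;

	if (type < 0 || cpu < 0) {
		fprintf(stderr, "PMU not present?\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = (uint32_t)type;
	attr.config = 0x1b;	/* illustrative raw event encoding */

	/* pid = -1, cpu = <cpumask cpu>: count system-wide on that CPU */
	fd = syscall(SYS_perf_event_open, &attr, -1, (int)cpu, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	n = read(fd, &count, sizeof(count));
	if (n != (ssize_t)sizeof(count))
		return 1;
	printf("count = %" PRIu64 "\n", count);
	return 0;
}
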
1396 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1398 const struct cci_pmu_model *model = cci_pmu->model;
1410 cci_pmu->pmu = (struct pmu) {
1412 .name = cci_pmu->model->name,
1426 cci_pmu->plat_device = pdev;
1427 num_cntrs = pmu_get_max_counters(cci_pmu);
1428 if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1432 num_cntrs, cci_pmu->model->num_hw_cntrs);
1433 num_cntrs = cci_pmu->model->num_hw_cntrs;
1435 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1437 return perf_pmu_register(&cci_pmu->pmu, name, -1);
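
cci_pmu_init() (lines 1396-1437) fills cci_pmu->pmu with the driver's callbacks, clamps the probed counter count to the model's maximum, and registers with perf_pmu_register(&cci_pmu->pmu, name, -1); the -1 asks the perf core to allocate a dynamic type id, which is what later appears in the sysfs type attribute. A toy model of registration with a dynamic id, not the kernel's implementation:

#include <stdio.h>

struct pmu {
	const char *name;
	int type;
	int (*event_init)(void);
};

static struct pmu *registry[8];

static int pmu_register(struct pmu *pmu, const char *name, int type)
{
	if (type == -1) {	/* -1: allocate the next free dynamic id */
		for (type = 0; type < 8; type++)
			if (!registry[type])
				break;
		if (type == 8)
			return -1;
	}
	pmu->name = name;
	pmu->type = type;
	registry[type] = pmu;
	return 0;
}

static int my_event_init(void) { return 0; }

int main(void)
{
	struct pmu cci = { .event_init = my_event_init };

	if (pmu_register(&cci, "CCI_500", -1))
		return 1;
	printf("%s registered as type %d\n", cci.name, cci.type);
	return 0;
}
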
1591 static struct cci_pmu *cci_pmu_alloc(struct device *dev)
1593 struct cci_pmu *cci_pmu;
1601 cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
1602 if (!cci_pmu)
1605 cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
1611 model = probe_cci_model(cci_pmu);
1618 cci_pmu->model = model;
1619 cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
1620 sizeof(*cci_pmu->irqs), GFP_KERNEL);
1621 if (!cci_pmu->irqs)
1623 cci_pmu->hw_events.events = devm_kcalloc(dev,
1625 sizeof(*cci_pmu->hw_events.events),
1627 if (!cci_pmu->hw_events.events)
1629 cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
1632 if (!cci_pmu->hw_events.used_mask)
1635 return cci_pmu;
1640 struct cci_pmu *cci_pmu;
1643 cci_pmu = cci_pmu_alloc(&pdev->dev);
1644 if (IS_ERR(cci_pmu))
1645 return PTR_ERR(cci_pmu);
1647 cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
1648 if (IS_ERR(cci_pmu->base))
1655 cci_pmu->nr_irqs = 0;
1656 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1661 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1664 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1671 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
1673 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1677 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1678 mutex_init(&cci_pmu->reserve_mutex);
1679 atomic_set(&cci_pmu->active_events, 0);
1681 cci_pmu->cpu = raw_smp_processor_id();
1682 g_cci_pmu = cci_pmu;
1687 ret = cci_pmu_init(cci_pmu, pdev);
1691 pr_info("ARM %s PMU driver probed", cci_pmu->model->name);