Lines Matching refs:cci_pmu
41 #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
77 struct cci_pmu;
92 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
93 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
94 void (*write_counters)(struct cci_pmu *, unsigned long *);
99 struct cci_pmu {
115 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
117 static struct cci_pmu *g_cci_pmu;
131 static void pmu_write_counters(struct cci_pmu *cci_pmu,
312 static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
326 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
334 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
365 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
366 ev_code <= cci_pmu->model->event_ranges[if_type].max)
372 static int probe_cci400_revision(struct cci_pmu *cci_pmu)
375 rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
384 static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
387 return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
391 static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
541 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
576 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
577 ev_code <= cci_pmu->model->event_ranges[if_type].max)
592 static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
628 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
629 ev_code <= cci_pmu->model->event_ranges[if_type].max)
642 static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
645 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
648 bitmap_zero(mask, cci_pmu->num_cntrs);
649 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
664 pmu_write_counters(cci_pmu, mask);
667 /* Should be called with cci_pmu->hw_events->pmu_lock held */
668 static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
673 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
674 writel(val, cci_pmu->ctrl_base + CCI_PMCR);
677 /* Should be called with cci_pmu->hw_events->pmu_lock held */
678 static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
680 cci_pmu_sync_counters(cci_pmu);
681 __cci_pmu_enable_nosync(cci_pmu);
684 /* Should be called with cci_pmu->hw_events->pmu_lock held */
685 static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
690 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
691 writel(val, cci_pmu->ctrl_base + CCI_PMCR);
712 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
714 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
717 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
719 return readl_relaxed(cci_pmu->base +
720 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
723 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
726 writel_relaxed(value, cci_pmu->base +
727 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
730 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
732 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
735 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
737 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
741 pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
743 return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
746 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
748 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
764 pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
768 for (i = 0; i < cci_pmu->num_cntrs; i++) {
769 if (pmu_counter_is_enabled(cci_pmu, i)) {
771 pmu_disable_counter(cci_pmu, i);
781 pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
785 for_each_set_bit(i, mask, cci_pmu->num_cntrs)
786 pmu_enable_counter(cci_pmu, i);
793 static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
795 return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
801 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
805 if (cci_pmu->model->get_event_idx)
806 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
809 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
819 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
822 !cci_pmu->model->validate_hw_event)
825 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
828 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
831 struct platform_device *pmu_device = cci_pmu->plat_device;
836 if (cci_pmu->nr_irqs < 1) {
848 for (i = 0; i < cci_pmu->nr_irqs; i++) {
849 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
850 "arm-cci-pmu", cci_pmu);
853 cci_pmu->irqs[i]);
857 set_bit(i, &cci_pmu->active_irqs);
863 static void pmu_free_irq(struct cci_pmu *cci_pmu)
867 for (i = 0; i < cci_pmu->nr_irqs; i++) {
868 if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
871 free_irq(cci_pmu->irqs[i], cci_pmu);
877 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
882 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
883 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
886 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
891 static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
893 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
896 static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
899 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
901 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
906 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
910 static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
912 if (cci_pmu->model->write_counters)
913 cci_pmu->model->write_counters(cci_pmu, mask);
915 __pmu_write_counters(cci_pmu, mask);
949 static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
954 bitmap_zero(saved_mask, cci_pmu->num_cntrs);
955 pmu_save_counters(cci_pmu, saved_mask);
961 __cci_pmu_enable_nosync(cci_pmu);
963 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
964 struct perf_event *event = cci_pmu->hw_events.events[i];
969 pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
970 pmu_enable_counter(cci_pmu, i);
971 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
972 pmu_disable_counter(cci_pmu, i);
973 pmu_set_event(cci_pmu, i, event->hw.config_base);
976 __cci_pmu_disable(cci_pmu);
978 pmu_restore_counters(cci_pmu, saved_mask);
1030 struct cci_pmu *cci_pmu = dev;
1031 struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
1037 __cci_pmu_disable(cci_pmu);
1043 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
1050 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
1054 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
1063 __cci_pmu_enable_sync(cci_pmu);
1069 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
1071 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
1073 pmu_free_irq(cci_pmu);
1079 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
1081 pmu_free_irq(cci_pmu);
1086 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1087 atomic_t *active_events = &cci_pmu->active_events;
1088 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
1091 cci_pmu_put_hw(cci_pmu);
1098 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1099 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1100 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
1107 __cci_pmu_enable_sync(cci_pmu);
1114 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1115 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1119 __cci_pmu_disable(cci_pmu);
1128 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
1130 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
1135 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1136 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1150 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1151 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1158 if (!pmu_fixed_hw_idx(cci_pmu, idx))
1159 pmu_set_event(cci_pmu, idx, hwc->config_base);
1162 pmu_enable_counter(cci_pmu, idx);
1169 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1176 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1177 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1185 pmu_disable_counter(cci_pmu, idx);
1192 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1193 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1217 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1218 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1229 static int validate_event(struct pmu *cci_pmu,
1241 if (event->pmu != cci_pmu)
1256 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1265 memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
1319 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1320 atomic_t *active_events = &cci_pmu->active_events;
1341 event->cpu = cci_pmu->cpu;
1345 mutex_lock(&cci_pmu->reserve_mutex);
1347 err = cci_pmu_get_hw(cci_pmu);
1350 mutex_unlock(&cci_pmu->reserve_mutex);
1366 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1368 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
1400 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1402 const struct cci_pmu_model *model = cci_pmu->model;
1414 cci_pmu->pmu = (struct pmu) {
1416 .name = cci_pmu->model->name,
1430 cci_pmu->plat_device = pdev;
1431 num_cntrs = pmu_get_max_counters(cci_pmu);
1432 if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1436 num_cntrs, cci_pmu->model->num_hw_cntrs);
1437 num_cntrs = cci_pmu->model->num_hw_cntrs;
1439 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1441 return perf_pmu_register(&cci_pmu->pmu, name, -1);
1595 static struct cci_pmu *cci_pmu_alloc(struct device *dev)
1597 struct cci_pmu *cci_pmu;
1605 cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
1606 if (!cci_pmu)
1609 cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
1615 model = probe_cci_model(cci_pmu);
1622 cci_pmu->model = model;
1623 cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
1624 sizeof(*cci_pmu->irqs), GFP_KERNEL);
1625 if (!cci_pmu->irqs)
1627 cci_pmu->hw_events.events = devm_kcalloc(dev,
1629 sizeof(*cci_pmu->hw_events.events),
1631 if (!cci_pmu->hw_events.events)
1633 cci_pmu->hw_events.used_mask = devm_kcalloc(dev,
1635 sizeof(*cci_pmu->hw_events.used_mask),
1637 if (!cci_pmu->hw_events.used_mask)
1640 return cci_pmu;
1645 struct cci_pmu *cci_pmu;
1648 cci_pmu = cci_pmu_alloc(&pdev->dev);
1649 if (IS_ERR(cci_pmu))
1650 return PTR_ERR(cci_pmu);
1652 cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
1653 if (IS_ERR(cci_pmu->base))
1660 cci_pmu->nr_irqs = 0;
1661 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1666 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1669 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1676 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
1678 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1682 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1683 mutex_init(&cci_pmu->reserve_mutex);
1684 atomic_set(&cci_pmu->active_events, 0);
1686 cci_pmu->cpu = raw_smp_processor_id();
1687 g_cci_pmu = cci_pmu;
1692 ret = cci_pmu_init(cci_pmu, pdev);
1696 pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
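
Note on the recurring to_cci_pmu() matches: the macro at line 115 above, and the many to_cci_pmu(event->pmu) / to_cci_pmu(pmu) references that follow it, are all the kernel's container_of() idiom: given a pointer to the embedded struct pmu member, recover the enclosing struct cci_pmu. The following is a minimal, self-contained userspace sketch of that same pattern, not the driver's code; the names demo_pmu and to_demo_pmu are hypothetical stand-ins, and container_of() is redefined locally since userspace has no such macro.

    #include <stdio.h>
    #include <stddef.h>

    /* Userspace stand-in for the kernel's container_of(): recover the
     * address of the outer structure from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical stand-ins for struct pmu and struct cci_pmu. */
    struct pmu { const char *name; };
    struct demo_pmu {
            int num_cntrs;
            struct pmu pmu;         /* embedded member, as in struct cci_pmu */
    };

    #define to_demo_pmu(c) (container_of(c, struct demo_pmu, pmu))

    int main(void)
    {
            struct demo_pmu d = { .num_cntrs = 4, .pmu = { .name = "demo" } };
            struct pmu *p = &d.pmu;         /* what a perf callback would receive */

            /* Recover the outer structure, as to_cci_pmu(event->pmu) does. */
            printf("%s has %d counters\n", p->name, to_demo_pmu(p)->num_cntrs);
            return 0;
    }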