Lines Matching defs:smmu_pmu
(All hits below are from the Arm SMMUv3 PMCG perf driver, drivers/perf/arm_smmuv3_pmu.c; the leading number on each line is its position in that file.)

102 struct smmu_pmu {
119 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
135 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
138 smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
139 writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
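
The enable path above recovers the driver state from the embedded struct pmu via to_smmu_pmu() (line 119), which is just container_of(). A minimal userspace model of that pattern, with stand-in types rather than the kernel definitions:

    /* container_of() recovers the outer structure from a pointer to an
     * embedded member by subtracting the member's offset. */
    #include <stddef.h>
    #include <stdio.h>

    struct pmu { int dummy; };

    struct smmu_pmu {
        unsigned long options;
        struct pmu pmu;               /* embedded member, as in the driver */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

    int main(void)
    {
        struct smmu_pmu outer = { .options = 0x1 };
        struct smmu_pmu *back = to_smmu_pmu(&outer.pmu);

        printf("options = 0x%lx\n", back->options);   /* 0x1 */
        return 0;
    }
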
142 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
147 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
150 for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
151 smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
158 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
160 writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
161 writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
166 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
174 for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
175 writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
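
Lines 166-175 are the HiSilicon hip08/09 "harden disable" quirk: since the global disable can fail to stop counting on those parts, the driver writes an invalid event type (0xffff) into every in-use EVTYPER before clearing SMMU_PMCG_CR. A small userspace model of that loop, with MMIO mocked as an array:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_COUNTERS 8

    static uint32_t evtyper[NUM_COUNTERS];   /* mock SMMU_PMCG_EVTYPER(idx) */

    static void disable_quirk(unsigned long used_counters)
    {
        for (unsigned int idx = 0; idx < NUM_COUNTERS; idx++)
            if (used_counters & (1UL << idx))
                evtyper[idx] = 0xffff;       /* invalid event: counting stops */
        /* ...then write 0 to SMMU_PMCG_CR and SMMU_PMCG_IRQ_CTRL, as above. */
    }

    int main(void)
    {
        disable_quirk(0x5);                  /* counters 0 and 2 in use */
        printf("EVTYPER[0]=0x%x EVTYPER[2]=0x%x\n",
               (unsigned)evtyper[0], (unsigned)evtyper[2]);
        return 0;
    }
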
180 static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
183 if (smmu_pmu->counter_mask & BIT(32))
184 writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
186 writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
189 static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
193 if (smmu_pmu->counter_mask & BIT(32))
194 value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
196 value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
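
Lines 180-196 pick the MMIO access width from the counter mask: if BIT(32) is set the counters are 64-bit and sit at an 8-byte stride, otherwise they are 32-bit at a 4-byte stride. A compilable model with the counter page mocked as a byte buffer (the zero register base is an assumption of the sketch):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define BIT(n)          (1ULL << (n))
    #define EVCNTR(idx, sz) ((sz) * (idx))   /* assumed: counter file at 0x0 */

    static uint8_t reloc[256];               /* mock of the counter page */

    static uint64_t counter_get_value(uint64_t counter_mask, uint32_t idx)
    {
        uint64_t v64;
        uint32_t v32;

        if (counter_mask & BIT(32)) {        /* 64-bit counters: readq */
            memcpy(&v64, reloc + EVCNTR(idx, 8), 8);
            return v64;
        }
        memcpy(&v32, reloc + EVCNTR(idx, 4), 4);   /* 32-bit: readl */
        return v32;
    }

    int main(void)
    {
        uint64_t val = 0x123456789abcdef0ULL;

        memcpy(reloc + EVCNTR(1, 8), &val, 8);
        printf("0x%llx\n",
               (unsigned long long)counter_get_value(~0ULL, 1));
        return 0;
    }
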
201 static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
203 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
206 static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
208 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
211 static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
213 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
216 static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
219 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
222 static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
225 writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
228 static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
230 writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
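
The one-line helpers at lines 201-230 lean on the usual write-one-to-set / write-one-to-clear register pairs (CNTENSET0/CNTENCLR0, INTENSET0/INTENCLR0): writing BIT(idx) touches only that counter's bit, so no read-modify-write or locking is needed. A sketch of those register semantics:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1ULL << (n))

    static uint64_t cnten;   /* state behind CNTENSET0 / CNTENCLR0 */

    static void write_cntenset0(uint64_t v) { cnten |= v; }    /* W1S */
    static void write_cntenclr0(uint64_t v) { cnten &= ~v; }   /* W1C */

    int main(void)
    {
        write_cntenset0(BIT(3));
        write_cntenset0(BIT(5));
        write_cntenclr0(BIT(3));
        printf("cnten = 0x%llx\n", (unsigned long long)cnten);  /* 0x20 */
        return 0;
    }
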
236 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
242 now = smmu_pmu_counter_get_value(smmu_pmu, idx);
247 delta &= smmu_pmu->counter_mask;
252 static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
258 if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
266 new = smmu_pmu_counter_get_value(smmu_pmu, idx);
274 new = smmu_pmu->counter_mask >> 1;
275 smmu_pmu_counter_set_value(smmu_pmu, idx, new);
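
Lines 236-275 carry the core accounting arithmetic: event_update() computes delta = (now - prev) & counter_mask, which stays correct across one wrap of an N-bit counter, and set_period() re-arms a writable counter at half range (counter_mask >> 1) so the overflow interrupt fires with headroom left. A compilable model of both calculations:

    #include <stdint.h>
    #include <stdio.h>

    /* Modular subtraction: correct across one wrap of an N-bit counter. */
    static uint64_t event_delta(uint64_t prev, uint64_t now, uint64_t mask)
    {
        return (now - prev) & mask;
    }

    int main(void)
    {
        uint64_t mask32 = 0xffffffffULL;          /* 32-bit counter */

        /* Counter ran from 0xfffffff0, wrapped, and now reads 0x10. */
        printf("delta = %llu\n", (unsigned long long)
               event_delta(0xfffffff0ULL, 0x10ULL, mask32));    /* 32 */

        /* Half-range start value used when re-arming the counter. */
        printf("start = 0x%llx\n", (unsigned long long)(mask32 >> 1));
        return 0;
    }
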
284 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
288 smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
289 smmu_pmu_set_smr(smmu_pmu, idx, sid);
305 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
309 unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
317 cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
322 if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
328 if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
329 smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
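
Lines 305-329 handle PMCGs that implement only a global stream-ID filter: a new event can share the PMU only if its filter settings match what is already programmed, checked against the first used counter's event. A simplified model of that compatibility check; the two-field filter struct is an assumption, where the kernel compares the filter span and stream ID taken from the event attributes:

    #include <stdio.h>

    struct filter { unsigned int span, sid; };   /* assumed simplification */

    /* A new event may join only if its filter matches the programmed one. */
    static int check_global_filter(const struct filter *cur,
                                   const struct filter *new_f)
    {
        return (cur->span == new_f->span && cur->sid == new_f->sid)
                ? 0 : -1;                        /* kernel: -EAGAIN */
    }

    int main(void)
    {
        struct filter cur = { 1, 0x10 }, new_f = { 1, 0x20 };

        printf("%d\n", check_global_filter(&cur, &new_f));   /* -1 */
        return 0;
    }
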
336 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
340 unsigned int num_ctrs = smmu_pmu->num_counters;
342 idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
347 err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
351 set_bit(idx, smmu_pmu->used_counters);
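
Counter allocation at lines 336-351 is a bitmap scan: find the first free counter, fail if all are busy, then mark the chosen one as used. A userspace model of the same logic (the kernel returns -EAGAIN where this returns -1):

    #include <stdio.h>

    /* find_first_zero_bit() + set_bit(), collapsed into one helper. */
    static int get_event_idx(unsigned long *used, unsigned int num_ctrs)
    {
        for (unsigned int idx = 0; idx < num_ctrs; idx++) {
            if (!(*used & (1UL << idx))) {
                *used |= 1UL << idx;             /* claim the counter */
                return (int)idx;
            }
        }
        return -1;                               /* kernel: -EAGAIN */
    }

    int main(void)
    {
        unsigned long used = 0xb;                /* counters 0, 1, 3 busy */
        int idx = get_event_idx(&used, 8);

        printf("idx = %d, used = 0x%lx\n", idx, used);   /* 2, 0xf */
        return 0;
    }
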
377 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
378 struct device *dev = smmu_pmu->dev;
399 (!test_bit(event_id, smmu_pmu->supported_events))) {
409 if (++group_num_events > smmu_pmu->num_counters)
420 if (++group_num_events > smmu_pmu->num_counters)
430 event->cpu = smmu_pmu->on_cpu;
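
The event_init checks around lines 399-430 reject event IDs outside supported_events and reject groups that could never be scheduled because they need more counters than the PMCG implements; events that take no hardware counter are skipped when counting. A reduced model of the group-size check:

    #include <stdio.h>

    struct ev { int takes_counter; };

    /* Count only events that occupy a PMCG counter; reject oversize groups. */
    static int validate_group(const struct ev *evs, int n,
                              unsigned int num_counters)
    {
        unsigned int group_num_events = 0;

        for (int i = 0; i < n; i++) {
            if (!evs[i].takes_counter)
                continue;                        /* e.g. software events */
            if (++group_num_events > num_counters)
                return -1;                       /* kernel: -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        struct ev grp[3] = { {1}, {0}, {1} };

        printf("%d\n", validate_group(grp, 3, 2));   /* fits: 0 */
        return 0;
    }
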
437 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
443 smmu_pmu_set_period(smmu_pmu, hwc);
445 smmu_pmu_counter_enable(smmu_pmu, idx);
450 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
457 smmu_pmu_counter_disable(smmu_pmu, idx);
467 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
469 idx = smmu_pmu_get_event_idx(smmu_pmu, event);
475 smmu_pmu->events[idx] = event;
478 smmu_pmu_interrupt_enable(smmu_pmu, idx);
492 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
496 smmu_pmu_interrupt_disable(smmu_pmu, idx);
497 smmu_pmu->events[idx] = NULL;
498 clear_bit(idx, smmu_pmu->used_counters);
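
Lines 467-498 show the add()/del() pairing: add() obtains a counter index, records the event, and unmasks its overflow interrupt; del() reverses each step and releases the counter bit. A compilable model of that bookkeeping (in the kernel the used bit is actually set inside smmu_pmu_get_event_idx()):

    #include <stdio.h>

    #define MAX_CTRS 8

    static void *events[MAX_CTRS];
    static unsigned long used, irq_en;

    static void ev_add(void *event, int idx)
    {
        events[idx] = event;          /* smmu_pmu->events[idx] = event */
        used |= 1UL << idx;           /* claimed via get_event_idx() */
        irq_en |= 1UL << idx;         /* smmu_pmu_interrupt_enable() */
    }

    static void ev_del(int idx)
    {
        irq_en &= ~(1UL << idx);      /* smmu_pmu_interrupt_disable() */
        events[idx] = NULL;
        used &= ~(1UL << idx);        /* clear_bit(idx, used_counters) */
    }

    int main(void)
    {
        int e;

        ev_add(&e, 2);
        ev_del(2);
        printf("used=0x%lx irq_en=0x%lx\n", used, irq_en);   /* both 0 */
        return 0;
    }
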
514 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
516 return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
571 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
576 if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
620 struct smmu_pmu *smmu_pmu;
623 smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
624 if (cpu != smmu_pmu->on_cpu)
631 perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
632 smmu_pmu->on_cpu = target;
633 WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));
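
The hotplug callback at lines 620-633 keeps the PMU context on a live CPU: if the outgoing CPU is the one hosting the context, the driver picks another online CPU, migrates the perf context there, and re-points the IRQ affinity. A rough model with the cpumask as a plain bitmap (cpumask_any_but() approximated by a linear scan):

    #include <stdio.h>

    /* If the dying CPU hosts the context, move it to any other online CPU. */
    static int migrate_on_offline(unsigned long online, int on_cpu, int dying)
    {
        if (dying != on_cpu)
            return on_cpu;                 /* nothing to do */
        online &= ~(1UL << dying);
        for (int cpu = 0; cpu < 64; cpu++) /* ~cpumask_any_but() */
            if (online & (1UL << cpu))
                return cpu;                /* migrate context + IRQ here */
        return -1;                         /* no online CPU left */
    }

    int main(void)
    {
        printf("target = %d\n", migrate_on_offline(0xd, 0, 0));   /* 2 */
        return 0;
    }
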
640 struct smmu_pmu *smmu_pmu = data;
644 ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
648 writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
650 for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
651 struct perf_event *event = smmu_pmu->events[idx];
660 smmu_pmu_set_period(smmu_pmu, hwc);
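
The overflow handler at lines 640-660 snapshots SMMU_PMCG_OVSSET0, acknowledges it by writing the same bits to the write-one-to-clear OVSCLR0, then folds in and re-arms every overflowed counter. A compilable model of that flow:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ovs = 0x24;              /* mock OVSSET0: ctrs 2 and 5 */

    static int overflow_handler(unsigned int num_counters)
    {
        uint64_t ovsr = ovs;                 /* readq(reloc + OVSSET0) */

        if (!ovsr)
            return 0;                        /* IRQ_NONE: not our interrupt */
        ovs &= ~ovsr;                        /* writeq(ovsr, OVSCLR0): W1C ack */

        for (unsigned int idx = 0; idx < num_counters; idx++)
            if (ovsr & (1ULL << idx))
                printf("ctr %u: update count, re-arm period\n", idx);
        return 1;                            /* IRQ_HANDLED */
    }

    int main(void)
    {
        return !overflow_handler(8);
    }
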
677 struct smmu_pmu *pmu = dev_get_drvdata(dev);
688 static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
715 static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
729 static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
731 u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);
733 smmu_pmu_disable(&smmu_pmu->pmu);
737 smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
739 smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
741 smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
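
smmu_pmu_reset() at lines 729-741 builds a mask covering exactly the implemented counters and writes it to the CLR registers so every counter enable, interrupt enable, and overflow flag starts clear. The mask arithmetic, shown with a userspace GENMASK_ULL:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

    int main(void)
    {
        unsigned int num_counters = 6;
        uint64_t present = GENMASK_ULL(num_counters - 1, 0);

        /* Written to CNTENCLR0/INTENCLR0/OVSCLR0 to clear all state. */
        printf("counter_present_mask = 0x%llx\n",
               (unsigned long long)present);         /* 0x3f */
        return 0;
    }
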
744 static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
748 model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
753 smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
756 smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
760 dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
765 struct smmu_pmu *smmu_pmu;
773 smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
774 if (!smmu_pmu)
777 smmu_pmu->dev = dev;
778 platform_set_drvdata(pdev, smmu_pmu);
780 smmu_pmu->pmu = (struct pmu) {
795 smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
796 if (IS_ERR(smmu_pmu->reg_base))
797 return PTR_ERR(smmu_pmu->reg_base);
799 cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
803 smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
804 if (IS_ERR(smmu_pmu->reloc_base))
805 return PTR_ERR(smmu_pmu->reloc_base);
807 smmu_pmu->reloc_base = smmu_pmu->reg_base;
812 smmu_pmu->irq = irq;
814 ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
815 ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
816 bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
819 smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;
821 smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
824 smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
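
The probe-time CFGR decode at lines 799-824 derives everything from one register: NCTR encodes the counter count minus one, and SIZE the counter width minus one, so counter_mask = GENMASK_ULL(reg_size, 0) ends up with reg_size + 1 bits set. A compilable model (the field positions, NCTR in bits [5:0] and SIZE in bits [13:8], follow the SMMUv3 PMCG layout and should be checked against the driver header):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

    #define CFGR_NCTR_MASK  0x3fU      /* bits [5:0]: counters - 1 (assumed) */
    #define CFGR_SIZE_SHIFT 8
    #define CFGR_SIZE_MASK  0x3fU      /* bits [13:8]: width - 1 (assumed) */

    int main(void)
    {
        uint32_t cfgr = (31u << CFGR_SIZE_SHIFT) | 7u;   /* 8 ctrs, 32 bit */
        unsigned int num_counters = (cfgr & CFGR_NCTR_MASK) + 1;
        unsigned int reg_size = (cfgr >> CFGR_SIZE_SHIFT) & CFGR_SIZE_MASK;
        uint64_t counter_mask = GENMASK_ULL(reg_size, 0);

        printf("%u counters, counter_mask = 0x%llx\n",
               num_counters, (unsigned long long)counter_mask);
        return 0;
    }
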
826 smmu_pmu_reset(smmu_pmu);
828 err = smmu_pmu_setup_irq(smmu_pmu);
841 smmu_pmu_get_acpi_options(smmu_pmu);
848 if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
849 smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
850 smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
854 smmu_pmu->on_cpu = raw_smp_processor_id();
855 WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
856 cpumask_of(smmu_pmu->on_cpu)));
859 &smmu_pmu->node);
866 err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
874 &res_0->start, smmu_pmu->num_counters,
875 smmu_pmu->global_filter ? "Global(Counter0)" :
881 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
883 irq_set_affinity_hint(smmu_pmu->irq, NULL);
889 struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
891 perf_pmu_unregister(&smmu_pmu->pmu);
892 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
893 irq_set_affinity_hint(smmu_pmu->irq, NULL);
900 struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
902 smmu_pmu_disable(&smmu_pmu->pmu);