Lines Matching defs:smmu_pmu

122 struct smmu_pmu {
140 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
156 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
159 smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
160 writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
163 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
168 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
171 for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
172 smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
179 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
181 writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
182 writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
187 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
195 for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
196 writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
201 static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
204 if (smmu_pmu->counter_mask & BIT(32))
205 writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
207 writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
210 static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
214 if (smmu_pmu->counter_mask & BIT(32))
215 value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
217 value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
222 static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
224 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
227 static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
229 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
232 static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
234 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
237 static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
240 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
243 static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
246 writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
249 static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
251 writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
257 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
263 now = smmu_pmu_counter_get_value(smmu_pmu, idx);
268 delta &= smmu_pmu->counter_mask;
273 static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
279 if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
287 new = smmu_pmu_counter_get_value(smmu_pmu, idx);
295 new = smmu_pmu->counter_mask >> 1;
296 smmu_pmu_counter_set_value(smmu_pmu, idx, new);
305 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
309 smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
310 smmu_pmu_set_smr(smmu_pmu, idx, sid);
326 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
330 unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
338 cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
343 if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
349 if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
350 smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
357 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
361 unsigned int num_ctrs = smmu_pmu->num_counters;
363 idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
368 err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
372 set_bit(idx, smmu_pmu->used_counters);
398 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
399 struct device *dev = smmu_pmu->dev;
420 (!test_bit(event_id, smmu_pmu->supported_events))) {
430 if (++group_num_events > smmu_pmu->num_counters)
441 if (++group_num_events > smmu_pmu->num_counters)
451 event->cpu = smmu_pmu->on_cpu;
458 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
464 smmu_pmu_set_period(smmu_pmu, hwc);
466 smmu_pmu_counter_enable(smmu_pmu, idx);
471 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
478 smmu_pmu_counter_disable(smmu_pmu, idx);
488 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
490 idx = smmu_pmu_get_event_idx(smmu_pmu, event);
496 smmu_pmu->events[idx] = event;
499 smmu_pmu_interrupt_enable(smmu_pmu, idx);
513 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
517 smmu_pmu_interrupt_disable(smmu_pmu, idx);
518 smmu_pmu->events[idx] = NULL;
519 clear_bit(idx, smmu_pmu->used_counters);
535 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
537 return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
583 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
588 if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
604 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
606 return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
614 struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
616 if (!smmu_pmu->iidr)
667 struct smmu_pmu *smmu_pmu;
670 smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
671 if (cpu != smmu_pmu->on_cpu)
678 perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
679 smmu_pmu->on_cpu = target;
680 WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
687 struct smmu_pmu *smmu_pmu = data;
692 ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
696 writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
699 for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
700 struct perf_event *event = smmu_pmu->events[idx];
709 smmu_pmu_set_period(smmu_pmu, hwc);
726 struct smmu_pmu *pmu = dev_get_drvdata(dev);
737 static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
761 static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
775 static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
777 u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);
779 smmu_pmu_disable(&smmu_pmu->pmu);
783 smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
785 smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
787 smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
790 static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
794 model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
799 smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
802 smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
806 dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
809 static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
811 return of_device_is_compatible(smmu_pmu->dev->of_node,
815 static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
817 u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
819 if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
820 u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
821 u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
822 u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
823 u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
824 u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);
841 smmu_pmu->iidr = iidr;
846 struct smmu_pmu *smmu_pmu;
854 smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
855 if (!smmu_pmu)
858 smmu_pmu->dev = dev;
859 platform_set_drvdata(pdev, smmu_pmu);
861 smmu_pmu->pmu = (struct pmu) {
876 smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
877 if (IS_ERR(smmu_pmu->reg_base))
878 return PTR_ERR(smmu_pmu->reg_base);
880 cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
884 smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
885 if (IS_ERR(smmu_pmu->reloc_base))
886 return PTR_ERR(smmu_pmu->reloc_base);
888 smmu_pmu->reloc_base = smmu_pmu->reg_base;
893 smmu_pmu->irq = irq;
895 ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
896 ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
897 bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
900 smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;
902 smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
905 smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
907 smmu_pmu_reset(smmu_pmu);
909 err = smmu_pmu_setup_irq(smmu_pmu);
915 smmu_pmu_get_iidr(smmu_pmu);
925 smmu_pmu_get_acpi_options(smmu_pmu);
932 if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
933 smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
934 smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
938 smmu_pmu->on_cpu = raw_smp_processor_id();
939 WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
942 &smmu_pmu->node);
949 err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
957 &res_0->start, smmu_pmu->num_counters,
958 smmu_pmu->global_filter ? "Global(Counter0)" :
964 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
970 struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
972 perf_pmu_unregister(&smmu_pmu->pmu);
973 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
980 struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
982 smmu_pmu_disable(&smmu_pmu->pmu);
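Taken together, these matches show the driver's central pattern: struct smmu_pmu embeds a struct pmu, every perf callback recovers the containing structure through the to_smmu_pmu()/container_of() macro (kernel line 140 above), and the counter accessors choose 64-bit or 32-bit MMIO accesses depending on whether counter_mask has bit 32 set (lines 204-217). The sketch below is a standalone, user-space illustration of that pattern, not the driver code: the struct is cut down to the fields used here, the counter width is an assumed example value, and the writeq()/writel() accesses to reloc_base are replaced by stores into a stub array.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal container_of(), equivalent in effect to the kernel helper. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu {
        const char *name;
};

struct smmu_pmu {
        uint64_t counter_mask;          /* GENMASK_ULL(reg_size, 0) in the driver */
        uint64_t counters[4];           /* stub standing in for the reloc_base MMIO counters */
        struct pmu pmu;                 /* embedded member that to_smmu_pmu() walks back from */
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

/*
 * Mirrors the shape of smmu_pmu_counter_set_value(): bit 32 of counter_mask
 * selects the 64-bit (writeq) path, otherwise only the low 32 bits are
 * written (writel).  Here both paths just store into the stub array.
 */
static void counter_set_value(struct smmu_pmu *smmu_pmu, uint32_t idx, uint64_t value)
{
        if (smmu_pmu->counter_mask & (1ULL << 32))
                smmu_pmu->counters[idx] = value;                /* writeq() path */
        else
                smmu_pmu->counters[idx] = (uint32_t)value;      /* writel() path */
}

int main(void)
{
        struct smmu_pmu dev = {
                .counter_mask = (1ULL << 44) - 1,       /* assumed 44-bit counters for the example */
                .pmu = { .name = "smmuv3_pmcg" },       /* stand-in instance name */
        };
        struct pmu *p = &dev.pmu;       /* what a perf callback would be handed */

        /* Recover the containing smmu_pmu, as every callback in the listing does. */
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(p);

        counter_set_value(smmu_pmu, 0, 0x1ffffffffULL);
        printf("%s: counter0 = %#llx\n", smmu_pmu->pmu.name,
               (unsigned long long)smmu_pmu->counters[0]);
        return 0;
}

Embedding the generic struct pmu and recovering the driver structure with container_of() is the usual perf-driver pattern: it keeps per-instance state (counter_mask, reloc_base, used_counters, ...) reachable from every callback without any global lookup.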