Lines matching refs:iommu_pmu

62 static inline struct iommu_pmu *dev_to_iommu_pmu(struct device *dev)
68 return container_of(dev_get_drvdata(dev), struct iommu_pmu, pmu);
83 struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
85 if (!iommu_pmu) \
87 return (iommu_pmu->filter & _filter) ? attr->mode : 0; \
120 if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) { \
121 dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
130 if (iommu_pmu->filter & _filter) { \
131 dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
157 struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
159 if (!iommu_pmu) \
161 return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0; \
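
The two is_visible patterns above (file lines 83-87 and 157-161) hide sysfs attributes whenever the hardware does not advertise the corresponding filter or event capability bit. A minimal user-space model of that gating, with hypothetical capability bits standing in for the real filter and evcap masks, not the kernel code itself:

#include <stdio.h>
#include <stdint.h>

#define FILTER_REQUESTER_ID	(1ULL << 0)	/* hypothetical filter-capability bit */
#define FILTER_DOMAIN		(1ULL << 1)	/* hypothetical filter-capability bit */
#define ATTR_MODE		0444		/* stand-in for attr->mode */

/* Show the attribute only if the PMU advertises the required capability. */
static unsigned int attr_visible(uint64_t pmu_caps, uint64_t required)
{
	return (pmu_caps & required) ? ATTR_MODE : 0;
}

int main(void)
{
	uint64_t caps = FILTER_REQUESTER_ID;

	printf("requester_id mode: %o\n", attr_visible(caps, FILTER_REQUESTER_ID)); /* 444: visible */
	printf("domain mode:       %o\n", attr_visible(caps, FILTER_DOMAIN));       /* 0: hidden */
	return 0;
}
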
234 iommu_event_base(struct iommu_pmu *iommu_pmu, int idx)
236 return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride;
240 iommu_config_base(struct iommu_pmu *iommu_pmu, int idx)
242 return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET;
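
iommu_event_base() and iommu_config_base() above compute per-counter MMIO addresses from a base register pointer, the counter index, and either the hardware-enumerated counter stride or the fixed configuration-register offset. A short model of that address arithmetic; the constant and example values below are illustrative, not taken from the hardware specification:

#include <stdio.h>
#include <stdint.h>

#define CFG_OFFSET 0x100u	/* illustrative stand-in for IOMMU_PMU_CFG_OFFSET */

/* Counter registers are laid out back to back with an enumerated stride. */
static uintptr_t event_base(uintptr_t cntr_reg, unsigned int idx, unsigned int cntr_stride)
{
	return cntr_reg + (uintptr_t)idx * cntr_stride;
}

/* Configuration registers use a fixed per-counter offset. */
static uintptr_t config_base(uintptr_t cfg_reg, unsigned int idx)
{
	return cfg_reg + (uintptr_t)idx * CFG_OFFSET;
}

int main(void)
{
	printf("event[3]  at %#lx\n", (unsigned long)event_base(0x1000, 3, 0x40));
	printf("config[3] at %#lx\n", (unsigned long)config_base(0x2000, 3));
	return 0;
}
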
245 static inline struct iommu_pmu *iommu_event_to_pmu(struct perf_event *event)
247 return container_of(event->pmu, struct iommu_pmu, pmu);
259 static inline bool is_iommu_pmu_event(struct iommu_pmu *iommu_pmu,
262 return event->pmu == &iommu_pmu->pmu;
267 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
270 if (event_group >= iommu_pmu->num_eg)
278 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
287 if (!is_iommu_pmu_event(iommu_pmu, sibling) ||
291 if (++nr > iommu_pmu->num_cntr)
322 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
325 int shift = 64 - iommu_pmu->cntr_width;
329 new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
345 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
346 struct intel_iommu *iommu = iommu_pmu->iommu;
362 count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
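
The update path above reads the raw counter with dmar_readq() and keeps shift = 64 - cntr_width so that deltas can be computed modulo the real counter width. A user-space model of the wrap-safe delta computation implied by that shift; the previous-count bookkeeping and race handling the kernel needs are omitted here:

#include <stdio.h>
#include <stdint.h>

/*
 * Number of events that elapsed between two raw reads of a counter that is
 * only cntr_width bits wide. Shifting both values up to bit 63 and back lets
 * plain 64-bit unsigned arithmetic absorb the wraparound.
 */
static uint64_t counter_delta(uint64_t prev, uint64_t now, unsigned int cntr_width)
{
	unsigned int shift = 64 - cntr_width;

	return ((now << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	/* A 36-bit counter that wrapped from near its top back to a small value. */
	printf("delta = %llu\n",
	       (unsigned long long)counter_delta(0xFFFFFFFF0ULL, 0x10ULL, 36)); /* 32 */
	return 0;
}
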
382 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
383 struct intel_iommu *iommu = iommu_pmu->iommu;
396 iommu_pmu_validate_per_cntr_event(struct iommu_pmu *iommu_pmu,
402 if (!(iommu_pmu->cntr_evcap[idx][event_group] & select))
408 static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
418 for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) {
419 if (test_and_set_bit(idx, iommu_pmu->used_mask))
422 if (!iommu_pmu_validate_per_cntr_event(iommu_pmu, idx, event))
424 clear_bit(idx, iommu_pmu->used_mask);
429 iommu_pmu->event_list[idx] = event;
433 dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config);
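
iommu_pmu_assign_event() above walks the counters from the highest index down, tentatively claims a free one with test_and_set_bit(), and releases it again with clear_bit() if the per-counter capabilities (cntr_evcap) do not cover the requested event. A simplified single-threaded user-space model of that claim-and-validate loop, with a plain bitmask standing in for the atomic used_mask bitmap and made-up capability values:

#include <stdio.h>
#include <stdint.h>

#define NUM_CNTR 8

/* Per-counter capability mask for one event group (hypothetical values). */
static const uint32_t cntr_evcap[NUM_CNTR] = {
	0x1, 0x1, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
};

/*
 * Claim the highest-numbered free counter that supports the requested event
 * select. Returns the counter index, or -1 if none is available.
 */
static int assign_counter(uint64_t *used_mask, uint32_t event_select)
{
	for (int idx = NUM_CNTR - 1; idx >= 0; idx--) {
		if (*used_mask & (1ULL << idx))
			continue;			/* already in use */
		*used_mask |= 1ULL << idx;		/* tentatively claim it */
		if (cntr_evcap[idx] & event_select)
			return idx;
		*used_mask &= ~(1ULL << idx);		/* cannot count this event, release */
	}
	return -1;
}

int main(void)
{
	uint64_t used = 0;

	printf("first assignment:  %d\n", assign_counter(&used, 0x2)); /* 7 */
	printf("second assignment: %d\n", assign_counter(&used, 0x2)); /* 6 */
	printf("unsupported event: %d\n", assign_counter(&used, 0x4)); /* -1 */
	return 0;
}
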
456 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
460 ret = iommu_pmu_assign_event(iommu_pmu, event);
474 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
485 iommu_pmu->event_list[idx] = NULL;
487 clear_bit(idx, iommu_pmu->used_mask);
494 struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
495 struct intel_iommu *iommu = iommu_pmu->iommu;
502 struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
503 struct intel_iommu *iommu = iommu_pmu->iommu;
508 static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu)
518 while ((status = dmar_readq(iommu_pmu->overflow))) {
519 for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) {
524 event = iommu_pmu->event_list[i];
532 dmar_writeq(iommu_pmu->overflow, status);
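
The overflow handler above rereads the overflow status register in a loop, iterates its set bits with for_each_set_bit() to update the affected events, and then writes the status value back to acknowledge those bits before checking again. A user-space model of that read/handle/acknowledge loop with a fake write-1-to-clear status register; the per-event update work is reduced to a printf:

#include <stdio.h>
#include <stdint.h>

#define NUM_CNTR 8

/* Stand-in for the memory-mapped overflow status register. */
static uint64_t overflow_reg;

static uint64_t read_overflow(void)         { return overflow_reg; }
static void     ack_overflow(uint64_t bits) { overflow_reg &= ~bits; } /* write-1-to-clear */

static void handle_counter_overflow(int idx)
{
	printf("counter %d overflowed, updating its event\n", idx);
}

static void counter_overflow(void)
{
	uint64_t status;

	/* Keep going until no counter reports an overflow any more. */
	while ((status = read_overflow())) {
		for (int i = 0; i < NUM_CNTR; i++) {
			if (status & (1ULL << i))
				handle_counter_overflow(i);
		}
		/* Acknowledge exactly the bits that were just handled. */
		ack_overflow(status);
	}
}

int main(void)
{
	overflow_reg = (1ULL << 1) | (1ULL << 5);
	counter_overflow();
	return 0;
}
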
553 struct iommu_pmu *iommu_pmu = iommu->pmu;
555 iommu_pmu->pmu.name = iommu->name;
556 iommu_pmu->pmu.task_ctx_nr = perf_invalid_context;
557 iommu_pmu->pmu.event_init = iommu_pmu_event_init;
558 iommu_pmu->pmu.pmu_enable = iommu_pmu_enable;
559 iommu_pmu->pmu.pmu_disable = iommu_pmu_disable;
560 iommu_pmu->pmu.add = iommu_pmu_add;
561 iommu_pmu->pmu.del = iommu_pmu_del;
562 iommu_pmu->pmu.start = iommu_pmu_start;
563 iommu_pmu->pmu.stop = iommu_pmu_stop;
564 iommu_pmu->pmu.read = iommu_pmu_event_update;
565 iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
566 iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
567 iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
568 iommu_pmu->pmu.module = THIS_MODULE;
570 return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
583 struct iommu_pmu *iommu_pmu;
612 iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL);
613 if (!iommu_pmu)
616 iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
617 if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) {
619 iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX);
620 iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX;
623 iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
624 iommu_pmu->filter = pcap_filters_mask(perfcap);
625 iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
626 iommu_pmu->num_eg = pcap_num_event_group(perfcap);
628 iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
629 if (!iommu_pmu->evcap) {
635 for (i = 0; i < iommu_pmu->num_eg; i++) {
640 iommu_pmu->evcap[i] = pecap_es(pcap);
643 iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
644 if (!iommu_pmu->cntr_evcap) {
648 for (i = 0; i < iommu_pmu->num_cntr; i++) {
649 iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
650 if (!iommu_pmu->cntr_evcap[i]) {
658 for (j = 0; j < iommu_pmu->num_eg; j++)
659 iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
662 iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
663 iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
664 iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);
671 for (i = 0; i < iommu_pmu->num_cntr; i++) {
672 cap = dmar_readl(iommu_pmu->cfg_reg +
683 if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
685 iommu_pmu->num_cntr = i;
687 iommu_pmu->num_cntr);
691 for (j = 0; j < iommu_pmu->num_eg; j++)
692 iommu_pmu->cntr_evcap[i][j] = 0;
696 cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
699 iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
704 iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
708 iommu_pmu->iommu = iommu;
709 iommu->pmu = iommu_pmu;
714 for (i = 0; i < iommu_pmu->num_cntr; i++)
715 kfree(iommu_pmu->cntr_evcap[i]);
716 kfree(iommu_pmu->cntr_evcap);
718 kfree(iommu_pmu->evcap);
720 kfree(iommu_pmu);
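
The allocation path above (file lines 628-659) first builds a per-PMU evcap array of u64 event capabilities, then a per-counter table of u32 capabilities that is defaulted from it, unwinding everything already allocated when any step fails. A user-space model of that nested allocation and error unwinding, using calloc()/free() in place of the kernel allocators:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct pmu_caps {
	unsigned int num_cntr;
	unsigned int num_eg;
	uint64_t *evcap;	/* one entry per event group */
	uint32_t **cntr_evcap;	/* per counter, one entry per event group */
};

static int alloc_caps(struct pmu_caps *c, unsigned int num_cntr, unsigned int num_eg)
{
	c->num_cntr = num_cntr;
	c->num_eg = num_eg;

	c->evcap = calloc(num_eg, sizeof(*c->evcap));
	if (!c->evcap)
		return -1;

	c->cntr_evcap = calloc(num_cntr, sizeof(*c->cntr_evcap));
	if (!c->cntr_evcap)
		goto free_evcap;

	for (unsigned int i = 0; i < num_cntr; i++) {
		c->cntr_evcap[i] = calloc(num_eg, sizeof(**c->cntr_evcap));
		if (!c->cntr_evcap[i])
			goto free_cntr_evcap;
		/* Default: every counter inherits the PMU-wide capabilities. */
		for (unsigned int j = 0; j < num_eg; j++)
			c->cntr_evcap[i][j] = (uint32_t)c->evcap[j];
	}
	return 0;

free_cntr_evcap:
	for (unsigned int i = 0; i < num_cntr; i++)
		free(c->cntr_evcap[i]);	/* free(NULL) is a no-op */
	free(c->cntr_evcap);
free_evcap:
	free(c->evcap);
	return -1;
}

int main(void)
{
	struct pmu_caps caps;

	if (alloc_caps(&caps, 8, 4))
		return 1;
	printf("allocated %u counters x %u event groups\n", caps.num_cntr, caps.num_eg);
	return 0;
}
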
727 struct iommu_pmu *iommu_pmu = iommu->pmu;
729 if (!iommu_pmu)
732 if (iommu_pmu->evcap) {
735 for (i = 0; i < iommu_pmu->num_cntr; i++)
736 kfree(iommu_pmu->cntr_evcap[i]);
737 kfree(iommu_pmu->cntr_evcap);
739 kfree(iommu_pmu->evcap);
740 kfree(iommu_pmu);
746 struct iommu_pmu *iommu_pmu = iommu->pmu;
753 snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id);
757 IRQF_ONESHOT, iommu_pmu->irq_name, iommu);
778 struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
784 iommu_pmu->cpu = cpu;
791 struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
796 * for the first iommu_pmu. Migrate the other iommu_pmu to the
799 if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
800 perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
801 iommu_pmu->cpu = target;
815 perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
816 iommu_pmu->cpu = target;
824 static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
838 ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
849 static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
851 cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
861 struct iommu_pmu *iommu_pmu = iommu->pmu;
863 if (!iommu_pmu)
869 if (iommu_pmu_cpuhp_setup(iommu_pmu))
879 iommu_pmu_cpuhp_free(iommu_pmu);
881 perf_pmu_unregister(&iommu_pmu->pmu);
889 struct iommu_pmu *iommu_pmu = iommu->pmu;
891 if (!iommu_pmu)
895 iommu_pmu_cpuhp_free(iommu_pmu);
896 perf_pmu_unregister(&iommu_pmu->pmu);