Lines matching defs:iommu (Intel VT-d SVM driver, drivers/iommu/intel/svm.c)
8 #include <linux/intel-iommu.h>
31 int intel_svm_enable_prq(struct intel_iommu *iommu)
39 iommu->name);
42 iommu->prq = page_address(pages);
44 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
47 iommu->name);
50 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
51 iommu->prq = NULL;
54 iommu->pr_irq = irq;
56 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
59 iommu->prq_name, iommu);
62 iommu->name);
64 iommu->pr_irq = 0;
67 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
68 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
69 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
71 init_completion(&iommu->prq_complete);
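
The matches above are from the page request queue (PRQ) enable path. As a rough guide to how they fit together, here is a condensed sketch, not the verbatim function; it assumes the ~v5.12 struct intel_iommu layout and simplifies the error unwinding: allocate the queue, attach a threaded IRQ handler, then program the page request head/tail/address registers.

/* Condensed sketch of the PRQ enable sequence; names ending in _sketch are hypothetical. */
static int prq_enable_sketch(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	/* The queue itself is plain zeroed pages on the IOMMU's NUMA node. */
	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages)
		return -ENOMEM;
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id,
			       iommu->node, iommu);
	if (irq <= 0) {
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name),
		 "dmar%d-prq", iommu->seq_id);
	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto free_prq;
	}

	/* Head and tail start at zero; PQA carries the queue base and its size order. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG,
		    virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);
	return 0;

free_prq:
	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;
	return ret;
}
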
76 int intel_svm_finish_prq(struct intel_iommu *iommu)
78 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
79 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
80 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
82 if (iommu->pr_irq) {
83 free_irq(iommu->pr_irq, iommu);
84 dmar_free_hwirq(iommu->pr_irq);
85 iommu->pr_irq = 0;
88 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
89 iommu->prq = NULL;
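
The finish path mirrors the enable path: quiesce the hardware queue, release the IRQ, then free the queue pages. A minimal sketch, under the same assumptions as above:

/* Sketch of PRQ teardown; ordering matters: stop the hardware before freeing memory. */
static int prq_finish_sketch(struct intel_iommu *iommu)
{
	/* Point the hardware away from the queue first. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
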
94 static inline bool intel_svm_capable(struct intel_iommu *iommu)
96 return iommu->flags & VTD_FLAG_SVM_CAPABLE;
99 void intel_svm_check(struct intel_iommu *iommu)
101 if (!pasid_supported(iommu))
105 !cap_fl1gp_support(iommu->cap)) {
107 iommu->name);
112 !cap_5lp_support(iommu->cap)) {
114 iommu->name);
118 iommu->flags |= VTD_FLAG_SVM_CAPABLE;
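
intel_svm_check() only sets VTD_FLAG_SVM_CAPABLE when the hardware can mirror the CPU's page-table format. A sketch of that gate, assuming the elided first halves of those conditions are the usual CPU feature checks (1GB pages and 5-level paging):

/* Sketch: SVM is advertised only when PASIDs and compatible paging modes are supported. */
static void svm_check_sketch(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	/* CPU uses 1GB pages but the IOMMU first level cannot: no SVM. */
	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap))
		return;

	/* CPU uses 5-level paging but the IOMMU first level cannot: no SVM. */
	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap))
		return;

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
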
131 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
133 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
195 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
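
The two flush matches pair a PASID-granular IOTLB invalidation with a device IOTLB invalidation when ATS is in use, while the third match tears the PASID entry down. A fragment-level sketch of the flush pairing (field names such as sid, pfsid, qdep and ats_enabled follow the ~v5.12 driver and are assumptions here):

	struct device_domain_info *info = get_domain_info(sdev->dev);

	/* Invalidate first-level IOTLB entries for this PASID in the given range... */
	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
	/* ...and, if the device caches translations via ATS, its device IOTLB too. */
	if (info->ats_enabled)
		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
					 svm->pasid, sdev->qdep, address,
					 order_base_2(pages));
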
260 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
268 if (WARN_ON(!iommu) || !data)
348 sdev->iommu = iommu;
355 ret = intel_iommu_enable_pasid(iommu, sdev->dev);
367 spin_lock_irqsave(&iommu->lock, iflags);
368 ret = intel_pasid_setup_nested(iommu, dev,
372 spin_unlock_irqrestore(&iommu->lock, iflags);
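
The matches above are from the guest-PASID bind path: the second-level tables come from the attached host domain, the first level from the guest, and the PASID table update is done under iommu->lock. A fragment-level sketch (the exact intel_pasid_setup_nested() argument list is reconstructed from memory of the ~v5.12 driver and should be treated as an assumption):

	spin_lock_irqsave(&iommu->lock, iflags);
	/* First level: guest PGD and bind data; second level: the host dmar_domain. */
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vendor.vtd,
				       dmar_domain, data->addr_width);
	spin_unlock_irqrestore(&iommu->lock, iflags);
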
401 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
406 if (WARN_ON(!iommu))
419 intel_pasid_tear_down_entry(iommu, dev,
468 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
476 if (!iommu || dmar_disabled)
479 if (!intel_svm_capable(iommu))
491 if (!ecap_srs(iommu->ecap) || mm) {
533 sdev->iommu = iommu;
535 ret = intel_iommu_enable_pasid(iommu, dev);
592 spin_lock_irqsave(&iommu->lock, iflags);
593 ret = intel_pasid_setup_first_level(iommu, dev,
599 spin_unlock_irqrestore(&iommu->lock, iflags);
619 spin_lock_irqsave(&iommu->lock, iflags);
620 ret = intel_pasid_setup_first_level(iommu, dev,
626 spin_unlock_irqrestore(&iommu->lock, iflags);
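
For a native bind, the PASID entry is pointed at the mm's page tables in first-level mode, again under iommu->lock; a supervisor PASID has no mm and uses init_mm's PGD with the supervisor flag instead. A fragment-level sketch (flag names follow the ~v5.12 driver):

	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_first_level(iommu, dev,
			mm ? mm->pgd : init_mm.pgd,
			svm->pasid, FLPT_DEFAULT_DID,
			(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
			(cpu_feature_enabled(X86_FEATURE_LA57) ?
			 PASID_FLAG_FL5LP : 0));
	spin_unlock_irqrestore(&iommu->lock, iflags);
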
647 struct intel_iommu *iommu;
651 iommu = device_to_iommu(dev, NULL, NULL);
652 if (!iommu)
670 intel_pasid_tear_down_entry(iommu, dev,
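
Both unbind paths are the inverse: clear the PASID table entry, then (in the full driver) drain in-flight page requests and flush the caches for that PASID before it can be reused. A two-line sketch of that ordering:

	/* Stop translations for the PASID, then make sure no stale requests remain. */
	intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
	intel_svm_drain_prq(dev, svm->pasid);
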
770 struct intel_iommu *iommu;
784 iommu = info->iommu;
788 did = domain->iommu_did[iommu->seq_id];
796 reinit_completion(&iommu->prq_complete);
797 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
798 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
802 req = &iommu->prq[head / sizeof(*req)];
808 wait_for_completion(&iommu->prq_complete);
830 reinit_completion(&iommu->prq_complete);
831 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
832 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
833 wait_for_completion(&iommu->prq_complete);
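
The matches above are from the PRQ drain logic, which runs in two phases: first wait until the PRQ thread has consumed every request for this PASID already sitting between head and tail, then submit drain descriptors and retry while a pending-request overflow is flagged. A fragment-level sketch (struct page_req_dsc field names are assumptions from the ~v5.12 headers; desc holds the pre-built drain descriptors):

prq_retry:
	/* Phase 1: let the PRQ thread catch up with anything already queued for this PASID. */
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

		if (req->pasid_present && req->pasid == pasid) {
			/* Still pending for this PASID: wait for the thread, then rescan. */
			wait_for_completion(&iommu->prq_complete);
			goto prq_retry;
		}
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

qi_retry:
	/* Phase 2: drain the queue and in-flight responses via the invalidation queue. */
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		/* Overflow pending: the PRQ thread signals prq_complete once it clears it. */
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
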
895 struct intel_iommu *iommu = d;
901 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
903 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
904 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
915 req = &iommu->prq[head / sizeof(*req)];
921 iommu->name, ((unsigned long long *)req)[0],
945 iommu->name, req->pasid, ((unsigned long long *)req)[0],
976 * If prq is to be handled outside iommu driver via receiver of
1043 qi_submit_sync(iommu, &resp, 1, 0);
1049 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
1055 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
1057 iommu->name);
1058 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
1059 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
1061 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
1063 iommu->name);
1067 if (!completion_done(&iommu->prq_complete))
1068 complete(&iommu->prq_complete);
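
Those matches belong to the PRQ interrupt thread: acknowledge the pending-page-request status, walk the ring from head to tail handling each descriptor, write the new head back, clear a pending-request overflow once the ring is empty again, and wake any drain waiter. A sketch of the loop (handle_one_prq() is a hypothetical stand-in for the per-request fault handling and response):

static irqreturn_t prq_event_thread_sketch(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	u64 head, tail;

	/* Acknowledge "pending page request" so new requests re-raise the interrupt. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

		handle_one_prq(iommu, req);		/* hypothetical helper */
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/* Clear a pending-request overflow only once the ring is empty again. */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail)
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
	}

	/* Wake anyone in the drain path waiting for this pass to finish. */
	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_HANDLED;
}
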
1083 * TODO: Consolidate with generic iommu-sva bind after it is merged.
1133 struct intel_iommu *iommu;
1144 iommu = device_to_iommu(dev, &bus, &devfn);
1145 if (!iommu)
1218 qi_submit_sync(iommu, &desc, 1, 0);
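
The last matches are from the page response path used when page requests are handled outside the IOMMU driver: a page group response descriptor is rebuilt from the original request (PASID, group index, requester ID) and submitted through the invalidation queue. A fragment-level sketch (the QI_PGRP_* and QI_RESP_* macro names follow the ~v5.12 include/linux/intel-iommu.h and are best-effort here; pasid, sid and grpid are assumed in scope):

	struct qi_desc desc;

	/* Success response for the faulting PASID/group; sid is the requester ID. */
	desc.qw0 = QI_PGRP_PASID(pasid) | QI_PGRP_DID(sid) |
		   QI_PGRP_PASID_P(1) | QI_PGRP_RESP_CODE(QI_RESP_SUCCESS) |
		   QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(grpid) | QI_PGRP_LPIG(1);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
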