Lines Matching refs:iommu
22 #include "iommu.h"
25 #include "../iommu-sva.h"
64 int intel_svm_enable_prq(struct intel_iommu *iommu)
73 iommu->name);
76 iommu->prq = page_address(pages);
78 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
81 iommu->name);
85 iommu->pr_irq = irq;
87 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
88 "dmar%d-iopfq", iommu->seq_id);
89 iopfq = iopf_queue_alloc(iommu->iopfq_name);
91 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
95 iommu->iopf_queue = iopfq;
97 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
100 iommu->prq_name, iommu);
103 iommu->name);
106 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
107 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
108 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
110 init_completion(&iommu->prq_complete);
115 iopf_queue_free(iommu->iopf_queue);
116 iommu->iopf_queue = NULL;
119 iommu->pr_irq = 0;
121 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
122 iommu->prq = NULL;
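
The matches above come from the page request queue (PRQ) setup path. A condensed sketch of the flow they belong to; the error-unwind labels, return codes, and the prq_event_thread handler wiring are reconstructions from context, not verbatim source:

    int intel_svm_enable_prq(struct intel_iommu *iommu)
    {
        struct iopf_queue *iopfq;
        struct page *pages;
        int irq, ret;

        /* PRQ pages allocated on the IOMMU's NUMA node. */
        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages)
            return -ENOMEM;
        iommu->prq = page_address(pages);

        /* Dedicated page-request interrupt for this DMAR unit. */
        irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id,
                               iommu->node, iommu);
        if (irq <= 0) {
            ret = -EINVAL;
            goto free_prq;
        }
        iommu->pr_irq = irq;

        /* Per-unit I/O page fault queue feeding the common iopf layer. */
        snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
                 "dmar%d-iopfq", iommu->seq_id);
        iopfq = iopf_queue_alloc(iommu->iopfq_name);
        if (!iopfq) {
            ret = -ENOMEM;
            goto free_hwirq;
        }
        iommu->iopf_queue = iopfq;

        snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq",
                 iommu->seq_id);
        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret)
            goto free_iopfq;

        /* Program the hardware: clear head/tail, set base address | order. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG,
                    virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);
        return 0;

    free_iopfq:
        iopf_queue_free(iommu->iopf_queue);
        iommu->iopf_queue = NULL;
    free_hwirq:
        dmar_free_hwirq(irq);
        iommu->pr_irq = 0;
    free_prq:
        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
        return ret;
    }
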
127 int intel_svm_finish_prq(struct intel_iommu *iommu)
129 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
130 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
131 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
133 if (iommu->pr_irq) {
134 free_irq(iommu->pr_irq, iommu);
135 dmar_free_hwirq(iommu->pr_irq);
136 iommu->pr_irq = 0;
139 if (iommu->iopf_queue) {
140 iopf_queue_free(iommu->iopf_queue);
141 iommu->iopf_queue = NULL;
144 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
145 iommu->prq = NULL;
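
These matches are the teardown counterpart. A sketch of the ordering, reconstructed almost entirely from the matched lines (only the return value is assumed): hardware registers are cleared first, then the interrupt, then the iopf queue, and finally the queue pages.

    int intel_svm_finish_prq(struct intel_iommu *iommu)
    {
        /* Detach the hardware from the queue before freeing anything. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
            free_irq(iommu->pr_irq, iommu);
            dmar_free_hwirq(iommu->pr_irq);
            iommu->pr_irq = 0;
        }

        if (iommu->iopf_queue) {
            iopf_queue_free(iommu->iopf_queue);
            iommu->iopf_queue = NULL;
        }

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
    }
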
150 void intel_svm_check(struct intel_iommu *iommu)
152 if (!pasid_supported(iommu))
156 !cap_fl1gp_support(iommu->cap)) {
158 iommu->name);
163 !cap_fl5lp_support(iommu->cap)) {
165 iommu->name);
169 iommu->flags |= VTD_FLAG_SVM_CAPABLE;
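
intel_svm_check() gates the SVM capability on paging compatibility between CPU and IOMMU. A sketch; the CPU-feature checks paired with each capability test (GB pages and 5-level paging) are recalled from context rather than shown in the matches:

    void intel_svm_check(struct intel_iommu *iommu)
    {
        if (!pasid_supported(iommu))
            return;

        /* CPU may use 1GB mappings; the IOMMU must be able to walk them. */
        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible 1GB page capability\n",
                   iommu->name);
            return;
        }

        /* Same idea for 5-level paging (LA57). */
        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_fl5lp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible paging mode\n",
                   iommu->name);
            return;
        }

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
    }
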
182 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
184 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
228 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
230 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
275 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
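
The qi_flush_* matches come from the per-device invalidation helpers used when the bound mm changes or is torn down: a PASID-scoped IOTLB flush, followed by a device-TLB flush when ATS is enabled; the intel_pasid_tear_down_entry() match is the release path that clears the PASID table entry before those flushes. A minimal sketch of the flush pattern (the helper name, the device_domain_info lookup, and the ATS condition are assumptions):

    static void svm_flush_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
                                    unsigned long address, unsigned long pages, int ih)
    {
        struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);

        /* Invalidate the IOMMU's PASID-tagged IOTLB entries for the range. */
        qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);

        /* With ATS the endpoint caches translations too; flush its dev-TLB. */
        if (info->ats_enabled)
            qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                     svm->pasid, sdev->qdep, address,
                                     order_base_2(pages));
    }
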
318 static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
359 sdev->iommu = iommu;
371 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
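
intel_svm_bind_mm() attaches a process address space to a device: it records the iommu on the per-device sdev and then programs the PASID table entry to use the mm's page table for first-level translation. An excerpt-style sketch of the core call (the sflags handling and FLPT_DEFAULT_DID are recalled from context, not shown in the matches):

    /* Inside intel_svm_bind_mm(), after the sdev bookkeeping: */
    sdev->iommu = iommu;

    /* First-level translation walks the CPU page table (mm->pgd);
     * request a 5-level walk when the CPU runs with LA57. */
    sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
    ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
                                        FLPT_DEFAULT_DID, sflags);
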
395 struct intel_iommu *iommu;
399 iommu = device_to_iommu(dev, NULL, NULL);
400 if (!iommu)
483 struct intel_iommu *iommu;
497 iommu = info->iommu;
501 did = domain_id_iommu(domain, iommu);
509 reinit_completion(&iommu->prq_complete);
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
515 req = &iommu->prq[head / sizeof(*req)];
521 wait_for_completion(&iommu->prq_complete);
545 reinit_completion(&iommu->prq_complete);
546 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
547 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
548 wait_for_completion(&iommu->prq_complete);
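
This block (reinit_completion / dmar_readq / qi_submit_sync with QI_OPT_WAIT_DRAIN) is the PASID PRQ drain path: first wait until the software ring holds no outstanding requests for the PASID, then follow the VT-d drain procedure with a fenced wait descriptor plus PASID-scoped IOTLB and device-TLB invalidations, retrying while the hardware reports a pending-request overflow. A compressed excerpt of that control flow (local declarations and the descriptor field encodings are omitted; the labels and the iopf flush are reconstructions):

    /* Phase 1: let the prq thread catch up on everything queued for this PASID. */
    prq_retry:
        reinit_completion(&iommu->prq_complete);
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
            struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

            if (req->pasid_present && req->pasid == pasid) {
                /* The handler thread signals prq_complete as it drains. */
                wait_for_completion(&iommu->prq_complete);
                goto prq_retry;
            }
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
        iopf_queue_flush_dev(dev);

    /* Phase 2: VT-d drain procedure: fenced invalidation wait + PASID IOTLB
     * flush + device-TLB flush, resubmitted while Pending Request Overflow
     * is still set. */
    qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            wait_for_completion(&iommu->prq_complete);
            goto qi_retry;
        }
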
569 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
602 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
613 static void handle_bad_prq_event(struct intel_iommu *iommu,
619 iommu->name, ((unsigned long long *)req)[0],
649 qi_submit_sync(iommu, &desc, 1, 0);
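
handle_bad_prq_event() logs the malformed descriptor and, when it is the last request in a group, sends an automatic page-group response so the device is not left waiting. A sketch; the QI_PGRP_* field macros are recalled from the driver headers and may differ across kernel versions:

    static void handle_bad_prq_event(struct intel_iommu *iommu,
                                     struct page_req_dsc *req, int result)
    {
        struct qi_desc desc;

        pr_err("%s: Invalid page request: %08llx %08llx\n",
               iommu->name, ((unsigned long long *)req)[0],
               ((unsigned long long *)req)[1]);

        /* Only "last page in group" requests require a response. */
        if (!req->lpig)
            return;

        desc.qw0 = QI_PGRP_PASID(req->pasid) |
                   QI_PGRP_DID(req->rid) |
                   QI_PGRP_PASID_P(req->pasid_present) |
                   QI_PGRP_RESP_CODE(result) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(req->prg_index) | QI_PGRP_LPIG(req->lpig);
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(iommu, &desc, 1, 0);
    }
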
654 struct intel_iommu *iommu = d;
664 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
666 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
667 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
670 req = &iommu->prq[head / sizeof(*req)];
675 iommu->name);
677 handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
683 iommu->name);
689 iommu->name);
695 iommu->name);
703 pdev = pci_get_domain_bus_and_slot(iommu->segment,
707 * If prq is to be handled outside iommu driver via receiver of
713 if (intel_svm_prq_report(iommu, &pdev->dev, req))
714 handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
716 trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
718 iommu->prq_seq_number++);
724 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
730 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
732 iommu->name);
733 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
734 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
736 iopf_queue_discard_partial(iommu->iopf_queue);
737 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
739 iommu->name);
743 if (!completion_done(&iommu->prq_complete))
744 complete(&iommu->prq_complete);
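
The matches above trace the PRQ interrupt thread. Its shape, compressed to the steps visible here (the descriptor sanity checks, the iopf hand-off arguments, and the error paths are reconstructions): acknowledge the pending-report bit, walk head to tail reporting each request, move the head past what was consumed, clear a pending-request overflow if one occurred, and wake any drain waiter.

    static irqreturn_t prq_event_thread(int irq, void *d)
    {
        struct intel_iommu *iommu = d;
        u64 head, tail;

        /* Ack Pending Page Request so new requests re-raise the interrupt. */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
            struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];
            struct pci_dev *pdev;

            /* prq_descriptor_is_sane() is a hypothetical stand-in for the
             * inline validity checks on the request descriptor. */
            if (!prq_descriptor_is_sane(req)) {
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                goto next;
            }

            pdev = pci_get_domain_bus_and_slot(iommu->segment,
                                               PCI_BUS_NUM(req->rid),
                                               req->rid & 0xff);
            /* Hand the fault to the iopf layer; failed reports get an
             * invalid response so the device does not stall. */
            if (pdev && intel_svm_prq_report(iommu, &pdev->dev, req))
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);

            trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
                             req->qw_2, req->qw_3,
                             iommu->prq_seq_number++);
    next:
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /* Overflow: drop partially queued groups and clear the overflow bit. */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            iopf_queue_discard_partial(iommu->iopf_queue);
            writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
        }

        /* Drain paths sleep on prq_complete; wake them once caught up. */
        if (!completion_done(&iommu->prq_complete))
            complete(&iommu->prq_complete);

        return IRQ_HANDLED;
    }
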
754 struct intel_iommu *iommu;
765 iommu = device_to_iommu(dev, &bus, &devfn);
766 if (!iommu)
810 dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
814 qi_submit_sync(iommu, &desc, 1, 0);
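
intel_svm_page_response() is the reverse direction: the fault consumer's reply is packaged into a page-group response descriptor and pushed through the invalidation queue; when the request carried no private data but latency tracking is enabled, the timestamp stashed at report time is used to close a PRQ latency sample. A sketch of the final steps (prm, private_present and desc come from the surrounding function and are recalled names, not shown in the matches):

    if (private_present) {
        /* Echo the request's private data back in the response. */
        desc.qw2 = prm->private_data[0];
        desc.qw3 = prm->private_data[1];
    } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
        /* prq_report stashed a timestamp here; close the latency sample. */
        dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
                            ktime_to_ns(ktime_get()) - prm->private_data[0]);
    }

    /* One page-group response descriptor, submitted synchronously. */
    qi_submit_sync(iommu, &desc, 1, 0);
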
824 struct intel_iommu *iommu = info->iommu;
827 return intel_svm_bind_mm(iommu, dev, mm);
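
The final match is the iommu-ops entry point: attaching an SVA domain to a device for a PASID reduces to binding the domain's mm through intel_svm_bind_mm(). A sketch (the parameter list follows the generic set_dev_pasid callback; the exact prototype may differ by kernel version):

    static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
                                       struct device *dev, ioasid_t pasid)
    {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_iommu *iommu = info->iommu;
        struct mm_struct *mm = domain->mm;

        return intel_svm_bind_mm(iommu, dev, mm);
    }
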