Lines matching refs:iommu (Intel VT-d PASID table code, likely drivers/iommu/intel/pasid.c)
15 #include <linux/iommu.h>
21 #include "iommu.h"
29 int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
36 raw_spin_lock_irqsave(&iommu->register_lock, flags);
37 dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
38 IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
40 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
48 pr_info("IOMMU: %s: No PASID available\n", iommu->name);
54 iommu->name, status_code);
60 void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
66 raw_spin_lock_irqsave(&iommu->register_lock, flags);
67 dmar_writeq(iommu->reg + DMAR_VCMD_REG,
69 IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
71 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
78 pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
82 iommu->name, status_code);
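The matches above trace the virtual-command (VCMD) PASID allocation interface: a command is written to DMAR_VCMD_REG under the register lock, then the response register is polled until the in-progress bit clears. A hedged reconstruction of the allocation path follows; the VCMD_VRSP_* decode macros come from the kernel's Intel IOMMU headers, and the error-path return values are inferred from the pr_info/pr_warn matches. The free path is symmetric, writing VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE instead.

/* Sketch of the VCMD allocation handshake; in-tree kernel code,
 * not a standalone program.
 */
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;
	int ret = 0;

	/* The register file is shared; serialize MMIO access. */
	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	/* Poll DMAR_VCRSP_REG until the In-Progress bit clears. */
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}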
119 pages = alloc_pages_node(info->iommu->node,
131 if (!ecap_coherent(info->iommu->ecap))
206 entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
221 if (!ecap_coherent(info->iommu->ecap)) {
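Both allocation sites pair new table memory with an ecap_coherent() check: if the IOMMU cannot snoop CPU caches, anything the CPU just wrote must be flushed explicitly before hardware walks it. A sketch of the pattern; 'dir_entry' is an illustrative name for the PASID directory slot being populated, not an identifier from the file.

	/* Publish a freshly zeroed PASID table page, then flush it
	 * (and the directory slot pointing at it) out of the CPU
	 * caches when the IOMMU is not cache-coherent.
	 */
	entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
	if (!entries)
		return NULL;

	/* ... install 'entries' into the directory slot ... */

	if (!ecap_coherent(info->iommu->ecap)) {
		clflush_cache_range(entries, VTD_PAGE_SIZE);
		clflush_cache_range(dir_entry, sizeof(*dir_entry));
	}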
406 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
417 qi_submit_sync(iommu, &desc, 1, 0);
421 devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
445 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
447 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
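pasid_cache_invalidation_with_pasid() builds a single queued-invalidation descriptor and submits it synchronously. A sketch of the descriptor path; the QI_PC_* field macros are from the kernel's Intel IOMMU headers.

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	/* PASID-selective PASID-cache invalidation descriptor. */
	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		   QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	/* Submit on the invalidation queue and wait for completion. */
	qi_submit_sync(iommu, &desc, 1, 0);
}

In devtlb_invalidation_with_pasid(), the PASID used for DMA requests without PASID takes the plain qi_flush_dev_iotlb() path, while real PASIDs use qi_flush_dev_iotlb_pasid(); the "64 - VTD_PAGE_SHIFT" address-mask argument encodes a whole-address-space flush.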
450 void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
456 spin_lock(&iommu->lock);
459 spin_unlock(&iommu->lock);
466 spin_unlock(&iommu->lock);
468 if (!ecap_coherent(iommu->ecap))
471 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
474 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
476 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
479 if (!cap_caching_mode(iommu->cap))
480 devtlb_invalidation_with_pasid(iommu, dev, pasid);
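intel_pasid_tear_down_entry() shows the required ordering: snapshot the entry's domain ID and translation type under iommu->lock, clear the entry, and only then run the invalidation cascade. A hedged reconstruction; helpers such as pasid_pte_get_pgtt() and intel_pasid_clear_entry() are from the same file but not shown in the matches.

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	/* Snapshot what the flushes below need, then clear. */
	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	/* PASID-based IOTLB flush where it applies, else domain-selective. */
	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB need not be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}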
487 static void pasid_flush_caches(struct intel_iommu *iommu,
491 if (!ecap_coherent(iommu->ecap))
494 if (cap_caching_mode(iommu->cap)) {
495 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
496 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
498 iommu_flush_write_buffer(iommu);
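pasid_flush_caches() is the common tail for every setup path: flush the entry from the CPU caches if the IOMMU is not coherent, then either invalidate (caching mode, where even not-present entries may be cached) or just drain the write buffer. Likely close to:

static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		/* Caching mode: hardware may cache not-present entries. */
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}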
506 int intel_pasid_setup_first_level(struct intel_iommu *iommu,
512 if (!ecap_flts(iommu->ecap)) {
514 iommu->name);
518 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
520 iommu->name);
524 spin_lock(&iommu->lock);
527 spin_unlock(&iommu->lock);
532 spin_unlock(&iommu->lock);
548 pasid_set_address_width(pte, iommu->agaw);
549 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
555 spin_unlock(&iommu->lock);
557 pasid_flush_caches(iommu, pte, pasid, did);
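The first-level path checks two capabilities before touching the table: first-level translation support (ecap_flts) and, when 5-level paging is requested, cap_fl5lp_support. A hedged reconstruction of the flow; pasid_set_flptr() and the other field setters are the file's own helpers, the pr_err strings are approximate, and some optional flag handling is omitted.

int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}
	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}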
563 * Skip top levels of page tables for iommu which has less agaw
567 struct intel_iommu *iommu,
572 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
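When a domain was built with a wider address width (agaw) than a given IOMMU supports, setup walks down the page-table levels until the widths agree. Plausibly:

static int iommu_skip_agaw(struct dmar_domain *domain,
			   struct intel_iommu *iommu,
			   struct dma_pte **pgd)
{
	int agaw;

	/* Descend one level per excess agaw step. */
	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}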
584 int intel_pasid_setup_second_level(struct intel_iommu *iommu,
598 if (!ecap_slts(iommu->ecap)) {
600 iommu->name);
605 agaw = iommu_skip_agaw(domain, iommu, &pgd);
612 did = domain_id_iommu(domain, iommu);
614 spin_lock(&iommu->lock);
617 spin_unlock(&iommu->lock);
622 spin_unlock(&iommu->lock);
632 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
635 spin_unlock(&iommu->lock);
637 pasid_flush_caches(iommu, pte, pasid, did);
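The second-level path mirrors the first-level one (capability check on ecap_slts, agaw adjustment via iommu_skip_agaw(), lock/lookup, program the entry, pasid_flush_caches()), differing mainly in which fields get set. Only that step is sketched here, as a hypothetical helper; in the matched file it sits inline under iommu->lock.

/* Hypothetical helper collecting the second-level field setup. */
static void sketch_config_second_level(struct intel_iommu *iommu,
				       struct pasid_entry *pte,
				       struct dma_pte *pgd, int agaw, u16 did)
{
	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, virt_to_phys(pgd));
	pasid_set_address_width(pte, agaw);	/* possibly reduced width */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_present(pte);
}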
645 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
652 spin_lock(&iommu->lock);
655 spin_unlock(&iommu->lock);
660 spin_unlock(&iommu->lock);
666 pasid_set_address_width(pte, iommu->agaw);
669 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
671 spin_unlock(&iommu->lock);
673 pasid_flush_caches(iommu, pte, pasid, did);
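Pass-through entries skip translation entirely, yet still carry a domain ID, the IOMMU's own address width, and the PT translation type. The field setup, again as a hypothetical helper standing in for the inline code under iommu->lock:

/* Hypothetical helper collecting the pass-through field setup. */
static void sketch_config_pass_through(struct intel_iommu *iommu,
				       struct pasid_entry *pte, u16 did)
{
	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_present(pte);
}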
681 void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
687 spin_lock(&iommu->lock);
690 spin_unlock(&iommu->lock);
696 spin_unlock(&iommu->lock);
698 if (!ecap_coherent(iommu->ecap))
712 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
713 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
716 if (!cap_caching_mode(iommu->cap))
717 devtlb_invalidation_with_pasid(iommu, dev, pasid);
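intel_pasid_setup_page_snoop_control() is the one path here that modifies a live (present) entry, so it must follow the modify-then-invalidate order the VT-d spec requires: flip PGSNP under the lock, then invalidate the PASID cache and the PASID-based IOTLB, skipping the device TLB under caching mode. A hedged reconstruction:

void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);		/* flip PGSNP on a live entry */
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/* Modifying a present entry requires PASID-cache invalidation
	 * followed by a PASID-based IOTLB invalidation.
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB need not be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}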