Lines matching refs:iommu
15 #include <linux/intel-iommu.h>
16 #include <linux/iommu.h>
29 int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
36 raw_spin_lock_irqsave(&iommu->register_lock, flags);
37 dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
38 IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
40 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
48 pr_info("IOMMU: %s: No PASID available\n", iommu->name);
54 iommu->name, status_code);
60 void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
66 raw_spin_lock_irqsave(&iommu->register_lock, flags);
67 dmar_writeq(iommu->reg + DMAR_VCMD_REG,
69 IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
71 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
78 pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
82 iommu->name, status_code);
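The two vcmd_* helpers above (lines 29-82) drive the VT-d virtual command register to allocate and free PASIDs. Below is a minimal sketch of the allocation side, reconstructed from the fragments; the VCMD_VRSP_* status helpers and the exact return codes are assumptions taken from kernel sources of the same era (around v5.11) and may differ by version. vcmd_free_pasid follows the same lock/write/poll pattern, writing VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE instead.

/* Sketch: PASID allocation via the virtual command register. */
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;
	int ret = 0;

	/* VCMD/VCRSP is one shared mailbox: the command write and the
	 * response poll must stay under the same register lock. */
	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res); /* wait for IP to clear */
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}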
116 if (info->iommu->segment == data->segment &&
175 pages = alloc_pages_node(info->iommu->node,
189 if (!ecap_coherent(info->iommu->ecap))
267 entries = alloc_pgtable_page(info->iommu->node);
282 if (!ecap_coherent(info->iommu->ecap)) {
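Lines 175-282 are the PASID directory/table allocation paths. The recurring pattern: allocate the page on the IOMMU's NUMA node (info->iommu->node), and if the IOMMU's page walk does not snoop CPU caches (!ecap_coherent), flush the touched cachelines explicitly. A hedged sketch of the directory-entry install around lines 267-282; the helper name pasid_table_install is hypothetical, and the cmpxchg64 retry is an assumption from kernel sources of the same era:

/* Hypothetical helper: get-or-create the PASID table page behind one
 * directory entry. */
static struct pasid_entry *
pasid_table_install(struct device_domain_info *info,
		    struct pasid_dir_entry *dir_entry)
{
	struct pasid_entry *entries;

retry:
	entries = get_pasid_table_from_pde(dir_entry);
	if (entries)
		return entries;

	/* Allocate node-local so the IOMMU's page walks stay local. */
	entries = alloc_pgtable_page(info->iommu->node);
	if (!entries)
		return NULL;

	/* Another CPU may populate the same entry concurrently; if the
	 * cmpxchg loses, drop our page and take theirs. */
	if (cmpxchg64(&dir_entry->val, 0ULL,
		      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
		free_pgtable_page(entries);
		goto retry;
	}

	/* Non-coherent page walk: push the new table and the updated
	 * directory entry out of the CPU caches. */
	if (!ecap_coherent(info->iommu->ecap)) {
		clflush_cache_range(entries, VTD_PAGE_SIZE);
		clflush_cache_range(&dir_entry->val, sizeof(dir_entry->val));
	}

	return entries;
}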
467 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
478 qi_submit_sync(iommu, &desc, 1, 0);
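pasid_cache_invalidation_with_pasid (line 467) builds one PASID-cache invalidation descriptor and submits it synchronously on the invalidation queue. A sketch, with the QI_PC_* field helpers assumed from <linux/intel-iommu.h> of the same era:

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	/* PASID-selective granularity: drop cached PASID-table state
	 * for exactly this (domain, pasid) pair. */
	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		   QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0); /* one descriptor, wait for it */
}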
482 devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
503 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
505 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
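Lines 503-505 show the two device-IOTLB flush flavors. PASID_RID2PASID marks DMA issued without a PASID, which takes the plain devTLB flush; everything else gets the PASID-qualified flush, which is cheaper for devices doing DMA under many PASIDs. A sketch; the device_domain_info lookup is an assumption from kernel sources of the same era:

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = get_domain_info(dev);
	if (!info || !info->ats_enabled)
		return; /* no ATS: the device caches no translations */

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/* A mask of 64 - VTD_PAGE_SHIFT covers the whole address space:
	 * a full flush either way. */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep,
				   0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid,
					 qdep, 0, 64 - VTD_PAGE_SHIFT);
}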
508 void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
523 if (!ecap_coherent(iommu->ecap))
526 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
529 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
531 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
534 if (!cap_caching_mode(iommu->cap))
535 devtlb_invalidation_with_pasid(iommu, dev, pasid);
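intel_pasid_tear_down_entry (line 508) encodes the required invalidation order once an entry is cleared: CPU cacheline flush (only for a non-coherent page walk), PASID cache, IOTLB, then device TLB; under caching mode a virtual IOMMU already intercepts the update, so the devTLB flush is skipped. A sketch; pasid_pte_get_pgtt and the pgtt-based IOTLB choice are assumptions from slightly later kernels (~v5.14), where earlier versions flushed the PASID-based IOTLB unconditionally:

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return;

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);

	/* 1. Make the cleared entry visible to a non-coherent walk. */
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/* 2. PASID cache. */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	/* 3. IOTLB: PASID-based flush when first-level or pass-through
	 * translation was in use, else domain-selective. */
	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* 4. Device TLB; skipped under a caching-mode (virtual) IOMMU. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}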
538 static void pasid_flush_caches(struct intel_iommu *iommu,
542 if (!ecap_coherent(iommu->ecap))
545 if (cap_caching_mode(iommu->cap)) {
546 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
547 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
549 iommu_flush_write_buffer(iommu);
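pasid_flush_caches (line 538) is the common tail of every setup path below. Caching mode (cap_caching_mode) signals a virtual IOMMU that must see invalidations even for not-present-to-present transitions; on bare metal a write-buffer flush is enough. A sketch matching the fragments:

static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		/* Virtual IOMMU: explicitly invalidate so the hypervisor
		 * observes the brand-new entry. */
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}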
557 int intel_pasid_setup_first_level(struct intel_iommu *iommu,
563 if (!ecap_flts(iommu->ecap)) {
565 iommu->name);
578 if (!ecap_srs(iommu->ecap)) {
580 iommu->name);
587 if (cap_5lp_support(iommu->cap)) {
600 pasid_set_address_width(pte, iommu->agaw);
601 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
606 pasid_flush_caches(iommu, pte, pasid, did);
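intel_pasid_setup_first_level (line 557) programs a PASID entry for first-level (process) page tables. The capability checks in the fragments gate optional features: ecap_flts for first-level support at all, ecap_srs before enabling supervisor requests, cap_5lp_support before enabling 5-level paging. A condensed sketch; the pasid_set_* helpers and PASID_FLAG_* names are assumptions from kernel sources of the same era:

int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	pasid_clear_entry(pte);
	pasid_set_flptr(pte, (u64)__pa(pgd)); /* first-level table pointer */

	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
	}

	if (flags & PASID_FLAG_FL5LP) {
		if (!cap_5lp_support(iommu->cap)) {
			pr_err("No 5-level paging support for first-level on %s\n",
			       iommu->name);
			pasid_clear_entry(pte);
			return -EINVAL;
		}
		pasid_set_flpm(pte, 1); /* 5-level paging mode */
	}

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}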
612 * Skip top levels of page tables for an iommu which has a smaller agaw
616 struct intel_iommu *iommu,
621 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
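The comment at line 612 describes iommu_skip_agaw: a domain's page table may have more levels (a larger agaw) than a given IOMMU can walk, so the walk is started below the top until the widths match. A sketch matching the loop at line 621:

/* Descend past the top page-table levels until the domain's width
 * fits this IOMMU; fail if an intermediate level is not present. */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}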
633 int intel_pasid_setup_second_level(struct intel_iommu *iommu,
647 if (!ecap_slts(iommu->ecap)) {
649 iommu->name);
654 agaw = iommu_skip_agaw(domain, iommu, &pgd);
661 did = domain->iommu_did[iommu->seq_id];
675 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
684 if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
687 pasid_flush_caches(iommu, pte, pasid, did);
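The second-level path mirrors the first-level one but points the entry at the domain's IOVA page table (SLPTR) after iommu_skip_agaw, and takes the domain ID from the per-IOMMU array (line 661). The SRE check at line 684 allows supervisor requests for non-RID2PASID entries because second-level addresses are guest-physical. A condensed sketch under the same version assumptions:

int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct dma_pte *pgd = domain->pgd;
	struct pasid_entry *pte;
	int agaw;
	u16 did;

	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0)
		return -EINVAL;

	did = domain->iommu_did[iommu->seq_id];

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte)
		return -ENODEV;

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, virt_to_phys(pgd)); /* second-level table pointer */
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Addresses here are GPAs, so supervisor requests are safe to
	 * allow, except for the RID2PASID entry. */
	if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
		pasid_set_sre(pte);

	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}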
695 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
710 pasid_set_address_width(pte, iommu->agaw);
713 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
719 if (ecap_srs(iommu->ecap))
722 pasid_flush_caches(iommu, pte, pasid, did);
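Pass-through setup (line 695) programs no table pointer at all: translation type PGTT_PT lets DMA addresses through untranslated, so only the address width, snoop behavior, and (per line 719) SRE need configuring. A condensed sketch; the FLPT_DEFAULT_DID constant is an assumption from kernel sources of the same era:

int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -ENODEV;

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Addresses are GPAs; enable supervisor requests if supported. */
	if (ecap_srs(iommu->ecap))
		pasid_set_sre(pte);

	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}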
728 intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
739 if (!ecap_srs(iommu->ecap)) {
741 iommu->name);
748 if (!ecap_eafs(iommu->ecap)) {
750 iommu->name);
762 iommu->name);
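intel_pasid_setup_bind_data (line 728) copies guest-controlled PASID-entry bits from the bind payload, rejecting each one the hardware does not advertise: ecap_srs gates SRE, ecap_eafs gates the extended access flag. A sketch of the pattern; the iommu_gpasid_bind_data_vtd flag names are assumptions from the uapi headers of the same era:

static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
			    struct iommu_gpasid_bind_data_vtd *pasid_data)
{
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err_ratelimited("No supervisor request support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
	}

	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
		if (!ecap_eafs(iommu->ecap)) {
			pr_err_ratelimited("No extended access flag support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_eafe(pte);
	}

	/* Further flags follow the same check-then-set pattern, some
	 * additionally gated on cap_caching_mode() (the warning at
	 * line 762 in the listing). */
	return 0;
}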
775 * @iommu: IOMMU which the device belongs to
783 int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
795 if (!ecap_nest(iommu->ecap)) {
797 iommu->name);
829 !cap_5lp_support(iommu->cap)) {
856 ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
863 agaw = iommu_skip_agaw(domain, iommu, &pgd);
872 did = domain->iommu_did[iommu->seq_id];
876 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
880 pasid_flush_caches(iommu, pte, pasid, did);
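intel_pasid_setup_nested (line 783) combines the pieces: it requires ecap_nest, programs the guest's first-level table and guest-requested bits via intel_pasid_setup_bind_data, then installs the host second-level table exactly as the second-level path does (iommu_skip_agaw, per-IOMMU DID), with translation type nested. A condensed, version-sensitive sketch under the same era assumptions; the signature and the GPA-carrying gpgd argument follow kernels around v5.11:

int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     pgd_t *gpgd, u32 pasid,
			     struct iommu_gpasid_bind_data_vtd *pasid_data,
			     struct dmar_domain *domain, int addr_width)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	int agaw, ret;
	u16 did;

	if (!ecap_nest(iommu->ecap)) {
		pr_err_ratelimited("%s: No nested translation support\n",
				   iommu->name);
		return -ENODEV;
	}

	/* The caller's addr_width is validated against cap_5lp_support()
	 * (line 829 in the listing); that check is elided here. */

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte)
		return -EINVAL;

	pasid_clear_entry(pte);

	/* First level: the guest's page table (a GPA smuggled through a
	 * pgd_t pointer) plus the guest-requested entry bits. */
	pasid_set_flptr(pte, (uintptr_t)gpgd);
	ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
	if (ret)
		return ret;

	/* Second level: the host table, trimmed to this IOMMU's width. */
	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0)
		return -EINVAL;

	pasid_set_slptr(pte, virt_to_phys(pgd));
	pasid_set_address_width(pte, agaw);

	did = domain->iommu_did[iommu->seq_id];
	pasid_set_domain_id(pte, did);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}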