Lines Matching refs:iommu
27 #include "iommu.h"
28 #include "../dma-iommu.h"
30 #include "../iommu-sva.h"
223 static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
225 if (!iommu->copied_tables)
228 return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
232 set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
234 set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
238 clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
240 clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
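The three helpers above index a per-IOMMU "copied tables" bitmap by the 16-bit PCI requester ID, ((long)bus << 8) | devfn. A minimal standalone sketch of that encoding follows; the flat array and helper names here are illustrative stand-ins for the kernel's bitmap API, not the driver's own code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RID_COUNT (1u << 16)                 /* 256 buses x 256 devfns */
static uint64_t copied[RID_COUNT / 64];      /* one bit per requester ID */

static unsigned rid(uint8_t bus, uint8_t devfn)
{
	return ((unsigned)bus << 8) | devfn; /* 16-bit requester ID */
}

static void set_copied(uint8_t bus, uint8_t devfn)
{
	unsigned i = rid(bus, devfn);
	copied[i / 64] |= 1ull << (i % 64);
}

static bool is_copied(uint8_t bus, uint8_t devfn)
{
	unsigned i = rid(bus, devfn);
	return copied[i / 64] & (1ull << (i % 64));
}

int main(void)
{
	set_copied(0x3a, 0x10);              /* e.g. device 3a:02.0 */
	printf("%d %d\n", is_copied(0x3a, 0x10), is_copied(0x3a, 0x11));
	return 0;
}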
246 * 2. It maps to each iommu if successful.
247  * 3. Each iommu maps to this domain if successful.
273 struct intel_iommu *iommu; /* the corresponding iommu */
304 static bool translation_pre_enabled(struct intel_iommu *iommu)
306 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
309 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
311 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
314 static void init_translation_status(struct intel_iommu *iommu)
318 gsts = readl(iommu->reg + DMAR_GSTS_REG);
320 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
340 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
343 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
404 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
408 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
409 sl_sagaw = cap_sagaw(iommu->cap);
412 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
416 if (!ecap_slts(iommu->ecap))
422 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
427 sagaw = __iommu_calculate_sagaw(iommu);
437 * Calculate max SAGAW for each iommu.
439 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
441 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
445  * Calculate agaw for each iommu.
449 int iommu_calculate_agaw(struct intel_iommu *iommu)
451 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
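__iommu_calculate_sagaw() above combines the first-level (4/5-level paging) and second-level (cap_sagaw) capabilities into one bitmask of supported address-width levels, and __iommu_calculate_agaw() walks down from the requested guest address width and returns the largest supported level that does not exceed it. A standalone sketch of that selection, assuming level i covers 30 + 9*i address bits; the helpers are simplified stand-ins:

#include <stdio.h>

/* AGAW level i covers 30 + 9*i bits: 2-level=30 ... 5-level=57 (assumed here) */
static int width_to_agaw(int width)
{
	return (width - 30 + 8) / 9;         /* round up to the covering level */
}

static int calculate_agaw(unsigned sagaw, int max_gaw)
{
	for (int agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1u << agaw))
			return agaw;         /* largest supported level not above the target */
	return -1;                           /* no usable level */
}

int main(void)
{
	unsigned sagaw = (1u << 1) | (1u << 2);   /* 39- and 48-bit supported */

	printf("48-bit gaw -> agaw %d\n", calculate_agaw(sagaw, 48)); /* 2 */
	printf("39-bit gaw -> agaw %d\n", calculate_agaw(sagaw, 39)); /* 1 */
	return 0;
}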
454 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
456 return sm_supported(iommu) ?
457 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
464 struct intel_iommu *iommu;
471 if (!iommu_paging_structure_coherency(info->iommu)) {
481 for_each_active_iommu(iommu, drhd) {
482 if (!iommu_paging_structure_coherency(iommu)) {
494 struct intel_iommu *iommu;
502 for_each_active_iommu(iommu, drhd) {
503 if (iommu != skip) {
505 if (!cap_fl1gp_support(iommu->cap))
508 mask &= cap_super_page_val(iommu->cap);
591 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
594 struct root_entry *root = &iommu->root_entry[bus];
602 if (!alloc && context_copied(iommu, bus, devfn))
606 if (sm_supported(iommu)) {
620 context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
624 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
627 __iommu_flush_cache(iommu, entry, sizeof(*entry));
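iommu_context_addr() above performs a two-level lookup: the root table is indexed by bus, each root entry points at a per-bus context table indexed by devfn, and the context table is allocated lazily only when the caller asks for it. A simplified standalone sketch of the legacy-mode lookup; scalable mode splits each bus into lower/upper 128-entry halves, and the real entries are packed descriptors holding physical addresses, flushed with __iommu_flush_cache() when the unit is not coherent. Types and names here are stand-ins.

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

struct context_entry { uint64_t lo, hi; };           /* 128-bit entry, simplified */
struct root_entry    { struct context_entry *ctx; }; /* pointer stands in for the packed descriptor */

static struct root_entry root_table[256];            /* indexed by bus */

static struct context_entry *context_addr(uint8_t bus, uint8_t devfn, int alloc)
{
	struct root_entry *re = &root_table[bus];

	if (!re->ctx) {
		if (!alloc)
			return NULL;                     /* lookup only, nothing present */
		re->ctx = calloc(256, sizeof(*re->ctx)); /* one context table per bus */
		if (!re->ctx)
			return NULL;
	}
	return &re->ctx[devfn];                          /* entry for this device */
}

int main(void)
{
	printf("%p\n", (void *)context_addr(0x3a, 0x10, 0)); /* NULL: not allocated yet */
	printf("%p\n", (void *)context_addr(0x3a, 0x10, 1)); /* allocates bus 0x3a's table */
	return 0;
}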
678 	/* we know that this iommu should be at offset 0xa000 from vtbar */
689 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
691 if (!iommu || iommu->drhd->ignored)
710 struct intel_iommu *iommu;
732 for_each_iommu(iommu, drhd) {
766 iommu = NULL;
768 if (iommu_is_dummy(iommu, dev))
769 iommu = NULL;
773 return iommu;
783 static void free_context_table(struct intel_iommu *iommu)
788 if (!iommu->root_entry)
792 context = iommu_context_addr(iommu, i, 0, 0);
796 if (!sm_supported(iommu))
799 context = iommu_context_addr(iommu, i, 0x80, 0);
804 free_pgtable_page(iommu->root_entry);
805 iommu->root_entry = NULL;
809 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
833 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
845 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
848 rt_entry = &iommu->root_entry[bus];
854 if (sm_supported(iommu))
861 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0);
871 if (!sm_supported(iommu)) {
911 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
1188 /* iommu handling */
1189 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1193 root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
1196 iommu->name);
1200 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1201 iommu->root_entry = root;
1206 static void iommu_set_root_entry(struct intel_iommu *iommu)
1212 addr = virt_to_phys(iommu->root_entry);
1213 if (sm_supported(iommu))
1216 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1217 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1219 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1222 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1225 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1231 if (cap_esrtps(iommu->cap))
1234 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1235 if (sm_supported(iommu))
1236 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
1237 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
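iommu_set_root_entry() above programs the root table physical address, latches it by setting the SRTP bit in the global command register, polls the status register for the RTPS acknowledgement, and then issues global context, PASID-cache and IOTLB invalidations unless the hardware flushes them itself (cap_esrtps). A toy simulation of that handshake; the register array and hardware step are stand-ins for MMIO, not the driver's code.

#include <stdint.h>
#include <stdio.h>

enum { RTADDR, GCMD, GSTS, NREGS };   /* stand-ins for DMAR_RTADDR/GCMD/GSTS_REG */
#define GCMD_SRTP (1u << 30)          /* "set root table pointer" command bit */
#define GSTS_RTPS (1u << 30)          /* "root table pointer status" ack bit */

static uint64_t regs[NREGS];

static void fake_hw_step(void)        /* what the hardware side would do */
{
	if (regs[GCMD] & GCMD_SRTP)
		regs[GSTS] |= GSTS_RTPS;
}

static void set_root_entry(uint64_t root_phys)
{
	regs[RTADDR] = root_phys;     /* dmar_writeq(DMAR_RTADDR_REG, addr) */
	regs[GCMD] |= GCMD_SRTP;      /* writel(gcmd | SRTP, DMAR_GCMD_REG) */
	do {
		fake_hw_step();       /* stands in for IOMMU_WAIT_OP() polling */
	} while (!(regs[GSTS] & GSTS_RTPS));
	/* the real driver now flushes context/PASID/IOTLB caches globally
	 * unless cap_esrtps() says the hardware already did */
}

int main(void)
{
	set_root_entry(0x12340000);
	printf("GSTS = 0x%llx\n", (unsigned long long)regs[GSTS]);
	return 0;
}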
1240 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1245 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1248 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1249 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1252 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1255 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1259 static void __iommu_flush_context(struct intel_iommu *iommu,
1279 iommu->name, type);
1284 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1285 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1288 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1291 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1295 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1298 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1317 iommu->name, type);
1321 if (cap_write_drain(iommu->cap))
1324 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1327 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1328 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1331 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1334 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1347 struct intel_iommu *iommu, u8 bus, u8 devfn)
1354 if (info->iommu == iommu && info->bus == bus &&
1464 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1489 qi_flush_dev_iotlb_pasid(info->iommu,
1498 static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
1502 u16 did = domain_id_iommu(domain, iommu);
1508 qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
1511 qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
1515 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1523 u16 did = domain_id_iommu(domain, iommu);
1532 domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
1559 if (!cap_pgsel_inv(iommu->cap) ||
1560 mask > cap_max_amask_val(iommu->cap))
1561 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1564 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1572 if (!cap_caching_mode(iommu->cap) || !map)
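iommu_flush_iotlb_psi() above expresses the (pfn, pages) range as a power-of-two order ("mask") and issues a page-selective IOTLB invalidation when the hardware supports that order (cap_pgsel_inv, cap_max_amask_val), otherwise it falls back to a domain-selective flush. A standalone sketch of that decision; the capability parameters and the print statements are stand-ins for the real flush hooks.

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT 12

static int order_of(unsigned long pages)        /* ceil(log2(pages)) */
{
	int order = 0;

	while ((1ul << order) < pages)
		order++;
	return order;
}

static void flush_range(unsigned did, unsigned long pfn, unsigned long pages,
			int max_amask, int pgsel_inv)
{
	int mask = order_of(pages);

	if (!pgsel_inv || mask > max_amask)
		printf("did %u: domain-selective flush\n", did);
	else
		printf("did %u: page-selective flush addr=0x%lx mask=%d\n",
		       did, pfn << VTD_PAGE_SHIFT, mask);
}

int main(void)
{
	flush_range(1, 0x1000, 8, 6, 1);         /* small, supported order -> PSI */
	flush_range(1, 0x1000, 1ul << 20, 6, 1); /* order too large -> DSI fallback */
	return 0;
}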
1577 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1585 if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
1586 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1588 iommu_flush_write_buffer(iommu);
1598 struct intel_iommu *iommu = info->iommu;
1599 u16 did = domain_id_iommu(dmar_domain, iommu);
1602 domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
1604 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1607 if (!cap_caching_mode(iommu->cap))
1612 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1617 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1620 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1621 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1623 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1626 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1629 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1632 static void iommu_enable_translation(struct intel_iommu *iommu)
1637 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1638 iommu->gcmd |= DMA_GCMD_TE;
1639 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1642 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1645 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1648 static void iommu_disable_translation(struct intel_iommu *iommu)
1653 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1654 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1657 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1658 iommu->gcmd &= ~DMA_GCMD_TE;
1659 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1662 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1665 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1668 static int iommu_init_domains(struct intel_iommu *iommu)
1672 ndomains = cap_ndoms(iommu->cap);
1674 iommu->name, ndomains);
1676 spin_lock_init(&iommu->lock);
1678 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
1679 if (!iommu->domain_ids)
1688 set_bit(0, iommu->domain_ids);
1697 if (sm_supported(iommu))
1698 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
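iommu_init_domains() above sizes a per-IOMMU domain-ID bitmap from cap_ndoms() and reserves ID 0 (plus FLPT_DEFAULT_DID when scalable mode is on) so they are never handed out; domain_attach_iommu() later allocates the lowest clear bit and domain_detach_iommu() clears it again (lines 1802 and 1839 below). A standalone sketch of that allocator, with a fixed-size byte array in place of bitmap_zalloc() and an assumed value for the reserved default DID:

#include <stdio.h>

#define NDOMAINS 256                    /* stand-in for cap_ndoms() */
static unsigned char dom_ids[NDOMAINS]; /* 1 = in use; stands in for the bitmap */

static void init_domain_ids(int scalable_mode)
{
	dom_ids[0] = 1;                 /* ID 0 is reserved, never allocated */
	if (scalable_mode)
		dom_ids[1] = 1;         /* default first-level DID (assumed to be 1 here) */
}

static int alloc_domain_id(void)
{
	for (int i = 0; i < NDOMAINS; i++)      /* find_first_zero_bit() */
		if (!dom_ids[i]) {
			dom_ids[i] = 1;
			return i;
		}
	return -1;                      /* "No free domain ids" */
}

static void free_domain_id(int id)
{
	dom_ids[id] = 0;                /* clear_bit(info->did, domain_ids) */
}

int main(void)
{
	init_domain_ids(1);
	int a = alloc_domain_id(), b = alloc_domain_id();
	printf("%d %d\n", a, b);            /* 2 3: IDs 0 and 1 stay reserved */
	free_domain_id(a);
	printf("%d\n", alloc_domain_id());  /* 2 again */
	return 0;
}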
1703 static void disable_dmar_iommu(struct intel_iommu *iommu)
1705 if (!iommu->domain_ids)
1709 * All iommu domains must have been detached from the devices,
1712 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
1716 if (iommu->gcmd & DMA_GCMD_TE)
1717 iommu_disable_translation(iommu);
1720 static void free_dmar_iommu(struct intel_iommu *iommu)
1722 if (iommu->domain_ids) {
1723 bitmap_free(iommu->domain_ids);
1724 iommu->domain_ids = NULL;
1727 if (iommu->copied_tables) {
1728 bitmap_free(iommu->copied_tables);
1729 iommu->copied_tables = NULL;
1733 free_context_table(iommu);
1736 if (pasid_supported(iommu)) {
1737 if (ecap_prs(iommu->ecap))
1738 intel_svm_finish_prq(iommu);
1782 struct intel_iommu *iommu)
1792 spin_lock(&iommu->lock);
1793 curr = xa_load(&domain->iommu_array, iommu->seq_id);
1796 spin_unlock(&iommu->lock);
1801 ndomains = cap_ndoms(iommu->cap);
1802 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1804 pr_err("%s: No free domain ids\n", iommu->name);
1808 set_bit(num, iommu->domain_ids);
1811 info->iommu = iommu;
1812 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
1820 spin_unlock(&iommu->lock);
1824 clear_bit(info->did, iommu->domain_ids);
1826 spin_unlock(&iommu->lock);
1832 struct intel_iommu *iommu)
1836 spin_lock(&iommu->lock);
1837 info = xa_load(&domain->iommu_array, iommu->seq_id);
1839 clear_bit(info->did, iommu->domain_ids);
1840 xa_erase(&domain->iommu_array, iommu->seq_id);
1845 spin_unlock(&iommu->lock);
1927 struct intel_iommu *iommu,
1932 domain_lookup_dev_info(domain, iommu, bus, devfn);
1933 u16 did = domain_id_iommu(domain, iommu);
1944 spin_lock(&iommu->lock);
1946 context = iommu_context_addr(iommu, bus, devfn, 1);
1951 if (context_present(context) && !context_copied(iommu, bus, devfn))
1963 if (context_copied(iommu, bus, devfn)) {
1966 if (did_old < cap_ndoms(iommu->cap)) {
1967 iommu->flush.flush_context(iommu, did_old,
1971 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
1975 clear_context_copied(iommu, bus, devfn);
1980 if (sm_supported(iommu)) {
2009 * Skip top levels of page tables for iommu which has
2012 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2032 context_set_address_width(context, iommu->msagaw);
2040 if (!ecap_coherent(iommu->ecap))
2049 if (cap_caching_mode(iommu->cap)) {
2050 iommu->flush.flush_context(iommu, 0,
2054 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2056 iommu_flush_write_buffer(iommu);
2062 spin_unlock(&iommu->lock);
2069 struct intel_iommu *iommu;
2078 return domain_context_mapping_one(data->domain, data->iommu,
2088 struct intel_iommu *iommu;
2091 iommu = device_to_iommu(dev, &bus, &devfn);
2092 if (!iommu)
2098 return domain_context_mapping_one(domain, iommu, table,
2102 data.iommu = iommu;
2170 iommu_flush_iotlb_psi(info->iommu, domain,
2287 struct intel_iommu *iommu = info->iommu;
2291 if (!iommu)
2294 spin_lock(&iommu->lock);
2295 context = iommu_context_addr(iommu, bus, devfn, 0);
2297 spin_unlock(&iommu->lock);
2301 if (sm_supported(iommu)) {
2305 did_old = domain_id_iommu(info->domain, iommu);
2311 __iommu_flush_cache(iommu, context, sizeof(*context));
2312 spin_unlock(&iommu->lock);
2313 iommu->flush.flush_context(iommu,
2319 if (sm_supported(iommu))
2320 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
2322 iommu->flush.flush_iotlb(iommu,
2331 static int domain_setup_first_level(struct intel_iommu *iommu,
2341 * Skip top levels of page tables for iommu which has
2344 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2360 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2361 domain_id_iommu(domain, iommu),
2449 struct intel_iommu *iommu;
2454 iommu = device_to_iommu(dev, &bus, &devfn);
2455 if (!iommu)
2458 ret = domain_attach_iommu(domain, iommu);
2467 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
2470 ret = intel_pasid_setup_pass_through(iommu, domain,
2473 ret = domain_setup_first_level(iommu, domain, dev,
2476 ret = intel_pasid_setup_second_level(iommu, domain,
2492 if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
2553 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2556 	 * Start from a sane iommu hardware state.
2561 if (!iommu->qi) {
2565 dmar_fault(-1, iommu);
2570 dmar_disable_qi(iommu);
2573 if (dmar_enable_qi(iommu)) {
2577 iommu->flush.flush_context = __iommu_flush_context;
2578 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2580 iommu->name);
2582 iommu->flush.flush_context = qi_flush_context;
2583 iommu->flush.flush_iotlb = qi_flush_iotlb;
2584 pr_info("%s: Using Queued invalidation\n", iommu->name);
2588 static int copy_context_table(struct intel_iommu *iommu,
2610 __iommu_flush_cache(iommu, new_ce,
2640 new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
2654 if (did >= 0 && did < cap_ndoms(iommu->cap))
2655 set_bit(did, iommu->domain_ids);
2657 set_context_copied(iommu, bus, devfn);
2663 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2672 static int copy_translation_tables(struct intel_iommu *iommu)
2682 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2684 new_ext = !!sm_supported(iommu);
2695 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
2696 if (!iommu->copied_tables)
2715 ret = copy_context_table(iommu, &old_rt[bus],
2719 iommu->name, bus);
2724 spin_lock(&iommu->lock);
2733 iommu->root_entry[bus].lo = val;
2740 iommu->root_entry[bus].hi = val;
2743 spin_unlock(&iommu->lock);
2747 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
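copy_translation_tables() above lets a kdump kernel inherit the crashed kernel's live DMA translations: it reads the old root table address from DMAR_RTADDR_REG, copies each bus's context table, marks every copied (bus, devfn) in the copied_tables bitmap, and reserves any domain ID found in an old entry so new allocations cannot collide with still-active translations (lines 2654-2657 above). A condensed sketch of just that reservation step; the types and helper here are simplified stand-ins, not the driver's structures.

#include <stdint.h>
#include <stdio.h>

#define NDOMAINS 256
static unsigned char dom_ids[NDOMAINS];     /* per-IOMMU domain-ID bitmap */
static unsigned char copied[256][256];      /* copied_tables, [bus][devfn] */

struct old_context { int present; int did; };

static void inherit_bus(uint8_t bus, const struct old_context *old_ctx)
{
	for (int devfn = 0; devfn < 256; devfn++) {
		if (!old_ctx[devfn].present)
			continue;
		if (old_ctx[devfn].did >= 0 && old_ctx[devfn].did < NDOMAINS)
			dom_ids[old_ctx[devfn].did] = 1;  /* reserve the inherited DID */
		copied[bus][devfn] = 1;                   /* remember we copied this context */
	}
}

int main(void)
{
	struct old_context bus3a[256] = { [0x10] = { .present = 1, .did = 42 } };

	inherit_bus(0x3a, bus3a);
	printf("did 42 reserved: %d, ctx copied: %d\n",
	       dom_ids[42], copied[0x3a][0x10]);
	return 0;
}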
2760 struct intel_iommu *iommu;
2767 for_each_iommu(iommu, drhd) {
2769 iommu_disable_translation(iommu);
2778 if (pasid_supported(iommu)) {
2779 u32 temp = 2 << ecap_pss(iommu->ecap);
2785 intel_iommu_init_qi(iommu);
2787 ret = iommu_init_domains(iommu);
2791 init_translation_status(iommu);
2793 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
2794 iommu_disable_translation(iommu);
2795 clear_translation_pre_enabled(iommu);
2797 iommu->name);
2805 ret = iommu_alloc_root_entry(iommu);
2809 if (translation_pre_enabled(iommu)) {
2812 ret = copy_translation_tables(iommu);
2824 iommu->name);
2825 iommu_disable_translation(iommu);
2826 clear_translation_pre_enabled(iommu);
2829 iommu->name);
2833 if (!ecap_pass_through(iommu->ecap))
2835 intel_svm_check(iommu);
2843 for_each_active_iommu(iommu, drhd) {
2844 iommu_flush_write_buffer(iommu);
2845 iommu_set_root_entry(iommu);
2868 for_each_iommu(iommu, drhd) {
2875 iommu_disable_protect_mem_regions(iommu);
2879 iommu_flush_write_buffer(iommu);
2882 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
2888 ret = intel_svm_enable_prq(iommu);
2894 ret = dmar_set_interrupt(iommu);
2902 for_each_active_iommu(iommu, drhd) {
2903 disable_dmar_iommu(iommu);
2904 free_dmar_iommu(iommu);
2954 struct intel_iommu *iommu = NULL;
2957 for_each_active_iommu(iommu, drhd) {
2958 if (iommu->qi) {
2959 ret = dmar_reenable_qi(iommu);
2965 for_each_iommu(iommu, drhd) {
2972 iommu_disable_protect_mem_regions(iommu);
2976 iommu_flush_write_buffer(iommu);
2977 iommu_set_root_entry(iommu);
2978 iommu_enable_translation(iommu);
2979 iommu_disable_protect_mem_regions(iommu);
2988 struct intel_iommu *iommu;
2990 for_each_active_iommu(iommu, drhd) {
2991 iommu->flush.flush_context(iommu, 0, 0, 0,
2993 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3001 struct intel_iommu *iommu = NULL;
3006 for_each_active_iommu(iommu, drhd) {
3007 iommu_disable_translation(iommu);
3009 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3011 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3012 readl(iommu->reg + DMAR_FECTL_REG);
3013 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3014 readl(iommu->reg + DMAR_FEDATA_REG);
3015 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3016 readl(iommu->reg + DMAR_FEADDR_REG);
3017 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3018 readl(iommu->reg + DMAR_FEUADDR_REG);
3020 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3028 struct intel_iommu *iommu = NULL;
3039 for_each_active_iommu(iommu, drhd) {
3041 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3043 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3044 iommu->reg + DMAR_FECTL_REG);
3045 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3046 iommu->reg + DMAR_FEDATA_REG);
3047 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3048 iommu->reg + DMAR_FEADDR_REG);
3049 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3050 iommu->reg + DMAR_FEUADDR_REG);
3052 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3278 struct intel_iommu *iommu = dmaru->iommu;
3280 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
3284 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3286 iommu->name);
3290 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
3291 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3293 iommu->name);
3300 if (iommu->gcmd & DMA_GCMD_TE)
3301 iommu_disable_translation(iommu);
3303 ret = iommu_init_domains(iommu);
3305 ret = iommu_alloc_root_entry(iommu);
3309 intel_svm_check(iommu);
3316 iommu_disable_protect_mem_regions(iommu);
3320 intel_iommu_init_qi(iommu);
3321 iommu_flush_write_buffer(iommu);
3324 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3325 ret = intel_svm_enable_prq(iommu);
3330 ret = dmar_set_interrupt(iommu);
3334 iommu_set_root_entry(iommu);
3335 iommu_enable_translation(iommu);
3337 iommu_disable_protect_mem_regions(iommu);
3341 disable_dmar_iommu(iommu);
3343 free_dmar_iommu(iommu);
3350 struct intel_iommu *iommu = dmaru->iommu;
3354 if (iommu == NULL)
3360 disable_dmar_iommu(iommu);
3361 free_dmar_iommu(iommu);
3414 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
3434 return !(satcu->atc_required && !sm_supported(iommu));
3562 struct intel_iommu *iommu;
3568 for_each_active_iommu(iommu, drhd)
3569 iommu_flush_iotlb_psi(iommu, si_domain,
3588 struct intel_iommu *iommu = NULL;
3591 for_each_iommu(iommu, drhd)
3592 iommu_disable_translation(iommu);
3598 struct intel_iommu *iommu = NULL;
3606 for_each_iommu(iommu, drhd)
3607 iommu_disable_protect_mem_regions(iommu);
3619 return container_of(iommu_dev, struct intel_iommu, iommu);
3625 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3626 u32 ver = readl(iommu->reg + DMAR_VER_REG);
3635 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3636 return sysfs_emit(buf, "%llx\n", iommu->reg_phys);
3643 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3644 return sysfs_emit(buf, "%llx\n", iommu->cap);
3651 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3652 return sysfs_emit(buf, "%llx\n", iommu->ecap);
3659 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3660 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap));
3667 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3669 bitmap_weight(iommu->domain_ids,
3670 cap_ndoms(iommu->cap)));
3685 .name = "intel-iommu",
3732 struct intel_iommu *iommu __maybe_unused;
3736 for_each_active_iommu(iommu, drhd) {
3781 struct intel_iommu *iommu;
3826 for_each_iommu(iommu, drhd)
3827 iommu_disable_protect_mem_regions(iommu);
3862 for_each_active_iommu(iommu, drhd) {
3870 if (cap_caching_mode(iommu->cap) &&
3875 iommu_device_sysfs_add(&iommu->iommu, NULL,
3877 "%s", iommu->name);
3878 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
3880 iommu_pmu_register(iommu);
3892 for_each_iommu(iommu, drhd) {
3893 if (!drhd->ignored && !translation_pre_enabled(iommu))
3894 iommu_enable_translation(iommu);
3896 iommu_disable_protect_mem_regions(iommu);
3921 * NB - intel-iommu lacks any sort of reference counting for the users of
3939 struct intel_iommu *iommu = info->iommu;
3943 if (dev_is_pci(info->dev) && sm_supported(iommu))
3944 intel_pasid_tear_down_entry(iommu, info->dev,
3955 domain_detach_iommu(domain, iommu);
3967 struct intel_iommu *iommu = info->iommu;
3972 if (sm_supported(iommu))
3973 intel_pasid_tear_down_entry(iommu, dev,
3986 domain_detach_iommu(info->domain, iommu);
4074 struct intel_iommu *iommu;
4077 iommu = device_to_iommu(dev, NULL, NULL);
4078 if (!iommu)
4081 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
4084 /* check if this iommu agaw is sufficient for max mapped address */
4085 addr_width = agaw_to_width(iommu->agaw);
4086 if (addr_width > cap_mgaw(iommu->cap))
4087 addr_width = cap_mgaw(iommu->cap);
4096 while (iommu->agaw < dmar_domain->agaw) {
4148 pr_err("%s: iommu width (%d) is not "
4245 iommu_flush_iotlb_psi(info->iommu, dmar_domain,
4277 if (!ecap_sc_support(info->iommu->ecap)) {
4301 intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
4338 return ecap_sc_support(info->iommu->ecap);
4348 struct intel_iommu *iommu;
4352 iommu = device_to_iommu(dev, &bus, &devfn);
4353 if (!iommu || !iommu->iommu.ops)
4367 info->segment = iommu->segment;
4371 info->iommu = iommu;
4373 if (ecap_dev_iotlb_support(iommu->ecap) &&
4375 dmar_ats_supported(pdev, iommu)) {
4386 if (ecap_dit(iommu->ecap))
4390 if (sm_supported(iommu)) {
4391 if (pasid_supported(iommu)) {
4398 if (info->ats_supported && ecap_prs(iommu->ecap) &&
4406 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
4416 return &iommu->iommu;
4505 struct intel_iommu *iommu;
4510 iommu = info->iommu;
4511 if (!iommu)
4514 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
4541 struct intel_iommu *iommu;
4550 iommu = info->iommu;
4551 if (!iommu)
4562 ret = iopf_queue_add_device(iommu->iopf_queue, dev);
4580 iopf_queue_remove_device(iommu->iopf_queue, dev);
4588 struct intel_iommu *iommu = info->iommu;
4610 WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
4649 return translation_pre_enabled(info->iommu) && !info->domain;
4679 __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
4684 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
4696 * notification. Before consolidating that code into iommu core, let
4716 domain_detach_iommu(dmar_domain, iommu);
4719 intel_pasid_tear_down_entry(iommu, dev, pasid, false);
4728 struct intel_iommu *iommu = info->iommu;
4733 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
4736 if (context_copied(iommu, info->bus, info->devfn))
4747 ret = domain_attach_iommu(dmar_domain, iommu);
4752 ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
4755 ret = domain_setup_first_level(iommu, dmar_domain,
4758 ret = intel_pasid_setup_second_level(iommu, dmar_domain,
4771 domain_detach_iommu(dmar_domain, iommu);
4780 struct intel_iommu *iommu = info->iommu;
4787 vtd->cap_reg = iommu->cap;
4788 vtd->ecap_reg = iommu->ecap;
5035 * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
5052 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
5055 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
5074 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
5080 if (!cap_ecmds(iommu->cap))
5083 raw_spin_lock_irqsave(&iommu->register_lock, flags);
5085 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
5098 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
5099 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));
5101 IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
5111 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);