Lines matching refs: iommu
33 #include <linux/iommu.h>
34 #include <linux/intel-iommu.h>
47 #include <asm/iommu.h>
171 /* global iommu list, set NULL for ignored DMAR units */
293 * 2. It maps to each iommu if successful.
294 * 3. Each iommu maps to this domain if successful.
414 static bool translation_pre_enabled(struct intel_iommu *iommu)
416 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
419 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
421 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
424 static void init_translation_status(struct intel_iommu *iommu)
428 gsts = readl(iommu->reg + DMAR_GSTS_REG);
430 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
479 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
484 domains = iommu->domains[idx];
491 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
497 if (!iommu->domains[idx]) {
499 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
502 domains = iommu->domains[idx];
568 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
572 fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
573 sl_sagaw = cap_sagaw(iommu->cap);
576 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
580 if (!ecap_slts(iommu->ecap))
586 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
591 sagaw = __iommu_calculate_sagaw(iommu);
601 * Calculate max SAGAW for each iommu.
603 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
605 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
609 * calculate agaw for each iommu.
613 int iommu_calculate_agaw(struct intel_iommu *iommu)
615 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
618 /* This function only returns a single iommu in a domain */
636 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
638 return sm_supported(iommu) ?
639 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
645 struct intel_iommu *iommu;
663 for_each_active_iommu(iommu, drhd) {
664 if (!iommu_paging_structure_coherency(iommu)) {
675 struct intel_iommu *iommu;
679 for_each_active_iommu(iommu, drhd) {
680 if (iommu != skip) {
687 if (!sm_supported(iommu) &&
688 !ecap_sc_support(iommu->ecap)) {
703 struct intel_iommu *iommu;
712 for_each_active_iommu(iommu, drhd) {
713 if (iommu != skip) {
715 if (!cap_fl1gp_support(iommu->cap))
718 mask &= cap_super_page_val(iommu->cap);
785 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
788 struct root_entry *root = &iommu->root_entry[bus];
793 if (sm_supported(iommu)) {
807 context = alloc_pgtable_page(iommu->node);
811 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
814 __iommu_flush_cache(iommu, entry, sizeof(*entry));
870 /* we know that this iommu should be at offset 0xa000 from vtbar */
881 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
883 if (!iommu || iommu->drhd->ignored)
902 struct intel_iommu *iommu;
924 for_each_iommu(iommu, drhd) {
958 iommu = NULL;
960 if (iommu_is_dummy(iommu, dev))
961 iommu = NULL;
965 return iommu;
975 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
981 spin_lock_irqsave(&iommu->lock, flags);
982 context = iommu_context_addr(iommu, bus, devfn, 0);
985 spin_unlock_irqrestore(&iommu->lock, flags);
989 static void free_context_table(struct intel_iommu *iommu)
995 spin_lock_irqsave(&iommu->lock, flags);
996 if (!iommu->root_entry) {
1000 context = iommu_context_addr(iommu, i, 0, 0);
1004 if (!sm_supported(iommu))
1007 context = iommu_context_addr(iommu, i, 0x80, 0);
1012 free_pgtable_page(iommu->root_entry);
1013 iommu->root_entry = NULL;
1015 spin_unlock_irqrestore(&iommu->lock, flags);
1331 /* iommu handling */
1332 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1337 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1340 iommu->name);
1344 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1346 spin_lock_irqsave(&iommu->lock, flags);
1347 iommu->root_entry = root;
1348 spin_unlock_irqrestore(&iommu->lock, flags);
1353 static void iommu_set_root_entry(struct intel_iommu *iommu)
1359 addr = virt_to_phys(iommu->root_entry);
1360 if (sm_supported(iommu))
1363 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1364 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1366 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1369 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1372 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1374 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1375 if (sm_supported(iommu))
1376 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
1377 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1380 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1385 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1388 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1389 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1392 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1395 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1399 static void __iommu_flush_context(struct intel_iommu *iommu,
1422 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1423 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1426 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1429 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1433 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1436 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1462 if (cap_read_drain(iommu->cap))
1465 if (cap_write_drain(iommu->cap))
1468 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1471 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1472 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1475 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1478 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1490 iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1497 if (!iommu->qi)
1501 if (info->iommu == iommu && info->bus == bus &&
1549 if (!ecap_dit(info->iommu->ecap))
1626 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1632 static void domain_flush_piotlb(struct intel_iommu *iommu,
1636 u16 did = domain->iommu_did[iommu->seq_id];
1639 qi_flush_piotlb(iommu, did, domain->default_pasid,
1643 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
1646 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1654 u16 did = domain->iommu_did[iommu->seq_id];
1662 domain_flush_piotlb(iommu, domain, addr, pages, ih);
1689 if (!cap_pgsel_inv(iommu->cap) ||
1690 mask > cap_max_amask_val(iommu->cap))
1691 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1694 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1702 if (!cap_caching_mode(iommu->cap) || !map)
1707 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1715 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
1716 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1718 iommu_flush_write_buffer(iommu);
1729 struct intel_iommu *iommu = g_iommus[idx];
1730 u16 did = domain->iommu_did[iommu->seq_id];
1733 domain_flush_piotlb(iommu, domain, 0, -1, 0);
1735 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1738 if (!cap_caching_mode(iommu->cap))
1739 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1744 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1749 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1752 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1753 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1755 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1758 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1761 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1764 static void iommu_enable_translation(struct intel_iommu *iommu)
1769 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1770 iommu->gcmd |= DMA_GCMD_TE;
1771 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1774 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1777 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1780 static void iommu_disable_translation(struct intel_iommu *iommu)
1785 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1786 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1789 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1790 iommu->gcmd &= ~DMA_GCMD_TE;
1791 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1794 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1797 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1800 static int iommu_init_domains(struct intel_iommu *iommu)
1805 ndomains = cap_ndoms(iommu->cap);
1807 iommu->name, ndomains);
1810 spin_lock_init(&iommu->lock);
1812 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1813 if (!iommu->domain_ids) {
1815 iommu->name);
1820 iommu->domains = kzalloc(size, GFP_KERNEL);
1822 if (iommu->domains) {
1824 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1827 if (!iommu->domains || !iommu->domains[0]) {
1829 iommu->name);
1830 kfree(iommu->domain_ids);
1831 kfree(iommu->domains);
1832 iommu->domain_ids = NULL;
1833 iommu->domains = NULL;
1843 set_bit(0, iommu->domain_ids);
1852 if (sm_supported(iommu))
1853 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1858 static void disable_dmar_iommu(struct intel_iommu *iommu)
1863 if (!iommu->domains || !iommu->domain_ids)
1868 if (info->iommu != iommu)
1878 if (iommu->gcmd & DMA_GCMD_TE)
1879 iommu_disable_translation(iommu);
1882 static void free_dmar_iommu(struct intel_iommu *iommu)
1884 if ((iommu->domains) && (iommu->domain_ids)) {
1885 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1889 kfree(iommu->domains[i]);
1890 kfree(iommu->domains);
1891 kfree(iommu->domain_ids);
1892 iommu->domains = NULL;
1893 iommu->domain_ids = NULL;
1896 g_iommus[iommu->seq_id] = NULL;
1899 free_context_table(iommu);
1902 if (pasid_supported(iommu)) {
1903 if (ecap_prs(iommu->ecap))
1904 intel_svm_finish_prq(iommu);
1906 if (vccap_pasid(iommu->vccap))
1907 ioasid_unregister_allocator(&iommu->pasid_allocator);
1919 struct intel_iommu *iommu;
1928 for_each_active_iommu(iommu, drhd) {
1929 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
1958 /* Must be called with iommu->lock */
1960 struct intel_iommu *iommu)
1966 assert_spin_locked(&iommu->lock);
1968 domain->iommu_refcnt[iommu->seq_id] += 1;
1970 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1971 ndomains = cap_ndoms(iommu->cap);
1972 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1975 pr_err("%s: No free domain ids\n", iommu->name);
1976 domain->iommu_refcnt[iommu->seq_id] -= 1;
1981 set_bit(num, iommu->domain_ids);
1982 set_iommu_domain(iommu, num, domain);
1984 domain->iommu_did[iommu->seq_id] = num;
1985 domain->nid = iommu->node;
1994 struct intel_iommu *iommu)
1999 assert_spin_locked(&iommu->lock);
2001 domain->iommu_refcnt[iommu->seq_id] -= 1;
2003 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
2004 num = domain->iommu_did[iommu->seq_id];
2005 clear_bit(num, iommu->domain_ids);
2006 set_iommu_domain(iommu, num, NULL);
2009 domain->iommu_did[iommu->seq_id] = 0;
2141 struct intel_iommu *iommu,
2145 u16 did = domain->iommu_did[iommu->seq_id];
2163 spin_lock(&iommu->lock);
2166 context = iommu_context_addr(iommu, bus, devfn, 1);
2186 if (did_old < cap_ndoms(iommu->cap)) {
2187 iommu->flush.flush_context(iommu, did_old,
2191 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2198 if (sm_supported(iommu)) {
2215 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2228 * Skip top levels of page tables for iommu which has
2231 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2238 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2252 context_set_address_width(context, iommu->msagaw);
2260 if (!ecap_coherent(iommu->ecap))
2269 if (cap_caching_mode(iommu->cap)) {
2270 iommu->flush.flush_context(iommu, 0,
2274 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2276 iommu_flush_write_buffer(iommu);
2283 spin_unlock(&iommu->lock);
2291 struct intel_iommu *iommu;
2300 return domain_context_mapping_one(data->domain, data->iommu,
2310 struct intel_iommu *iommu;
2313 iommu = device_to_iommu(dev, &bus, &devfn);
2314 if (!iommu)
2320 return domain_context_mapping_one(domain, iommu, table,
2324 data.iommu = iommu;
2334 struct intel_iommu *iommu = opaque;
2336 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2341 struct intel_iommu *iommu;
2344 iommu = device_to_iommu(dev, &bus, &devfn);
2345 if (!iommu)
2349 return device_context_mapped(iommu, bus, devfn);
2352 domain_context_mapped_cb, iommu);
2522 struct intel_iommu *iommu;
2530 iommu = g_iommus[iommu_id];
2531 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2551 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2557 if (!iommu)
2560 spin_lock_irqsave(&iommu->lock, flags);
2561 context = iommu_context_addr(iommu, bus, devfn, 0);
2563 spin_unlock_irqrestore(&iommu->lock, flags);
2568 __iommu_flush_cache(iommu, context, sizeof(*context));
2569 spin_unlock_irqrestore(&iommu->lock, flags);
2570 iommu->flush.flush_context(iommu,
2576 if (sm_supported(iommu))
2577 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
2579 iommu->flush.flush_iotlb(iommu,
2610 if (unlikely(!dev || !dev->iommu))
2647 static int domain_setup_first_level(struct intel_iommu *iommu,
2657 * Skip top levels of page tables for iommu which has
2660 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2678 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2679 domain->iommu_did[iommu->seq_id],
2689 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2706 info->segment = iommu->segment;
2720 info->iommu = iommu;
2728 if (ecap_dev_iotlb_support(iommu->ecap) &&
2733 if (sm_supported(iommu)) {
2734 if (pasid_supported(iommu)) {
2740 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2767 spin_lock(&iommu->lock);
2768 ret = domain_attach_iommu(domain, iommu);
2769 spin_unlock(&iommu->lock);
2784 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2793 spin_lock_irqsave(&iommu->lock, flags);
2795 ret = intel_pasid_setup_pass_through(iommu, domain,
2798 ret = domain_setup_first_level(iommu, domain, dev,
2801 ret = intel_pasid_setup_second_level(iommu, domain,
2803 spin_unlock_irqrestore(&iommu->lock, flags);
2897 struct intel_iommu *iommu;
2900 iommu = device_to_iommu(dev, &bus, &devfn);
2901 if (!iommu)
2904 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
3025 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3028 * Start from the sane iommu hardware state.
3033 if (!iommu->qi) {
3037 dmar_fault(-1, iommu);
3042 dmar_disable_qi(iommu);
3045 if (dmar_enable_qi(iommu)) {
3049 iommu->flush.flush_context = __iommu_flush_context;
3050 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3052 iommu->name);
3054 iommu->flush.flush_context = qi_flush_context;
3055 iommu->flush.flush_iotlb = qi_flush_iotlb;
3056 pr_info("%s: Using Queued invalidation\n", iommu->name);
3060 static int copy_context_table(struct intel_iommu *iommu,
3082 __iommu_flush_cache(iommu, new_ce,
3112 new_ce = alloc_pgtable_page(iommu->node);
3126 if (did >= 0 && did < cap_ndoms(iommu->cap))
3127 set_bit(did, iommu->domain_ids);
3153 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3162 static int copy_translation_tables(struct intel_iommu *iommu)
3173 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3175 new_ext = !!ecap_ecs(iommu->ecap);
3202 ret = copy_context_table(iommu, &old_rt[bus],
3206 iommu->name, bus);
3211 spin_lock_irqsave(&iommu->lock, flags);
3220 iommu->root_entry[bus].lo = val;
3227 iommu->root_entry[bus].hi = val;
3230 spin_unlock_irqrestore(&iommu->lock, flags);
3234 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3247 struct intel_iommu *iommu = data;
3250 if (!iommu)
3260 if (vcmd_alloc_pasid(iommu, &ioasid))
3268 struct intel_iommu *iommu = data;
3270 if (!iommu)
3280 vcmd_free_pasid(iommu, ioasid);
3283 static void register_pasid_allocator(struct intel_iommu *iommu)
3289 if (!cap_caching_mode(iommu->cap))
3292 if (!sm_supported(iommu)) {
3304 if (!vccap_pasid(iommu->vccap))
3308 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3309 iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3310 iommu->pasid_allocator.pdata = (void *)iommu;
3311 if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3326 struct intel_iommu *iommu;
3355 pr_err("Allocating global iommu array failed\n");
3360 for_each_iommu(iommu, drhd) {
3362 iommu_disable_translation(iommu);
3371 if (pasid_supported(iommu)) {
3372 u32 temp = 2 << ecap_pss(iommu->ecap);
3378 g_iommus[iommu->seq_id] = iommu;
3380 intel_iommu_init_qi(iommu);
3382 ret = iommu_init_domains(iommu);
3386 init_translation_status(iommu);
3388 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3389 iommu_disable_translation(iommu);
3390 clear_translation_pre_enabled(iommu);
3392 iommu->name);
3400 ret = iommu_alloc_root_entry(iommu);
3404 if (translation_pre_enabled(iommu)) {
3407 ret = copy_translation_tables(iommu);
3419 iommu->name);
3420 iommu_disable_translation(iommu);
3421 clear_translation_pre_enabled(iommu);
3424 iommu->name);
3428 if (!ecap_pass_through(iommu->ecap))
3431 if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
3435 intel_svm_check(iommu);
3443 for_each_active_iommu(iommu, drhd) {
3444 iommu_flush_write_buffer(iommu);
3446 register_pasid_allocator(iommu);
3448 iommu_set_root_entry(iommu);
3471 for_each_iommu(iommu, drhd) {
3478 iommu_disable_protect_mem_regions(iommu);
3482 iommu_flush_write_buffer(iommu);
3485 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3491 ret = intel_svm_enable_prq(iommu);
3497 ret = dmar_set_interrupt(iommu);
3505 for_each_active_iommu(iommu, drhd) {
3506 disable_dmar_iommu(iommu);
3507 free_dmar_iommu(iommu);
3528 * Restrict dma_mask to the width that the iommu can handle.
3575 struct intel_iommu *iommu;
3587 iommu = domain_get_iommu(domain);
3599 !cap_zlr(iommu->cap))
3651 struct intel_iommu *iommu;
3658 iommu = domain_get_iommu(domain);
3672 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3782 struct intel_iommu *iommu;
3793 iommu = domain_get_iommu(domain);
3810 !cap_zlr(iommu->cap))
3877 struct intel_iommu *iommu;
3892 iommu = domain_get_iommu(domain);
3893 if (WARN_ON(!iommu))
3907 !cap_zlr(iommu->cap))
4212 struct intel_iommu *iommu = NULL;
4214 for_each_active_iommu(iommu, drhd)
4215 if (iommu->qi)
4216 dmar_reenable_qi(iommu);
4218 for_each_iommu(iommu, drhd) {
4225 iommu_disable_protect_mem_regions(iommu);
4229 iommu_flush_write_buffer(iommu);
4230 iommu_set_root_entry(iommu);
4231 iommu_enable_translation(iommu);
4232 iommu_disable_protect_mem_regions(iommu);
4241 struct intel_iommu *iommu;
4243 for_each_active_iommu(iommu, drhd) {
4244 iommu->flush.flush_context(iommu, 0, 0, 0,
4246 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4254 struct intel_iommu *iommu = NULL;
4257 for_each_active_iommu(iommu, drhd) {
4258 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
4260 if (!iommu->iommu_state)
4266 for_each_active_iommu(iommu, drhd) {
4267 iommu_disable_translation(iommu);
4269 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4271 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4272 readl(iommu->reg + DMAR_FECTL_REG);
4273 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4274 readl(iommu->reg + DMAR_FEDATA_REG);
4275 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4276 readl(iommu->reg + DMAR_FEADDR_REG);
4277 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4278 readl(iommu->reg + DMAR_FEUADDR_REG);
4280 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4285 for_each_active_iommu(iommu, drhd)
4286 kfree(iommu->iommu_state);
4294 struct intel_iommu *iommu = NULL;
4305 for_each_active_iommu(iommu, drhd) {
4307 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4309 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4310 iommu->reg + DMAR_FECTL_REG);
4311 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4312 iommu->reg + DMAR_FEDATA_REG);
4313 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4314 iommu->reg + DMAR_FEADDR_REG);
4315 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4316 iommu->reg + DMAR_FEUADDR_REG);
4318 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4321 for_each_active_iommu(iommu, drhd)
4322 kfree(iommu->iommu_state);
4496 struct intel_iommu *iommu = dmaru->iommu;
4498 if (g_iommus[iommu->seq_id])
4501 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4503 iommu->name);
4506 if (!ecap_sc_support(iommu->ecap) &&
4507 domain_update_iommu_snooping(iommu)) {
4509 iommu->name);
4512 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
4513 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4515 iommu->name);
4522 if (iommu->gcmd & DMA_GCMD_TE)
4523 iommu_disable_translation(iommu);
4525 g_iommus[iommu->seq_id] = iommu;
4526 ret = iommu_init_domains(iommu);
4528 ret = iommu_alloc_root_entry(iommu);
4532 intel_svm_check(iommu);
4539 iommu_disable_protect_mem_regions(iommu);
4543 intel_iommu_init_qi(iommu);
4544 iommu_flush_write_buffer(iommu);
4547 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4548 ret = intel_svm_enable_prq(iommu);
4553 ret = dmar_set_interrupt(iommu);
4557 iommu_set_root_entry(iommu);
4558 iommu_enable_translation(iommu);
4560 iommu_disable_protect_mem_regions(iommu);
4564 disable_dmar_iommu(iommu);
4566 free_dmar_iommu(iommu);
4573 struct intel_iommu *iommu = dmaru->iommu;
4577 if (iommu == NULL)
4583 disable_dmar_iommu(iommu);
4584 free_dmar_iommu(iommu);
4724 struct intel_iommu *iommu;
4731 for_each_active_iommu(iommu, drhd)
4732 iommu_flush_iotlb_psi(iommu, si_domain,
4754 struct intel_iommu *iommu = g_iommus[i];
4758 if (!iommu)
4761 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4762 domain = get_iommu_domain(iommu, (u16)did);
4780 struct intel_iommu *iommu = NULL;
4783 for_each_iommu(iommu, drhd)
4784 iommu_disable_translation(iommu);
4790 struct intel_iommu *iommu = NULL;
4798 for_each_iommu(iommu, drhd)
4799 iommu_disable_protect_mem_regions(iommu);
4811 return container_of(iommu_dev, struct intel_iommu, iommu);
4818 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4819 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4829 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4830 return sprintf(buf, "%llx\n", iommu->reg_phys);
4838 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4839 return sprintf(buf, "%llx\n", iommu->cap);
4847 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4848 return sprintf(buf, "%llx\n", iommu->ecap);
4856 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4857 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4865 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4866 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4867 cap_ndoms(iommu->cap)));
4882 .name = "intel-iommu",
4929 struct intel_iommu *iommu __maybe_unused;
4933 for_each_active_iommu(iommu, drhd) {
4972 struct intel_iommu *iommu;
4983 panic("tboot: Failed to initialize iommu memory\n");
5023 for_each_iommu(iommu, drhd)
5024 iommu_disable_protect_mem_regions(iommu);
5044 panic("tboot: Failed to reserve iommu ranges\n");
5065 for_each_active_iommu(iommu, drhd) {
5066 iommu_device_sysfs_add(&iommu->iommu, NULL,
5068 "%s", iommu->name);
5069 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
5070 iommu_device_register(&iommu->iommu);
5077 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
5085 for_each_iommu(iommu, drhd) {
5086 if (!drhd->ignored && !translation_pre_enabled(iommu))
5087 iommu_enable_translation(iommu);
5089 iommu_disable_protect_mem_regions(iommu);
5110 struct intel_iommu *iommu = opaque;
5112 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
5117 * NB - intel-iommu lacks any sort of reference counting for the users of
5122 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
5124 if (!iommu || !dev || !dev_is_pci(dev))
5127 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
5133 struct intel_iommu *iommu;
5141 iommu = info->iommu;
5145 if (dev_is_pci(info->dev) && sm_supported(iommu))
5146 intel_pasid_tear_down_entry(iommu, info->dev,
5151 domain_context_clear(iommu, info->dev);
5157 spin_lock_irqsave(&iommu->lock, flags);
5158 domain_detach_iommu(domain, iommu);
5159 spin_unlock_irqrestore(&iommu->lock, flags);
5300 struct intel_iommu *iommu;
5302 iommu = device_to_iommu(dev, NULL, NULL);
5303 if (!iommu)
5322 * iommu->lock must be held to attach domain to iommu and setup the
5325 spin_lock(&iommu->lock);
5326 ret = domain_attach_iommu(domain, iommu);
5332 ret = domain_setup_first_level(iommu, domain, dev,
5335 ret = intel_pasid_setup_second_level(iommu, domain, dev,
5339 spin_unlock(&iommu->lock);
5348 domain_detach_iommu(domain, iommu);
5350 spin_unlock(&iommu->lock);
5362 struct intel_iommu *iommu;
5370 iommu = info->iommu;
5374 spin_lock(&iommu->lock);
5375 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
5376 domain_detach_iommu(domain, iommu);
5377 spin_unlock(&iommu->lock);
5386 struct intel_iommu *iommu;
5389 iommu = device_to_iommu(dev, NULL, NULL);
5390 if (!iommu)
5393 /* check if this iommu agaw is sufficient for max mapped address */
5394 addr_width = agaw_to_width(iommu->agaw);
5395 if (addr_width > cap_mgaw(iommu->cap))
5396 addr_width = cap_mgaw(iommu->cap);
5399 dev_err(dev, "%s: iommu width (%d) is not "
5409 while (iommu->agaw < dmar_domain->agaw) {
5494 * X: indexed by iommu cache type
5534 struct intel_iommu *iommu;
5548 iommu = device_to_iommu(dev, &bus, &devfn);
5549 if (!iommu)
5556 spin_lock(&iommu->lock);
5562 did = dmar_domain->iommu_did[iommu->seq_id];
5609 qi_flush_piotlb(iommu, did, pasid,
5639 qi_flush_dev_iotlb_pasid(iommu, sid,
5653 spin_unlock(&iommu->lock);
5683 pr_err("%s: iommu width (%d) is not "
5754 struct intel_iommu *iommu;
5758 for_each_active_iommu(iommu, drhd) {
5759 if (!sm_supported(iommu)) {
5772 struct intel_iommu *iommu;
5776 for_each_active_iommu(iommu, drhd) {
5777 if (!pasid_supported(iommu)) {
5790 struct intel_iommu *iommu;
5794 for_each_active_iommu(iommu, drhd) {
5795 if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
5817 struct intel_iommu *iommu;
5819 iommu = device_to_iommu(dev, NULL, NULL);
5820 if (!iommu)
5823 if (translation_pre_enabled(iommu))
5826 return &iommu->iommu;
5831 struct intel_iommu *iommu;
5833 iommu = device_to_iommu(dev, NULL, NULL);
5834 if (!iommu)
5912 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5926 spin_lock(&iommu->lock);
5933 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5943 iommu->flush.flush_context(iommu,
5944 domain->iommu_did[iommu->seq_id],
5957 spin_unlock(&iommu->lock);
5986 struct intel_iommu *iommu;
5990 iommu = device_to_iommu(dev, NULL, NULL);
5991 if (!iommu || dmar_disabled)
5994 if (!sm_supported(iommu) || !pasid_supported(iommu))
5997 ret = intel_iommu_enable_pasid(iommu, dev);
6071 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
6091 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
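
Most of the hits above fall into a few recurring patterns: walking DMAR units with for_each_iommu()/for_each_active_iommu(), probing capability bits through the cap_*()/ecap_*() helpers, and taking iommu->lock or iommu->register_lock around context-table and register updates. As a rough illustration only, the hypothetical helper below (dump_iommu_features() is not part of the driver) is a minimal sketch of how those pieces are typically combined; it assumes the intel-iommu headers shown in the listing and that the caller holds dmar_global_lock (or the RCU read lock), which the DMAR iterators require.

/*
 * Hypothetical helper -- not part of the driver.  Walk every active
 * DMAR unit and report the feature bits most of the listing keys off
 * of (scalable mode, caching mode, PASID, page-request support).
 * Assumes the caller holds dmar_global_lock or the RCU read lock,
 * as for_each_active_iommu() requires.
 */
#include <linux/dmar.h>
#include <linux/intel-iommu.h>

static void __maybe_unused dump_iommu_features(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		pr_info("%s: sm=%d cm=%d pasid=%d prs=%d\n",
			iommu->name,
			sm_supported(iommu) ? 1 : 0,
			cap_caching_mode(iommu->cap) ? 1 : 0,
			pasid_supported(iommu) ? 1 : 0,
			ecap_prs(iommu->ecap) ? 1 : 0);
	}
}

The same walk-and-test shape appears throughout the listing, e.g. in domain_update_iommu_superpage(), init_dmars() and intel_iommu_init(), usually followed by per-unit setup such as iommu_init_domains() and iommu_set_root_entry().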