Lines matching refs: iommu (the leading number on each line is the source-file line number; non-matching lines are omitted)
19 #include <linux/amd-iommu.h>
25 #include <asm/iommu.h>
267 bool translation_pre_enabled(struct amd_iommu *iommu)
269 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
273 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
275 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
278 static void init_translation_status(struct amd_iommu *iommu)
282 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
284 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
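The three helpers above detect and clear a translation setup that firmware or a previous (kexec/kdump) kernel left enabled. A minimal sketch of how init_translation_status() ties the matched lines together; the CONTROL_IOMMU_EN bit position and AMD_IOMMU_FLAG_TRANS_PRE_ENABLED flag are assumed from amd_iommu_types.h:

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	/* If the enable bit is already set in the control register, translation
	 * was pre-enabled before this kernel took over. */
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1ULL << CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}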
310 struct amd_iommu *iommu;
312 for_each_iommu(iommu) {
313 ret = iommu_feature(iommu, mask);
327 static void __init early_iommu_features_init(struct amd_iommu *iommu,
331 iommu->features = h->efr_reg;
336 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
340 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
341 pci_read_config_dword(iommu->dev, 0xfc, &val);
345 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
347 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
348 pci_write_config_dword(iommu->dev, 0xfc, val);
349 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
352 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
356 pci_write_config_dword(iommu->dev, 0xf0, address);
357 pci_read_config_dword(iommu->dev, 0xf4, &val);
361 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
363 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
364 pci_write_config_dword(iommu->dev, 0xf4, val);
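The four accessors above implement the indirect L1/L2 register interface in the IOMMU's PCI config space: an index/data pair at 0xf8/0xfc for L1 and 0xf0/0xf4 for L2, with a write-enable bit (bit 31 for L1, bit 8 for L2) set in the index register when storing a value. A sketch of the L1 read path, adding only the return statement the listing elides:

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	/* Select the L1 register via the index port, read it via the data port. */
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}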
380 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
382 u64 start = iommu->exclusion_start & PAGE_MASK;
383 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
386 if (!iommu->exclusion_start)
390 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
394 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
398 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
400 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
403 if (!iommu_feature(iommu, FEATURE_SNP))
410 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
417 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
422 static void iommu_set_device_table(struct amd_iommu *iommu)
426 BUG_ON(iommu->mmio_base == NULL);
430 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
435 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
439 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
441 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
444 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
448 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
450 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
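iommu_feature_enable() and iommu_feature_disable() are plain read-modify-write cycles on the 64-bit MMIO control register; only the |=/&= lines are missing from the listing. A sketch of the enable side, assuming 'bit' is a bit position as it is used throughout the driver:

static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}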
453 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
457 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
460 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
464 static void iommu_enable(struct amd_iommu *iommu)
466 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
469 static void iommu_disable(struct amd_iommu *iommu)
471 if (!iommu->mmio_base)
475 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
478 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
479 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
482 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
483 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
486 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
505 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
507 if (iommu->mmio_base)
508 iounmap(iommu->mmio_base);
509 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
655 static int __init alloc_command_buffer(struct amd_iommu *iommu)
657 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
660 return iommu->cmd_buf ? 0 : -ENOMEM;
667 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
669 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
670 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
677 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
679 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
681 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
682 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
683 iommu->cmd_buf_head = 0;
684 iommu->cmd_buf_tail = 0;
686 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
693 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
697 BUG_ON(iommu->cmd_buf == NULL);
699 entry = iommu_virt_to_phys(iommu->cmd_buf);
702 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
705 amd_iommu_reset_cmd_buffer(iommu);
711 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
713 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
716 static void __init free_command_buffer(struct amd_iommu *iommu)
718 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
721 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
728 iommu_feature(iommu, FEATURE_SNP) &&
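iommu_alloc_4k_pages() wraps __get_free_pages() for the event buffer, PPR log, and completion-wait semaphore: when the IOMMU reports FEATURE_SNP, the buffer must additionally be remapped with 4K pages so it can be handled correctly under SEV-SNP. A hedged sketch of the helper, consistent with the matched lines (set_memory_4k() is the call the listing elides):

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	/* With SNP, split the allocation into 4K mappings; back out on failure. */
	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}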
738 static int __init alloc_event_buffer(struct amd_iommu *iommu)
740 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
743 return iommu->evt_buf ? 0 : -ENOMEM;
746 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
750 BUG_ON(iommu->evt_buf == NULL);
752 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
754 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
758 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
759 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
761 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
767 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
769 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
772 static void __init free_event_buffer(struct amd_iommu *iommu)
774 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
778 static int __init alloc_ppr_log(struct amd_iommu *iommu)
780 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
783 return iommu->ppr_log ? 0 : -ENOMEM;
786 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
790 if (iommu->ppr_log == NULL)
793 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
795 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
799 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
800 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
802 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
803 iommu_feature_enable(iommu, CONTROL_PPR_EN);
806 static void __init free_ppr_log(struct amd_iommu *iommu)
808 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
811 static void free_ga_log(struct amd_iommu *iommu)
814 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
815 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
819 static int iommu_ga_log_enable(struct amd_iommu *iommu)
825 if (!iommu->ga_log)
829 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
833 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
834 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
836 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
838 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
840 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
841 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
844 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
845 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
848 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
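After GALOG_EN is set the hardware needs time to start the log, so the second status read above sits in a polling loop. A sketch of that loop, assuming the LOOP_TIMEOUT constant and MMIO_STATUS_GALOG_RUN_MASK bit defined elsewhere in the driver:

	/* Poll until the hardware reports the GA log as running. */
	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
		udelay(10);
	}

	if (WARN_ON(i >= LOOP_TIMEOUT))
		return -EINVAL;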
860 static int iommu_init_ga_log(struct amd_iommu *iommu)
866 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
868 if (!iommu->ga_log)
871 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
873 if (!iommu->ga_log_tail)
878 free_ga_log(iommu);
885 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
887 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
889 return iommu->cmd_sem ? 0 : -ENOMEM;
892 static void __init free_cwwb_sem(struct amd_iommu *iommu)
894 if (iommu->cmd_sem)
895 free_page((unsigned long)iommu->cmd_sem);
898 static void iommu_enable_xt(struct amd_iommu *iommu)
907 iommu_feature_enable(iommu, CONTROL_XT_EN);
911 static void iommu_enable_gt(struct amd_iommu *iommu)
913 if (!iommu_feature(iommu, FEATURE_GT))
916 iommu_feature_enable(iommu, CONTROL_GT_EN);
943 struct amd_iommu *iommu;
952 for_each_iommu(iommu) {
954 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
955 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
959 iommu->index);
967 iommu->index);
1049 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
1051 amd_iommu_rlookup_table[devid] = iommu;
1058 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1078 set_iommu_for_device(iommu, devid);
1192 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1214 iommu->acpi_flags = h->flags;
1238 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1250 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1280 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1281 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1313 set_dev_entry_from_acpi(iommu, devid, e->flags,
1341 set_dev_entry_from_acpi(iommu,
1344 set_dev_entry_from_acpi(iommu, dev_i,
1380 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1445 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1459 static void __init free_iommu_one(struct amd_iommu *iommu)
1461 free_cwwb_sem(iommu);
1462 free_command_buffer(iommu);
1463 free_event_buffer(iommu);
1464 free_ppr_log(iommu);
1465 free_ga_log(iommu);
1466 iommu_unmap_mmio_space(iommu);
1471 struct amd_iommu *iommu, *next;
1473 for_each_iommu_safe(iommu, next) {
1474 list_del(&iommu->list);
1475 free_iommu_one(iommu);
1476 kfree(iommu);
1486 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1495 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1496 pci_read_config_dword(iommu->dev, 0xf4, &value);
1502 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1504 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1505 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1508 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1517 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1527 value = iommu_read_l2(iommu, 0x47);
1533 iommu_write_l2(iommu, 0x47, value | BIT(0));
1535 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1543 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1547 raw_spin_lock_init(&iommu->lock);
1548 iommu->cmd_sem_val = 0;
1551 list_add_tail(&iommu->list, &amd_iommu_list);
1552 iommu->index = amd_iommus_present++;
1554 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1560 amd_iommus[iommu->index] = iommu;
1563 * Copy data from ACPI table entry to the iommu struct
1565 iommu->devid = h->devid;
1566 iommu->cap_ptr = h->cap_ptr;
1567 iommu->pci_seg = h->pci_seg;
1568 iommu->mmio_phys = h->mmio_phys;
1576 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1578 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1592 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1594 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1617 early_iommu_features_init(iommu, h);
1624 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1625 iommu->mmio_phys_end);
1626 if (!iommu->mmio_base)
1629 if (alloc_cwwb_sem(iommu))
1632 if (alloc_command_buffer(iommu))
1635 if (alloc_event_buffer(iommu))
1638 iommu->int_enabled = false;
1640 init_translation_status(iommu);
1641 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1642 iommu_disable(iommu);
1643 clear_translation_pre_enabled(iommu);
1645 iommu->index);
1648 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1650 ret = init_iommu_from_acpi(iommu, h);
1654 ret = amd_iommu_create_irq_domain(iommu);
1662 amd_iommu_rlookup_table[iommu->devid] = NULL;
1701 struct amd_iommu *iommu;
1719 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1720 if (iommu == NULL)
1723 ret = init_iommu_one(iommu, h);
1735 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1738 struct pci_dev *pdev = iommu->dev;
1740 if (!iommu_feature(iommu, FEATURE_PC))
1747 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1748 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1749 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1758 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1759 return sprintf(buf, "%x\n", iommu->cap);
1767 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1768 return sprintf(buf, "%llx\n", iommu->features);
1779 .name = "amd-iommu",
1793 static void __init late_iommu_features_init(struct amd_iommu *iommu)
1797 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
1801 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1803 if (!iommu->features) {
1804 iommu->features = features;
1812 if (features != iommu->features)
1814 features, iommu->features);
1817 static int __init iommu_init_pci(struct amd_iommu *iommu)
1819 int cap_ptr = iommu->cap_ptr;
1822 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1823 iommu->devid & 0xff);
1824 if (!iommu->dev)
1828 iommu->dev->match_driver = false;
1830 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1831 &iommu->cap);
1833 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1836 late_iommu_features_init(iommu);
1838 if (iommu_feature(iommu, FEATURE_GT)) {
1843 pasmax = iommu->features & FEATURE_PASID_MASK;
1851 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1860 if (iommu_feature(iommu, FEATURE_GT) &&
1861 iommu_feature(iommu, FEATURE_PPR)) {
1862 iommu->is_iommu_v2 = true;
1866 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1869 ret = iommu_init_ga_log(iommu);
1873 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1876 init_iommu_perf_ctr(iommu);
1878 if (is_rd890_iommu(iommu->dev)) {
1881 iommu->root_pdev =
1882 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1890 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1891 &iommu->stored_addr_lo);
1892 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1893 &iommu->stored_addr_hi);
1896 iommu->stored_addr_lo &= ~1;
1900 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1903 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1906 amd_iommu_erratum_746_workaround(iommu);
1907 amd_iommu_ats_write_check_workaround(iommu);
1909 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1910 amd_iommu_groups, "ivhd%d", iommu->index);
1911 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1912 iommu_device_register(&iommu->iommu);
1914 return pci_enable_device(iommu->dev);
1923 struct amd_iommu *iommu;
1925 for_each_iommu(iommu) {
1926 struct pci_dev *pdev = iommu->dev;
1929 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
1931 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1932 pr_info("Extended features (%#llx):", iommu->features);
1935 if (iommu_feature(iommu, (1ULL << i)))
1939 if (iommu->features & FEATURE_GAM_VAPIC)
1956 struct amd_iommu *iommu;
1959 for_each_iommu(iommu) {
1960 ret = iommu_init_pci(iommu);
1965 iommu_set_cwwb_range(iommu);
1982 for_each_iommu(iommu)
1983 iommu_flush_all_caches(iommu);
2000 static int iommu_setup_msi(struct amd_iommu *iommu)
2004 r = pci_enable_msi(iommu->dev);
2008 r = request_threaded_irq(iommu->dev->irq,
2012 iommu);
2015 pci_disable_msi(iommu->dev);
2019 iommu->int_enabled = true;
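iommu_setup_msi() follows the usual pattern: enable MSI on the IOMMU's PCI function, request a threaded interrupt, and undo the MSI enable if that fails. The handler arguments are elided in the listing; a sketch of the full call, assuming the amd_iommu_int_handler/amd_iommu_int_thread pair declared in the driver's private header:

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,	/* hard-irq half */
				 amd_iommu_int_thread,	/* threaded half */
				 0, "AMD-Vi",
				 iommu);
	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}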
2034 static void iommu_update_intcapxt(struct amd_iommu *iommu)
2037 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
2038 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
2039 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
2055 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2056 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2057 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2063 struct amd_iommu *iommu;
2065 for_each_iommu(iommu) {
2066 if (iommu->dev->irq == notify->irq) {
2067 iommu_update_intcapxt(iommu);
2077 static int iommu_init_intcapxt(struct amd_iommu *iommu)
2080 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
2093 notify->irq = iommu->dev->irq;
2096 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
2099 iommu->devid, iommu->dev->irq);
2103 iommu_update_intcapxt(iommu);
2104 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2108 static int iommu_init_msi(struct amd_iommu *iommu)
2112 if (iommu->int_enabled)
2115 if (iommu->dev->msi_cap)
2116 ret = iommu_setup_msi(iommu);
2124 ret = iommu_init_intcapxt(iommu);
2128 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2130 if (iommu->ppr_log != NULL)
2131 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2133 iommu_ga_log_enable(iommu);
2265 static void iommu_init_flags(struct amd_iommu *iommu)
2267 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2268 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2269 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2271 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2272 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2273 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2275 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2276 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2277 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2279 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2280 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2281 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2286 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2289 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2292 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2296 struct pci_dev *pdev = iommu->root_pdev;
2298 /* RD890 BIOSes may not have completely reconfigured the iommu */
2299 if (!is_rd890_iommu(iommu->dev) || !pdev)
2303 * First, we need to ensure that the iommu is enabled. This is
2311 /* Enable the iommu */
2315 /* Restore the iommu BAR */
2316 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2317 iommu->stored_addr_lo);
2318 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2319 iommu->stored_addr_hi);
2324 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2328 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2331 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2332 iommu->stored_addr_lo | 1);
2335 static void iommu_enable_ga(struct amd_iommu *iommu)
2340 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2343 iommu_feature_enable(iommu, CONTROL_GA_EN);
2344 iommu->irte_ops = &irte_128_ops;
2347 iommu->irte_ops = &irte_32_ops;
2353 static void early_enable_iommu(struct amd_iommu *iommu)
2355 iommu_disable(iommu);
2356 iommu_init_flags(iommu);
2357 iommu_set_device_table(iommu);
2358 iommu_enable_command_buffer(iommu);
2359 iommu_enable_event_buffer(iommu);
2360 iommu_set_exclusion_range(iommu);
2361 iommu_enable_ga(iommu);
2362 iommu_enable_xt(iommu);
2363 iommu_enable(iommu);
2364 iommu_flush_all_caches(iommu);
2377 struct amd_iommu *iommu;
2392 for_each_iommu(iommu) {
2393 clear_translation_pre_enabled(iommu);
2394 early_enable_iommu(iommu);
2401 for_each_iommu(iommu) {
2402 iommu_disable_command_buffer(iommu);
2403 iommu_disable_event_buffer(iommu);
2404 iommu_enable_command_buffer(iommu);
2405 iommu_enable_event_buffer(iommu);
2406 iommu_enable_ga(iommu);
2407 iommu_enable_xt(iommu);
2408 iommu_set_device_table(iommu);
2409 iommu_flush_all_caches(iommu);
2429 struct amd_iommu *iommu;
2431 for_each_iommu(iommu) {
2432 iommu_enable_ppr_log(iommu);
2433 iommu_enable_gt(iommu);
2446 struct amd_iommu *iommu;
2448 for_each_iommu(iommu)
2449 iommu_disable(iommu);
2464 struct amd_iommu *iommu;
2466 for_each_iommu(iommu)
2467 iommu_apply_resume_quirks(iommu);
2761 struct amd_iommu *iommu;
2764 for_each_iommu(iommu) {
2765 ret = iommu_init_msi(iommu);
2866 struct amd_iommu *iommu;
2869 for_each_iommu(iommu)
2870 iommu_flush_all_caches(iommu);
2942 struct amd_iommu *iommu;
2956 for_each_iommu(iommu)
2957 amd_iommu_debugfs_setup(iommu);
3003 x86_init.iommu.iommu_init = amd_iommu_init;
3220 struct amd_iommu *iommu;
3222 for_each_iommu(iommu)
3224 return iommu;
3238 struct amd_iommu *iommu = get_amd_iommu(idx);
3240 if (iommu)
3241 return iommu->max_banks;
3255 struct amd_iommu *iommu = get_amd_iommu(idx);
3257 if (iommu)
3258 return iommu->max_counters;
3264 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3274 /* Check for valid iommu and pc register indexing */
3275 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3281 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3282 (iommu->max_counters << 8) | 0x28);
3290 writel((u32)val, iommu->mmio_base + offset);
3291 writel((val >> 32), iommu->mmio_base + offset + 4);
3293 *value = readl(iommu->mmio_base + offset + 4);
3295 *value |= readl(iommu->mmio_base + offset);
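The MMIO offset of a performance-counter register is encoded from the bank, counter, and function indices; the listing elides that computation but shows the matching upper bound (max_offset_lim). A sketch consistent with that bound:

	/* Counter register offset: (0x40 | bank) << 12 | cntr << 8 | fxn,
	 * kept within the aperture bounded by max_offset_lim above. */
	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
	if ((offset < MMIO_CNTR_REG_OFFSET) || (offset > max_offset_lim))
		return -EINVAL;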
3302 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3304 if (!iommu)
3307 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3311 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3313 if (!iommu)
3316 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
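amd_iommu_pc_get_reg() and amd_iommu_pc_set_reg() are the exported entry points consumed by the perf amd_iommu PMU. A hypothetical caller reading back the counter of bank 0, counter 0 on the first IOMMU might look as follows; the 0x00 function offset (the counter register itself) is an assumption taken from the PMU driver, not from this listing:

/* Hypothetical usage sketch: read bank 0 / counter 0 of the first IOMMU. */
static void example_read_counter(void)
{
	struct amd_iommu *iommu = get_amd_iommu(0);
	u64 count = 0;

	if (!iommu || amd_iommu_pc_get_reg(iommu, 0, 0, 0x00, &count))
		pr_warn("AMD-Vi: could not read perf counter\n");
	else
		pr_info("AMD-Vi: counter 0/0 = %llu\n", count);
}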