Lines Matching refs:iommu
20 #include <linux/amd-iommu.h>
26 #include <asm/iommu.h>
239 bool translation_pre_enabled(struct amd_iommu *iommu)
241 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
244 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
246 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
249 static void init_translation_status(struct amd_iommu *iommu)
253 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
255 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
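
Only the readq and the flag assignment of init_translation_status() match the search; the test between them is elided. A minimal sketch of the whole helper, assuming the bit checked is CONTROL_IOMMU_EN (a bit position, as it is used with iommu_feature_enable() further down):

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	/* Remember whether firmware or a previous kernel (e.g. across
	 * kexec) left translation enabled on this IOMMU. */
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1ULL << CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
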
277 struct amd_iommu *iommu;
279 for_each_iommu(iommu) {
280 u64 tmp = iommu->features;
281 u64 tmp2 = iommu->features2;
283 if (list_is_first(&iommu->list, &amd_iommu_list)) {
294 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
296 iommu->index, iommu->pci_seg->id,
297 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
298 PCI_FUNC(iommu->devid));
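
For context: iommu->devid is the IOMMU's own 16-bit PCI requester ID, bus number in the high byte and devfn in the low byte, which is what the pr_warn() above decodes. A small usage sketch; the macro expansions in the comments are assumptions based on the usual definitions in amd_iommu_types.h and <linux/pci.h>:

u16 devid = iommu->devid;

pr_info("iommu%d at %04x:%02x:%02x.%01x\n",
	iommu->index, iommu->pci_seg->id,
	PCI_BUS_NUM(devid),	/* (devid >> 8) & 0xff           */
	PCI_SLOT(devid),	/* (devfn >> 3) & 0x1f           */
	PCI_FUNC(devid));	/* devfn & 0x07                  */
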
322 static void __init early_iommu_features_init(struct amd_iommu *iommu,
326 iommu->features = h->efr_reg;
327 iommu->features2 = h->efr_reg2;
335 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
339 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
340 pci_read_config_dword(iommu->dev, 0xfc, &val);
344 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
346 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
347 pci_write_config_dword(iommu->dev, 0xfc, val);
348 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
351 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
355 pci_write_config_dword(iommu->dev, 0xf0, address);
356 pci_read_config_dword(iommu->dev, 0xf4, &val);
360 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
362 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
363 pci_write_config_dword(iommu->dev, 0xf4, val);
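
Lines 335-363 expose the indirect access protocol for the IOMMU's internal L1/L2 registers: a select word goes to PCI config offset 0xf8 (L1) or 0xf0 (L2), with bit 31 resp. bit 8 acting as the write-enable, and data moves through the companion offset 0xfc resp. 0xf4. A round-trip sketch built from those calls; l2_set_bit0() is a hypothetical helper shown only to illustrate the sequence (it mirrors what the ATS write-check workaround at lines 1763/1769 does):

static void l2_set_bit0(struct amd_iommu *iommu)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, 0x47);		/* select L2 reg 0x47   */
	pci_read_config_dword(iommu->dev, 0xf4, &val);		/* read current value   */

	pci_write_config_dword(iommu->dev, 0xf0, 0x47 | (1 << 8));	/* select + write-enable */
	pci_write_config_dword(iommu->dev, 0xf4, val | BIT(0));	/* write modified value  */
}
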
379 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
381 u64 start = iommu->exclusion_start & PAGE_MASK;
382 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
385 if (!iommu->exclusion_start)
389 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
393 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
397 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
399 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
409 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
416 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
421 static void iommu_set_device_table(struct amd_iommu *iommu)
424 u32 dev_table_size = iommu->pci_seg->dev_table_size;
425 void *dev_table = (void *)get_dev_table(iommu);
427 BUG_ON(iommu->mmio_base == NULL);
431 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
436 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
440 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
442 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
445 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
449 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
451 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
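
iommu_feature_enable()/iommu_feature_disable() are simple read-modify-write accesses to the 64-bit MMIO control register; the u8 argument is a bit position, not a mask. A sketch consistent with the readq/writeq pairs above (the shifts are the only lines the search does not show):

static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);			/* set the control bit   */
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);			/* clear the control bit */
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
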
454 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
458 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
461 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
465 static void iommu_enable(struct amd_iommu *iommu)
467 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
470 static void iommu_disable(struct amd_iommu *iommu)
472 if (!iommu->mmio_base)
476 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
479 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
480 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
483 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
484 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
487 iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
488 iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
491 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
494 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
513 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
515 if (iommu->mmio_base)
516 iounmap(iommu->mmio_base);
517 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
751 static int __init alloc_command_buffer(struct amd_iommu *iommu)
753 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
756 return iommu->cmd_buf ? 0 : -ENOMEM;
763 static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
769 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
775 iommu_feature_disable(iommu, cntrl_log);
776 iommu_feature_disable(iommu, cntrl_intr);
778 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
780 iommu_feature_enable(iommu, cntrl_intr);
781 iommu_feature_enable(iommu, cntrl_log);
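
amd_iommu_restart_log() is the common "log overflowed" recovery used by the three wrappers that follow: the log and its interrupt are disabled, the overflow bit is acknowledged by writing it back to the write-1-to-clear status register, and both are re-enabled. A sketch filling in the elided control flow; the early-return condition and the message text are assumptions, the rest follows the fragments above:

static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
				  u8 cntrl_intr, u8 cntrl_log,
				  u32 status_run_mask, u32 status_overflow_mask)
{
	u32 status;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	if (status & status_run_mask)
		return;				/* log still running, nothing to do */

	pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);	/* illustrative message */

	iommu_feature_disable(iommu, cntrl_log);
	iommu_feature_disable(iommu, cntrl_intr);

	/* status register is write-1-to-clear; ack the overflow bit */
	writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);

	iommu_feature_enable(iommu, cntrl_intr);
	iommu_feature_enable(iommu, cntrl_log);
}
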
788 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
790 amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
799 void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
801 amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
810 void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
812 amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
821 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
823 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
825 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
826 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
827 iommu->cmd_buf_head = 0;
828 iommu->cmd_buf_tail = 0;
830 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
837 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
841 BUG_ON(iommu->cmd_buf == NULL);
843 entry = iommu_virt_to_phys(iommu->cmd_buf);
846 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
849 amd_iommu_reset_cmd_buffer(iommu);
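
iommu_enable_command_buffer() programs the command-ring base register (physical address with the ring size encoded in the low bits) and then reuses amd_iommu_reset_cmd_buffer() to stop the command processor, zero both the hardware and the driver's head/tail pointers, and restart it. A sketch of the enable path; MMIO_CMD_SIZE_512 as the size field is an assumption, since the search only shows the address half of 'entry':

static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	/* ring base address, with the ring size encoded in the low bits */
	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;			/* assumed size encoding */

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* stop the command processor, zero head/tail, start it again */
	amd_iommu_reset_cmd_buffer(iommu);
}
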
855 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
857 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
860 static void __init free_command_buffer(struct amd_iommu *iommu)
862 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
865 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
882 static int __init alloc_event_buffer(struct amd_iommu *iommu)
884 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
887 return iommu->evt_buf ? 0 : -ENOMEM;
890 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
894 BUG_ON(iommu->evt_buf == NULL);
896 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
898 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
902 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
903 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
905 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
911 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
913 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
916 static void __init free_event_buffer(struct amd_iommu *iommu)
918 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
922 static int __init alloc_ppr_log(struct amd_iommu *iommu)
924 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
927 return iommu->ppr_log ? 0 : -ENOMEM;
930 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
934 if (iommu->ppr_log == NULL)
937 iommu_feature_enable(iommu, CONTROL_PPR_EN);
939 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
941 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
945 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
946 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
948 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
949 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
952 static void __init free_ppr_log(struct amd_iommu *iommu)
954 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
957 static void free_ga_log(struct amd_iommu *iommu)
960 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
961 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
966 static int iommu_ga_log_enable(struct amd_iommu *iommu)
971 if (!iommu->ga_log)
974 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
975 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
977 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
979 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
981 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
982 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
985 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
986 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
989 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
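
The readl at line 989 is the start of a bounded poll: after GAINT/GALOG are enabled, iommu_ga_log_enable() waits for the hardware to report the GA log running before returning success. A sketch of that continuation; LOOP_TIMEOUT and MMIO_STATUS_GALOG_RUN_MASK are the names this driver uses elsewhere and are assumed here:

u32 status;
int i;

for (i = 0; i < LOOP_TIMEOUT; ++i) {
	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	if (status & MMIO_STATUS_GALOG_RUN_MASK)
		break;				/* GA log is up and running */
	udelay(10);
}

if (WARN_ON(i >= LOOP_TIMEOUT))
	return -EINVAL;			/* hardware never started the log */
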
1001 static int iommu_init_ga_log(struct amd_iommu *iommu)
1006 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1008 if (!iommu->ga_log)
1011 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1013 if (!iommu->ga_log_tail)
1018 free_ga_log(iommu);
1023 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
1025 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
1027 return iommu->cmd_sem ? 0 : -ENOMEM;
1030 static void __init free_cwwb_sem(struct amd_iommu *iommu)
1032 if (iommu->cmd_sem)
1033 free_page((unsigned long)iommu->cmd_sem);
1036 static void iommu_enable_xt(struct amd_iommu *iommu)
1045 iommu_feature_enable(iommu, CONTROL_XT_EN);
1049 static void iommu_enable_gt(struct amd_iommu *iommu)
1051 if (!iommu_feature(iommu, FEATURE_GT))
1054 iommu_feature_enable(iommu, CONTROL_GT_EN);
1067 static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
1069 struct dev_table_entry *dev_table = get_dev_table(iommu);
1083 static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
1085 struct dev_table_entry *dev_table = get_dev_table(iommu);
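
set_dev_entry_bit()/get_dev_entry_bit() address a single bit inside a 256-bit device-table entry: the upper bits of 'bit' pick one of the four 64-bit words, the low six bits pick the bit within that word. A sketch under that assumption (the data[4] layout of struct dev_table_entry is reproduced from memory, not from the listing):

static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	int i    = (bit >> 6) & 0x03;	/* which 64-bit word of the DTE */
	int _bit = bit & 0x3f;		/* bit within that word         */

	dev_table[devid].data[i] |= (1ULL << _bit);
}

static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	int i    = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (dev_table[devid].data[i] & (1ULL << _bit)) >> _bit;
}
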
1090 static bool __copy_device_table(struct amd_iommu *iommu)
1093 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1102 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
1103 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
1109 iommu->index);
1182 struct amd_iommu *iommu;
1195 for_each_iommu(iommu) {
1196 if (pci_seg->id != iommu->pci_seg->id)
1198 if (!__copy_device_table(iommu))
1207 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
1211 sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
1212 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
1215 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
1222 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1226 set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
1228 set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
1230 set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
1232 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
1234 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
1236 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
1238 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
1240 amd_iommu_apply_erratum_63(iommu, devid);
1242 amd_iommu_set_rlookup_table(iommu, devid);
1356 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1365 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1379 iommu->acpi_flags = h->flags;
1405 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1417 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1447 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1447 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1448 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1480 set_dev_entry_from_acpi(iommu, devid, e->flags,
1508 set_dev_entry_from_acpi(iommu,
1511 set_dev_entry_from_acpi(iommu, dev_i,
1547 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1613 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1695 static void __init free_iommu_one(struct amd_iommu *iommu)
1697 free_cwwb_sem(iommu);
1698 free_command_buffer(iommu);
1699 free_event_buffer(iommu);
1700 free_ppr_log(iommu);
1701 free_ga_log(iommu);
1702 iommu_unmap_mmio_space(iommu);
1707 struct amd_iommu *iommu, *next;
1709 for_each_iommu_safe(iommu, next) {
1710 list_del(&iommu->list);
1711 free_iommu_one(iommu);
1712 kfree(iommu);
1722 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1731 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1732 pci_read_config_dword(iommu->dev, 0xf4, &value);
1738 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1740 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1741 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1744 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1753 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1763 value = iommu_read_l2(iommu, 0x47);
1769 iommu_write_l2(iommu, 0x47, value | BIT(0));
1771 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1779 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
1787 iommu->pci_seg = pci_seg;
1789 raw_spin_lock_init(&iommu->lock);
1790 atomic64_set(&iommu->cmd_sem_val, 0);
1793 list_add_tail(&iommu->list, &amd_iommu_list);
1794 iommu->index = amd_iommus_present++;
1796 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1802 amd_iommus[iommu->index] = iommu;
1805 * Copy data from ACPI table entry to the iommu struct
1807 iommu->devid = h->devid;
1808 iommu->cap_ptr = h->cap_ptr;
1809 iommu->mmio_phys = h->mmio_phys;
1817 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1819 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1833 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1835 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1851 early_iommu_features_init(iommu, h);
1858 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1859 iommu->mmio_phys_end);
1860 if (!iommu->mmio_base)
1863 return init_iommu_from_acpi(iommu, h);
1866 static int __init init_iommu_one_late(struct amd_iommu *iommu)
1870 if (alloc_cwwb_sem(iommu))
1873 if (alloc_command_buffer(iommu))
1876 if (alloc_event_buffer(iommu))
1879 iommu->int_enabled = false;
1881 init_translation_status(iommu);
1882 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1883 iommu_disable(iommu);
1884 clear_translation_pre_enabled(iommu);
1886 iommu->index);
1889 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1892 ret = amd_iommu_create_irq_domain(iommu);
1901 iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
1940 struct amd_iommu *iommu;
1959 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1960 if (iommu == NULL)
1963 ret = init_iommu_one(iommu, h, table);
1976 for_each_iommu(iommu) {
1977 ret = init_iommu_one_late(iommu);
1985 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1988 struct pci_dev *pdev = iommu->dev;
1990 if (!iommu_feature(iommu, FEATURE_PC))
1997 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1998 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1999 iommu->max_counters = (u8) ((val >> 7) & 0xf);
2008 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
2009 return sysfs_emit(buf, "%x\n", iommu->cap);
2017 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
2018 return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
2029 .name = "amd-iommu",
2043 static void __init late_iommu_features_init(struct amd_iommu *iommu)
2047 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
2051 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
2052 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
2054 if (!iommu->features) {
2055 iommu->features = features;
2056 iommu->features2 = features2;
2064 if (features != iommu->features ||
2065 features2 != iommu->features2) {
2068 features, iommu->features,
2069 features2, iommu->features2);
2073 static int __init iommu_init_pci(struct amd_iommu *iommu)
2075 int cap_ptr = iommu->cap_ptr;
2078 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2079 PCI_BUS_NUM(iommu->devid),
2080 iommu->devid & 0xff);
2081 if (!iommu->dev)
2085 iommu->dev->match_driver = false;
2088 iommu->dev->irq_managed = 1;
2090 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
2091 &iommu->cap);
2093 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
2096 late_iommu_features_init(iommu);
2098 if (iommu_feature(iommu, FEATURE_GT)) {
2103 pasmax = iommu->features & FEATURE_PASID_MASK;
2111 glxval = iommu->features & FEATURE_GLXVAL_MASK;
2120 if (iommu_feature(iommu, FEATURE_GT) &&
2121 iommu_feature(iommu, FEATURE_PPR)) {
2122 iommu->is_iommu_v2 = true;
2126 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
2129 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
2135 init_iommu_perf_ctr(iommu);
2138 if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
2139 !iommu_feature(iommu, FEATURE_GT)) {
2148 if (is_rd890_iommu(iommu->dev)) {
2151 iommu->root_pdev =
2152 pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2153 iommu->dev->bus->number,
2161 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
2162 &iommu->stored_addr_lo);
2163 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
2164 &iommu->stored_addr_hi);
2167 iommu->stored_addr_lo &= ~1;
2171 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2174 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2177 amd_iommu_erratum_746_workaround(iommu);
2178 amd_iommu_ats_write_check_workaround(iommu);
2180 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
2181 amd_iommu_groups, "ivhd%d", iommu->index);
2185 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2187 return pci_enable_device(iommu->dev);
2196 struct amd_iommu *iommu;
2198 for_each_iommu(iommu) {
2199 struct pci_dev *pdev = iommu->dev;
2202 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
2204 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
2205 pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
2208 if (iommu_feature(iommu, (1ULL << i)))
2212 if (iommu->features & FEATURE_GAM_VAPIC)
2215 if (iommu->features & FEATURE_SNP)
2234 struct amd_iommu *iommu;
2238 for_each_iommu(iommu) {
2239 ret = iommu_init_pci(iommu);
2242 iommu->index, ret);
2246 iommu_set_cwwb_range(iommu);
2262 for_each_iommu(iommu)
2263 iommu_flush_all_caches(iommu);
2280 static int iommu_setup_msi(struct amd_iommu *iommu)
2284 r = pci_enable_msi(iommu->dev);
2288 r = request_threaded_irq(iommu->dev->irq,
2292 iommu);
2295 pci_disable_msi(iommu->dev);
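
iommu_setup_msi() is the classic legacy-MSI pattern: enable MSI on the IOMMU's PCI function, attach a threaded handler to the resulting dev->irq, and undo the MSI enable if that fails. A sketch of the whole function; the handler names and the "AMD-Vi" irq name follow the driver's usual conventions and should be treated as assumptions:

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,		/* hard-irq handler */
				 amd_iommu_int_thread,		/* threaded handler */
				 0, "AMD-Vi",
				 iommu);
	if (r) {
		pci_disable_msi(iommu->dev);		/* roll back on failure */
		return r;
	}

	return 0;
}
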
2364 struct amd_iommu *iommu = irqd->chip_data;
2374 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
2379 struct amd_iommu *iommu = irqd->chip_data;
2381 writeq(0, iommu->mmio_base + irqd->hwirq);
2444 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
2450 int node = dev_to_node(&iommu->dev->dev);
2458 info.data = iommu;
2468 thread_fn, 0, devname, iommu);
2478 static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2482 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
2483 "AMD-Vi%d-Evt", iommu->index);
2484 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
2490 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
2491 "AMD-Vi%d-PPR", iommu->index);
2492 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
2499 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
2500 "AMD-Vi%d-GA", iommu->index);
2501 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
2509 static int iommu_init_irq(struct amd_iommu *iommu)
2513 if (iommu->int_enabled)
2517 ret = iommu_setup_intcapxt(iommu);
2518 else if (iommu->dev->msi_cap)
2519 ret = iommu_setup_msi(iommu);
2526 iommu->int_enabled = true;
2530 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2532 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2688 static void iommu_init_flags(struct amd_iommu *iommu)
2690 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2691 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2692 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2694 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2695 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2696 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2698 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2699 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2700 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2702 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2703 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2704 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2709 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2712 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2715 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2719 struct pci_dev *pdev = iommu->root_pdev;
2721 /* RD890 BIOSes may not have completely reconfigured the iommu */
2722 if (!is_rd890_iommu(iommu->dev) || !pdev)
2726 * First, we need to ensure that the iommu is enabled. This is
2734 /* Enable the iommu */
2738 /* Restore the iommu BAR */
2739 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2740 iommu->stored_addr_lo);
2741 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2742 iommu->stored_addr_hi);
2747 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2751 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2754 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2755 iommu->stored_addr_lo | 1);
2758 static void iommu_enable_ga(struct amd_iommu *iommu)
2764 iommu_feature_enable(iommu, CONTROL_GA_EN);
2765 iommu->irte_ops = &irte_128_ops;
2768 iommu->irte_ops = &irte_32_ops;
2774 static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2776 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2779 static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2791 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2792 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2795 iommu->irtcachedis_enabled = true;
2796 pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2797 iommu->index, iommu->devid,
2798 iommu->irtcachedis_enabled ? "disabled" : "enabled");
2801 static void early_enable_iommu(struct amd_iommu *iommu)
2803 iommu_disable(iommu);
2804 iommu_init_flags(iommu);
2805 iommu_set_device_table(iommu);
2806 iommu_enable_command_buffer(iommu);
2807 iommu_enable_event_buffer(iommu);
2808 iommu_set_exclusion_range(iommu);
2809 iommu_enable_ga(iommu);
2810 iommu_enable_xt(iommu);
2811 iommu_enable_irtcachedis(iommu);
2812 iommu_enable(iommu);
2813 iommu_flush_all_caches(iommu);
2826 struct amd_iommu *iommu;
2846 for_each_iommu(iommu) {
2847 clear_translation_pre_enabled(iommu);
2848 early_enable_iommu(iommu);
2859 for_each_iommu(iommu) {
2860 iommu_disable_command_buffer(iommu);
2861 iommu_disable_event_buffer(iommu);
2862 iommu_disable_irtcachedis(iommu);
2863 iommu_enable_command_buffer(iommu);
2864 iommu_enable_event_buffer(iommu);
2865 iommu_enable_ga(iommu);
2866 iommu_enable_xt(iommu);
2867 iommu_enable_irtcachedis(iommu);
2868 iommu_set_device_table(iommu);
2869 iommu_flush_all_caches(iommu);
2876 struct amd_iommu *iommu;
2878 for_each_iommu(iommu) {
2879 iommu_enable_ppr_log(iommu);
2880 iommu_enable_gt(iommu);
2888 struct amd_iommu *iommu;
2890 for_each_iommu(iommu) {
2895 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2899 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2900 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2907 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2931 for_each_iommu(iommu) {
2932 if (iommu_init_ga_log(iommu) ||
2933 iommu_ga_log_enable(iommu))
2936 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2938 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
2953 struct amd_iommu *iommu;
2955 for_each_iommu(iommu)
2956 iommu_disable(iommu);
2971 struct amd_iommu *iommu;
2973 for_each_iommu(iommu)
2974 iommu_apply_resume_quirks(iommu);
3203 struct amd_iommu *iommu;
3206 for_each_iommu(iommu) {
3207 ret = iommu_init_irq(iommu);
3326 struct amd_iommu *iommu;
3332 for_each_iommu(iommu)
3333 iommu_flush_all_caches(iommu);
3408 struct amd_iommu *iommu;
3422 for_each_iommu(iommu)
3423 amd_iommu_debugfs_setup(iommu);
3470 x86_init.iommu.iommu_init = amd_iommu_init;
3511 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3711 struct amd_iommu *iommu;
3713 for_each_iommu(iommu)
3715 return iommu;
3728 struct amd_iommu *iommu = get_amd_iommu(idx);
3730 if (iommu)
3731 return iommu->max_banks;
3745 struct amd_iommu *iommu = get_amd_iommu(idx);
3747 if (iommu)
3748 return iommu->max_counters;
3754 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3764 /* Check for valid iommu and pc register indexing */
3765 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3771 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3772 (iommu->max_counters << 8) | 0x28);
3780 writel((u32)val, iommu->mmio_base + offset);
3781 writel((val >> 32), iommu->mmio_base + offset + 4);
3783 *value = readl(iommu->mmio_base + offset + 4);
3785 *value |= readl(iommu->mmio_base + offset);
3792 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3794 if (!iommu)
3797 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3800 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3802 if (!iommu)
3805 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
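
The performance-counter helpers address one register per (bank, counter, function) triple. The offset formula in the sketch below mirrors the max_offset_lim computation at lines 3771-3772 but is itself an assumption, and the 64-bit value is moved as two 32-bit MMIO accesses as at lines 3780-3785; pc_read_reg() and pc_dump_bank0_cntr0() are hypothetical illustrations, with function offset 0x00 assumed to be the raw counter register:

static int pc_read_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
		       u64 *value)
{
	u32 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	*value  = (u64)readl(iommu->mmio_base + offset + 4) << 32;	/* high word */
	*value |= readl(iommu->mmio_base + offset);			/* low word  */

	return 0;
}

/* Hypothetical usage through the exported wrapper. */
static void pc_dump_bank0_cntr0(struct amd_iommu *iommu)
{
	u64 cnt;

	if (!amd_iommu_pc_get_reg(iommu, 0, 0, 0x00, &cnt))
		pr_info("iommu%d bank0/cntr0 = %llu\n", iommu->index, cnt);
}
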