Lines Matching refs:iommu
28 #include <linux/iommu.h>
33 #include "iommu.h"
67 static void free_iommu(struct intel_iommu *iommu);
461 if (dmaru->iommu)
462 free_iommu(dmaru->iommu);
501 drhd->iommu->node = node;
939 x86_init.iommu.iommu_init = intel_iommu_init;
952 static void unmap_iommu(struct intel_iommu *iommu)
954 iounmap(iommu->reg);
955 release_mem_region(iommu->reg_phys, iommu->reg_size);
959 * map_iommu: map the iommu's registers
960 * @iommu: the iommu to map
963 * Memory map the iommu's registers. Start w/ a single page, and
966 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
971 iommu->reg_phys = phys_addr;
972 iommu->reg_size = drhd->reg_size;
974 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
980 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
981 if (!iommu->reg) {
987 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
988 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
990 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
997 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
998 cap_max_fault_reg_offset(iommu->cap));
1000 if (map_size > iommu->reg_size) {
1001 iounmap(iommu->reg);
1002 release_mem_region(iommu->reg_phys, iommu->reg_size);
1003 iommu->reg_size = map_size;
1004 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1005 iommu->name)) {
1010 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1011 if (!iommu->reg) {
1018 if (cap_ecmds(iommu->cap)) {
1022 iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
1031 iounmap(iommu->reg);
1033 release_mem_region(iommu->reg_phys, iommu->reg_size);
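The map_iommu() fragments above show a two-pass mapping: a minimal window is claimed and mapped first, DMAR_CAP_REG/DMAR_ECAP_REG are read to find how far the IOTLB and fault-recording registers actually extend, and the region is released and remapped larger if needed. A condensed, illustrative sketch of that pattern (not the driver function; the broken-DMAR check, ECCAP readout and full error unwinding are trimmed):

/*
 * Sketch only: map a small register window, read the capability
 * registers, and grow the mapping when the capability-derived size
 * exceeds what was mapped first.
 */
static int map_regs_sketch(struct intel_iommu *iommu, u64 phys, int size)
{
	int map_size;

	if (!request_mem_region(phys, size, iommu->name))
		return -EBUSY;

	iommu->reg = ioremap(phys, size);
	if (!iommu->reg) {
		release_mem_region(phys, size);
		return -ENOMEM;
	}

	iommu->cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* The IOTLB and fault-recording registers may lie past the window. */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	if (map_size > size) {
		iounmap(iommu->reg);
		release_mem_region(phys, size);
		if (!request_mem_region(phys, map_size, iommu->name))
			return -EBUSY;
		iommu->reg = ioremap(phys, map_size);
		if (!iommu->reg) {
			release_mem_region(phys, map_size);
			return -ENOMEM;
		}
	}
	return 0;
}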
1040 struct intel_iommu *iommu;
1051 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1052 if (!iommu)
1055 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1057 if (iommu->seq_id < 0) {
1059 err = iommu->seq_id;
1062 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1064 err = map_iommu(iommu, drhd);
1066 pr_err("Failed to map %s\n", iommu->name);
1071 if (!cap_sagaw(iommu->cap) &&
1072 (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1074 iommu->name);
1079 agaw = iommu_calculate_agaw(iommu);
1081 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1082 iommu->seq_id);
1087 msagaw = iommu_calculate_max_sagaw(iommu);
1089 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1090 iommu->seq_id);
1095 iommu->agaw = agaw;
1096 iommu->msagaw = msagaw;
1097 iommu->segment = drhd->segment;
1099 iommu->node = NUMA_NO_NODE;
1101 ver = readl(iommu->reg + DMAR_VER_REG);
1103 iommu->name,
1106 (unsigned long long)iommu->cap,
1107 (unsigned long long)iommu->ecap);
1110 sts = readl(iommu->reg + DMAR_GSTS_REG);
1112 iommu->gcmd |= DMA_GCMD_IRE;
1114 iommu->gcmd |= DMA_GCMD_TE;
1116 iommu->gcmd |= DMA_GCMD_QIE;
1118 if (alloc_iommu_pmu(iommu))
1119 pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1121 raw_spin_lock_init(&iommu->register_lock);
1127 if (pasid_supported(iommu))
1128 iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
1136 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1138 "%s", iommu->name);
1142 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1146 iommu_pmu_register(iommu);
1149 drhd->iommu = iommu;
1150 iommu->drhd = drhd;
1155 iommu_device_sysfs_remove(&iommu->iommu);
1157 free_iommu_pmu(iommu);
1158 unmap_iommu(iommu);
1160 ida_free(&dmar_seq_ids, iommu->seq_id);
1162 kfree(iommu);
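Earlier in this function, the DMAR_GSTS_REG read shown above seeds the software command shadow iommu->gcmd from the hardware global status register, so that later read-modify-write updates of DMAR_GCMD_REG do not drop interrupt remapping, translation or queued invalidation that firmware or a previous kernel left enabled. A minimal sketch of that seeding, assuming the usual DMA_GSTS_*/DMA_GCMD_* bit names from the driver headers:

/*
 * Sketch: mirror features already enabled in hardware into the gcmd
 * shadow.  DMA_GSTS_IRES/TES/QIES and DMA_GCMD_IRE/TE/QIE are assumed
 * to be the status/command bit names used by the driver.
 */
static void seed_gcmd_sketch(struct intel_iommu *iommu)
{
	u32 sts = readl(iommu->reg + DMAR_GSTS_REG);

	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;
}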
1166 static void free_iommu(struct intel_iommu *iommu)
1168 if (intel_iommu_enabled && !iommu->drhd->ignored) {
1169 iommu_pmu_unregister(iommu);
1170 iommu_device_unregister(&iommu->iommu);
1171 iommu_device_sysfs_remove(&iommu->iommu);
1174 free_iommu_pmu(iommu);
1176 if (iommu->irq) {
1177 if (iommu->pr_irq) {
1178 free_irq(iommu->pr_irq, iommu);
1179 dmar_free_hwirq(iommu->pr_irq);
1180 iommu->pr_irq = 0;
1182 free_irq(iommu->irq, iommu);
1183 dmar_free_hwirq(iommu->irq);
1184 iommu->irq = 0;
1187 if (iommu->qi) {
1188 free_page((unsigned long)iommu->qi->desc);
1189 kfree(iommu->qi->desc_status);
1190 kfree(iommu->qi);
1193 if (iommu->reg)
1194 unmap_iommu(iommu);
1196 ida_free(&dmar_seq_ids, iommu->seq_id);
1197 kfree(iommu);
1239 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1241 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
1242 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1243 struct qi_desc *desc = iommu->qi->desc + head;
1260 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1261 head <<= qi_shift(iommu);
1262 desc = iommu->qi->desc + head;
1270 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1274 struct q_inval *qi = iommu->qi;
1275 int shift = qi_shift(iommu);
1280 fault = readl(iommu->reg + DMAR_FSTS_REG);
1282 qi_dump_fault(iommu, fault);
1290 head = readl(iommu->reg + DMAR_IQH_REG);
1301 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1312 head = readl(iommu->reg + DMAR_IQH_REG);
1315 tail = readl(iommu->reg + DMAR_IQT_REG);
1318 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1332 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
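qi_dump_fault() and qi_check_fault() work on raw byte offsets read from DMAR_IQH_REG/DMAR_IQT_REG; qi_shift() converts between a descriptor slot index and that byte offset (descriptors are wider in scalable mode, which is also why the descriptor page order depends on ecap_smts() further below). A small sketch of the "step back one descriptor" computation used in qi_dump_fault():

/*
 * Sketch: given the raw head byte offset and the descriptor shift,
 * return the byte offset of the previous descriptor slot, wrapping
 * within the QI_LENGTH-entry ring.
 */
static u32 qi_prev_offset(u32 head_bytes, int shift)
{
	u32 idx = head_bytes >> shift;			/* byte offset -> slot index */

	idx = (idx + QI_LENGTH - 1) % QI_LENGTH;	/* previous slot, wrapped */
	return idx << shift;				/* slot index -> byte offset */
}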
1346 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1349 struct q_inval *qi = iommu->qi;
1366 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1370 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1374 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1394 shift = qi_shift(iommu);
1400 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1423 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1433 rc = qi_check_fault(iommu, index, wait_index);
1452 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1456 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1460 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
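qi_submit_sync() copies the caller's descriptors, plus a trailing wait descriptor, into the ring at qi->free_head and then writes the new free-space offset to DMAR_IQT_REG so the hardware starts fetching them. A stripped-down sketch of the enqueue-and-kick step (batching, the wait descriptor, queue locking and the qi_check_fault() retry loop shown above are all omitted):

/*
 * Sketch only: enqueue a single descriptor and notify the hardware.
 * The real qi_submit_sync() batches descriptors, appends a wait
 * descriptor and polls its status, and recovers from queue faults.
 */
static void qi_enqueue_sketch(struct intel_iommu *iommu, struct qi_desc *desc)
{
	struct q_inval *qi = iommu->qi;
	int shift = qi_shift(iommu);

	memcpy(qi->desc + (qi->free_head << shift), desc, 1 << shift);
	qi->free_head = (qi->free_head + 1) % QI_LENGTH;

	/* Writing IQT publishes the new tail; hardware fetches up to it. */
	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
}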
1469 void qi_global_iec(struct intel_iommu *iommu)
1479 qi_submit_sync(iommu, &desc, 1, 0);
1482 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1493 qi_submit_sync(iommu, &desc, 1, 0);
1496 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1504 if (cap_write_drain(iommu->cap))
1507 if (cap_read_drain(iommu->cap))
1517 qi_submit_sync(iommu, &desc, 1, 0);
1520 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1531 if (!(iommu->gcmd & DMA_GCMD_TE))
1548 qi_submit_sync(iommu, &desc, 1, 0);
1552 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1589 qi_submit_sync(iommu, &desc, 1, 0);
1593 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1605 if (!(iommu->gcmd & DMA_GCMD_TE))
1642 qi_submit_sync(iommu, &desc, 1, 0);
1645 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1652 qi_submit_sync(iommu, &desc, 1, 0);
1658 void dmar_disable_qi(struct intel_iommu *iommu)
1664 if (!ecap_qis(iommu->ecap))
1667 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1669 sts = readl(iommu->reg + DMAR_GSTS_REG);
1676 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1677 readl(iommu->reg + DMAR_IQH_REG)) &&
1681 iommu->gcmd &= ~DMA_GCMD_QIE;
1682 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1684 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1687 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1693 static void __dmar_enable_qi(struct intel_iommu *iommu)
1697 struct q_inval *qi = iommu->qi;
1707 if (ecap_smts(iommu->ecap))
1710 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1713 writel(0, iommu->reg + DMAR_IQT_REG);
1715 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1717 iommu->gcmd |= DMA_GCMD_QIE;
1718 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1721 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1723 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1731 int dmar_enable_qi(struct intel_iommu *iommu)
1736 if (!ecap_qis(iommu->ecap))
1742 if (iommu->qi)
1745 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1746 if (!iommu->qi)
1749 qi = iommu->qi;
1755 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1756 !!ecap_smts(iommu->ecap));
1759 iommu->qi = NULL;
1769 iommu->qi = NULL;
1775 __dmar_enable_qi(iommu);
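dmar_enable_qi() allocates the descriptor ring (a higher page order when ecap_smts() indicates scalable mode) and __dmar_enable_qi() performs the enable handshake: reset the tail register, point DMAR_IQA_REG at the ring, set DMA_GCMD_QIE, and poll until DMA_GSTS_QIES appears. A condensed sketch of that handshake (register locking and the scalable-mode flags folded into the IQA value are omitted):

/*
 * Sketch only: the queued-invalidation enable handshake.  'ring_phys'
 * is the physical address of the descriptor ring; the real driver also
 * encodes scalable-mode bits into the IQA value and holds
 * iommu->register_lock around the register writes.
 */
static void qi_enable_sketch(struct intel_iommu *iommu, u64 ring_phys)
{
	u32 sts;

	iommu->qi->free_head = 0;			/* start with an empty ring */
	writel(0, iommu->reg + DMAR_IQT_REG);		/* tail = head = 0 */
	dmar_writeq(iommu->reg + DMAR_IQA_REG, ring_phys);

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Wait until hardware reports queued invalidation as enabled. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
}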
1780 /* iommu interrupt handling. Most stuff are MSI-like. */
1895 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1897 if (iommu->irq == irq)
1899 else if (iommu->pr_irq == irq)
1901 else if (iommu->perf_irq == irq)
1909 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1910 int reg = dmar_msi_reg(iommu, data->irq);
1914 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1915 writel(0, iommu->reg + reg);
1917 readl(iommu->reg + reg);
1918 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1923 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1924 int reg = dmar_msi_reg(iommu, data->irq);
1928 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1929 writel(DMA_FECTL_IM, iommu->reg + reg);
1931 readl(iommu->reg + reg);
1932 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1937 struct intel_iommu *iommu = irq_get_handler_data(irq);
1938 int reg = dmar_msi_reg(iommu, irq);
1941 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1942 writel(msg->data, iommu->reg + reg + 4);
1943 writel(msg->address_lo, iommu->reg + reg + 8);
1944 writel(msg->address_hi, iommu->reg + reg + 12);
1945 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1950 struct intel_iommu *iommu = irq_get_handler_data(irq);
1951 int reg = dmar_msi_reg(iommu, irq);
1954 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1955 msg->data = readl(iommu->reg + reg + 4);
1956 msg->address_lo = readl(iommu->reg + reg + 8);
1957 msg->address_hi = readl(iommu->reg + reg + 12);
1958 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
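dmar_msi_reg() picks the event-interrupt register block for a given IRQ (fault events, page requests, or the perfmon interrupt), and the four helpers above show the block layout: the control register sits at the returned offset, with the MSI data, address-low and address-high registers at +4, +8 and +12. A small sketch of the mask/unmask pattern, including the read-back that flushes the posted write:

/*
 * Sketch: mask or unmask one DMAR event-interrupt block.  'reg' is the
 * control-register offset as returned by dmar_msi_reg().
 */
static void dmar_event_mask_sketch(struct intel_iommu *iommu, int reg, bool mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	writel(mask ? DMA_FECTL_IM : 0, iommu->reg + reg);
	readl(iommu->reg + reg);	/* read back to flush the posted write */
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}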
1961 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1992 dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
2000 struct intel_iommu *iommu = dev_id;
2008 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2009 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2018 reg = cap_fault_reg_offset(iommu->cap);
2031 data = readl(iommu->reg + reg +
2041 data = readl(iommu->reg + reg +
2046 guest_addr = dmar_readq(iommu->reg + reg +
2052 writel(DMA_FRCD_F, iommu->reg + reg +
2055 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2059 dmar_fault_do_one(iommu, type, fault_reason,
2064 if (fault_index >= cap_num_fault_regs(iommu->cap))
2066 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2070 iommu->reg + DMAR_FSTS_REG);
2073 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
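dmar_fault() walks the fault-recording registers that start at cap_fault_reg_offset(iommu->cap): for each record it reads the high dword (fault bit, type, reason), the source-id dword and the faulting address, clears the record by writing DMA_FRCD_F back, and reports it via dmar_fault_do_one(). A sketch of decoding a single record, assuming the driver's 16-byte record stride (PRIMARY_FAULT_REG_LEN) and the dma_frcd_* field accessors; register locking and PASID handling are omitted:

/*
 * Sketch only: decode and clear one fault-recording register.
 * PRIMARY_FAULT_REG_LEN and the dma_frcd_* accessors are assumed from
 * the driver headers.
 */
static void decode_one_fault_sketch(struct intel_iommu *iommu, int fault_index)
{
	int reg = cap_fault_reg_offset(iommu->cap);
	u32 hi, lo;
	u64 addr;

	hi = readl(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN + 12);
	if (!(hi & DMA_FRCD_F))
		return;					/* nothing recorded here */

	lo   = readl(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN + 8);
	addr = dmar_readq(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN);

	/* Write one to clear the F bit before reporting the fault. */
	writel(DMA_FRCD_F, iommu->reg + reg +
	       fault_index * PRIMARY_FAULT_REG_LEN + 12);

	pr_err("DMAR fault: addr 0x%llx type %d reason %d source 0x%x\n",
	       (unsigned long long)addr, dma_frcd_type(hi),
	       dma_frcd_fault_reason(hi), dma_frcd_source_id(lo));
}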
2077 int dmar_set_interrupt(struct intel_iommu *iommu)
2084 if (iommu->irq)
2087 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2089 iommu->irq = irq;
2095 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2104 struct intel_iommu *iommu;
2109 for_each_iommu(iommu, drhd) {
2111 int ret = dmar_set_interrupt(iommu);
2122 dmar_fault(iommu->irq, iommu);
2123 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2124 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2133 int dmar_reenable_qi(struct intel_iommu *iommu)
2135 if (!ecap_qis(iommu->ecap))
2138 if (!iommu->qi)
2144 dmar_disable_qi(iommu);
2150 __dmar_enable_qi(iommu);