Lines Matching refs:iommu

22 #include <linux/intel-iommu.h>
29 #include <linux/iommu.h>
65 static void free_iommu(struct intel_iommu *iommu);
462 if (dmaru->iommu)
463 free_iommu(dmaru->iommu);
502 drhd->iommu->node = node;
933 x86_init.iommu.iommu_init = intel_iommu_init;
948 static void unmap_iommu(struct intel_iommu *iommu)
950 iounmap(iommu->reg);
951 release_mem_region(iommu->reg_phys, iommu->reg_size);
955 * map_iommu: map the iommu's registers
956 * @iommu: the iommu to map
959 * Memory map the iommu's registers. Start w/ a single page, and
962 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
966 iommu->reg_phys = phys_addr;
967 iommu->reg_size = VTD_PAGE_SIZE;
969 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
975 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
976 if (!iommu->reg) {
982 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
983 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
985 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
990 if (ecap_vcs(iommu->ecap))
991 iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
994 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
995 cap_max_fault_reg_offset(iommu->cap));
997 if (map_size > iommu->reg_size) {
998 iounmap(iommu->reg);
999 release_mem_region(iommu->reg_phys, iommu->reg_size);
1000 iommu->reg_size = map_size;
1001 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1002 iommu->name)) {
1007 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1008 if (!iommu->reg) {
1018 iounmap(iommu->reg);
1020 release_mem_region(iommu->reg_phys, iommu->reg_size);
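
The map_iommu() lines above show a two-stage mapping: claim and map a single VTD page, read CAP/ECAP to learn how large the register file actually is, then release and remap if the IOTLB or fault-recording registers extend beyond that first page. The sketch below reconstructs the pattern from those fragments; the error labels and return codes are assumptions, and the register/capability helpers come from <linux/intel-iommu.h>.

/* Sketch: map a DMAR unit's registers, growing the mapping once the
 * capability registers reveal the real extent of the register file. */
static int map_iommu_sketch(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;        /* start with one page */

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name))
                return -EBUSY;

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                err = -ENOMEM;
                goto release;
        }

        iommu->cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;                  /* hardware not responding */
                goto unmap;
        }
        if (ecap_vcs(iommu->ecap))
                iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);

        /* The IOTLB and fault-recording registers may sit past the first page. */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name))
                        return -EBUSY;
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        err = -ENOMEM;
                        goto release;
                }
        }
        return 0;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
        return err;
}
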
1025 static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1027 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1029 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1030 iommu->seq_id = -1;
1032 set_bit(iommu->seq_id, dmar_seq_ids);
1033 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1036 return iommu->seq_id;
1039 static void dmar_free_seq_id(struct intel_iommu *iommu)
1041 if (iommu->seq_id >= 0) {
1042 clear_bit(iommu->seq_id, dmar_seq_ids);
1043 iommu->seq_id = -1;
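
dmar_alloc_seq_id()/dmar_free_seq_id() hand out one small integer per DMAR unit from a static bitmap; the same integer also names the unit ("dmar0", "dmar1", ...). A sketch of that bitmap allocator, with the bitmap declaration filled in as an assumption (the real declaration sits elsewhere in this file):

static DECLARE_BITMAP(dmar_seq_ids, DMAR_UNITS_SUPPORTED);

static int dmar_alloc_seq_id_sketch(struct intel_iommu *iommu)
{
        iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
                                            DMAR_UNITS_SUPPORTED);
        if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
                iommu->seq_id = -1;             /* no free slot left */
        } else {
                set_bit(iommu->seq_id, dmar_seq_ids);
                sprintf(iommu->name, "dmar%d", iommu->seq_id);
        }

        return iommu->seq_id;
}

static void dmar_free_seq_id_sketch(struct intel_iommu *iommu)
{
        if (iommu->seq_id >= 0) {
                clear_bit(iommu->seq_id, dmar_seq_ids);
                iommu->seq_id = -1;
        }
}
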
1049 struct intel_iommu *iommu;
1060 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1061 if (!iommu)
1064 if (dmar_alloc_seq_id(iommu) < 0) {
1070 err = map_iommu(iommu, drhd->reg_base_addr);
1072 pr_err("Failed to map %s\n", iommu->name);
1077 if (cap_sagaw(iommu->cap) == 0) {
1079 iommu->name);
1084 agaw = iommu_calculate_agaw(iommu);
1086 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1087 iommu->seq_id);
1092 msagaw = iommu_calculate_max_sagaw(iommu);
1094 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1095 iommu->seq_id);
1100 iommu->agaw = agaw;
1101 iommu->msagaw = msagaw;
1102 iommu->segment = drhd->segment;
1104 iommu->node = NUMA_NO_NODE;
1106 ver = readl(iommu->reg + DMAR_VER_REG);
1108 iommu->name,
1111 (unsigned long long)iommu->cap,
1112 (unsigned long long)iommu->ecap);
1115 sts = readl(iommu->reg + DMAR_GSTS_REG);
1117 iommu->gcmd |= DMA_GCMD_IRE;
1119 iommu->gcmd |= DMA_GCMD_TE;
1121 iommu->gcmd |= DMA_GCMD_QIE;
1123 raw_spin_lock_init(&iommu->register_lock);
1131 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1133 "%s", iommu->name);
1137 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
1139 err = iommu_device_register(&iommu->iommu);
1144 drhd->iommu = iommu;
1145 iommu->drhd = drhd;
1150 iommu_device_sysfs_remove(&iommu->iommu);
1152 unmap_iommu(iommu);
1154 dmar_free_seq_id(iommu);
1156 kfree(iommu);
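
The alloc_iommu() fragments end in a classic goto unwind: sysfs removal, register unmap, seq-id release, kfree(), in exactly the reverse order of setup. A skeleton of that ladder; the label names, error codes and the NULL attribute-group argument to iommu_device_sysfs_add() are assumptions, and the capability/agaw checks in the middle are elided.

static int alloc_iommu_sketch(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int err;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        if (dmar_alloc_seq_id(iommu) < 0) {
                err = -ENOSPC;
                goto error;
        }

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("Failed to map %s\n", iommu->name);
                goto error_free_seq_id;
        }

        /* ... sagaw check, agaw/msagaw computation, version print,
         *     GSTS snapshot and register_lock init go here ... */

        err = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL,
                                     "%s", iommu->name);
        if (err)
                goto err_unmap;

        iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
        err = iommu_device_register(&iommu->iommu);
        if (err)
                goto err_sysfs;

        drhd->iommu = iommu;
        iommu->drhd = drhd;
        return 0;

err_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
        unmap_iommu(iommu);
error_free_seq_id:
        dmar_free_seq_id(iommu);
error:
        kfree(iommu);
        return err;
}
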
1160 static void free_iommu(struct intel_iommu *iommu)
1162 if (intel_iommu_enabled && !iommu->drhd->ignored) {
1163 iommu_device_unregister(&iommu->iommu);
1164 iommu_device_sysfs_remove(&iommu->iommu);
1167 if (iommu->irq) {
1168 if (iommu->pr_irq) {
1169 free_irq(iommu->pr_irq, iommu);
1170 dmar_free_hwirq(iommu->pr_irq);
1171 iommu->pr_irq = 0;
1173 free_irq(iommu->irq, iommu);
1174 dmar_free_hwirq(iommu->irq);
1175 iommu->irq = 0;
1178 if (iommu->qi) {
1179 free_page((unsigned long)iommu->qi->desc);
1180 kfree(iommu->qi->desc_status);
1181 kfree(iommu->qi);
1184 if (iommu->reg)
1185 unmap_iommu(iommu);
1187 dmar_free_seq_id(iommu);
1188 kfree(iommu);
1204 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1208 struct q_inval *qi = iommu->qi;
1209 int shift = qi_shift(iommu);
1214 fault = readl(iommu->reg + DMAR_FSTS_REG);
1222 head = readl(iommu->reg + DMAR_IQH_REG);
1236 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1246 head = readl(iommu->reg + DMAR_IQH_REG);
1249 tail = readl(iommu->reg + DMAR_IQT_REG);
1252 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1265 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1277 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1280 struct q_inval *qi = iommu->qi;
1307 shift = qi_shift(iommu);
1334 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1344 rc = qi_check_fault(iommu, index, wait_index);
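
qi_submit_sync() copies the caller's descriptors into the ring followed by one invalidation-wait descriptor, bumps DMAR_IQT_REG, and then spins until hardware writes the wait slot's status back, calling qi_check_fault() inside the loop so IQE/ITE/ICE faults can be handled. A sketch of just the wait-descriptor mechanics, under a hypothetical helper name; queue locking, free-slot accounting and the retry path are elided, and the QI_IWD_* macros come from <linux/intel-iommu.h>.

static int qi_wait_for_completion_sketch(struct intel_iommu *iommu,
                                         int index, int wait_index)
{
        struct q_inval *qi = iommu->qi;
        struct qi_desc wait_desc;
        int rc = 0;

        /* Ask hardware to write QI_DONE into our status slot once it has
         * executed everything queued ahead of the wait descriptor. */
        qi->desc_status[wait_index] = QI_IN_USE;
        wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
        wait_desc.qw2 = 0;
        wait_desc.qw3 = 0;

        /* ... wait_desc is copied into the ring at wait_index here ... */

        /* Writing the tail register hands the new descriptors to hardware. */
        writel(qi->free_head << qi_shift(iommu), iommu->reg + DMAR_IQT_REG);

        while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
                rc = qi_check_fault(iommu, index, wait_index);
                if (rc)
                        break;
                cpu_relax();
        }

        return rc;
}
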
1368 void qi_global_iec(struct intel_iommu *iommu)
1378 qi_submit_sync(iommu, &desc, 1, 0);
1381 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1392 qi_submit_sync(iommu, &desc, 1, 0);
1395 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1403 if (cap_write_drain(iommu->cap))
1406 if (cap_read_drain(iommu->cap))
1416 qi_submit_sync(iommu, &desc, 1, 0);
1419 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1438 qi_submit_sync(iommu, &desc, 1, 0);
1442 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1479 qi_submit_sync(iommu, &desc, 1, 0);
1483 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1523 qi_submit_sync(iommu, &desc, 1, 0);
1526 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1533 qi_submit_sync(iommu, &desc, 1, 0);
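
Every qi_flush_*() helper above has the same shape: encode one descriptor, hand it to qi_submit_sync(). As an example, an IOTLB flush descriptor can be built like this (a sketch mirroring the qi_flush_iotlb() fragments; the QI_IOTLB_* field macros and the drain capability checks come from <linux/intel-iommu.h>):

void qi_flush_iotlb_sketch(struct intel_iommu *iommu, u16 did, u64 addr,
                           unsigned int size_order, u64 type)
{
        struct qi_desc desc;
        u8 dw = 0, dr = 0;
        int ih = 0;

        /* Drain pending writes/reads if the hardware requires it. */
        if (cap_write_drain(iommu->cap))
                dw = 1;
        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) |
                   QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) |
                   QI_IOTLB_AM(size_order);
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(iommu, &desc, 1, 0);
}
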
1539 void dmar_disable_qi(struct intel_iommu *iommu)
1545 if (!ecap_qis(iommu->ecap))
1548 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1550 sts = readl(iommu->reg + DMAR_GSTS_REG);
1557 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1558 readl(iommu->reg + DMAR_IQH_REG)) &&
1562 iommu->gcmd &= ~DMA_GCMD_QIE;
1563 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1565 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1568 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
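
dmar_disable_qi() first lets the hardware drain whatever is already queued (the head register catching up with the tail), then clears DMA_GCMD_QIE and waits for the QIES status bit to drop. A condensed sketch of that sequence; the real code bounds the drain loop with a cycle-counter timeout, which is elided here.

void dmar_disable_qi_sketch(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto out;

        /* Let hardware finish the descriptors it already owns: wait
         * (bounded by a timeout in the real code) for head == tail. */
        while (readl(iommu->reg + DMAR_IQT_REG) !=
               readl(iommu->reg + DMAR_IQH_REG))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Wait for hardware to acknowledge that QI is off. */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
out:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
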
1574 static void __dmar_enable_qi(struct intel_iommu *iommu)
1578 struct q_inval *qi = iommu->qi;
1588 if (ecap_smts(iommu->ecap))
1591 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1594 writel(0, iommu->reg + DMAR_IQT_REG);
1596 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1598 iommu->gcmd |= DMA_GCMD_QIE;
1599 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1602 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1604 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1612 int dmar_enable_qi(struct intel_iommu *iommu)
1617 if (!ecap_qis(iommu->ecap))
1623 if (iommu->qi)
1626 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1627 if (!iommu->qi)
1630 qi = iommu->qi;
1636 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1637 !!ecap_smts(iommu->ecap));
1640 iommu->qi = NULL;
1650 iommu->qi = NULL;
1656 __dmar_enable_qi(iommu);
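
dmar_enable_qi() allocates the q_inval bookkeeping plus the descriptor ring (an order-1 allocation when scalable mode's 256-bit descriptors are in use, per the !!ecap_smts() page order above), and __dmar_enable_qi() then points DMAR_IQA_REG at the ring, zeroes the tail and sets DMA_GCMD_QIE. A sketch of the register-programming half; the scalable-mode bits OR'd into IQA (descriptor width and queue size) are an assumption based on the VT-d register layout.

static void __dmar_enable_qi_sketch(struct intel_iommu *iommu)
{
        struct q_inval *qi = iommu->qi;
        u64 val = virt_to_phys(qi->desc);
        unsigned long flags;
        u32 sts;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        /* Scalable mode: 256-bit descriptors in a two-page queue
         * (assumed encoding: DW at bit 11, queue size in the low bits). */
        if (ecap_smts(iommu->ecap))
                val |= (1 << 11) | 1;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        writel(0, iommu->reg + DMAR_IQT_REG);   /* reset the tail */
        dmar_writeq(iommu->reg + DMAR_IQA_REG, val);

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Wait for hardware to report that QI is live. */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
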
1661 /* iommu interrupt handling. Most of it is MSI-like. */

1776 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1778 if (iommu->irq == irq)
1780 else if (iommu->pr_irq == irq)
1788 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1789 int reg = dmar_msi_reg(iommu, data->irq);
1793 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1794 writel(0, iommu->reg + reg);
1796 readl(iommu->reg + reg);
1797 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1802 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1803 int reg = dmar_msi_reg(iommu, data->irq);
1807 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1808 writel(DMA_FECTL_IM, iommu->reg + reg);
1810 readl(iommu->reg + reg);
1811 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1816 struct intel_iommu *iommu = irq_get_handler_data(irq);
1817 int reg = dmar_msi_reg(iommu, irq);
1820 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1821 writel(msg->data, iommu->reg + reg + 4);
1822 writel(msg->address_lo, iommu->reg + reg + 8);
1823 writel(msg->address_hi, iommu->reg + reg + 12);
1824 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1829 struct intel_iommu *iommu = irq_get_handler_data(irq);
1830 int reg = dmar_msi_reg(iommu, irq);
1833 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1834 msg->data = readl(iommu->reg + reg + 4);
1835 msg->address_lo = readl(iommu->reg + reg + 8);
1836 msg->address_hi = readl(iommu->reg + reg + 12);
1837 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
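
dmar_msi_reg() selects the control register for the interrupt being programmed: the fault-event block for iommu->irq and the page-request block for iommu->pr_irq. The write/read helpers then treat the following three dwords as MSI data, address and upper address, which is why they poke reg + 4, + 8 and + 12 under the register lock. A sketch of the selector and the write side (struct msi_msg is from <linux/msi.h>):

static inline int dmar_msi_reg_sketch(struct intel_iommu *iommu, int irq)
{
        if (iommu->irq == irq)
                return DMAR_FECTL_REG;          /* fault event control */
        else if (iommu->pr_irq == irq)
                return DMAR_PECTL_REG;          /* page request event control */
        else
                BUG();
}

void dmar_msi_write_sketch(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        int reg = dmar_msi_reg_sketch(iommu, irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data,       iommu->reg + reg + 4);  /* ..DATA_REG  */
        writel(msg->address_lo, iommu->reg + reg + 8);  /* ..ADDR_REG  */
        writel(msg->address_hi, iommu->reg + reg + 12); /* ..UADDR_REG */
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
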
1840 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1866 struct intel_iommu *iommu = dev_id;
1874 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1875 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1884 reg = cap_fault_reg_offset(iommu->cap);
1897 data = readl(iommu->reg + reg +
1907 data = readl(iommu->reg + reg +
1912 guest_addr = dmar_readq(iommu->reg + reg +
1918 writel(DMA_FRCD_F, iommu->reg + reg +
1921 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1925 dmar_fault_do_one(iommu, type, fault_reason,
1930 if (fault_index >= cap_num_fault_regs(iommu->cap))
1932 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1936 iommu->reg + DMAR_FSTS_REG);
1939 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
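
dmar_fault() walks the fault recording registers: records are 16 bytes each starting at cap_fault_reg_offset(iommu->cap), the top dword holds the F (valid) bit plus fault type and reason, the second qword's low dword holds the source ID, and the first qword holds the faulting address; writing DMA_FRCD_F back clears the record. A sketch of decoding and clearing a single record with a hypothetical helper (the dma_frcd_* accessors and PRIMARY_FAULT_REG_LEN come from <linux/intel-iommu.h>; callers are assumed to hold iommu->register_lock, as in the fragments above):

static bool dmar_read_one_fault_sketch(struct intel_iommu *iommu,
                                       int fault_index, int reg)
{
        u32 data;
        u64 guest_addr;
        u16 source_id;
        u8 fault_reason, type;

        /* Highest dword of the record: valid bit, type, reason. */
        data = readl(iommu->reg + reg +
                     fault_index * PRIMARY_FAULT_REG_LEN + 12);
        if (!(data & DMA_FRCD_F))
                return false;

        fault_reason = dma_frcd_fault_reason(data);
        type = dma_frcd_type(data);

        /* Second qword, low dword: requester (source) ID. */
        data = readl(iommu->reg + reg +
                     fault_index * PRIMARY_FAULT_REG_LEN + 8);
        source_id = dma_frcd_source_id(data);

        /* First qword: faulting page address. */
        guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
        guest_addr = dma_frcd_page_addr(guest_addr);

        /* Writing F back clears this record. */
        writel(DMA_FRCD_F, iommu->reg + reg +
               fault_index * PRIMARY_FAULT_REG_LEN + 12);

        pr_debug("fault: type %d reason %d sid %04x addr %llx\n",
                 type, fault_reason, source_id, guest_addr);
        return true;
}
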
1943 int dmar_set_interrupt(struct intel_iommu *iommu)
1950 if (iommu->irq)
1953 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1955 iommu->irq = irq;
1961 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
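
dmar_set_interrupt() pairs a DMAR-specific hardware interrupt, allocated against the unit's seq_id and NUMA node, with dmar_fault() as the handler. A sketch of that pairing with abbreviated error handling:

int dmar_set_interrupt_sketch(struct intel_iommu *iommu)
{
        int irq, ret;

        if (iommu->irq)         /* already wired up */
                return 0;

        irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
        if (irq <= 0) {
                pr_err("No free IRQ vectors\n");
                return -EINVAL;
        }
        iommu->irq = irq;

        /* IRQF_NO_THREAD keeps fault reporting in hard-IRQ context even
         * when forced interrupt threading is enabled. */
        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD,
                          iommu->name, iommu);
        if (ret)
                pr_err("Can't request irq\n");
        return ret;
}
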
1970 struct intel_iommu *iommu;
1975 for_each_iommu(iommu, drhd) {
1977 int ret = dmar_set_interrupt(iommu);
1988 dmar_fault(iommu->irq, iommu);
1989 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1990 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1999 int dmar_reenable_qi(struct intel_iommu *iommu)
2001 if (!ecap_qis(iommu->ecap))
2004 if (!iommu->qi)
2010 dmar_disable_qi(iommu);
2016 __dmar_enable_qi(iommu);