Lines Matching defs:iommu
Identifier-search hits for "iommu" in the AMD IOMMU driver; the number at the start of each entry is that line's position in the source file.

23 #include <linux/dma-iommu.h>
24 #include <linux/iommu-helper.h>
26 #include <linux/amd-iommu.h>
40 #include <asm/iommu.h>
84 * if iommu=pt passed on kernel cmd line.
276 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
285 if (translation_pre_enabled(iommu))
397 struct amd_iommu *iommu;
399 iommu = amd_iommu_rlookup_table[dev_data->devid];
400 dev_data->iommu_v2 = iommu->is_iommu_v2;
573 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
575 struct device *dev = iommu->iommu.dev;
662 static void iommu_poll_events(struct amd_iommu *iommu)
666 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
667 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
670 iommu_print_event(iommu, iommu->evt_buf + head);
674 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
677 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
695 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
699 if (iommu->ppr_log == NULL)
702 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
703 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
710 raw = (u64 *)(iommu->ppr_log + head);
735 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
738 iommu_handle_ppr_entry(iommu, entry);
741 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
742 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
757 static void iommu_poll_ga_log(struct amd_iommu *iommu)
761 if (iommu->ga_log == NULL)
764 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
765 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
771 raw = (u64 *)(iommu->ga_log + head);
779 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
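
The three pollers above (iommu_poll_events, iommu_poll_ppr_log and iommu_poll_ga_log) share one pattern: read the head and tail byte offsets from MMIO, consume entries from head until it catches up with tail, then write the new head back so the hardware can reuse that space. The sketch below is a minimal userspace model of that pattern, not the driver's code; the buffer size, entry size and the stand-in "MMIO" head/tail variables are all illustrative.

/* Minimal model of the head/tail log polling in iommu_poll_events(),
 * iommu_poll_ppr_log() and iommu_poll_ga_log(). The "MMIO" registers
 * are plain variables here and the sizes are made up. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LOG_SIZE   256u         /* ring size in bytes, power of two */
#define ENTRY_SIZE 16u          /* one log entry */

static uint8_t  log_buf[LOG_SIZE];
static uint32_t mmio_head, mmio_tail;   /* byte offsets into log_buf */

static void handle_entry(const uint8_t *raw)
{
        uint64_t first_qword;

        memcpy(&first_qword, raw, sizeof(first_qword));
        printf("entry: first qword = 0x%016llx\n",
               (unsigned long long)first_qword);
}

static void poll_log(void)
{
        uint32_t head = mmio_head;
        uint32_t tail = mmio_tail;

        /* Consume everything the hardware has produced so far. */
        while (head != tail) {
                handle_entry(log_buf + head);
                head = (head + ENTRY_SIZE) % LOG_SIZE;
        }

        /* Publish the new head so the producer can reuse the space. */
        mmio_head = head;
}

int main(void)
{
        uint64_t fake = 0x1122334455667788ull;

        /* Pretend the hardware logged two entries and bumped the tail. */
        memcpy(log_buf + mmio_tail, &fake, sizeof(fake));
        mmio_tail = (mmio_tail + ENTRY_SIZE) % LOG_SIZE;
        memcpy(log_buf + mmio_tail, &fake, sizeof(fake));
        mmio_tail = (mmio_tail + ENTRY_SIZE) % LOG_SIZE;

        poll_log();
        return 0;
}

Note that the PPR poller additionally re-reads head and tail after handling an entry (lines 741 and 742 above), since the hardware can keep logging while an entry is being handled.
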
801 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
807 dev_set_msi_domain(dev, iommu->msi_domain);
812 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
823 struct amd_iommu *iommu = (struct amd_iommu *) data;
824 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
829 iommu->mmio_base + MMIO_STATUS_OFFSET);
833 iommu_poll_events(iommu);
838 iommu_poll_ppr_log(iommu);
844 iommu_poll_ga_log(iommu);
850 amd_iommu_restart_event_logging(iommu);
866 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
882 static int wait_on_sem(struct amd_iommu *iommu, u64 data)
886 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
899 static void copy_cmd_to_buffer(struct amd_iommu *iommu,
906 tail = iommu->cmd_buf_tail;
907 target = iommu->cmd_buf + tail;
911 iommu->cmd_buf_tail = tail;
914 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
918 struct amd_iommu *iommu,
921 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
1071 static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1078 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1080 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1094 iommu->cmd_buf_head = readl(iommu->mmio_base +
1100 copy_cmd_to_buffer(iommu, cmd);
1103 iommu->need_sync = sync;
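
The free-space check in __iommu_queue_command_sync() (lines 1078 and 1080 above) works because the command ring size is a power of two: with unsigned byte offsets, (head - next_tail) % CMD_BUFFER_SIZE yields the space still available even when the subtraction wraps. A standalone model of that arithmetic, with illustrative sizes:

/* Model of the free-space computation in __iommu_queue_command_sync().
 * head/tail are byte offsets into a power-of-two ring, so the unsigned
 * subtraction followed by "% size" gives the bytes left before the tail
 * would run into the head. Sizes here are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define CMD_BUFFER_SIZE 8192u   /* power of two */
#define CMD_ENTRY_SIZE  16u     /* size of one queued command */

static uint32_t space_left(uint32_t head, uint32_t tail)
{
        uint32_t next_tail = (tail + CMD_ENTRY_SIZE) % CMD_BUFFER_SIZE;

        /* Unsigned wraparound keeps this correct when head < next_tail. */
        return (head - next_tail) % CMD_BUFFER_SIZE;
}

int main(void)
{
        /* Empty ring (head == tail): almost the whole buffer is free. */
        printf("empty:       %u bytes left\n", space_left(0, 0));

        /* Tail has almost caught up with the head: the ring is nearly full. */
        printf("nearly full: %u bytes left\n", space_left(32, 0));

        /* Tail near the end, head near the start: offsets wrap cleanly. */
        printf("wrapped:     %u bytes left\n",
               space_left(64, CMD_BUFFER_SIZE - CMD_ENTRY_SIZE));
        return 0;
}

When the result is too small, the driver re-reads the hardware head pointer (line 1094 above) to pick up commands the IOMMU has already consumed before retrying.
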
1108 static int iommu_queue_command_sync(struct amd_iommu *iommu,
1115 raw_spin_lock_irqsave(&iommu->lock, flags);
1116 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1117 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1122 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1124 return iommu_queue_command_sync(iommu, cmd, true);
1131 static int iommu_completion_wait(struct amd_iommu *iommu)
1138 if (!iommu->need_sync)
1141 raw_spin_lock_irqsave(&iommu->lock, flags);
1143 data = ++iommu->cmd_sem_val;
1144 build_completion_wait(&cmd, iommu, data);
1146 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1150 ret = wait_on_sem(iommu, data);
1153 raw_spin_unlock_irqrestore(&iommu->lock, flags);
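
iommu_completion_wait() above pairs a strictly increasing sequence number (++iommu->cmd_sem_val at line 1143) with a COMPLETION_WAIT command that makes the hardware store that number to the cmd_sem location, and wait_on_sem() then polls that location until the value appears or LOOP_TIMEOUT expires. Below is a minimal single-threaded model of the handshake in which the device is reduced to a plain function call; the names mirror the listing but the code is illustrative only.

/* Model of the completion-wait handshake: bump a sequence number, have
 * the "device" store it to a memory location, and spin until it lands. */
#include <stdint.h>
#include <stdio.h>

#define LOOP_TIMEOUT 100000

static volatile uint64_t cmd_sem;       /* memory the device writes to */
static uint64_t cmd_sem_val;            /* driver-side sequence number */

/* Stand-in for the IOMMU executing a COMPLETION_WAIT command. */
static void fake_hw_complete(uint64_t data)
{
        cmd_sem = data;
}

static int wait_on_sem(uint64_t data)
{
        int i = 0;

        /* The real loop also delays briefly between reads. */
        while (cmd_sem != data && i < LOOP_TIMEOUT)
                i++;

        return (i == LOOP_TIMEOUT) ? -1 : 0;
}

static int completion_wait(void)
{
        uint64_t data = ++cmd_sem_val;  /* fresh, unique value */

        fake_hw_complete(data);         /* "queue" the wait command */
        return wait_on_sem(data);       /* spin until the write lands */
}

int main(void)
{
        printf("completion wait: %s\n", completion_wait() ? "timeout" : "ok");
        return 0;
}

The unique sequence value is what lets the driver tell this completion apart from earlier ones written to the same cmd_sem location.
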
1158 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1164 return iommu_queue_command(iommu, &cmd);
1167 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1172 iommu_flush_dte(iommu, devid);
1174 iommu_completion_wait(iommu);
1181 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1189 iommu_queue_command(iommu, &cmd);
1192 iommu_completion_wait(iommu);
1195 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1201 iommu_queue_command(iommu, &cmd);
1203 iommu_completion_wait(iommu);
1206 static void amd_iommu_flush_all(struct amd_iommu *iommu)
1212 iommu_queue_command(iommu, &cmd);
1213 iommu_completion_wait(iommu);
1216 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1222 iommu_queue_command(iommu, &cmd);
1225 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1230 iommu_flush_irt(iommu, devid);
1232 iommu_completion_wait(iommu);
1235 void iommu_flush_all_caches(struct amd_iommu *iommu)
1237 if (iommu_feature(iommu, FEATURE_IA)) {
1238 amd_iommu_flush_all(iommu);
1240 amd_iommu_flush_dte_all(iommu);
1241 amd_iommu_flush_irt_all(iommu);
1242 amd_iommu_flush_tlb_all(iommu);
1252 struct amd_iommu *iommu;
1257 iommu = amd_iommu_rlookup_table[dev_data->devid];
1261 return iommu_queue_command(iommu, &cmd);
1266 struct amd_iommu *iommu = data;
1268 return iommu_flush_dte(iommu, alias);
1276 struct amd_iommu *iommu;
1280 iommu = amd_iommu_rlookup_table[dev_data->devid];
1284 device_flush_dte_alias, iommu);
1286 ret = iommu_flush_dte(iommu, dev_data->devid);
1292 ret = iommu_flush_dte(iommu, alias);
1929 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1931 if (iommu_feature(iommu, FEATURE_EPHSUP))
1974 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1976 amd_iommu_flush_tlb_domid(iommu, old_domid);
1993 struct amd_iommu *iommu;
1996 iommu = amd_iommu_rlookup_table[dev_data->devid];
2004 domain->dev_iommu[iommu->index] += 1;
2019 struct amd_iommu *iommu;
2021 iommu = amd_iommu_rlookup_table[dev_data->devid];
2039 domain->dev_iommu[iommu->index] -= 1;
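
Lines 2004 and 2039 above maintain a per-IOMMU device count inside the protection domain: attaching a device increments domain->dev_iommu[iommu->index], detaching decrements it. The driver uses that count so domain-wide flushes only touch IOMMUs that actually have devices in the domain. A toy model of the bookkeeping follows; the array size and helper names are made up for illustration.

/* Toy model of the per-IOMMU device count kept in a protection domain
 * (domain->dev_iommu[iommu->index] in the listing). Attach increments,
 * detach decrements, and a broadcast flush skips IOMMUs whose count is
 * zero. */
#include <stdio.h>

#define MAX_IOMMUS 32

struct toy_domain {
        unsigned int dev_iommu[MAX_IOMMUS]; /* devices behind each IOMMU */
};

static void attach_device(struct toy_domain *d, unsigned int iommu_index)
{
        d->dev_iommu[iommu_index] += 1;
}

static void detach_device(struct toy_domain *d, unsigned int iommu_index)
{
        d->dev_iommu[iommu_index] -= 1;
}

static void flush_domain(const struct toy_domain *d)
{
        unsigned int i;

        for (i = 0; i < MAX_IOMMUS; i++) {
                if (!d->dev_iommu[i])
                        continue;       /* nothing behind this IOMMU */
                printf("queue flush on IOMMU %u\n", i);
        }
}

int main(void)
{
        struct toy_domain d = { { 0 } };

        attach_device(&d, 0);
        attach_device(&d, 3);
        flush_domain(&d);               /* flushes IOMMUs 0 and 3 only */

        detach_device(&d, 3);
        flush_domain(&d);               /* now only IOMMU 0 */
        return 0;
}
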
2227 struct amd_iommu *iommu;
2237 iommu = amd_iommu_rlookup_table[devid];
2240 return &iommu->iommu;
2249 amd_iommu_set_pci_msi_domain(dev, iommu);
2250 iommu_dev = &iommu->iommu;
2253 iommu_completion_wait(iommu);
2271 struct amd_iommu *iommu;
2276 iommu = amd_iommu_rlookup_table[devid];
2279 iommu_completion_wait(iommu);
2525 struct amd_iommu *iommu;
2538 iommu = amd_iommu_rlookup_table[devid];
2539 if (!iommu)
2548 iommu_completion_wait(iommu);
2556 struct amd_iommu *iommu;
2565 iommu = amd_iommu_rlookup_table[dev_data->devid];
2566 if (!iommu)
2583 iommu_completion_wait(iommu);
2909 struct amd_iommu *iommu;
2920 iommu = amd_iommu_rlookup_table[dev_data->devid];
2925 ret = iommu_queue_command(iommu, &cmd);
3083 struct amd_iommu *iommu;
3087 iommu = amd_iommu_rlookup_table[dev_data->devid];
3092 return iommu_queue_command(iommu, &cmd);
3214 "%s: no iommu for devid %x\n", __func__, devid))
3248 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
3253 iommu_flush_dte(iommu, devid);
3273 struct amd_iommu *iommu;
3279 iommu = amd_iommu_rlookup_table[devid];
3280 if (!iommu)
3290 set_remap_table_entry(iommu, devid, table);
3308 set_remap_table_entry(iommu, devid, table);
3319 set_remap_table_entry(iommu, devid, table);
3322 set_remap_table_entry(iommu, alias, table);
3325 iommu_completion_wait(iommu);
3343 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3345 if (!iommu)
3360 if (!iommu->irte_ops->is_allocated(table, index)) {
3370 iommu->irte_ops->set_allocated(table, index - c + 1);
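
The allocation scan shown at lines 3360 and 3370 above (alloc_irq_index() in the driver) is a first-fit search of the per-device interrupt remapping table: it counts consecutive free slots and, once the requested number is found in a row, marks the whole run allocated and returns its first index, which is what the index - c + 1 arithmetic computes. A standalone model of that scan, with a simplified table and bookkeeping:

/* Model of the first-fit scan used to allocate consecutive interrupt
 * remapping table entries. The table size and the plain bool array are
 * simplifications of the driver's table and irte_ops helpers. */
#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 512          /* entries in the remap table */

static bool allocated[TABLE_SIZE];

static int alloc_irq_index(int count)
{
        int index, c;

        for (index = 0, c = 0; index < TABLE_SIZE; index++) {
                if (allocated[index]) {
                        c = 0;          /* run broken, start counting again */
                        continue;
                }

                if (++c == count) {     /* found `count` free slots in a row */
                        for (; c != 0; c--)
                                allocated[index - c + 1] = true;
                        return index - count + 1;
                }
        }

        return -1;                      /* no room left in the table */
}

int main(void)
{
        allocated[2] = true;            /* pre-existing allocation */

        printf("first block of 2 starts at %d\n", alloc_irq_index(2)); /* 0 */
        printf("next block of 3 starts at %d\n", alloc_irq_index(3));  /* 3 */
        return 0;
}

The real function can also enforce a power-of-two alignment on the starting index for multi-MSI allocations, which this model leaves out.
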
3392 struct amd_iommu *iommu;
3396 iommu = amd_iommu_rlookup_table[devid];
3397 if (iommu == NULL)
3425 iommu_flush_irt(iommu, devid);
3426 iommu_completion_wait(iommu);
3434 struct amd_iommu *iommu;
3437 iommu = amd_iommu_rlookup_table[devid];
3438 if (iommu == NULL)
3449 iommu_flush_irt(iommu, devid);
3450 iommu_completion_wait(iommu);
3458 struct amd_iommu *iommu;
3461 iommu = amd_iommu_rlookup_table[devid];
3462 if (iommu == NULL)
3470 iommu->irte_ops->clear_allocated(table, index);
3473 iommu_flush_irt(iommu, devid);
3474 iommu_completion_wait(iommu);
3631 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3633 if (!iommu)
3639 return iommu->ir_domain;
3676 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3678 if (!iommu)
3683 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
3769 struct amd_iommu *iommu;
3779 iommu = amd_iommu_rlookup_table[devid];
3781 iommu->irte_ops->set_allocated(table, i);
3870 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3880 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3883 if (!iommu)
3886 iommu->irte_ops->activate(data->entry, irte_info->devid,
3888 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3897 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3899 if (iommu)
3900 iommu->irte_ops->deactivate(data->entry, irte_info->devid,
3970 struct amd_iommu *iommu;
3997 iommu = amd_iommu_rlookup_table[irte_info->devid];
3998 if (iommu == NULL)
4024 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
4034 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
4046 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
4049 if (!iommu)
4056 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
4082 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
4086 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
4089 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
4090 if (!iommu->ir_domain) {
4095 iommu->ir_domain->parent = arch_get_ir_parent_domain();
4096 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
4098 iommu->index);
4105 struct amd_iommu *iommu;
4116 iommu = amd_iommu_rlookup_table[devid];
4117 if (!iommu)
4139 iommu_flush_irt(iommu, devid);
4140 iommu_completion_wait(iommu);