Lines Matching defs:iommu in arch/powerpc/platforms/cell/iommu.c

24 #include <asm/iommu.h>
104 struct cbe_iommu *iommu;
131 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
138 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
195 invalidate_tce_cache(window->iommu, io_pte, npages);
218 __pa(window->iommu->pad_page) |
229 invalidate_tce_cache(window->iommu, io_pte, npages);
235 struct cbe_iommu *iommu = data;
237 stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
241 printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
253 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
270 printk(KERN_ERR "iommu: can't get address for %pOF\n",
298 static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu,
307 pr_debug("%s: iommu[%d]: segments: %lu\n",
308 __func__, iommu->nid, segments);
312 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
314 iommu->stab = page_address(page);
315 memset(iommu->stab, 0, stab_size);
318 static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
335 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
336 iommu->nid, ptab_size, get_order(ptab_size));
337 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
346 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
347 __func__, iommu->nid, iommu->stab, ptab,
370 iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
372 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
378 static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu)
384 if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
386 __func__, iommu->nid);
388 iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
389 iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
394 /* setup interrupts for the iommu. */
395 reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
396 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
398 out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
402 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
405 ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
408 /* set the IOC segment table origin register (and turn on the iommu) */
409 reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
410 out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
411 in_be64(iommu->xlate_regs + IOC_IOST_Origin);
414 reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
415 out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
418 static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu,
421 cell_iommu_setup_stab(iommu, base, size, 0, 0);
422 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
424 cell_iommu_enable_hardware(iommu);
428 static struct iommu_window *find_window(struct cbe_iommu *iommu,
435 list_for_each_entry(window, &(iommu->windows), list) {
450 printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
464 cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
474 window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
480 window->iommu = iommu;
483 window->table.it_base = (unsigned long)iommu->ptab;
484 window->table.it_index = iommu->nid;
491 if (!iommu_init_table(&window->table, iommu->nid, 0, 0))
492 panic("Failed to initialize iommu table");
500 list_add(&window->list, &iommu->windows);
512 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
514 iommu->pad_page = page_address(page);
515 clear_page(iommu->pad_page);
519 (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
545 struct cbe_iommu *iommu;
548 * node's iommu. We -might- do something smarter later though it may
551 iommu = cell_iommu_for_node(dev_to_node(dev));
552 if (iommu == NULL || list_empty(&iommu->windows)) {
553 dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
557 window = list_entry(iommu->windows.next, struct iommu_window, list);
622 struct cbe_iommu *iommu;
628 printk(KERN_ERR "iommu: failed to get node for %pOF\n",
632 pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
637 * iommu for that node is already setup.
640 * multiple window support since the cell iommu supports per-page ioids
644 printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
651 iommu = &iommus[i];
652 iommu->stab = NULL;
653 iommu->nid = nid;
654 snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
655 INIT_LIST_HEAD(&iommu->windows);
657 return iommu;
663 struct cbe_iommu *iommu;
666 iommu = cell_iommu_alloc(np);
667 if (!iommu)
677 cell_iommu_setup_hardware(iommu, base, size);
680 cell_iommu_setup_window(iommu, np, base, size,
699 pr_debug("iommu: cleaning up iommu on node %d\n", node);
717 /* When no iommu is present, we use direct DMA ops */
754 printk(KERN_WARNING "iommu: force-enabled, dma window"
765 printk("iommu: disabled, direct DMA offset is 0x%lx\n",
824 dev_dbg(dev, "iommu: no dma-ranges found\n");
851 dev_dbg(dev, "iommu: no suitable range found!\n");
874 pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
880 static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
886 ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
890 pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
906 pr_debug("iommu: fixed/dynamic overlap, skipping\n");
919 struct cbe_iommu *iommu;
927 pr_debug("iommu: fixed mapping disabled, no axons found\n");
936 pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
964 pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
973 pr_debug("iommu: hash window not segment aligned\n");
982 pr_debug("iommu: hash window doesn't fit in"
994 iommu = cell_iommu_alloc(np);
995 BUG_ON(!iommu);
1004 printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
1005 "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
1008 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
1009 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
1011 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
1013 cell_iommu_enable_hardware(iommu);
1014 cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
1067 /* Create an iommu for each /axon node. */
1074 /* Create an iommu for each toplevel /pci-internal node for
1083 /* Setup default PCI iommu ops */
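
The last three hits are comments from the driver's init path, which ties the pieces above together: one cbe_iommu is allocated and wired up per top-level /axon node (or per /pci-internal node on old firmware), and the IOMMU-backed PCI DMA ops are then installed. The following is only a rough sketch of that flow, not the verbatim source: error handling, the fixed-mapping variant, and the no-iommu fallback (hits at 717, 765, 927) are omitted, and the identifiers not shown in this listing (cell_iommu_init_one, SPIDER_DMA_OFFSET, set_pci_dma_ops, dma_iommu_ops, for_each_node_by_name) are taken from a reading of the driver and should be treated as assumptions here.

    /* Hedged sketch of the init flow suggested by the comments above. */
    static int __init cell_iommu_init_sketch(void)
    {
            struct device_node *np;

            /* Create an iommu for each top-level /axon node. */
            for_each_node_by_name(np, "axon") {
                    if (np->parent == NULL || np->parent->parent != NULL)
                            continue;               /* only top-level nodes */
                    cell_iommu_init_one(np, 0);
            }

            /* Create an iommu for each top-level /pci-internal node,
             * for older hardware/firmware without /axon nodes.
             */
            for_each_node_by_name(np, "pci-internal") {
                    if (np->parent == NULL || np->parent->parent != NULL)
                            continue;
                    cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
            }

            /* Setup default PCI iommu ops. */
            set_pci_dma_ops(&dma_iommu_ops);

            return 0;
    }

In the real driver this runs as an early arch initcall and falls back to direct DMA ops when no usable iommu or dma-ranges property is found, as the "disabled, direct DMA offset" and "fixed mapping disabled" messages in the listing indicate.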