Lines Matching defs:iommu (arch/powerpc/platforms/cell/iommu.c, the Cell Broadband Engine IOMMU driver; the number on each hit is its line in that file)

22 #include <asm/iommu.h>
102 struct cbe_iommu *iommu;
129 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
136 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
193 invalidate_tce_cache(window->iommu, io_pte, npages);
216 __pa(window->iommu->pad_page) |
227 invalidate_tce_cache(window->iommu, io_pte, npages);
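Note: the invalidate_tce_cache() hits above are the IOC translate-cache flush. After the I/O page table is modified (both the map and unmap paths above call it), the driver writes the physical address of the changed I/O PTEs into IOC_IOPT_CacheInvd and spins until the hardware clears the busy bit. A minimal sketch of that pattern follows; it assumes the driver's file-local struct cbe_iommu and IOC_IOPT_CacheInvd_* definitions, flush_one_io_pte is an illustrative name, and the entry-count packing of the real routine is omitted.

    /* Hedged sketch, not the driver routine: flush a single I/O PTE from
     * the IOC's translation cache. The real invalidate_tce_cache() also
     * packs an entry count into the command so it can batch invalidates. */
    static void flush_one_io_pte(struct cbe_iommu *iommu, unsigned long *pte)
    {
            unsigned long __iomem *reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

            out_be64(reg, (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask) |
                          IOC_IOPT_CacheInvd_Busy);

            /* the IOC clears the busy bit once the flush has completed */
            while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
                    cpu_relax();
    }
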
233 struct cbe_iommu *iommu = data;
235 stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
239 printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
251 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
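Note: the ioc_interrupt() hits show the DMA-exception path; the detail worth calling out is that the fault is acknowledged by writing the status value straight back to IOC_IO_ExcpStat. A stripped-down sketch follows, again assuming the driver's struct cbe_iommu and register offsets, with the decode of the fault fields that the real handler prints omitted.

    /* Hedged sketch of the acknowledge-by-writeback pattern. */
    static irqreturn_t ioc_exception_sketch(int irq, void *data)
    {
            struct cbe_iommu *iommu = data;
            unsigned long stat;

            stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
            printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);

            /* writing the status back clears the exception condition */
            out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

            return IRQ_HANDLED;
    }
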
268 printk(KERN_ERR "iommu: can't get address for %pOF\n",
296 static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
305 pr_debug("%s: iommu[%d]: segments: %lu\n",
306 __func__, iommu->nid, segments);
310 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
312 iommu->stab = page_address(page);
313 memset(iommu->stab, 0, stab_size);
316 static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
333 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
334 iommu->nid, ptab_size, get_order(ptab_size));
335 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
344 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
345 __func__, iommu->nid, iommu->stab, ptab,
368 iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
370 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
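Note: the stab[i] assignment is where the two-level translation structure is wired together: each I/O segment's entry combines a valid bit, the page-table-pages count and a page-size code with the physical address of that segment's slice of the I/O page table. A hedged sketch of the same arithmetic follows; IOSTE_V is the driver's valid flag, wire_segments and ptab_bytes_per_segment are illustrative names, and the flags argument stands in for the page-size and pages-per-segment encoding the real code builds.

    /* Hedged sketch: point consecutive segment-table entries at
     * consecutive chunks of the freshly allocated I/O page table. */
    static void wire_segments(struct cbe_iommu *iommu, unsigned long *ptab,
                              unsigned long start_seg, unsigned long segments,
                              unsigned long ptab_bytes_per_segment,
                              unsigned long flags)
    {
            unsigned long i;

            for (i = start_seg; i < start_seg + segments; i++)
                    iommu->stab[i] = IOSTE_V | flags | (__pa(ptab) +
                            ptab_bytes_per_segment * (i - start_seg));
    }
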
376 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
382 if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
384 __func__, iommu->nid);
386 iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
387 iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
392 /* setup interrupts for the iommu. */
393 reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
394 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
396 out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
400 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
403 ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
406 /* set the IOC segment table origin register (and turn on the iommu) */
407 reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
408 out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
409 in_be64(iommu->xlate_regs + IOC_IOST_Origin);
412 reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
413 out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
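Note: cell_iommu_enable_hardware() pulls the pieces together: map the IOC register block, hook the exception interrupt, point IOC_IOST_Origin at the segment table with its enable bits set, and finally flip the translation-enable bit in the command configuration register. The bare in_be64() of IOC_IOST_Origin appears to be a read-back that pushes the MMIO write out before translation is switched on. A condensed sketch of just the final enable steps, assuming the driver's register and flag names (switch_on_translation is an illustrative name):

    /* Hedged sketch of the final enable sequence. */
    static void switch_on_translation(struct cbe_iommu *iommu)
    {
            unsigned long reg;

            /* point the IOC at the segment table and enable it */
            reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
            out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
            in_be64(iommu->xlate_regs + IOC_IOST_Origin);  /* flush the write */

            /* turn on I/O translation in the command configuration register */
            reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
            out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
    }
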
416 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
419 cell_iommu_setup_stab(iommu, base, size, 0, 0);
420 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
422 cell_iommu_enable_hardware(iommu);
426 static struct iommu_window *find_window(struct cbe_iommu *iommu,
433 list_for_each_entry(window, &(iommu->windows), list) {
448 printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
462 cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
472 window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
478 window->iommu = iommu;
481 window->table.it_base = (unsigned long)iommu->ptab;
482 window->table.it_index = iommu->nid;
489 iommu_init_table(&window->table, iommu->nid, 0, 0);
497 list_add(&window->list, &iommu->windows);
509 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
511 iommu->pad_page = page_address(page);
512 clear_page(iommu->pad_page);
516 (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
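Note: cell_iommu_setup_window() fills in a generic iommu_table backed by the node's ptab and, for a window that starts at DMA address 0, reserves the very first IOMMU page and backs it with a private zeroed pad page so that entry 0 is never handed to a device. A sketch of that tail end; reserve_first_io_page is an illustrative name, tce_build_cell is assumed to be the driver's PTE writer with the argument order shown above, and error handling is omitted.

    /* Hedged sketch: reserve IOMMU page 0 of a window at DMA offset 0 and
     * point it at a zeroed pad page owned by this iommu. */
    static void reserve_first_io_page(struct cbe_iommu *iommu,
                                      struct iommu_window *window)
    {
            struct page *page;

            page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
            iommu->pad_page = page_address(page);
            clear_page(iommu->pad_page);

            /* keep the allocator away from entry 0, then map it to the pad page */
            __set_bit(0, window->table.it_map);
            tce_build_cell(&window->table, window->table.it_offset, 1,
                           (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
    }
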
542 struct cbe_iommu *iommu;
545 * node's iommu. We -might- do something smarter later though it may
548 iommu = cell_iommu_for_node(dev_to_node(dev));
549 if (iommu == NULL || list_empty(&iommu->windows)) {
550 dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
554 window = list_entry(iommu->windows.next, struct iommu_window, list);
619 struct cbe_iommu *iommu;
625 printk(KERN_ERR "iommu: failed to get node for %pOF\n",
629 pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
634 * iommu for that node is already setup.
637 * multiple window support since the cell iommu supports per-page ioids
641 printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
648 iommu = &iommus[i];
649 iommu->stab = NULL;
650 iommu->nid = nid;
651 snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
652 INIT_LIST_HEAD(&iommu->windows);
654 return iommu;
660 struct cbe_iommu *iommu;
663 iommu = cell_iommu_alloc(np);
664 if (!iommu)
674 cell_iommu_setup_hardware(iommu, base, size);
677 cell_iommu_setup_window(iommu, np, base, size,
696 pr_debug("iommu: cleaning up iommu on node %d\n", node);
714 /* When no iommu is present, we use direct DMA ops */
749 printk(KERN_WARNING "iommu: force-enabled, dma window"
760 printk("iommu: disabled, direct DMA offset is 0x%lx\n",
819 dev_dbg(dev, "iommu: no dma-ranges found\n");
846 dev_dbg(dev, "iommu: no suitable range found!\n");
869 pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
875 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
881 ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
885 pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
901 pr_debug("iommu: fixed/dynamic overlap, skipping\n");
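Note: the fixed-mapping path allocates a second page table whose page shift is 24, i.e. 16MB I/O pages, and fills it by hand; the addr/ptab/segment/offset pr_debug above is printed by the helper that does the arithmetic. An address is split into its I/O segment and the 16MB slot within that segment, and the PTE is dropped into that segment's page of the fixed ptab. A hedged sketch follows, assuming the driver's IO_SEGMENT_SHIFT and CBE_IOPTE_RPN_Mask definitions; insert_16M_pte_sketch is an illustrative name and the one-4K-page-of-PTEs-per-segment stride is an assumption here.

    /* Hedged sketch: install one 16MB I/O PTE into the fixed-mapping page
     * table. The segment index selects which 4K page of PTEs to use, the
     * 16MB slot selects the entry within it. */
    static void insert_16M_pte_sketch(unsigned long addr, unsigned long phys,
                                      unsigned long *ptab, unsigned long base_pte)
    {
            unsigned long segment = addr >> IO_SEGMENT_SHIFT;
            unsigned long slot = (addr >> 24) - (segment << (IO_SEGMENT_SHIFT - 24));

            ptab += segment * ((1 << 12) / sizeof(unsigned long));
            ptab[slot] = base_pte | (phys & CBE_IOPTE_RPN_Mask);
    }
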
914 struct cbe_iommu *iommu;
922 pr_debug("iommu: fixed mapping disabled, no axons found\n");
931 pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
959 pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
968 pr_debug("iommu: hash window not segment aligned\n");
977 pr_debug("iommu: hash window doesn't fit in"
989 iommu = cell_iommu_alloc(np);
990 BUG_ON(!iommu);
999 printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
1000 "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
1003 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
1004 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
1006 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
1008 cell_iommu_enable_hardware(iommu);
1009 cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
1062 /* Create an iommu for each /axon node. */
1069 /* Create an iommu for each toplevel /pci-internal node for
1078 /* Setup default PCI iommu ops */
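Note: cell_iommu_init() discovers one IOMMU per Axon bridge by walking the device tree: every top-level /axon node gets its own cbe_iommu, /pci-internal nodes are handled the same way for older firmware, and the default PCI DMA ops are switched over at the end. A hedged sketch of the axon discovery loop under those assumptions; discover_axon_iommus is an illustrative name and the 0 matches the second argument the listing's axon path passes to cell_iommu_init_one().

    /* Hedged sketch: one cbe_iommu per top-level axon node. */
    static int __init discover_axon_iommus(void)
    {
            struct device_node *np;

            for_each_node_by_name(np, "axon") {
                    if (np->parent == NULL || np->parent->parent != NULL)
                            continue;       /* only toplevel axon nodes */
                    cell_iommu_init_one(np, 0);
            }
            return 0;
    }
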