Lines Matching defs:page
42 #include <asm/page.h> /* PAGE_OFFSET */
81 ** error containment, and is therefore off by default. The page that is
185 ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
188 ** really only supported using the IOC at a 4k page size.
192 ** page iff that driver instance owns it.
840 * of 1 page and a maximum of 2GB. Hardware requires the address be
880 ** the spill page so devices prefetching won't
911 * @page: page to map
912 * @poff: offset into page
919 static dma_addr_t sba_map_page(struct device *dev, struct page *page,
925 void *addr = page_address(page) + poff;
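A minimal sketch (not the driver's code; the helper below is a hypothetical placeholder) of what the lines around sba_map_page() show: the (page, offset) pair handed to the map_page operation is flattened into a kernel virtual address first, and everything downstream works with that address and the IOMMU page size.

#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Hypothetical stand-in for the driver's IOVA allocation + PDIR programming. */
static dma_addr_t sketch_program_iova(struct device *dev, void *addr, size_t size);

static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
				  unsigned long poff, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	/* flatten (page, offset) into the CPU virtual address of the buffer */
	void *addr = page_address(page) + poff;

	return sketch_program_iova(dev, addr, size);
}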
1121 struct page *page;
1132 page = alloc_pages_node(node, flags, get_order(size));
1133 if (unlikely(!page))
1136 addr = page_address(page);
1138 *dma_handle = page_to_phys(page);
1157 *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
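A condensed, hedged sketch of the coherent-allocation flow these lines belong to: allocate pages (optionally on the IOC's NUMA node), publish the physical address when the device's coherent mask can reach it, and otherwise map the page through the IOMMU via the SAC-only pseudo device. The struct layout, error handling, and the sba_map_page() arguments beyond the size are simplifying assumptions.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Simplified stand-in for the driver's per-IOC state. */
struct sketch_ioc {
	struct pci_dev *sac_only_dev;
};

/* Assumed prototype for the driver's map_page implementation. */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir, unsigned long attrs);

static void *sketch_alloc_coherent(struct sketch_ioc *ioc, struct device *dev,
				   size_t size, dma_addr_t *dma_handle,
				   gfp_t flags, int node)
{
	struct page *page;
	void *addr;

	page = alloc_pages_node(node, flags, get_order(size));
	if (unlikely(!page))
		return NULL;

	addr = page_address(page);
	memset(addr, 0, size);
	*dma_handle = page_to_phys(page);

	/*
	 * If the coherent mask cannot reach the physical address, fall back
	 * to an IOMMU mapping set up through the SAC-only pseudo device.
	 */
	if (*dma_handle & ~dev->coherent_dma_mask)
		*dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0,
					   size, DMA_BIDIRECTIONAL, 0);

	return addr;
}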
1278 ** "start of next" are both on an IOV page boundary.
1381 ** must start on page boundaries and dovetail
1390 ** 3) do the entries end/start on page boundaries?
1490 ** correct virtual address associated with each DMA page.
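A minimal sketch, with assumed names, of the boundary rule the coalescing comments state: two scatterlist chunks can be folded into one virtually contiguous IOVA stretch only if the end of the first and the start of the next both land on an IOV page boundary.

#include <linux/types.h>

/*
 * Sketch only: the IOV-page-boundary test described in the comments above.
 * iovp_size is the configured IOMMU page size; the helper name is not the
 * driver's.
 */
static bool sketch_chunks_can_coalesce(unsigned long prev_end,
				       unsigned long next_start,
				       unsigned long iovp_size)
{
	unsigned long iovp_mask = iovp_size - 1;

	/* "end of previous" and "start of next" must both be page aligned */
	return !(prev_end & iovp_mask) && !(next_start & iovp_mask);
}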
1611 panic(PFX "Unsupported IOTLB page size %ldK",
1625 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
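A hedged sketch of the size check behind the panic above. The zx1 IOC only supports 4/8/16/64KB IOTLB pages (selected through the TCNFG register), so any other configured size is fatal; the 0..3 TCNFG encodings below are an assumption for illustration, and PFX stands for the driver's message prefix macro.

static unsigned int sketch_iovp_size_to_tcnfg(unsigned long iovp_size)
{
	switch (iovp_size) {
	case  4 * 1024: return 0;	/* assumed TCNFG encoding */
	case  8 * 1024: return 1;
	case 16 * 1024: return 2;
	case 64 * 1024: return 3;
	default:
		panic(PFX "Unsupported IOTLB page size %ldK", iovp_size >> 10);
	}
}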
1650 ** Check to see if the spill page has been allocated; we don't need more than
1660 panic(PFX "Couldn't allocate PDIR spill page\n");
1671 ** Set all the PDIR entries valid w/ the spill page as the target
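A sketch, with an assumed valid-bit constant, of the initialization the spill-page comments describe: every PDIR entry is made valid and pointed at a single poisoned spill page, so a device that prefetches past its real mapping never hits an invalid translation.

#include <linux/types.h>

/* Illustrative valid/attribute bits; the real entry format differs. */
#define SKETCH_PDIR_VALID	0x8000000000000000ULL

static void sketch_init_pdir_with_spill(u64 *pdir_base,
					unsigned long nr_entries,
					u64 spill_page_phys)
{
	unsigned long i;

	/* all entries valid, all pointing at the same harmless spill page */
	for (i = 0; i < nr_entries; i++)
		pdir_base[i] = SKETCH_PDIR_VALID | spill_page_phys;
}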
1889 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1907 seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
2009 /* zx1-based systems default to kernel page size iommu pages */
2017 * iommu page size
2148 printk("%s: unknown/unsupported iommu page size %ld\n",
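These last lines belong to boot-time page-size selection: zx1-based systems default to the kernel page size, and a command-line override is accepted only for the sizes the IOC supports. A hedged sketch of that validation, with the parsing hook and the iovp_shift global simplified:

#include <linux/kernel.h>
#include <linux/init.h>

static unsigned long iovp_shift;	/* driver global in the real code */

/* Sketch of validating a user-supplied IOMMU page size at boot. */
static int __init sketch_page_override(char *str)
{
	unsigned long page_size = memparse(str, &str);

	switch (page_size) {
	case 4096:
	case 8192:
	case 16384:
	case 65536:
		iovp_shift = ffs(page_size) - 1;	/* remember the new shift */
		break;
	default:
		printk("%s: unknown/unsupported iommu page size %ld\n",
		       __func__, page_size);
	}
	return 1;
}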