Lines matching defs:page in drivers/iommu/dma-iommu.c (Linux kernel; the leading numbers are line numbers in that file)
64 /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
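The IOMMU_DMA_MSI_COOKIE allocator mentioned above never frees individual pages; it simply advances a cursor through the MSI IOVA window. A minimal sketch of that bump-allocator idea, assuming a hypothetical msi_cookie type (in the real code the cursor lives inside the DMA cookie structure):

#include <linux/types.h>

struct msi_cookie {
	dma_addr_t msi_iova;	/* next unused IOVA in the MSI window */
};

/* Hand out the next 'size' bytes of IOVA space; nothing is ever freed */
static dma_addr_t msi_cookie_alloc_iova(struct msi_cookie *cookie, size_t size)
{
	dma_addr_t iova = cookie->msi_iova;

	cookie->msi_iova += size;
	return iova;
}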
565 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
566 * avoid rounding surprises. If necessary, we reserve the page at address 0
583 /* Use the smallest supported page size for IOVA granularity */
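The granularity choice at line 583 comes down to the lowest set bit of the domain's page-size bitmap. A hedged sketch, assuming the standard struct iommu_domain pgsize_bitmap field (the function name is illustrative):

#include <linux/bitops.h>
#include <linux/iommu.h>

/* Lowest set bit of pgsize_bitmap = smallest hardware-supported page size */
static unsigned long sketch_iova_granule(struct iommu_domain *domain)
{
	return 1UL << __ffs(domain->pgsize_bitmap);
}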
632 * page flags.
637 * Return: corresponding IOMMU API page protection flags
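Lines 632 and 637 belong to a helper that translates a DMA API direction plus attributes into IOMMU API protection flags. A sketch of that translation, assuming the usual IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE/IOMMU_PRIV flags (the function name here is illustrative, not the kernel's):

#include <linux/dma-mapping.h>
#include <linux/iommu.h>

static int sketch_info_to_prot(enum dma_data_direction dir, bool coherent,
			       unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;	/* device reads from memory */
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;	/* device writes to memory */
	default:
		return 0;
	}
}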
771 static void __iommu_dma_free_pages(struct page **pages, int count)
778 static struct page **__iommu_dma_alloc_pages(struct device *dev,
781 struct page **pages;
796 struct page *page = NULL;
812 page = alloc_pages_node(nid, alloc_flags, order);
813 if (!page)
816 split_page(page, order);
819 if (!page) {
825 pages[i++] = page++;
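Lines 778 through 825 are the core of the page-array allocator: try a high order first, fall back to progressively smaller orders, then split_page() the result so every entry in the array is an independently freeable order-0 page. A self-contained sketch of that pattern (names are illustrative):

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct page **sketch_alloc_page_array(int nid, unsigned int count,
					     unsigned int max_order, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	while (count) {
		unsigned int order = min_t(unsigned int, __fls(count), max_order);
		struct page *page;
		unsigned int nr;

		for (;;) {
			gfp_t alloc_flags = gfp;

			/* Don't retry hard for anything bigger than a page */
			if (order)
				alloc_flags |= __GFP_NORETRY | __GFP_NOWARN;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (page || !order)
				break;
			order--;	/* fall back to a smaller block */
		}
		if (!page)
			goto out_free;

		/* One order-N block becomes 2^N independently freeable pages */
		if (order)
			split_page(page, order);
		nr = 1U << order;
		count -= nr;
		while (nr--)
			pages[i++] = page++;
	}
	return pages;

out_free:
	while (i--)
		__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}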
831 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
834 static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
844 struct page **pages;
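The comment at line 831 notes that sub-page requests still consume a whole CPU page; the rounding is plain PAGE_ALIGN. As a one-line sketch:

#include <linux/mm.h>

/* A sub-page request still consumes (and maps) one full CPU page */
static unsigned int sketch_page_count(size_t size)
{
	return PAGE_ALIGN(size) >> PAGE_SHIFT;
}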
913 struct page **pages;
1030 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
1034 phys_addr_t phys = page_to_phys(page) + offset;
1044 * page aligned, we don't need to use a bounce page.
1150 * - and this segment starts on an IOVA page boundary
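The bounce-page comment at line 1044 concerns untrusted devices: a buffer may be mapped in place only when both its start address and its length sit on IOVA-granule boundaries, because the IOMMU cannot map less than a granule without exposing neighbouring memory. A sketch of that check (the real code derives the granule via iova_offset() on its iova_domain):

#include <linux/types.h>

static bool sketch_needs_bounce(phys_addr_t phys, size_t size, size_t granule)
{
	/* Misaligned start or length would expose adjacent data; bounce it */
	return (phys | size) & (granule - 1);
}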
1429 struct page *page = NULL, **pages = NULL;
1443 page = vmalloc_to_page(cpu_addr);
1447 page = virt_to_page(cpu_addr);
1452 if (page)
1453 dma_free_contiguous(dev, page, alloc_size);
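Lines 1429 through 1453 come from the free path, which must recover the backing page differently for remapped (vmalloc-area) buffers and linear-map buffers before handing it to dma_free_contiguous(). A sketch of that distinction:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Remapped buffers live in the vmalloc area; others in the linear map */
static struct page *sketch_backing_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);
	return virt_to_page(cpu_addr);
}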
1464 struct page **pagep, gfp_t gfp, unsigned long attrs)
1469 struct page *page = NULL;
1472 page = dma_alloc_contiguous(dev, alloc_size, gfp);
1473 if (!page)
1474 page = alloc_pages_node(node, gfp, get_order(alloc_size));
1475 if (!page)
1478 if (!coherent || PageHighMem(page)) {
1481 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
1487 arch_dma_prep_coherent(page, size);
1489 cpu_addr = page_address(page);
1492 *pagep = page;
1496 dma_free_contiguous(dev, page, alloc_size);
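Lines 1464 through 1496 form a contiguous-buffer allocator: prefer dma_alloc_contiguous() (CMA), fall back to the buddy allocator, and create a fresh CPU mapping whenever the buffer must be non-cacheable or its page sits in highmem. A hedged reconstruction of that flow (names illustrative, error handling trimmed):

#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static void *sketch_alloc_cpu_addr(struct device *dev, size_t size,
				   struct page **pagep, gfp_t gfp,
				   pgprot_t prot, bool coherent)
{
	size_t alloc_size = PAGE_ALIGN(size);
	struct page *page;
	void *cpu_addr;

	/* Prefer CMA, fall back to the buddy allocator */
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(alloc_size));
	if (!page)
		return NULL;

	if (!coherent || PageHighMem(page)) {
		/* Need a CPU mapping with the right (e.g. non-cacheable) attributes */
		cpu_addr = dma_common_contiguous_remap(page, alloc_size, prot,
					__builtin_return_address(0));
		if (!cpu_addr) {
			dma_free_contiguous(dev, page, alloc_size);
			return NULL;
		}
		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	return cpu_addr;
}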
1505 struct page *page = NULL;
1518 page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
1521 cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1525 *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
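Lines 1505 through 1525 show the policy split in the top-level allocator: non-blocking, non-coherent requests take a pre-remapped chunk from the atomic pool (no sleeping, no vmap allowed), everything else goes through the regular allocation helper, and the result is then mapped into the IOMMU domain. A sketch of the decision, reusing the hypothetical helper above; dma_alloc_from_pool()'s signature here is as of the 5.x kernels this listing appears to match:

#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *sketch_alloc_buffer(struct device *dev, size_t size,
				 struct page **pagep, gfp_t gfp,
				 unsigned long attrs, bool coherent)
{
	void *cpu_addr = NULL;

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent) {
		/* Atomic path: pre-remapped memory, no sleeping allowed */
		*pagep = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					     gfp, NULL);
	} else {
		/* Reuses the hypothetical helper sketched earlier */
		cpu_addr = sketch_alloc_cpu_addr(dev, size, pagep, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), coherent);
	}
	/* Caller then maps page_to_phys(*pagep) into the IOMMU domain */
	return cpu_addr;
}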
1552 struct page **pages = dma_common_find_pages(cpu_addr);
1570 struct page *page;
1574 struct page **pages = dma_common_find_pages(cpu_addr);
1582 page = vmalloc_to_page(cpu_addr);
1584 page = virt_to_page(cpu_addr);
1589 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
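Lines 1552 through 1589 are from the mmap/get_sgtable paths: a buffer that was remapped carries a page array retrievable with dma_common_find_pages(), while a contiguous buffer is described by a single scatterlist entry. A sketch of the sgtable side (function name illustrative):

#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

static int sketch_get_sgtable(struct sg_table *sgt, void *cpu_addr,
			      size_t size, gfp_t gfp)
{
	struct page **pages = dma_common_find_pages(cpu_addr);
	struct page *page;
	int ret;

	/* Remapped buffers keep their page array alongside the vmap area */
	if (pages)
		return sg_alloc_table_from_pages(sgt, pages,
				PAGE_ALIGN(size) >> PAGE_SHIFT, 0, size, gfp);

	/* Contiguous buffers need just one scatterlist entry */
	page = is_vmalloc_addr(cpu_addr) ? vmalloc_to_page(cpu_addr)
					 : virt_to_page(cpu_addr);
	ret = sg_alloc_table(sgt, 1, gfp);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}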
1696 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1697 * @desc: MSI descriptor, will store the MSI page
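iommu_dma_prepare_msi() at line 1696 is called by MSI irqchip drivers at IRQ allocation time, so the doorbell page already has a DMA address in the device's IOMMU domain before the first interrupt fires. A hedged usage sketch; doorbell_phys is a placeholder for the irqchip's doorbell physical address, and the declaring header has moved between kernel versions:

#include <linux/dma-iommu.h>	/* header location varies by kernel version */
#include <linux/msi.h>

static int sketch_setup_msi(struct msi_desc *desc, phys_addr_t doorbell_phys)
{
	/* Maps the doorbell into the device's domain and stores it in desc */
	return iommu_dma_prepare_msi(desc, doorbell_phys);
}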