Lines matching refs:page (references to the identifier "page"):

45         /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
310 * @base and @size should be exact multiples of IOMMU page granularity to
311 * avoid rounding surprises. If necessary, we reserve the page at address 0
328 /* Use the smallest supported page size for IOVA granularity */
434 * page flags.
439 * Return: corresponding IOMMU API page protection flags
570 static void iommu_dma_free_pages_ext(struct page **pages, int count)
578 static struct page **iommu_dma_alloc_pages_ext(struct device *dev, unsigned int count, unsigned long order_mask,
581 struct page **pages;
601 struct page *page = NULL;
617 page = alloc_pages_node(nid, alloc_flags, order);
618 if (!page) {
622 split_page(page, order);
626 if (!page) {
632 pages[i++] = page++;
648 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
662 struct page **pages;
740 static int iommu_dma_mmap_ext(struct page **pages, size_t size, struct vm_area_struct *vma)
797 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size,
800 phys_addr_t phys = page_to_phys(page) + offset;
850 * - and this segment starts on an IOVA page boundary
1033 struct page *page = NULL, **pages = NULL;
1047 page = vmalloc_to_page(cpu_addr);
1052 page = virt_to_page(cpu_addr);
1058 if (page) {
1059 dma_free_contiguous(dev, page, alloc_size);
1069 static void *iommu_dma_alloc_pages(struct device *dev, size_t size, struct page **pagep, gfp_t gfp, unsigned long attrs)
1074 struct page *page = NULL;
1077 page = dma_alloc_contiguous(dev, alloc_size, gfp);
1078 if (!page) {
1079 page = alloc_pages_node(node, gfp, get_order(alloc_size));
1081 if (!page) {
1085 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
1088 cpu_addr = dma_common_contiguous_remap(page, alloc_size, prot, __builtin_return_address(0));
1094 arch_dma_prep_coherent(page, size);
1097 cpu_addr = page_address(page);
1100 *pagep = page;
1104 dma_free_contiguous(dev, page, alloc_size);
1112 struct page *page = NULL;
1122 page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, gfp, NULL);
1124 cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1130 *handle = iommu_dma_map_ext(dev, page_to_phys(page), size, ioprot, dev->coherent_dma_mask);
1144 struct page *page;
1146 page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
1147 if (!page) {
1150 return page_address(page);
1185 struct page **pages = dma_common_find_pages(cpu_addr);
1201 struct page *page;
1205 struct page **pages = dma_common_find_pages(cpu_addr);
1211 page = vmalloc_to_page(cpu_addr);
1213 page = virt_to_page(cpu_addr);
1218 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
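Two of the patterns behind these matches are worth spelling out. The doc-comment fragments at lines 434 and 439 describe a helper that turns DMA API information into IOMMU API page protection flags. A minimal sketch of that translation, assuming only the standard dma_data_direction and IOMMU_* flag definitions, is below; the function name demo_dir_to_iommu_prot is illustrative (not from the file), and the real helper also factors in mapping attributes such as privileged access.

#include <linux/dma-direction.h>
#include <linux/iommu.h>

/* Illustrative only: map a DMA transfer direction to IOMMU page protection flags. */
static int demo_dir_to_iommu_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}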
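The largest cluster of matches (lines 570-632) belongs to the page-array allocator: it tries the highest block order still permitted by the caller's order mask, splits a successful high-order allocation into order-0 pages with split_page(), and falls back to smaller orders (eventually single pages) when a larger block cannot be obtained. Below is a kernel-style sketch of that pattern under those assumptions; the names demo_alloc_page_array and demo_free_page_array are mine rather than the file's, and gfp handling is simplified.

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative helper: free a (possibly partially filled) array of order-0 pages. */
static void demo_free_page_array(struct page **pages, int count)
{
	while (count--)
		__free_pages(pages[count], 0);
	kvfree(pages);
}

/*
 * Allocate @count single pages, preferring high-order blocks allowed by
 * @order_mask and splitting them into order-0 pages, so the caller can map
 * them as one contiguous IOVA range even though they are scattered in RAM.
 */
static struct page **demo_alloc_page_array(int nid, unsigned int count,
					    unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/* Never try an order larger than what is still needed. */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			/* Don't retry hard for big blocks; smaller orders remain as fallback. */
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order); /* one order-N block becomes 2^N order-0 pages */
			break;
		}
		if (!page) {
			demo_free_page_array(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

Splitting rather than keeping the high-order block means each page can be tracked and freed individually, while the IOMMU still presents the whole set as one contiguous IOVA range to the device.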