Lines Matching defs:page
79 /* IO virtual address start page frame number */
84 /* page table handling */
89 * This bitmap is used to advertise the page sizes our hardware supports
91 * physically contiguous memory regions it is mapping into page sizes
95 * after making sure the size is an order of a 4KiB page and that the
99 * all page sizes that are an order of 4KiB.
102 * we could change this to advertise the real page sizes we support.
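The bitmap these comment lines describe is not itself among the matches; in the driver it is plausibly just a mask with every bit from 4KiB upward set, along the lines below (the macro name and value are an assumption, not taken from the matches above):

/* Advertise every page size that is a power-of-two multiple of 4KiB:
 * clearing the low 12 bits leaves bits 12, 13, 14, ... all set. */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)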
162 static inline unsigned long page_to_dma_pfn(struct page *pg)
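Only the prototype at 162 matches; a minimal sketch of the conversion it performs, assuming the driver's 4KiB VT-d page size (VTD_PAGE_SHIFT) and an mm_to_dma_pfn() companion helper:

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        /* Scale an MM page frame number to 4KiB VT-d page frame numbers. */
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}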
455 pr_info("Disable supported super page\n");
511 struct page *page;
514 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
515 if (page)
516 vaddr = page_address(page);
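Lines 511-516 are fragments of the page-table page allocator; pieced together (the function name is assumed from the driver), the allocation path is roughly:

static void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        /* One zeroed 4KiB page, allocated on the requested NUMA node. */
        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}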
1166 * Free the page table if we're below the level we want to
1181 * clear last level (leaf) ptes and free page table pages below the
1206 /* When a page at a given level is being unlinked from its parent, we don't
1209 know the hardware page-walk will no longer touch them.
1210 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1212 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1214 struct page *freelist)
1216 struct page *pg;
1236 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1240 struct page *freelist)
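The signatures at 1212-1240 implement the freelist described by the comment at 1206-1210: unlinked page-table pages are chained through page->freelist so they are freed only after the IOTLB flush, ensuring the hardware page-walk never touches a freed page. A sketch of dma_pte_list_pagetables(), assuming the driver's dma_pte_addr(), dma_pte_present(), dma_pte_superpage() and first_pte_in_page() helpers:

static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                            int level, struct dma_pte *pte,
                                            struct page *freelist)
{
        struct page *pg;

        /* Chain the page-table page that the parent PTE points to. */
        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
                return freelist;

        /* Recurse into every present, non-superpage entry it contains. */
        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
}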
1258 /* These subordinate page tables are going away entirely. Don't
1286 the page tables, and may have cached the intermediate levels. The
1288 static struct page *domain_unmap(struct dmar_domain *domain,
1292 struct page *freelist;
1304 struct page *pgd_page = virt_to_page(domain->pgd);
1314 static void dma_free_pagelist(struct page *freelist)
1316 struct page *pg;
1326 struct page *freelist = (struct page *)data;
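dma_free_pagelist() at 1314-1316 consumes that chain once the flush has completed (invoked via the cast at 1326); a minimal sketch, assuming a free_pgtable_page() counterpart to the allocator above:

static void dma_free_pagelist(struct page *freelist)
{
        struct page *pg;

        /* Walk the page->freelist chain, returning each page-table page. */
        while ((pg = freelist)) {
                freelist = pg->freelist;
                free_pgtable_page(page_address(pg));
        }
}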
2082 struct page *freelist;
2228 * Skip top levels of page tables for iommu which has
2355 /* Returns a number of VTD pages, but aligned to MM page size */
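A plausible shape for the helper this comment documents (the name aligned_nrpages() and VTD_PAGE_SHIFT are assumed): round the buffer up to whole MM pages, then count it in 4KiB VT-d pages:

static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        /* Keep only the offset within the MM page, round up, convert. */
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}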
2374 /* To use a large page, the virtual *and* physical addresses
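The alignment rule in this comment is checked by merging the IOVA and physical PFNs and testing the low bits at each superpage stride; a sketch of the surrounding helper (the name hardware_largepage_caps() and the VTD_STRIDE_* constants are assumptions taken from the driver's usual definitions):

static int hardware_largepage_caps(struct dmar_domain *domain,
                                   unsigned long iov_pfn,
                                   unsigned long phy_pfn,
                                   unsigned long pages)
{
        int support, level = 1;
        unsigned long pfnmerge;

        support = domain->iommu_superpage;

        /* ORing the PFNs lets one test cover both alignments at once. */
        pfnmerge = iov_pfn | phy_pfn;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}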
2443 /* It is a large page */
2454 * Ensure that old small page tables are
2492 /* If the next PTE would be the first in a new page, then we
2657 * Skip top levels of page tables for iommu which has
3559 dev_err_once(dev, "Allocating %ld-page iova failed\n",
3604 * paddr - (paddr + size) might be a partial page, we should map the whole
3605 * page. Note: if two parts of one page are separately mapped, we
3629 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3634 return __intel_map_single(dev, page_to_phys(page) + offset,
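intel_map_page() itself is a thin wrapper; the whole-page rounding described at 3604-3605 happens inside __intel_map_single(). A sketch of the wrapper, with the __intel_map_single() parameter order assumed from the call shown at 3634:

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
{
        /* Map from the page's physical base plus offset; partial pages are
         * rounded out to whole pages further down the call chain. */
        return __intel_map_single(dev, page_to_phys(page) + offset,
                                  size, dir, *dev->dma_mask);
}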
3652 struct page *freelist;
3706 struct page *page = NULL;
3718 page = dma_alloc_from_contiguous(dev, count, order,
3722 if (!page)
3723 page = alloc_pages(flags, order);
3724 if (!page)
3726 memset(page_address(page), 0, size);
3728 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3732 return page_address(page);
3733 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3734 __free_pages(page, order);
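Pieced together, the coherent-allocation fragments at 3706-3734 try CMA first, fall back to the page allocator, zero the buffer, and give the pages back if the IOVA mapping fails. A sketch of that body (the enclosing function name and its dev/size/dma_handle/flags parameters are assumed, not shown in the matches):

        struct page *page = NULL;
        unsigned long count;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        count = size >> PAGE_SHIFT;

        if (gfpflags_allow_blocking(flags))
                page = dma_alloc_from_contiguous(dev, count, order,
                                                 flags & __GFP_NOWARN);
        if (!page)
                page = alloc_pages(flags, order);
        if (!page)
                return NULL;
        memset(page_address(page), 0, size);

        *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
                                         DMA_BIDIRECTIONAL,
                                         dev->coherent_dma_mask);
        if (*dma_handle)
                return page_address(page);

        /* Mapping failed: hand the pages back to CMA or the page allocator. */
        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, order);
        return NULL;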
3743 struct page *page = virt_to_page(vaddr);
3749 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3750 __free_pages(page, order);
3914 * page aligned, we don't need to use a bounce page.
3985 bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
3988 return bounce_map_single(dev, page_to_phys(page) + offset,
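As with intel_map_page(), the bounce path's per-page entry point at 3985-3988 only computes the physical address and defers the bounce-page decision to bounce_map_single(); the whole wrapper is plausibly just:

static dma_addr_t
bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return bounce_map_single(dev, page_to_phys(page) + offset,
                                 size, dir, attrs, *dev->dma_mask);
}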
4514 pr_warn("%s: Doesn't support large page.\n",
4725 struct page *freelist;
5407 * Knock out extra levels of page tables if necessary
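"Knocking out" extra levels means repeatedly promoting the table the top-level entry points to until the domain's AGAW matches what the IOMMU supports; a sketch of that loop, assuming the driver's dma_pte_present(), dma_pte_addr() and free_pgtable_page() helpers:

        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        /* Drop the top level, keep the table it pointed to. */
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }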
5486 * owns the first level page tables. Invalidations of translation caches in the
5489 * vIOMMU in the guest will only expose first level page tables, therefore
5493 * type and page selective granularity within PASID:
5503 * page selective (address granularity)
5691 the low bits of hpa would take us onto the next page */
5703 struct page *freelist = NULL;
5709 size argument if it happens to be a large-page mapping. */
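A sketch of how the unmap path around 5703-5709 handles this: the range is computed in VT-d PFNs, the page tables are torn down via domain_unmap() (1288), and the collected pages are freed with dma_free_pagelist() (1314) only after the flush. VTD_PAGE_SHIFT and the surrounding variables are assumptions here:

        unsigned long start_pfn, last_pfn;
        struct page *freelist;

        /* In the driver, 'size' is first rounded up to the full large-page
         * size of the mapping at 'iova' (that step is elided here). */
        start_pfn = iova >> VTD_PAGE_SHIFT;
        last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

        freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
        /* IOTLB flush for [start_pfn, last_pfn] goes here, before the free. */
        dma_free_pagelist(freelist);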