Lines Matching defs:page
26 * device page from the process address space. Such
27 * page is not CPU accessible and thus is mapped as
29 * count as a valid regular mapping for the page (and
30 * is accounted as such in page maps count).
33 * page mapping i.e., lock CPU page table and returns
55 static inline bool pfn_is_match(struct page *page, unsigned long pfn)
57 unsigned long page_pfn = page_to_pfn(page);
59 /* normal page and hugetlbfs page */
60 if (!PageTransCompound(page) || PageHuge(page))
64 return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
68 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
70 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
74 * page.
77 * entry that points to @pvmw->page or any subpage in case of THP.
80 * @pvmw->page or any subpage in case of THP.
115 return pfn_is_match(pvmw->page, pfn);
126 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
128 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
131 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
132 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
135 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
139 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
140 * regardless of which page table level the page is mapped at. @pvmw->pmd is
143 * Returns false if there are no more page table entries for the page in
152 struct page *page = pvmw->page;
163 if (unlikely(PageHuge(page))) {
169 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
173 pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
183 * any PageKsm page: whose page->index misleads vma_address()
186 end = PageTransCompound(page) ?
187 vma_address_end(page, pvmw->vma) :
223 if (pmd_page(pmde) != page)
235 migration_entry_to_page(entry) != page)
249 PageTransCompound(page)) {
267 /* Did we cross page table boundary? */
295 * page_mapped_in_vma - check whether a page is really mapped in a VMA
296 * @page: the page to test
299 * Returns 1 if the page is mapped into the page tables of the VMA, 0
300 * if the page is not mapped into the page tables of this VMA. Only
303 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
306 .page = page,
311 pvmw.address = vma_address(page, vma);