Lines Matching defs:page

110  * Mask used when checking the page offset value passed in via system
144 * page based offset in vm_pgoff could be sufficiently large to
154 /* must be huge page aligned */
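
The matches at 110-154 concern validating the mmap offset: vm_pgoff is a page-based offset, and for hugetlbfs it must be huge page aligned, which the mask mentioned at 110 checks. As a rough illustration of that mask check, here is a minimal userspace sketch, assuming 4 KiB base pages and 2 MiB huge pages (both sizes are assumptions, not taken from the listing):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed 4 KiB base pages */
#define HPAGE_SHIFT	21		/* assumed 2 MiB huge pages */

/* Low vm_pgoff bits that must be zero for a huge-page-aligned offset. */
#define HPAGE_INDEX_MASK ((1UL << (HPAGE_SHIFT - PAGE_SHIFT)) - 1)

/* Return true if a page-based file offset is huge page aligned. */
static bool pgoff_is_huge_aligned(unsigned long vm_pgoff)
{
	return (vm_pgoff & HPAGE_INDEX_MASK) == 0;
}

int main(void)
{
	/* 512 base pages == 2 MiB, so the first offset is aligned, the second is not. */
	printf("%d %d\n", pgoff_is_huge_aligned(512), pgoff_is_huge_aligned(513));
	return 0;
}
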
297 * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
303 static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
309 page = nth_page(page, offset / PAGE_SIZE);
312 if (is_raw_hwpoison_page_in_hugepage(page))
323 page = nth_page(page, 1);
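
The matches at 297-323 come from adjust_range_hwpoison(): a read that overlaps a HWPOISON hugetlb page is clamped so it stops at the first poisoned base page. A hedged userspace analog of that clamping, with a plain flag array standing in for the per-subpage poison state (the array and function names are invented; the kernel walks real struct pages with nth_page() and is_raw_hwpoison_page_in_hugepage()):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed base page size */

/*
 * Analog of adjust_range_hwpoison(): given a per-subpage poison map,
 * return how many of @bytes starting at @offset can be copied before
 * the first poisoned subpage is reached.
 */
static size_t safe_bytes_before_poison(const bool *subpage_poisoned,
				       size_t nr_subpages,
				       size_t offset, size_t bytes)
{
	size_t idx = offset / PAGE_SIZE;
	size_t safe = 0;
	/* The first subpage may be entered mid-way; later ones are whole. */
	size_t n = PAGE_SIZE - offset % PAGE_SIZE;

	while (bytes && idx < nr_subpages && !subpage_poisoned[idx]) {
		if (n > bytes)
			n = bytes;
		safe += n;
		bytes -= n;
		idx++;
		n = PAGE_SIZE;
	}
	return safe;
}

int main(void)
{
	bool poison[4] = { false, true, false, false };

	/* Only the tail of subpage 0 (4096 - 100 bytes) is safe to copy. */
	printf("%zu\n", safe_bytes_before_poison(poison, 4, 100, 10000));
	return 0;
}
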
332 * Support for read() - Find the page attached to f_mapping and copy out the
348 struct page *page;
351 /* nr is the maximum number of bytes to copy from this page */
366 /* Find the page */
367 page = find_lock_page(mapping, index);
368 if (unlikely(page == NULL)) {
375 unlock_page(page);
377 if (!PageHWPoison(page))
385 want = adjust_range_hwpoison(page, offset, nr);
387 put_page(page);
394 * We have the page, copy it to user space buffer.
396 copied = copy_page_to_iter(page, offset, want, to);
397 put_page(page);
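
The matches at 332-397 outline the read() path: derive a page index and intra-page offset from the file position, find and lock the page in f_mapping, handle the missing and HWPOISON cases, then copy at most nr bytes from this page to the iterator and drop the reference. A simplified userspace analog of just the position arithmetic and per-page copy loop (all names here are invented; the kernel path uses find_lock_page() and copy_page_to_iter()):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL	/* assumed size of the backing pages */

/*
 * Analog of the read loop: copy up to @len bytes starting at @pos from a
 * "file" made of @nr_pages pages into @dst, page by page.  Returns the
 * number of bytes copied; a missing page (NULL) reads back as zeroes.
 */
static size_t read_from_pages(char **pages, size_t nr_pages, size_t isize,
			      size_t pos, char *dst, size_t len)
{
	size_t copied = 0;

	while (len && pos < isize) {
		size_t index = pos / PAGE_SIZE;		/* which page */
		size_t offset = pos % PAGE_SIZE;	/* where in that page */
		/* nr is the maximum number of bytes to copy from this page */
		size_t nr = PAGE_SIZE - offset;

		if (nr > len)
			nr = len;
		if (nr > isize - pos)
			nr = isize - pos;

		if (index < nr_pages && pages[index])
			memcpy(dst + copied, pages[index] + offset, nr);
		else
			memset(dst + copied, 0, nr);	/* hole: zero fill */

		copied += nr;
		pos += nr;
		len -= nr;
	}
	return copied;
}

int main(void)
{
	char page0[PAGE_SIZE] = "hello";
	char *pages[2] = { page0, NULL };
	char buf[32];

	/* Read 10 bytes starting at offset 2 of a 6-byte "file": copies 4. */
	printf("%zu\n", read_from_pages(pages, 2, 6, 2, buf, 10));
	return 0;
}
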
416 struct page **pagep, void **fsdata)
423 struct page *page, void *fsdata)
439 * mutex for the page in the mapping. So, we can not race with page being
443 unsigned long addr, struct page *page)
455 if (pte_page(pte) == page)
500 struct page *page = &folio->page;
516 if (!hugetlb_vma_maps_page(vma, v_start, page))
562 * still maps page and if so, unmap.
566 if (hugetlb_vma_maps_page(vma, v_start, page))
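
The matches at 439-566 belong to hugetlb_vma_maps_page() and its caller: before unmapping, each VMA is checked to see whether it still maps the page by comparing pte_page(pte) with the page, and the locking described at 439 is what keeps that check from racing with the page being faulted back in. A toy analog of the "check, then unmap only if still mapped" step, with a flat slot table standing in for the page tables (purely illustrative, not the kernel's page table walker):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* One slot per virtual page in a toy "VMA": the mapped page, or NULL. */
struct toy_vma {
	void **slots;
	size_t nr_slots;
};

/* Analog of hugetlb_vma_maps_page(): does any slot still map @page? */
static bool vma_maps_page(const struct toy_vma *vma, const void *page)
{
	for (size_t i = 0; i < vma->nr_slots; i++)
		if (vma->slots[i] == page)
			return true;
	return false;
}

/* Unmap @page from the vma, but only if it is still mapped there. */
static void unmap_page_if_mapped(struct toy_vma *vma, const void *page)
{
	if (!vma_maps_page(vma, page))
		return;
	for (size_t i = 0; i < vma->nr_slots; i++)
		if (vma->slots[i] == page)
			vma->slots[i] = NULL;
}

int main(void)
{
	int a, b;
	void *slots[3] = { &a, &b, &a };
	struct toy_vma vma = { slots, 3 };

	unmap_page_if_mapped(&vma, &a);
	printf("%d\n", vma_maps_page(&vma, &a));	/* 0: no longer mapped */
	return 0;
}
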
611 * Returns true if page was actually removed, false otherwise.
631 * We must remove the folio from page cache before removing
658 * During faults, hugetlb_no_page() checks i_size before page allocation,
659 * and again after obtaining page table lock. It will 'back out'
663 * Only when releasing a page is the associated region/reserve map
666 * This is indicated if we find a mapped page.
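
The matches at 611-666 describe the ordering that truncation and hole punching rely on: hugetlb_no_page() checks i_size before allocating, rechecks it after taking the page table lock, and backs the allocation out if a truncate raced in between. A small pthread sketch of that "optimistic check, allocate, recheck under lock, back out" pattern (types and names invented for illustration):

#include <pthread.h>
#include <stdlib.h>

/* Toy "inode": its size plus the lock a racing truncate would also take. */
struct toy_inode {
	pthread_mutex_t lock;
	long i_size;			/* in pages */
};

/*
 * Fault-like path: populate @index only if it is still within i_size.
 * The allocation happens outside the lock; the size is rechecked under
 * the lock, and the allocation is backed out if a truncate won the race.
 */
static void *fault_in_page(struct toy_inode *inode, long index)
{
	void *page;

	if (index >= inode->i_size)		/* cheap check before allocating */
		return NULL;

	page = malloc(4096);			/* "allocate" outside the lock */
	if (!page)
		return NULL;

	pthread_mutex_lock(&inode->lock);
	if (index >= inode->i_size) {		/* recheck: truncate may have run */
		pthread_mutex_unlock(&inode->lock);
		free(page);			/* back out */
		return NULL;
	}
	/* ... insert the page into the mapping here ... */
	pthread_mutex_unlock(&inode->lock);
	return page;
}

int main(void)
{
	struct toy_inode inode = { PTHREAD_MUTEX_INITIALIZER, 8 };

	free(fault_in_page(&inode, 3));		/* within i_size: populated */
	free(fault_in_page(&inode, 20));	/* beyond i_size: NULL */
	return 0;
}
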
794 /* If range starts before first full page, zero partial page. */
807 /* If range extends beyond last full page, zero partial page. */
847 * as well as being converted to page offsets.
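
The matches at 794-847 are the hole punch path: if the requested range does not start or end on a full (huge) page boundary, the partial head and tail pages are zeroed in place, only the fully covered pages are removed, and the boundaries are converted to page offsets. A hedged sketch of just that boundary arithmetic (the 2 MiB page size in the example is an assumption):

#include <stdio.h>

/*
 * For a punch-hole request [off, off + len), report:
 *  - the partial head region to zero, if the range starts mid-page,
 *  - the partial tail region to zero, if the range ends mid-page,
 *  - the fully covered pages to remove, expressed as page offsets.
 */
static void hole_punch_bounds(unsigned long long off, unsigned long long len,
			      unsigned long long page_size)
{
	unsigned long long end = off + len;
	unsigned long long head_end = (off + page_size - 1) / page_size * page_size;
	unsigned long long tail_start = end / page_size * page_size;
	unsigned long long first = head_end / page_size;
	unsigned long long last = tail_start / page_size;

	if (off % page_size)			/* range starts before a full page */
		printf("zero head: [%llu, %llu)\n",
		       off, head_end < end ? head_end : end);
	if (end % page_size && tail_start > off)/* range extends beyond last full page */
		printf("zero tail: [%llu, %llu)\n", tail_start, end);
	if (first < last)
		printf("remove page offsets [%llu, %llu)\n", first, last);
}

int main(void)
{
	/* Punch [3 MiB, 9 MiB) with 2 MiB huge pages (sizes assumed). */
	hole_punch_bounds(3ULL << 20, 6ULL << 20, 2ULL << 20);
	return 0;
}

For that example the head [3 MiB, 4 MiB) and tail [8 MiB, 9 MiB) are zeroed in place, and page offsets 2 and 3 are removed.
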
865 * Initialize a pseudo vma as this is required by the huge page
866 * allocation routines. If NUMA is configured, use page index
875 * This is supposed to be the vaddr where the page is being
923 clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
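
The matches at 865-923 set up a pseudo VMA so the huge page allocation routines can apply the file's NUMA policy, using the page index so an interleave policy spreads allocations across nodes, and then clear the newly allocated huge page. A tiny sketch of the interleave-by-index idea and of clearing a huge page in base page sized chunks (node count and page sizes are assumed; this is not the kernel's mempolicy code):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL		/* assumed base page size */
#define NR_NODES	4		/* assumed number of NUMA nodes */

/* Interleave: derive a node from the page index, as an interleave policy would. */
static unsigned int node_for_index(unsigned long index)
{
	return index % NR_NODES;
}

/* Clear a huge page one base page at a time, in the spirit of clear_huge_page(). */
static void clear_huge(void *hpage, unsigned long pages_per_huge_page)
{
	for (unsigned long i = 0; i < pages_per_huge_page; i++)
		memset((char *)hpage + i * PAGE_SIZE, 0, PAGE_SIZE);
}

int main(void)
{
	static char hpage[512 * PAGE_SIZE];	/* assumed 2 MiB huge page */

	printf("index 7 -> node %u\n", node_for_index(7));
	clear_huge(hpage, sizeof(hpage) / PAGE_SIZE);
	return 0;
}
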
1021 * page allocations.
1169 struct page *page)
1451 pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1484 * Use huge page pool size (in hstate) to convert the size
1697 pr_err("Cannot mount internal hugetlbfs for page size %luK",
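
The match at 1484 notes that the huge page pool size in the hstate is used to convert the mount size option. A hedged arithmetic sketch of that conversion, assuming the option is either a byte count or a percentage of the pool (the field and function names here are invented):

#include <stdio.h>

/*
 * Convert a hugetlbfs-style size option to a number of huge pages: a byte
 * count is rounded down to whole huge pages, and a percentage is taken of
 * the huge page pool size, as the comment at 1484 suggests.
 */
static unsigned long size_to_hpages(unsigned long long size_opt, int is_percent,
				    unsigned long pool_pages,
				    unsigned long long huge_page_size)
{
	if (is_percent)
		return (unsigned long)(size_opt * pool_pages / 100);
	return (unsigned long)(size_opt / huge_page_size);
}

int main(void)
{
	/* "size=50%" of a 1024-page pool, and "size=1G" with 2 MiB pages. */
	printf("%lu\n", size_to_hpages(50, 1, 1024, 2ULL << 20));
	printf("%lu\n", size_to_hpages(1ULL << 30, 0, 1024, 2ULL << 20));
	return 0;
}

Both calls come out to 512 huge pages, which is the point of expressing the size in pool units.
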