Lines matching refs:page in fs/hugetlbfs/inode.c

122 * Mask used when checking the page offset value passed in via system
155 * page based offset in vm_pgoff could be sufficiently large to
165 /* must be huge page aligned */
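These first hits look like the offset checks in hugetlbfs_file_mmap(); a minimal sketch under that assumption (PGOFF_LOFFT_MAX, hstate_file() and huge_page_mask() are the mainline names, not confirmed by the hits above):

        struct hstate *h = hstate_file(file);

        /* a page-based vm_pgoff can overflow a signed loff_t once converted to bytes */
        if (sizeof(unsigned long) == sizeof(loff_t)) {
                if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
                        return -EINVAL;
        }

        /* must be huge page aligned */
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;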
289 hugetlbfs_read_actor(struct page *page, unsigned long offset,
306 n = copy_page_to_iter(&page[i], offset, chunksize, to);
318 * Support for read() - Find the page attached to f_mapping and copy out the
335 struct page *page;
338 /* nr is the maximum number of bytes to copy from this page */
353 /* Find the page */
354 page = find_lock_page(mapping, index);
355 if (unlikely(page == NULL)) {
362 unlock_page(page);
365 * We have the page, copy it to user space buffer.
367 copied = hugetlbfs_read_actor(page, offset, to, nr);
368 put_page(page);
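The hits between 318 and 368 together form the per-page step of the read path; a rough sketch, with nr/index/offset taken to be the local variables the surrounding code computes (not shown in the hits):

        /* nr = maximum number of bytes still wanted from this huge page */
        page = find_lock_page(mapping, index);
        if (unlikely(page == NULL)) {
                /* hole in the file: copy zeroes to the user buffer instead */
                copied = iov_iter_zero(nr, to);
        } else {
                /* the copy itself does not need the page lock */
                unlock_page(page);
                copied = hugetlbfs_read_actor(page, offset, to, nr);
                put_page(page);
        }

hugetlbfs_read_actor() walks the compound page in PAGE_SIZE chunks, handing each subpage to copy_page_to_iter() as the hit at 306 shows.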
387 struct page **pagep, void **fsdata)
394 struct page *page, void *fsdata)
400 static void remove_huge_page(struct page *page)
402 ClearPageDirty(page);
403 ClearPageUptodate(page);
404 delete_from_page_cache(page);
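Assembled (only the braces are missing from the hits above), the helper reads:

static void remove_huge_page(struct page *page)
{
        ClearPageDirty(page);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}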
454 * page faults in the truncated range by checking i_size. i_size is
458 * Only when releasing a page is the associated region/reserv map
461 * This is indicated if we find a mapped page.
490 struct page *page = pvec.pages[i];
493 index = page->index;
499 * page faults. Races are not possible in the
506 * If page is mapped, it was faulted in after being
509 * until we finish removing the page.
514 if (unlikely(page_mapped(page))) {
526 lock_page(page);
528 * We must free the huge page and remove from page
536 VM_BUG_ON(PagePrivate(page));
537 remove_huge_page(page);
545 unlock_page(page);
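The hits from 454 to 545 come from the loop that removes pages on truncate/hole punch; a sketch of the per-page body, assuming the mainline hugetlb_vmdelete_list() helper and that h/index/mapping are the loop's locals:

        /*
         * i_size has already been lowered (for truncate), so new faults in
         * the removed range are refused; a page still mapped here was
         * faulted in earlier and is unmapped before being dropped.
         */
        if (unlikely(page_mapped(page))) {
                i_mmap_lock_write(mapping);
                hugetlb_vmdelete_list(&mapping->i_mmap,
                        index * pages_per_huge_page(h),
                        (index + 1) * pages_per_huge_page(h));
                i_mmap_unlock_write(mapping);
        }

        lock_page(page);
        VM_BUG_ON(PagePrivate(page));
        remove_huge_page(page);
        unlock_page(page);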
656 * as well as being converted to page offsets.
674 * Initialize a pseudo vma as this is required by the huge page
675 * allocation routines. If NUMA is configured, use page index
684 * This is supposed to be the vaddr where the page is being
687 struct page *page;
717 page = find_get_page(mapping, index);
718 if (page) {
719 put_page(page);
725 /* Allocate page and add to page cache */
726 page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
728 if (IS_ERR(page)) {
730 error = PTR_ERR(page);
733 clear_huge_page(page, addr, pages_per_huge_page(h));
734 __SetPageUptodate(page);
735 error = huge_add_to_page_cache(page, mapping, index);
737 put_page(page);
744 set_page_huge_active(page);
749 unlock_page(page);
750 put_page(page);
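The hits from 656 to 750 are the preallocation loop of the fallocate path; a sketch of the per-index body under that assumption (the fault-mutex locking the real code takes around this is omitted, and pseudo_vma/addr/avoid_reserve are the locals set up earlier):

        /* skip indexes that already have a page in the cache */
        page = find_get_page(mapping, index);
        if (page) {
                put_page(page);
                continue;
        }

        /* allocate, zero and insert a new huge page */
        page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
        if (IS_ERR(page)) {
                error = PTR_ERR(page);
                goto out;
        }
        clear_huge_page(page, addr, pages_per_huge_page(h));
        __SetPageUptodate(page);

        error = huge_add_to_page_cache(page, mapping, index);
        if (unlikely(error)) {
                put_page(page);
                goto out;
        }

        set_page_huge_active(page);
        /* huge_add_to_page_cache() left the page locked; the cache now holds a ref */
        unlock_page(page);
        put_page(page);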
833 * page allocations.
955 * mark the head page dirty
957 static int hugetlbfs_set_page_dirty(struct page *page)
959 struct page *head = compound_head(page);
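Completed, the dirty hook presumably reads (only the head of the compound page carries the dirty bit):

static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}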
966 struct page *newpage, struct page *page,
971 rc = migrate_huge_page_move_mapping(mapping, newpage, page);
977 * new page. PagePrivate is not associated with page_private for
981 if (page_private(page)) {
982 set_page_private(newpage, page_private(page));
983 set_page_private(page, 0);
987 migrate_page_copy(newpage, page);
989 migrate_page_states(newpage, page);
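A sketch of the migration callback the hits at 966-989 belong to, assuming the MIGRATE_SYNC_NO_COPY split suggested by the two copy/state lines; page_private on a hugetlb head page carries the subpool pointer, which is why it is moved by hand:

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (page_private(page)) {
                set_page_private(newpage, page_private(page));
                set_page_private(page, 0);
        }

        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
                migrate_page_states(newpage, page);

        return MIGRATEPAGE_SUCCESS;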
995 struct page *page)
998 pgoff_t index = page->index;
1000 remove_huge_page(page);
1283 pr_err("Unsupported page size %lu MB\n", ps >> 20);
1315 * Use huge page pool size (in hstate) to convert the size
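The conversion mentioned at 1315 is, in mainline, a small helper that turns the size= mount option into a huge page count; a hedged sketch (hugetlbfs_size_to_hpages, SIZE_PERCENT and NO_SIZE are the mainline names and may differ here):

static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                         enum hugetlbfs_size_type val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        /* a percentage is scaled against the configured huge page pool */
        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}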
1535 pr_err("Cannot mount internal hugetlbfs for page size %uK",