Lines Matching refs:page

13 #include <linux/page-isolation.h>
204 struct page *page;
217 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
219 if (page)
220 put_page(page);
236 struct page *page;
239 xas_for_each(&xas, page, end_index) {
242 if (!xa_is_value(page))
247 swap = radix_to_swp_entry(page);
248 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
250 if (page)
251 put_page(page);
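
The matches at lines 204-251 look like the MADV_WILLNEED paths of the kernel's madvise() implementation (mm/madvise.c in v5.x-era trees): for every swap entry found while walking the page tables, and for every swap entry stored in a shmem mapping's xarray, read_swap_cache_async() starts an asynchronous swap-in and the reference is dropped immediately, leaving the page parked in the swap cache for the fault that follows. A minimal userspace sketch that exercises this path on an anonymous region (names and sizes are illustrative only):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 64UL << 20;        /* 64 MiB of anonymous memory */
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            memset(buf, 0xaa, len); /* touch it so it can later be swapped out */

            /*
             * Hint that the range will be needed soon.  If parts of it were
             * swapped out under memory pressure, the kernel starts async
             * swap-in (the read_swap_cache_async() calls above) instead of
             * waiting for the next fault.
             */
            if (madvise(buf, len, MADV_WILLNEED))
                    perror("madvise(MADV_WILLNEED)");

            munmap(buf, len);
            return 0;
    }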
323 struct page *page = NULL;
349 page = pmd_page(orig_pmd);
351 /* Do not interfere with other mappings of this page */
352 if (page_mapcount(page) != 1)
358 get_page(page);
360 lock_page(page);
361 err = split_huge_page(page);
362 unlock_page(page);
363 put_page(page);
377 ClearPageReferenced(page);
378 test_and_clear_page_young(page);
380 if (!isolate_lru_page(page)) {
381 if (PageUnevictable(page))
382 putback_lru_page(page);
384 list_add(&page->lru, &page_list);
387 deactivate_page(page);
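
Lines 323-387 are the huge-PMD branch of the cold/pageout walk: a THP with more than one mapping is skipped; otherwise it is pinned, locked and split with split_huge_page(), and the resulting base page has its referenced/young state cleared before being either isolated onto a private list for reclaim (MADV_PAGEOUT) or moved to the inactive LRU (MADV_COLD). A condensed, hypothetical sketch of that final step, using only the helpers visible above and written as if it lived inside the same file (so the usual mm headers are already available):

    /*
     * Hypothetical condensation of lines 377-387 (and 466-476 below);
     * not the real kernel function.  "pageout" distinguishes
     * MADV_PAGEOUT from MADV_COLD.
     */
    static void cold_or_pageout_one_page(struct page *page, bool pageout,
                                         struct list_head *page_list)
    {
            /* Drop recent-access state so reclaim sees the page as cold. */
            ClearPageReferenced(page);
            test_and_clear_page_young(page);

            if (pageout) {
                    /* MADV_PAGEOUT: take the page off its LRU list ... */
                    if (!isolate_lru_page(page)) {
                            if (PageUnevictable(page))
                                    putback_lru_page(page);
                            else
                                    /* ... and queue it for reclaim. */
                                    list_add(&page->lru, page_list);
                    }
            } else {
                    /* MADV_COLD: just move it to the inactive LRU. */
                    deactivate_page(page);
            }
    }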
412 page = vm_normal_page(vma, addr, ptent);
413 if (!page)
417 * Creating a THP page is expensive so split it only if we
420 if (PageTransCompound(page)) {
421 if (page_mapcount(page) != 1)
423 get_page(page);
424 if (!trylock_page(page)) {
425 put_page(page);
429 if (split_huge_page(page)) {
430 unlock_page(page);
431 put_page(page);
435 unlock_page(page);
436 put_page(page);
444 * Do not interfere with other mappings of this page and
445 * non-LRU page.
447 if (!PageLRU(page) || page_mapcount(page) != 1)
450 VM_BUG_ON_PAGE(PageTransCompound(page), page);
461 * We are deactivating a page for accelerating reclaiming.
462 * VM couldn't reclaim the page unless we clear PG_young.
463 * As a side effect, it makes confuse idle-page tracking
466 ClearPageReferenced(page);
467 test_and_clear_page_young(page);
469 if (!isolate_lru_page(page)) {
470 if (PageUnevictable(page))
471 putback_lru_page(page);
473 list_add(&page->lru, &page_list);
476 deactivate_page(page);
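
Lines 412-476 are the pte-level loop of the same walk: vm_normal_page() resolves each pte, a compound page met here is split only when this mapping is its sole user (trylock_page() plus split_huge_page(), backing off otherwise), and the surviving base page then goes through the same clear-referenced / isolate-or-deactivate sequence sketched above. From userspace the two behaviours are selected purely by the advice value; a small sketch, reusing buf and len from the MADV_WILLNEED example:

    /* MADV_COLD (Linux 5.4+): age the range without reclaiming it now. */
    if (madvise(buf, len, MADV_COLD))
            perror("madvise(MADV_COLD)");

    /* MADV_PAGEOUT (Linux 5.4+): reclaim the range right away. */
    if (madvise(buf, len, MADV_PAGEOUT))
            perror("madvise(MADV_PAGEOUT)");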
586 struct page *page;
608 * If the pte has swp_entry, just clear page table to
610 * (page allocation + zeroing).
624 page = vm_normal_page(vma, addr, ptent);
625 if (!page)
629 * If pmd isn't transhuge but the page is THP and
633 if (PageTransCompound(page)) {
634 if (page_mapcount(page) != 1)
636 get_page(page);
637 if (!trylock_page(page)) {
638 put_page(page);
642 if (split_huge_page(page)) {
643 unlock_page(page);
644 put_page(page);
648 unlock_page(page);
649 put_page(page);
656 VM_BUG_ON_PAGE(PageTransCompound(page), page);
658 if (PageSwapCache(page) || PageDirty(page)) {
659 if (!trylock_page(page))
662 * If page is shared with others, we couldn't clear
663 * PG_dirty of the page.
665 if (page_mapcount(page) != 1) {
666 unlock_page(page);
670 if (PageSwapCache(page) && !try_to_free_swap(page)) {
671 unlock_page(page);
675 ClearPageDirty(page);
676 unlock_page(page);
694 mark_page_lazyfree(page);
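
Lines 586-694 correspond to the MADV_FREE handler: a pte holding a swap entry is simply cleared (cheaper than swapping the data back in just to discard it), a page that is in the swap cache or dirty first has its swap slot freed and PG_dirty cleared, but only while no one else maps it, and the page is finally marked lazy-free with mark_page_lazyfree(). The userspace contract this implements, as a self-contained sketch (MADV_FREE needs Linux 4.5+ and applies to private anonymous mappings):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16UL << 20;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            memset(buf, 0x55, len);

            /*
             * MADV_FREE: the contents may be thrown away lazily under
             * memory pressure instead of being written to swap; until
             * that happens, reads still see the old data.
             */
            if (madvise(buf, len, MADV_FREE))
                    perror("madvise(MADV_FREE)");

            /* Writing to a page again cancels the pending free for it. */
            buf[0] = 1;

            munmap(buf, len);
            return 0;
    }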
988 struct page *page;
991 ret = get_user_pages_fast(start, 1, 0, &page);
994 pfn = page_to_pfn(page);
997 * When soft offlining hugepages, after migrating the page
998 * we dissolve it, therefore in the second loop "page" will
999 * no longer be a compound page.
1001 size = page_size(compound_head(page));
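
Lines 988-1001 look like the error-injection path (MADV_HWPOISON / MADV_SOFT_OFFLINE, built with CONFIG_MEMORY_FAILURE): each page of the range is pinned with get_user_pages_fast() to obtain its pfn, and the loop advances by page_size(compound_head(page)) because a soft-offlined hugepage is dissolved after migration and is no longer compound on the next pass. A hedged userspace trigger, reusing buf from the earlier examples (this normally needs CAP_SYS_ADMIN and a kernel with CONFIG_MEMORY_FAILURE):

    /*
     * Soft-offline the first page of buf: the kernel migrates its
     * contents and retires the physical page, without injecting a
     * fault into the caller (unlike MADV_HWPOISON).
     */
    long pagesz = sysconf(_SC_PAGESIZE);    /* needs <unistd.h> */

    if (madvise(buf, pagesz, MADV_SOFT_OFFLINE))
            perror("madvise(MADV_SOFT_OFFLINE)");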
1242 * page out the pages in this range immediately.
1246 * -EINVAL - start + len < 0, start is not page-aligned,
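
Lines 1242-1246 are fragments of the syscall's documentation block: MADV_PAGEOUT pages the range out immediately, and -EINVAL covers, among other cases, start + len overflowing or start not being page-aligned. A tiny sketch of that last rule (buf and pagesz as above, <errno.h> assumed):

    /* A misaligned start is rejected with EINVAL before any work is done. */
    errno = 0;
    if (madvise(buf + 1, pagesz, MADV_PAGEOUT) == -1 && errno == EINVAL)
            fprintf(stderr, "madvise: start must be page-aligned\n");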