Lines matching refs:newpage (references to the symbol newpage in mm/migrate.c; each entry is the source line number followed by the matching line)

404 		struct page *newpage, struct page *page, int extra_count)
418 newpage->index = page->index;
419 newpage->mapping = page->mapping;
421 __SetPageSwapBacked(newpage);
427 newzone = page_zone(newpage);
444 newpage->index = page->index;
445 newpage->mapping = page->mapping;
446 page_ref_add(newpage, nr); /* add cache reference */
448 __SetPageSwapBacked(newpage);
450 SetPageSwapCache(newpage);
451 set_page_private(newpage, page_private(page));
457 /* Move dirty while page refs frozen and newpage not yet exposed */
461 SetPageDirty(newpage);
464 xas_store(&xas, newpage);
470 xas_store(&xas, newpage);
526 struct page *newpage, struct page *page)
543 newpage->index = page->index;
544 newpage->mapping = page->mapping;
546 get_page(newpage);
548 xas_store(&xas, newpage);
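
The hits at 404-470 and 526-548 show how newpage takes over the page-cache slot: the old page's reference count is frozen, newpage inherits index/mapping and the cache reference(s), and xas_store() installs it before the count is unfrozen. Below is a simplified, hedged sketch of that pattern, modelled on the hugepage variant; example_move_mapping and its expected_count parameter are hypothetical, and the real migrate_page_move_mapping() additionally handles the swap cache, the dirty bit and zone statistics.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/migrate.h>

static int example_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int expected_count)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));

	xas_lock_irq(&xas);
	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;		/* raced with another user of the page */
	}

	/* newpage takes over the old page's place in the cache. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	get_page(newpage);		/* the cache's reference to newpage */

	xas_store(&xas, newpage);	/* old page no longer reachable via the cache */

	page_ref_unfreeze(page, expected_count - 1);
	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}
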
608 void migrate_page_states(struct page *newpage, struct page *page)
613 SetPageError(newpage);
615 SetPageReferenced(newpage);
617 SetPageUptodate(newpage);
620 SetPageActive(newpage);
622 SetPageUnevictable(newpage);
624 SetPageWorkingset(newpage);
626 SetPageChecked(newpage);
628 SetPageMappedToDisk(newpage);
632 SetPageDirty(newpage);
635 set_page_young(newpage);
637 set_page_idle(newpage);
641 SetPageXPMWritetainted(newpage);
644 SetPageXPMReadonly(newpage);
651 page_cpupid_xchg_last(newpage, cpupid);
653 ksm_migrate_page(newpage, page);
667 if (PageWriteback(newpage))
668 end_page_writeback(newpage);
676 SetPageReadahead(newpage);
678 copy_page_owner(page, newpage);
681 mem_cgroup_migrate(page, newpage);
685 void migrate_page_copy(struct page *newpage, struct page *page)
688 copy_huge_page(newpage, page);
690 copy_highpage(newpage, page);
692 migrate_page_states(newpage, page);
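
Lines 608-692 transfer per-page state (error, referenced, uptodate, dirty, cpupid, page owner, memcg and the vendor XPM bits), while migrate_page_copy() also copies the data. A minimal hedged sketch of how a caller picks between the two, mirroring the migrate_page_extra() hits at 708-716 below; example_copy_step is a hypothetical helper.

#include <linux/mm.h>
#include <linux/migrate.h>

static void example_copy_step(struct page *newpage, struct page *page,
			      enum migrate_mode mode)
{
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);	/* contents + state */
	else
		migrate_page_states(newpage, page);	/* state only; caller copies the data, e.g. by DMA */
}
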
701 struct page *newpage, struct page *page,
708 rc = migrate_page_move_mapping(mapping, newpage, page, extra_count);
714 migrate_page_copy(newpage, page);
716 migrate_page_states(newpage, page);
727 struct page *newpage, struct page *page,
730 return migrate_page_extra(mapping, newpage, page, mode, 0);
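
migrate_page() at 727-730 is the generic ->migratepage implementation: move the mapping, then copy. Hedged sketch of the usual wiring for a mapping with no private page data (example_aops is hypothetical):

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations example_aops = {
	.migratepage	= migrate_page,		/* generic move + copy, 727-730 above */
};
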
774 struct page *newpage, struct page *page, enum migrate_mode mode,
782 return migrate_page(mapping, newpage, page, mode);
820 rc = migrate_page_move_mapping(mapping, newpage, page, 0);
824 attach_page_private(newpage, detach_page_private(page));
828 set_bh_page(bh, newpage, bh_offset(bh));
834 migrate_page_copy(newpage, page);
836 migrate_page_states(newpage, page);
858 struct page *newpage, struct page *page, enum migrate_mode mode)
860 return __buffer_migrate_page(mapping, newpage, page, mode, false);
871 struct page *newpage, struct page *page, enum migrate_mode mode)
873 return __buffer_migrate_page(mapping, newpage, page, mode, true);
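
The __buffer_migrate_page() hits at 774-873 re-home buffer_heads onto newpage (attach_page_private(), set_bh_page()). Hedged sketch of how a block-backed mapping would opt into that path (example_bh_aops is hypothetical):

#include <linux/fs.h>
#include <linux/buffer_head.h>

static const struct address_space_operations example_bh_aops = {
	.migratepage	= buffer_migrate_page,	/* moves the buffers along with the data */
};
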
922 struct page *newpage, struct page *page, enum migrate_mode mode)
944 return migrate_page(mapping, newpage, page, mode);
958 static int move_to_new_page(struct page *newpage, struct page *page,
966 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
972 rc = migrate_page(mapping, newpage, page, mode);
981 rc = mapping->a_ops->migratepage(mapping, newpage,
984 rc = fallback_migrate_page(mapping, newpage,
998 rc = mapping->a_ops->migratepage(mapping, newpage,
1027 if (likely(!is_zone_device_page(newpage))) {
1028 int i, nr = compound_nr(newpage);
1031 flush_dcache_page(newpage + i);
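
move_to_new_page() at 958-1031 picks the migration routine for newpage. The sketch below is only a paraphrase of that dispatch (972/981/984 above), not new behaviour; fallback_migrate_page() is static to mm/migrate.c, so its branch is represented by a labelled placeholder return.

#include <linux/fs.h>
#include <linux/migrate.h>

static int example_dispatch(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	if (!mapping)
		/* anonymous page: generic move + copy */
		return migrate_page(mapping, newpage, page, mode);

	if (mapping->a_ops->migratepage)
		/* filesystem- or driver-provided callback */
		return mapping->a_ops->migratepage(mapping, newpage, page, mode);

	/*
	 * mm/migrate.c would call its static fallback_migrate_page() here,
	 * which writes the page back or retries; a placeholder stands in.
	 */
	return -EAGAIN;
}
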
1038 static int __unmap_and_move(struct page *page, struct page *newpage,
1109 * holding a reference to newpage at this point. We used to have a BUG
1110 * here if trylock_page(newpage) fails, but would like to allow for
1111 * cases where there might be a race with the previous use of newpage.
1114 if (unlikely(!trylock_page(newpage)))
1118 rc = move_to_new_page(newpage, page, mode);
1149 rc = move_to_new_page(newpage, page, mode);
1153 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1156 unlock_page(newpage);
1164 * If migration is successful, decrease refcount of the newpage
1168 * determine if we migrated a LRU page. newpage was already unlocked
1174 put_page(newpage);
1176 putback_lru_page(newpage);
1184 * to the newly allocated page in newpage.
1193 struct page *newpage = NULL;
1211 newpage = get_new_page(page, private);
1212 if (!newpage)
1215 rc = __unmap_and_move(page, newpage, force, mode);
1217 set_page_owner_migrate_reason(newpage, reason);
1266 put_new_page(newpage, private);
1268 put_page(newpage);
2031 struct page *newpage;
2033 newpage = __alloc_pages_node(nid,
2039 return newpage;
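
unmap_and_move() at 1184-1268 obtains newpage from a caller-supplied get_new_page() callback and releases it via put_new_page()/put_page() when migration fails; alloc_misplaced_dst_page() at 2031-2039 is one such callback. Hedged sketch of a minimal new_page_t allocator (example_new_page is hypothetical, and the GFP mask is simplified relative to the NUMA-balancing one):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/migrate.h>

static struct page *example_new_page(struct page *page, unsigned long private)
{
	int nid = (int)private;		/* target NUMA node chosen by the caller */

	return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

/*
 * Typical use, feeding a list of isolated pages to the migration core:
 *	migrate_pages(&pagelist, example_new_page, NULL, nid,
 *		      MIGRATE_SYNC, MR_NUMA_MISPLACED);
 */
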
3018 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3023 if (!newpage) {
3041 migrate_vma_insert_page(migrate, addr, newpage,
3049 if (is_zone_device_page(newpage)) {
3050 if (is_device_private_page(newpage)) {
3070 r = migrate_page_extra(mapping, newpage, page,
3073 r = migrate_page(mapping, newpage, page,
3106 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3110 if (newpage) {
3111 unlock_page(newpage);
3112 put_page(newpage);
3117 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
3118 if (newpage) {
3119 unlock_page(newpage);
3120 put_page(newpage);
3122 newpage = page;
3125 remove_migration_ptes(page, newpage, false);
3133 if (newpage != page) {
3134 unlock_page(newpage);
3135 if (is_zone_device_page(newpage))
3136 put_page(newpage);
3138 putback_lru_page(newpage);
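
migrate_vma_pages() and migrate_vma_finalize() at 3018-3138 install the driver-provided newpage for each migrating pfn and then unlock and release it, falling back to the original page where migration did not happen. Hedged sketch of the surrounding driver flow, assuming the migrate_vma API of this kernel generation; example_migrate_range and the pfn arrays are hypothetical.

#include <linux/mm.h>
#include <linux/migrate.h>

static int example_migrate_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long *src_pfns, unsigned long *dst_pfns)
{
	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= src_pfns,	/* one entry per page in [start, end) */
		.dst	= dst_pfns,
		.flags	= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/*
	 * The driver allocates and locks its destination pages here, copies
	 * the source data into them, and stores
	 * migrate_pfn(page_to_pfn(newpage)) into args.dst[i] for every entry
	 * whose src pfn has MIGRATE_PFN_MIGRATE set.
	 */

	migrate_vma_pages(&args);	/* installs each newpage, 3018-3073 above */
	migrate_vma_finalize(&args);	/* unlocks/releases newpage or restores the old page, 3106-3138 above */

	return 0;
}
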