Lines matching refs:page
86 int isolate_movable_page(struct page *page, isolate_mode_t mode)
94 * In case we 'win' a race for a movable page being freed under us and
97 * release this page, thus avoiding a nasty leakage.
99 if (unlikely(!get_page_unless_zero(page)))
103 * Check PageMovable before holding a PG_lock because page's owner
104 * assumes that nobody touches the PG_lock of a newly allocated page,
105 * so unconditionally grabbing the lock would break the owner's assumptions.
107 if (unlikely(!__PageMovable(page)))
111 * compaction threads can race against page migration functions
112 * as well as race against the release of a page.
114 * In order to avoid having an already isolated movable page
117 * let's be sure we hold the page lock
118 * before proceeding with the movable page isolation steps.
120 if (unlikely(!trylock_page(page)))
123 if (!PageMovable(page) || PageIsolated(page))
126 mapping = page_mapping(page);
127 VM_BUG_ON_PAGE(!mapping, page);
129 if (!mapping->a_ops->isolate_page(page, mode))
132 /* Driver shouldn't use PG_isolated bit of page->flags */
133 WARN_ON_ONCE(PageIsolated(page));
134 __SetPageIsolated(page);
135 unlock_page(page);
140 unlock_page(page);
142 put_page(page);
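The isolate_movable_page()/putback_movable_page() hits above only apply to pages whose owner opted into non-LRU migration. As a minimal, hedged sketch of that owner side (all foo_* names are invented; the callback signatures follow the address_space_operations of this kernel generation, as described in Documentation/vm/page_migration.rst):

        #include <linux/fs.h>
        #include <linux/migrate.h>
        #include <linux/pagemap.h>

        /* All foo_* names are hypothetical; shown only to illustrate the hooks. */
        static bool foo_isolate_page(struct page *page, isolate_mode_t mode)
        {
                /* Pin driver-private state; return true if the page may be moved. */
                return true;
        }

        static int foo_migratepage(struct address_space *mapping,
                                   struct page *newpage, struct page *page,
                                   enum migrate_mode mode)
        {
                /* Copy contents and driver metadata from page to newpage here. */
                return MIGRATEPAGE_SUCCESS;
        }

        static void foo_putback_page(struct page *page)
        {
                /* Undo foo_isolate_page() when migration is aborted. */
        }

        static const struct address_space_operations foo_aops = {
                .isolate_page   = foo_isolate_page,
                .migratepage    = foo_migratepage,
                .putback_page   = foo_putback_page,
        };

        /*
         * On allocation the driver marks the page movable, which is what the
         * __PageMovable()/PageMovable() checks above test for:
         *
         *      __SetPageMovable(page, mapping);   (mapping->a_ops == &foo_aops)
         */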
147 /* It should be called on page which is PG_movable */
148 void putback_movable_page(struct page *page)
152 VM_BUG_ON_PAGE(!PageLocked(page), page);
153 VM_BUG_ON_PAGE(!PageMovable(page), page);
154 VM_BUG_ON_PAGE(!PageIsolated(page), page);
156 mapping = page_mapping(page);
157 mapping->a_ops->putback_page(page);
158 __ClearPageIsolated(page);
166 * built from LRU, balloon, and hugetlbfs pages. See isolate_migratepages_range()
171 struct page *page;
172 struct page *page2;
174 list_for_each_entry_safe(page, page2, l, lru) {
175 if (unlikely(PageHuge(page))) {
176 putback_active_hugepage(page);
179 list_del(&page->lru);
181 * We isolated a non-LRU movable page, so here we can use
182 * __PageMovable because an LRU page's mapping cannot have
185 if (unlikely(__PageMovable(page))) {
186 VM_BUG_ON_PAGE(!PageIsolated(page), page);
187 lock_page(page);
188 if (PageMovable(page))
189 putback_movable_page(page);
191 __ClearPageIsolated(page);
192 unlock_page(page);
193 put_page(page);
195 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
196 page_is_file_lru(page), -thp_nr_pages(page));
197 putback_lru_page(page);
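The comment at lines 181-182 relies on how the movable type is encoded in page->mapping. The sketch below restates that encoding; it mirrors the __PageMovable() helper of this kernel generation (include/linux/page-flags.h) and is shown for illustration only:

        #include <linux/mm.h>
        #include <linux/page-flags.h>

        /* The low bits of page->mapping carry the page type; an LRU page's
         * mapping can never have PAGE_MAPPING_MOVABLE set, which is why the
         * check is safe without holding the page lock. */
        static inline bool example_page_movable(struct page *page)
        {
                return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_MOVABLE;
        }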
205 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
209 .page = old,
214 struct page *new;
218 VM_BUG_ON_PAGE(PageTail(page), page);
220 if (PageKsm(page))
221 new = page;
223 new = page - pvmw.page->index +
229 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
280 if (PageTransHuge(page) && PageMlocked(page))
281 clear_page_mlock(page);
292 * references to the indicated page.
294 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
308 * Something used the pte of a page under migration. We need to
309 * get to the page and wait until migration is finished.
317 struct page *page;
328 page = migration_entry_to_page(entry);
329 page = compound_head(page);
332 * Once page cache replacement of page migration started, page_count
336 if (!get_page_unless_zero(page))
339 put_and_wait_on_page_locked(page);
364 struct page *page;
369 page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
370 if (!get_page_unless_zero(page))
373 put_and_wait_on_page_locked(page);
380 static int expected_page_refs(struct address_space *mapping, struct page *page)
388 expected_count += is_device_private_page(page);
390 expected_count += thp_nr_pages(page) + page_has_private(page);
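To make the arithmetic at line 390 concrete, a worked example under stated assumptions: for an order-0 page-cache page with buffer heads, the expected count is 1 (the isolation/caller reference) + 0 (not a device-private page) + 1 (thp_nr_pages() of a base page) + 1 (page_has_private()) = 3; for a 2MB THP in the page cache without private data it is 1 + 512 = 513. Any extra reference, for example a concurrent get_user_pages() pin, makes page_count() exceed this value and the move is retried or aborted.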
396 * Replace the page in the mapping.
404 struct page *newpage, struct page *page, int extra_count)
406 XA_STATE(xas, &mapping->i_pages, page_index(page));
409 int expected_count = expected_page_refs(mapping, page) + extra_count;
410 int nr = thp_nr_pages(page);
413 /* Anonymous page without mapping */
414 if (page_count(page) != expected_count)
418 newpage->index = page->index;
419 newpage->mapping = page->mapping;
420 if (PageSwapBacked(page))
426 oldzone = page_zone(page);
430 if (page_count(page) != expected_count || xas_load(&xas) != page) {
435 if (!page_ref_freeze(page, expected_count)) {
441 * Now we know that no one else is looking at the page:
444 newpage->index = page->index;
445 newpage->mapping = page->mapping;
447 if (PageSwapBacked(page)) {
449 if (PageSwapCache(page)) {
451 set_page_private(newpage, page_private(page));
454 VM_BUG_ON_PAGE(PageSwapCache(page), page);
457 /* Move dirty while page refs frozen and newpage not yet exposed */
458 dirty = PageDirty(page);
460 ClearPageDirty(page);
465 if (PageTransHuge(page)) {
475 * Drop cache reference from old page by unfreezing
479 page_ref_unfreeze(page, expected_count - nr);
486 * the page for that zone. Other VM counters will be
488 * new page and drop references to the old page.
498 memcg = page_memcg(page);
504 if (PageSwapBacked(page) && !PageSwapCache(page)) {
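The core of migrate_page_move_mapping() above is a freeze/replace/unfreeze dance on the mapping's XArray. A simplified sketch for an order-0 page (error handling, dirty/swap-cache transfer, and the zone/lruvec statistics from the surrounding lines are omitted; this is an illustration, not the function itself):

        #include <linux/migrate.h>
        #include <linux/mm.h>
        #include <linux/pagemap.h>

        static int example_replace_in_mapping(struct address_space *mapping,
                                              struct page *newpage, struct page *page,
                                              int expected_count)
        {
                XA_STATE(xas, &mapping->i_pages, page_index(page));

                xas_lock_irq(&xas);
                if (page_count(page) != expected_count || xas_load(&xas) != page) {
                        xas_unlock_irq(&xas);
                        return -EAGAIN;         /* someone else still references the page */
                }
                if (!page_ref_freeze(page, expected_count)) {
                        xas_unlock_irq(&xas);
                        return -EAGAIN;
                }

                newpage->index = page->index;           /* new page takes over the slot */
                newpage->mapping = page->mapping;
                get_page(newpage);                      /* reference now held by the cache */

                xas_store(&xas, newpage);
                page_ref_unfreeze(page, expected_count - 1);    /* old page loses the cache ref */
                xas_unlock_irq(&xas);

                return MIGRATEPAGE_SUCCESS;
        }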
526 struct page *newpage, struct page *page)
528 XA_STATE(xas, &mapping->i_pages, page_index(page));
532 expected_count = 2 + page_has_private(page);
533 if (page_count(page) != expected_count || xas_load(&xas) != page) {
538 if (!page_ref_freeze(page, expected_count)) {
543 newpage->index = page->index;
544 newpage->mapping = page->mapping;
550 page_ref_unfreeze(page, expected_count - 1);
558 * Gigantic pages are so large that we do not guarantee that page++ pointer
559 * arithmetic will work across the entire page. We need something more
562 static void __copy_gigantic_page(struct page *dst, struct page *src,
566 struct page *dst_base = dst;
567 struct page *src_base = src;
579 static void copy_huge_page(struct page *dst, struct page *src)
585 /* hugetlbfs page */
594 /* thp page */
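The gigantic-page caveat at lines 558-559 is that mem_map is not guaranteed to be contiguous across a very large compound page, so page++ can walk off the map. A hedged sketch of the safe pattern, using nth_page() (which goes through pfn_to_page()) instead of pointer arithmetic:

        #include <linux/highmem.h>
        #include <linux/mm.h>
        #include <linux/sched.h>

        /* Copy a compound page one subpage at a time; nth_page() is pfn-based,
         * so it stays correct even when the subpages' struct pages are not
         * virtually contiguous. */
        static void example_copy_compound(struct page *dst, struct page *src,
                                          int nr_pages)
        {
                int i;

                for (i = 0; i < nr_pages; i++) {
                        cond_resched();
                        copy_highpage(nth_page(dst, i), nth_page(src, i));
                }
        }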
606 * Copy the page to its new location
608 void migrate_page_states(struct page *newpage, struct page *page)
612 if (PageError(page))
614 if (PageReferenced(page))
616 if (PageUptodate(page))
618 if (TestClearPageActive(page)) {
619 VM_BUG_ON_PAGE(PageUnevictable(page), page);
621 } else if (TestClearPageUnevictable(page))
623 if (PageWorkingset(page))
625 if (PageChecked(page))
627 if (PageMappedToDisk(page))
631 if (PageDirty(page))
634 if (page_is_young(page))
636 if (page_is_idle(page))
639 /* Migrate the page's xpm state */
640 if(PageXPMWritetainted(page))
643 if(PageXPMReadonly(page))
647 * Copy NUMA information to the new page, to prevent over-eager
648 * future migrations of this same page.
650 cpupid = page_cpupid_xchg_last(page, -1);
653 ksm_migrate_page(newpage, page);
658 if (PageSwapCache(page))
659 ClearPageSwapCache(page);
660 ClearPagePrivate(page);
661 set_page_private(page, 0);
664 * If any waiters have accumulated on the new page then
675 if (PageReadahead(page))
678 copy_page_owner(page, newpage);
680 if (!PageHuge(page))
681 mem_cgroup_migrate(page, newpage);
685 void migrate_page_copy(struct page *newpage, struct page *page)
687 if (PageHuge(page) || PageTransHuge(page))
688 copy_huge_page(newpage, page);
690 copy_highpage(newpage, page);
692 migrate_page_states(newpage, page);
701 struct page *newpage, struct page *page,
706 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
708 rc = migrate_page_move_mapping(mapping, newpage, page, extra_count);
714 migrate_page_copy(newpage, page);
716 migrate_page_states(newpage, page);
721 * Common logic to directly migrate a single LRU page suitable for
727 struct page *newpage, struct page *page,
730 return migrate_page_extra(mapping, newpage, page, mode, 0);
774 struct page *newpage, struct page *page, enum migrate_mode mode,
781 if (!page_has_buffers(page))
782 return migrate_page(mapping, newpage, page, mode);
784 /* Check that the page has no extra refs before we do more work */
785 expected_count = expected_page_refs(mapping, page);
786 if (page_count(page) != expected_count)
789 head = page_buffers(page);
820 rc = migrate_page_move_mapping(mapping, newpage, page, 0);
824 attach_page_private(newpage, detach_page_private(page));
834 migrate_page_copy(newpage, page);
836 migrate_page_states(newpage, page);
854 * if the underlying filesystem guarantees that no other references to "page"
855 * exist. For example attached buffer heads are accessed only under page lock.
858 struct page *newpage, struct page *page, enum migrate_mode mode)
860 return __buffer_migrate_page(mapping, newpage, page, mode, false);
871 struct page *newpage, struct page *page, enum migrate_mode mode)
873 return __buffer_migrate_page(mapping, newpage, page, mode, true);
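For context on who calls these helpers (illustrative, not taken from this file): a filesystem opts into migration by pointing its address_space_operations at them. Filesystems whose pagecache pages never carry private data can use migrate_page() directly, while block-device-backed filesystems with buffer heads typically use buffer_migrate_page(); ext2, for example, does the latter.

        #include <linux/fs.h>

        static const struct address_space_operations example_aops = {
                /* ... readpage/writepage and friends elided ... */
                .migratepage    = buffer_migrate_page,
        };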
878 * Write back a page to clear its dirty state
880 static int writeout(struct address_space *mapping, struct page *page)
895 if (!clear_page_dirty_for_io(page))
900 * A dirty page may imply that the underlying filesystem has
901 * the page on some queue. So the page must be clean for
903 * page state is no longer what we checked for earlier.
907 remove_migration_ptes(page, page, false);
909 rc = mapping->a_ops->writepage(page, &wbc);
913 lock_page(page);
922 struct page *newpage, struct page *page, enum migrate_mode mode)
924 if (PageDirty(page)) {
933 return writeout(mapping, page);
940 if (page_has_private(page) &&
941 !try_to_release_page(page, GFP_KERNEL))
944 return migrate_page(mapping, newpage, page, mode);
948 * Move a page to a newly allocated page
949 * The page is locked and all ptes have been successfully removed.
951 * The new page will have replaced the old page if this function
958 static int move_to_new_page(struct page *newpage, struct page *page,
963 bool is_lru = !__PageMovable(page);
965 VM_BUG_ON_PAGE(!PageLocked(page), page);
968 mapping = page_mapping(page);
972 rc = migrate_page(mapping, newpage, page, mode);
979 * for page migration.
982 page, mode);
985 page, mode);
988 * In case of non-lru page, it could be released after
991 VM_BUG_ON_PAGE(!PageIsolated(page), page);
992 if (!PageMovable(page)) {
994 __ClearPageIsolated(page);
999 page, mode);
1001 !PageIsolated(page));
1005 * When successful, old pagecache page->mapping must be cleared before
1006 * page is freed; but stats require that PageAnon be left as PageAnon.
1009 if (__PageMovable(page)) {
1010 VM_BUG_ON_PAGE(!PageIsolated(page), page);
1014 * cannot try to migrate this page.
1016 __ClearPageIsolated(page);
1020 * Anonymous and movable page->mapping will be cleared by
1024 if (!PageMappingFlags(page))
1025 page->mapping = NULL;
1038 static int __unmap_and_move(struct page *page, struct page *newpage,
1044 bool is_lru = !__PageMovable(page);
1046 if (!trylock_page(page)) {
1052 * For example, during page readahead pages are added locked
1057 * second or third page, the process can end up locking
1058 * the same page twice and deadlocking. Rather than
1066 lock_page(page);
1069 if (PageWriteback(page)) {
1086 wait_on_page_writeback(page);
1090 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1091 * we cannot notice that the anon_vma is freed while we migrate a page.
1095 * so we only need to care about anon pages here.
1100 * because that implies that the anon page is no longer mapped
1101 * (and cannot be remapped so long as we hold the page lock).
1103 if (PageAnon(page) && !PageKsm(page))
1104 anon_vma = page_get_anon_vma(page);
1107 * Block others from accessing the new page when we get around to
1118 rc = move_to_new_page(newpage, page, mode);
1124 * 1. When a new swap-cache page is read in, it is added to the LRU
1126 * Calling try_to_unmap() against a page->mapping==NULL page will
1128 * 2. An orphaned page (see truncate_complete_page) might have
1129 * fs-private metadata. The page can be picked up due to memory
1130 * offlining. Everywhere else except page reclaim, the page is
1131 * invisible to the VM, so the page cannot be migrated. So try to
1132 * free the metadata, so the page can be freed.
1134 if (!page->mapping) {
1135 VM_BUG_ON_PAGE(PageAnon(page), page);
1136 if (page_has_private(page)) {
1137 try_to_free_buffers(page);
1140 } else if (page_mapped(page)) {
1142 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1143 page);
1144 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK);
1148 if (!page_mapped(page))
1149 rc = move_to_new_page(newpage, page, mode);
1152 remove_migration_ptes(page,
1153 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1161 unlock_page(page);
1165 * which will not free the page because the new page owner increased
1166 * the refcount. Also, if it is an LRU page, add the page back to the LRU
1167 * list here. Use the old state of the isolated source page to
1168 * determine if we migrated an LRU page. newpage was already unlocked
1169 * and possibly modified by its owner - don't rely on the page
1183 * Obtain the lock on page, remove all ptes and migrate the page
1184 * to the newly allocated page in newpage.
1188 unsigned long private, struct page *page,
1193 struct page *newpage = NULL;
1195 if (!thp_migration_supported() && PageTransHuge(page))
1198 if (page_count(page) == 1) {
1199 /* page was freed from under us. So we are done. */
1200 ClearPageActive(page);
1201 ClearPageUnevictable(page);
1202 if (unlikely(__PageMovable(page))) {
1203 lock_page(page);
1204 if (!PageMovable(page))
1205 __ClearPageIsolated(page);
1206 unlock_page(page);
1211 newpage = get_new_page(page, private);
1215 rc = __unmap_and_move(page, newpage, force, mode);
1222 * A page that has been migrated has all references
1223 * removed and will be freed. A page that has not been
1226 list_del(&page->lru);
1233 if (likely(!__PageMovable(page)))
1234 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1235 page_is_file_lru(page), -thp_nr_pages(page));
1240 * isolation. Otherwise, restore the page to the right list unless
1246 * We release the page in page_handle_poison.
1248 put_page(page);
1251 if (likely(!__PageMovable(page))) {
1252 putback_lru_page(page);
1256 lock_page(page);
1257 if (PageMovable(page))
1258 putback_movable_page(page);
1260 __ClearPageIsolated(page);
1261 unlock_page(page);
1262 put_page(page);
1282 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1283 * under direct I/O, the refcount of the head page is 512 plus a bit more.)
1288 * There is also no race when direct I/O is issued on the page under migration,
1290 * will wait in the page fault for migration to complete.
1294 struct page *hpage, int force,
1299 struct page *new_hpage;
1306 * like soft offline and memory hotremove don't walk through page
1334 * page_mapping() set, hugetlbfs specific move page routine will not
1414 * supplied as the target for the page migration
1418 * as the target of the page migration.
1423 * page migration, if any.
1424 * @reason: The reason for page migration.
1446 struct page *page;
1447 struct page *page2;
1458 list_for_each_entry_safe(page, page2, from, lru) {
1461 * THP statistics are based on the source huge page.
1465 is_thp = PageTransHuge(page) && !PageHuge(page);
1466 nr_subpages = thp_nr_pages(page);
1469 if (PageHuge(page))
1471 put_new_page, private, page,
1475 private, page, pass > 2, mode,
1483 * retry on the same page with the THP split
1486 * Head page is retried immediately and tail
1492 lock_page(page);
1493 rc = split_huge_page_to_list(page, from);
1494 unlock_page(page);
1496 list_safe_reset_next(page, page2, lru);
1525 * unlike the -EAGAIN case, the failed page is
1526 * removed from migration page list and not
1557 struct page *alloc_migration_target(struct page *page, unsigned long private)
1562 struct page *new_page = NULL;
1570 nid = page_to_nid(page);
1572 if (PageHuge(page)) {
1573 struct hstate *h = page_hstate(compound_head(page));
1579 if (PageTransHuge(page)) {
1588 zidx = zone_idx(page_zone(page));
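A hedged usage sketch of the two entry points above, modeled on in-tree callers such as memory hot-remove: pages are isolated onto a private list and then handed to migrate_pages() with alloc_migration_target() as the allocation callback. Note that struct migration_target_control lives in mm/internal.h, so this pattern is only available to mm/ code, and the field set and 6-argument migrate_pages() signature shown follow the ~v5.10 definitions:

        #include "internal.h"           /* struct migration_target_control (mm-internal) */
        #include <linux/gfp.h>
        #include <linux/migrate.h>

        static int example_migrate_list_to_node(struct list_head *pagelist, int nid)
        {
                struct migration_target_control mtc = {
                        .nid = nid,
                        .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
                };

                /* MIGRATE_SYNC blocks until migration completes; the reason is for tracing */
                return migrate_pages(pagelist, alloc_migration_target, NULL,
                                     (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
        }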
1630 * Resolves the given address to a struct page, isolates it from the LRU and
1633 * errno - if the page cannot be found/isolated
1642 struct page *page;
1654 page = follow_page(vma, addr, follflags);
1656 err = PTR_ERR(page);
1657 if (IS_ERR(page))
1661 if (!page)
1665 if (page_to_nid(page) == node)
1669 if (page_mapcount(page) > 1 && !migrate_all)
1672 if (PageHuge(page)) {
1673 if (PageHead(page)) {
1674 err = isolate_hugetlb(page, pagelist);
1679 struct page *head;
1681 head = compound_head(page);
1695 * isolate_lru_page() or drop the page ref if it was
1698 put_page(page);
1731 * Migrate an array of page addresses onto an array of nodes and fill
1782 * Errors in the page lookup or isolation are not fatal and we simply
1789 /* The page is successfully queued for migration */
1794 * If the page is already on the target node (!err), store the
1830 struct page *page;
1838 page = follow_page(vma, addr, FOLL_DUMP);
1840 err = PTR_ERR(page);
1841 if (IS_ERR(page))
1844 err = page ? page_to_nid(page) : -ENOENT;
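The follow_page()-based lookup above is the kernel side of the move_pages(2) system call. For orientation, a minimal userspace example (illustrative; it uses libnuma's wrapper from <numaif.h>, link with -lnuma, and assumes a 4 KiB page size) that asks the kernel to move one of the caller's pages to node 0 and reports where it ended up:

        #include <numaif.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int main(void)
        {
                long rc;
                void *pages[1];
                int nodes[1] = { 0 };   /* desired NUMA node */
                int status[1];

                pages[0] = malloc(4096);
                memset(pages[0], 0, 4096);      /* fault the page in first */

                /* pid 0 means "the calling process" */
                rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
                if (rc < 0)
                        perror("move_pages");
                else
                        printf("status[0] = %d (node number, or negative errno)\n",
                               status[0]);

                free(pages[0]);
                return 0;
        }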
2027 static struct page *alloc_misplaced_dst_page(struct page *page,
2031 struct page *newpage;
2042 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2046 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2049 if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
2052 if (isolate_lru_page(page))
2056 * migrate_misplaced_transhuge_page() skips page migration's usual
2057 * check on page_count(), so we must do it here, now that the page
2059 * The expected page count is 3: 1 for page's mapcount and 1 for the
2062 if (PageTransHuge(page) && page_count(page) != 3) {
2063 putback_lru_page(page);
2067 page_lru = page_is_file_lru(page);
2068 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2069 thp_nr_pages(page));
2072 * Isolating the page has taken another reference, so the
2073 * caller's reference can be safely dropped without the page
2076 put_page(page);
2082 struct page *page = pmd_page(pmd);
2083 return PageLocked(page);
2087 * Attempt to migrate a misplaced page to the specified destination
2089 * the page that will be dropped by this function before returning.
2091 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2103 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2111 if (page_is_file_lru(page) && PageDirty(page))
2114 isolated = numamigrate_isolate_page(pgdat, page);
2118 list_add(&page->lru, &migratepages);
2124 list_del(&page->lru);
2125 dec_node_page_state(page, NR_ISOLATED_ANON +
2126 page_is_file_lru(page));
2127 putback_lru_page(page);
2136 put_page(page);
2143 * Migrates a THP to a given target node. page must be locked and is unlocked
2150 struct page *page, int node)
2155 struct page *new_page = NULL;
2156 int page_lru = page_is_file_lru(page);
2166 isolated = numamigrate_isolate_page(pgdat, page);
2172 /* Prepare a page as a migration target */
2174 if (PageSwapBacked(page))
2177 /* anon mapping, we can simply copy page->mapping to the new page: */
2178 new_page->mapping = page->mapping;
2179 new_page->index = page->index;
2182 migrate_page_copy(new_page, page);
2187 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2192 SetPageActive(page);
2194 SetPageUnevictable(page);
2200 get_page(page);
2201 putback_lru_page(page);
2202 mod_node_page_state(page_pgdat(page),
2214 * page blocking on the page lock, block on the page table
2215 * lock or observe the new page. The SetPageUptodate on the
2216 * new page and page_add_new_anon_rmap guarantee the copy is
2234 page_ref_unfreeze(page, 2);
2235 mlock_migrate_page(new_page, page);
2236 page_remove_rmap(page, true);
2241 /* Take an "isolate" reference and put new page on the LRU. */
2246 unlock_page(page);
2247 put_page(page); /* Drop the rmap reference */
2248 put_page(page); /* Drop the LRU isolation reference */
2253 mod_node_page_state(page_pgdat(page),
2269 unlock_page(page);
2270 put_page(page);
2338 struct page *page;
2346 page = pmd_page(*pmdp);
2347 if (is_huge_zero_page(page)) {
2356 get_page(page);
2358 if (unlikely(!trylock_page(page)))
2361 ret = split_huge_page(page);
2362 unlock_page(page);
2363 put_page(page);
2381 struct page *page;
2397 * We only care about unaddressable device page special
2398 * page table entries. Other special swap entries are not
2399 * migratable, and we ignore regular swapped pages.
2405 page = device_private_entry_to_page(entry);
2408 page->pgmap->owner != migrate->pgmap_owner)
2411 mpfn = migrate_pfn(page_to_pfn(page)) |
2424 page = vm_normal_page(migrate->vma, addr, pte);
2430 if (!page || !page->mapping || PageTransCompound(page)) {
2436 * By getting a reference on the page we pin it and that blocks
2440 * We drop this reference after isolating the page from the LRU,
2441 * for non-device pages (device pages are not on the LRU and thus
2444 get_page(page);
2448 * Optimize for the common case where page is only mapped once
2449 * in one process. If we can lock the page, then we can safely
2450 * set up a special migration page table entry now.
2452 if (trylock_page(page)) {
2458 /* Setup special migration page table entry */
2459 entry = make_migration_entry(page, mpfn &
2477 * drop page refcount. Page won't be freed, as we took
2480 page_remove_rmap(page, false);
2481 put_page(page);
2511 * This will walk the CPU page table. For each virtual address backed by a
2512 * valid page, it updates the src array and takes a reference on the page, in
2513 * order to pin the page until we lock it and unmap it.
2522 * private page mappings that won't be migrated.
2537 * migrate_vma_check_page() - check if page is pinned or not
2538 * @page: struct page to check
2542 * ZONE_DEVICE page.
2544 static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
2548 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2549 * a device page.
2551 int extra = 1 + (page == fault_page);
2554 * FIXME support THP (transparent huge page); it is a bit more complex to
2558 if (PageCompound(page))
2562 if (is_zone_device_page(page)) {
2564 * Private pages can never be pinned as they have no valid pte and
2567 * will bump the page reference count. Sadly there is no way to
2574 * it does not need to take a reference on page.
2576 return is_device_private_page(page);
2579 /* For file-backed pages */
2580 if (page_mapping(page))
2581 extra += 1 + page_has_private(page);
2583 if ((page_count(page) - extra) > page_mapcount(page))
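Worked example for the pin check at line 2583 (assuming an order-0 anonymous page that is not the fault page and is mapped by exactly one process): extra = 1 for the reference taken during collection/isolation, page_mapcount() = 1, and the mapping itself contributes one reference, so an unpinned page has page_count() = 2 and 2 - 1 <= 1 holds. Any additional reference, such as a get_user_pages() pin or O_DIRECT I/O in flight, raises page_count() to 3 or more, the inequality fails, and the page is reported as not migratable.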
2594 * page is locked it is isolated from the lru (for non-device pages). Finally,
2608 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2611 if (!page)
2618 * are waiting on each other's page lock.
2621 * for any page we cannot lock right away.
2623 if (!trylock_page(page)) {
2626 put_page(page);
2634 if (!is_zone_device_page(page)) {
2635 if (!PageLRU(page) && allow_drain) {
2641 if (isolate_lru_page(page)) {
2648 unlock_page(page);
2650 put_page(page);
2656 put_page(page);
2659 if (!migrate_vma_check_page(page, migrate->fault_page)) {
2665 if (!is_zone_device_page(page)) {
2666 get_page(page);
2667 putback_lru_page(page);
2671 unlock_page(page);
2674 if (!is_zone_device_page(page))
2675 putback_lru_page(page);
2677 put_page(page);
2683 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2685 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2688 remove_migration_pte(page, migrate->vma, addr, page);
2691 unlock_page(page);
2692 put_page(page);
2698 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2701 * Replace page mapping (CPU page table pte) with a special migration pte entry
2706 * destination memory and copy contents of original page over to new page.
2716 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2718 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2721 if (page_mapped(page)) {
2722 try_to_unmap(page, flags);
2723 if (page_mapped(page))
2727 if (migrate_vma_check_page(page, migrate->fault_page))
2737 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2739 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2742 remove_migration_ptes(page, page, false);
2745 unlock_page(page);
2748 if (is_zone_device_page(page))
2749 put_page(page);
2751 putback_lru_page(page);
2765 * and unmapped, check whether each page is pinned or not. Pages that aren't
2774 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2779 * device memory to system memory. If the caller cannot migrate a device page
2784 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2796 * then migrate_vma_pages() to migrate struct page information from the source
2797 * struct page to the destination struct page. If it fails to migrate the
2798 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2809 * It is safe to update device page table after migrate_vma_pages() because
2810 * both destination and source page are still locked, and the mmap_lock is held
2813 * Once the caller is done cleaning up things and updating its page table (if it
2815 * migrate_vma_finalize() to update the CPU page table to point to new pages
2816 * for successfully migrated pages or otherwise restore the CPU page table to
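To tie the setup/pages/finalize comments above together, a hedged driver-side sketch of the flow for a single page (every example_* name is invented; the struct migrate_vma fields and the MIGRATE_PFN_LOCKED destination flag follow the ~v5.10 API visible in this file and may differ on newer kernels):

        #include <linux/migrate.h>
        #include <linux/mm.h>

        /* Hypothetical driver helper: returns a locked ZONE_DEVICE page. */
        struct page *example_alloc_locked_device_page(void *owner);

        static int example_migrate_one_page_to_device(struct vm_area_struct *vma,
                                                      unsigned long addr, void *owner)
        {
                unsigned long src_pfn = 0, dst_pfn = 0;
                struct migrate_vma args = {
                        .vma            = vma,
                        .start          = addr,
                        .end            = addr + PAGE_SIZE,
                        .src            = &src_pfn,
                        .dst            = &dst_pfn,
                        .pgmap_owner    = owner,
                        .flags          = MIGRATE_VMA_SELECT_SYSTEM,
                };
                int ret;

                ret = migrate_vma_setup(&args);         /* collect, unmap, check pins */
                if (ret)
                        return ret;

                if (src_pfn & MIGRATE_PFN_MIGRATE) {
                        /* dpage comes back locked, hence MIGRATE_PFN_LOCKED below */
                        struct page *dpage = example_alloc_locked_device_page(owner);

                        /* ... copy the source page's data into dpage here ... */
                        if (dpage)
                                dst_pfn = migrate_pfn(page_to_pfn(dpage)) |
                                          MIGRATE_PFN_LOCKED;
                }

                migrate_vma_pages(&args);       /* move struct page metadata */
                migrate_vma_finalize(&args);    /* point the CPU page table at the result */
                return 0;
        }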
2866 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2867 * private page.
2871 struct page *page,
2923 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
2928 * preceding stores to the page contents become visible before
2931 __SetPageUptodate(page);
2933 if (is_zone_device_page(page)) {
2934 if (is_device_private_page(page)) {
2937 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2944 pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
2948 entry = mk_pte(page, vma->vm_page_prot);
2975 page_add_new_anon_rmap(page, vma, addr, false);
2976 if (!is_zone_device_page(page))
2977 lru_cache_add_inactive_or_unevictable(page, vma);
2978 get_page(page);
3002 * migrate_vma_pages() - migrate meta-data from src page to dst page
3005 * This migrates struct page meta-data from source struct page to destination
3006 * struct page. This effectively finishes the migration from source page to the
3007 * destination page.
3018 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3019 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3028 if (!page) {
3047 mapping = page_mapping(page);
3061 * Other types of ZONE_DEVICE page are not
3069 if (migrate->fault_page == page)
3070 r = migrate_page_extra(mapping, newpage, page,
3073 r = migrate_page(mapping, newpage, page,
3090 * migrate_vma_finalize() - restore CPU page table entry
3094 * new page if migration was successful for that page, or to the original page
3106 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3107 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3109 if (!page) {
3122 newpage = page;
3125 remove_migration_ptes(page, newpage, false);
3126 unlock_page(page);
3128 if (is_zone_device_page(page))
3129 put_page(page);
3131 putback_lru_page(page);
3133 if (newpage != page) {