Lines Matching defs:page
60 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
62 struct folio *folio = folio_get_nontail_page(page);
69 * In case we 'win' a race for a movable page being freed under us and
72 * release this page, thus avoiding a nasty leakage.
82 * Check movable flag before taking the page lock because
83 * we use non-atomic bitops on newly allocated page flags so
84 * unconditionally grabbing the lock would race with the page owner's updates.
95 * compaction threads can race against page migration functions
96 * as well as race against the release of a page.
98 * In order to avoid having an already isolated movable page
101 * let's be sure we have the page lock
102 * before proceeding with the movable page isolation steps.
113 if (!mops->isolate_page(&folio->page, mode))
116 /* Driver shouldn't use PG_isolated bit of page->flags */
135 mops->putback_page(&folio->page);
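The isolate_page/putback_page calls in the lines above dispatch into a driver's movable_operations. The following is a hedged sketch of that driver side, not a match from this file; it assumes the ~v6.x movable_operations API and the mydrv_* names are hypothetical.

#include <linux/migrate.h>

static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Pin driver-private state so the page cannot vanish while isolated. */
	return true;
}

static int mydrv_migrate_page(struct page *dst, struct page *src,
			      enum migrate_mode mode)
{
	/* Copy contents plus driver metadata from src to dst. */
	return MIGRATEPAGE_SUCCESS;
}

static void mydrv_putback_page(struct page *page)
{
	/* Undo mydrv_isolate_page() when the migration attempt fails. */
}

static const struct movable_operations mydrv_mops = {
	.isolate_page	= mydrv_isolate_page,
	.migrate_page	= mydrv_migrate_page,
	.putback_page	= mydrv_putback_page,
};

/* On each freshly allocated page the driver would register the ops: */
/*	__SetPageMovable(page, &mydrv_mops); */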
144 * built from lru, balloon, hugetlbfs pages. See isolate_migratepages_range()
193 struct page *new;
283 * references to the indicated page.
299 * Something used the pte of a page under migration. We need to
300 * get to the page and wait until migration is finished.
353 * here because the pgtable page won't be freed without the
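These lines belong to the wait-for-migration path. Below is a hedged sketch of the fault-side pattern that reaches it, mirroring what do_swap_page() does when it finds a migration entry; it is not a verbatim excerpt and relies on the surrounding vm_fault context.

	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

	if (is_migration_entry(entry)) {
		/*
		 * Sleep until the pte stops being a migration entry,
		 * then let the fault be retried.
		 */
		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
		return 0;
	}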
393 * Replace the page in the mapping.
411 /* Anonymous page without mapping */
452 /* Move dirty while page refs frozen and newpage not yet exposed */
466 * Drop cache reference from old page by unfreezing
477 * the page for that zone. Other VM counters will be
479 * new page and drop references to the old page.
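A hedged, heavily simplified sketch of the "freeze references, repoint the mapping, unfreeze" sequence those comments refer to (folio_migrate_mapping()); the xarray update and statistics are elided, and expected_count stands for the reference count the migration path expects to hold.

	if (!folio_ref_freeze(folio, expected_count))
		return -EAGAIN;		/* someone still holds an extra reference */

	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	/* ... the mapping's xarray slot is switched over to newfolio here ... */

	/* Unfreeze, handing the mapping's reference over to the new folio. */
	folio_ref_unfreeze(folio, expected_count - 1);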
595 * Copy NUMA information to the new page, to prevent over-eager
596 * future migrations of this same page.
598 cpupid = page_cpupid_xchg_last(&folio->page, -1);
602 * page access time in slow memory node.
605 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
606 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
611 page_cpupid_xchg_last(&newfolio->page, cpupid);
622 /* page->private contains hugetlb specific flags */
627 * If any waiters have accumulated on the new page then
683 * @mode: How to migrate the page.
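That kerneldoc line belongs to migrate_folio(), the generic helper for folios with no private state. A filesystem that needs no special handling can wire it straight into its address_space_operations; a hedged sketch (myfs_aops is hypothetical):

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations myfs_aops = {
	/* ... read/write ops elided ... */
	.migrate_folio	= migrate_folio,	/* generic move, no fs metadata */
};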
743 /* Check whether page does not have extra refs before we do more work */
904 rc = mapping->a_ops->writepage(&folio->page, &wbc);
942 * Move a page to a newly allocated page
943 * The page is locked and all ptes have been successfully removed.
945 * The new page will have replaced the old page if this function
956 bool is_lru = !__PageMovable(&src->page);
972 * for page migration.
982 * In case of non-lru page, it could be released after
993 rc = mops->migrate_page(&dst->page, &src->page, mode);
1003 if (__PageMovable(&src->page)) {
1008 * cannot try to migrate this page.
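A hedged, simplified sketch of the dispatch these lines implement in move_to_new_folio(): LRU folios go through the mapping's migrate_folio callback, non-LRU movable pages through their driver's movable_operations. The writeout fallback is omitted.

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);
		/* else: fall back to writing the folio out, omitted here */
	} else {
		const struct movable_operations *mops = folio_movable_ops(src);

		rc = mops->migrate_page(&dst->page, &src->page, mode);
	}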
1101 /* We release the page in page_handle_poison. */
1105 /* Obtain the lock on page, remove all ptes. */
1115 bool is_lru = !__PageMovable(&src->page);
1142 * For example, during page readahead pages are added locked
1147 * second or third page, the process can end up locking
1148 * the same page twice and deadlocking. Rather than
1158 * inserting a page into the page table), but it's not
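The deadlock scenario described in those comments is why the unmap path only trylocks in the fast path. A hedged sketch of that rule, mirroring migrate_folio_unmap() but not verbatim:

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			return -EAGAIN;		/* async compaction never blocks */

		/*
		 * A PF_MEMALLOC task (direct compaction from the allocator
		 * path) may already hold pages it added to the LRU locked;
		 * sleeping on the lock here could deadlock on its own page.
		 */
		if (current->flags & PF_MEMALLOC)
			return -EAGAIN;

		folio_lock(src);
	}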
1190 * we cannot notice that anon_vma is freed while we migrate a page.
1194 * just care about Anon pages here.
1199 * because that implies that the anon page is no longer mapped
1200 * (and cannot be remapped so long as we hold the page lock).
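A hedged sketch of the anon_vma pinning those lines describe, assuming the folio_get_anon_vma() helper of recent kernels:

	struct anon_vma *anon_vma = NULL;

	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);	/* take a refcount, not the lock */

	/* ... unmap, copy, remap ... */

	if (anon_vma)
		put_anon_vma(anon_vma);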
1206 * Block others from accessing the new page when we get around to
1224 * 1. When a new swap-cache page is read in, it is added to the LRU
1226 * Calling try_to_unmap() against a src->mapping==NULL page will
1228 * 2. An orphaned page (see truncate_cleanup_page) might have
1229 * fs-private metadata. The page can be picked up due to memory
1230 * offlining. Everywhere else except page reclaim, the page is
1231 * invisible to the vm, so the page cannot be migrated. So try to
1232 * free the metadata, so the page can be freed.
1276 bool is_lru = !__PageMovable(&src->page);
1292 * turns out to be an mlocked page, remove_migration_ptes() will
1295 * We would like to do something similar for the old page, when
1296 * unsuccessful, and other cases when a page has been temporarily
1308 set_page_owner_migrate_reason(&dst->page, reason);
1311 * which will not free the page because new page owner increased
1354 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1355 * under direct I/O, the reference of the head page is 512 and a bit more.)
1360 * There is also no race when direct I/O is issued on the page under migration,
1362 * will wait in the page fault for migration to complete.
1376 /* page was freed from under us. So we are done. */
1400 * folio_mapping() set, hugetlbfs specific move page routine will not
1424 mapping = hugetlb_page_mapping_lock_write(&src->page);
1547 * hotremove don't walk through page tables or check whether
1871 * supplied as the target for the page migration
2059 * Resolves the given address to a struct page, isolates it from the LRU and
2062 * errno - if the page cannot be found/isolated
2072 struct page *page;
2085 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2087 err = PTR_ERR(page);
2088 if (IS_ERR(page))
2092 if (!page)
2095 if (is_zone_device_page(page))
2099 if (page_to_nid(page) == node)
2103 if (page_mapcount(page) > 1 && !migrate_all)
2106 if (PageHuge(page)) {
2107 if (PageHead(page)) {
2108 isolated = isolate_hugetlb(page_folio(page), pagelist);
2112 struct page *head;
2114 head = compound_head(page);
2130 * isolate_lru_page() or drop the page ref if it was
2133 put_page(page);
2166 * Migrate an array of page addresses onto an array of nodes and fill
2225 * Errors in the page lookup or isolation are not fatal and we simply
2232 /* The page is successfully queued for migration */
2237 * The move_pages() man page does not have an -EEXIST choice, so
2244 * If the page is already on the target node (!err), store the
2254 /* We have accounted for page i */
2285 struct page *page;
2293 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2295 err = PTR_ERR(page);
2296 if (IS_ERR(page))
2300 if (!page)
2303 if (!is_zone_device_page(page))
2304 err = page_to_nid(page);
2306 put_page(page);
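The lines above are the kernel side of the move_pages(2) syscall: do_pages_move() queues and migrates pages, do_pages_stat_array() only reports their nodes, and both fill the per-page status array userspace reads back. A minimal, hedged userspace example (link with -lnuma; node 0 is just an assumed target):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *pages[1];
	int target = 0;		/* assumed destination node */
	int status = -1;
	long rc;

	if (posix_memalign(&pages[0], psz, psz))
		return 1;
	((char *)pages[0])[0] = 1;	/* fault the page in first */

	/* Move the page: the per-page result lands in status (node id or -errno). */
	rc = move_pages(0, 1, pages, &target, &status, MPOL_MF_MOVE);
	if (rc < 0)
		perror("move_pages");
	printf("after move: status = %d\n", status);

	/* With nodes == NULL the same call only queries each page's node. */
	move_pages(0, 1, pages, NULL, &status, 0);
	printf("query: page is on node %d\n", status);

	free(pages[0]);
	return 0;
}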
2504 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2506 int nr_pages = thp_nr_pages(page);
2507 int order = compound_order(page);
2509 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2512 if (PageTransHuge(page) && total_mapcount(page) > 1)
2537 if (!isolate_lru_page(page))
2540 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2544 * Isolating the page has taken another reference, so the
2545 * caller's reference can be safely dropped without the page
2548 put_page(page);
2553 * Attempt to migrate a misplaced page to the specified destination
2555 * the page that will be dropped by this function before returning.
2557 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2565 int nr_pages = thp_nr_pages(page);
2571 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2579 if (page_is_file_lru(page) && PageDirty(page))
2582 isolated = numamigrate_isolate_page(pgdat, page);
2586 list_add(&page->lru, &migratepages);
2592 list_del(&page->lru);
2593 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2594 page_is_file_lru(page), -nr_pages);
2595 putback_lru_page(page);
2601 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2609 put_page(page);
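A hedged sketch of the caller side of migrate_misplaced_page(), roughly the NUMA hinting fault path in do_numa_page(): the function consumes the caller's page reference whether or not the migration succeeds, and a nonzero return means the page now lives on target_nid. The surrounding vmf/page/flags context is assumed.

	int target_nid = numa_migrate_prep(page, vma, vmf->address,
					   page_nid, &flags);

	if (target_nid == NUMA_NO_NODE) {
		put_page(page);			/* keep the page where it is */
	} else if (migrate_misplaced_page(page, vma, target_nid)) {
		page_nid = target_nid;		/* fault accounted to the new node */
		flags |= TNF_MIGRATED;
	}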