Lines Matching defs:page

6  * this heavily depends upon page migration to do all the real heavy
20 #include <linux/page-isolation.h>
71 struct page *page, *next;
74 list_for_each_entry_safe(page, next, freelist, lru) {
75 unsigned long pfn = page_to_pfn(page);
76 list_del(&page->lru);
77 __free_page(page);
88 struct page *page, *next;
91 list_for_each_entry_safe(page, next, list, lru) {
92 list_del(&page->lru);
94 order = page_private(page);
97 post_alloc_hook(page, order, __GFP_MOVABLE);
99 split_page(page, order);
102 list_add(&page->lru, &tmp_list);
103 page++;
111 bool PageMovable(struct page *page)
115 VM_BUG_ON_PAGE(!PageLocked(page), page);
116 if (!__PageMovable(page))
119 mops = page_movable_ops(page);
126 void __SetPageMovable(struct page *page, const struct movable_operations *mops)
128 VM_BUG_ON_PAGE(!PageLocked(page), page);
129 VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
130 page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
134 void __ClearPageMovable(struct page *page)
136 VM_BUG_ON_PAGE(!PageMovable(page), page);
138 * This page still has the type of a movable page, but it's
141 page->mapping = (void *)PAGE_MAPPING_MOVABLE;
149 * Compaction is deferred when compaction fails to result in a page
216 struct page *page)
221 return !get_pageblock_skip(page);
289 static bool pageblock_skip_persistent(struct page *page)
291 if (!PageCompound(page))
294 page = compound_head(page);
296 if (compound_order(page) >= pageblock_order)
306 struct page *page = pfn_to_online_page(pfn);
307 struct page *block_page;
308 struct page *end_page;
311 if (!page)
313 if (zone != page_zone(page))
315 if (pageblock_skip_persistent(page))
322 if (check_source && check_target && !get_pageblock_skip(page))
330 get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
338 page = block_page;
351 * free page or an LRU page in the block. One or other condition
355 if (check_source && PageLRU(page)) {
356 clear_pageblock_skip(page);
360 if (check_target && PageBuddy(page)) {
361 clear_pageblock_skip(page);
365 page += (1 << PAGE_ALLOC_COSTLY_ORDER);
366 } while (page <= end_page);
373 * should be skipped for page isolation when the migrate and free page scanner
447 static bool test_and_set_skip(struct compact_control *cc, struct page *page)
455 skip = get_pageblock_skip(page);
457 set_pageblock_skip(page);
485 struct page *page, unsigned long pfn)
492 set_pageblock_skip(page);
499 struct page *page)
504 static inline bool pageblock_skip_persistent(struct page *page)
510 struct page *page, unsigned long pfn)
518 static bool test_and_set_skip(struct compact_control *cc, struct page *page)
592 struct page *page;
602 page = pfn_to_page(blockpfn);
605 for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
626 if (PageCompound(page)) {
627 const unsigned int order = compound_order(page);
631 page += (1UL << order) - 1;
637 if (!PageBuddy(page))
645 /* Recheck this is a buddy page under lock */
646 if (!PageBuddy(page))
650 /* Found a free page, will break it into order-0 pages */
651 order = buddy_order(page);
652 isolated = __isolate_free_page(page, order);
655 set_page_private(page, order);
660 list_add_tail(&page->lru, freelist);
666 /* Advance to the end of split page */
668 page += isolated - 1;
717 * Otherwise, function returns one-past-the-last PFN of isolated page
719 * a free page).
770 * page may span two pageblocks).
831 * -ENOMEM in case we could not allocate a page, or 0.
847 struct page *page = NULL, *valid_page = NULL;
903 * a compound or a high-order buddy page in the
932 page = pfn_to_page(low_pfn);
942 if (!isolation_suitable(cc, page)) {
947 valid_page = page;
950 if (PageHuge(page) && cc->alloc_contig) {
956 ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
966 low_pfn += compound_nr(page) - 1;
967 nr_scanned += compound_nr(page) - 1;
971 if (PageHuge(page)) {
976 folio = page_folio(page);
990 * Skip if free. We read page order here without zone lock
995 if (PageBuddy(page)) {
996 unsigned long freepage_order = buddy_order_unsafe(page);
1000 * a valid page order. Consider only values in the
1013 * an allocation much larger than the huge page size (eg CMA).
1018 if (PageCompound(page) && !cc->alloc_contig) {
1019 const unsigned int order = compound_order(page);
1031 * Skip any other type of page
1033 if (!PageLRU(page)) {
1038 if (unlikely(__PageMovable(page)) &&
1039 !PageIsolated(page)) {
1045 if (isolate_movable_page(page, mode)) {
1046 folio = page_folio(page);
1056 * sure the page is not being freed elsewhere -- the
1057 * page release code relies on it.
1059 folio = folio_get_nontail_page(page);
1064 * Migration will fail if an anonymous page is pinned in memory,
1105 * until after the folio is removed from the page
1193 /* Avoid potential deadlock in freeing page under lru_lock */
1207 * page anyway.
1283 * in case we could not allocate a page, or 0.
1326 struct page *page)
1330 if (pageblock_skip_persistent(page))
1336 block_mt = get_pageblock_migratetype(page);
1344 /* Returns true if the page is within a block suitable for migration to */
1346 struct page *page)
1348 /* If the page is a large free page, then disallow migration */
1349 if (PageBuddy(page)) {
1355 if (buddy_order_unsafe(page) >= pageblock_order)
1363 if (is_migrate_movable(get_pageblock_migratetype(page)))
1394 move_freelist_head(struct list_head *freelist, struct page *freepage)
1411 move_freelist_tail(struct list_head *freelist, struct page *freepage)
1425 struct page *page;
1439 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1440 if (!page)
1447 set_pageblock_skip(page);
1475 struct page *page = NULL;
1510 !page && order >= 0;
1514 struct page *freepage;
1538 page = freepage;
1554 if (!page && high_pfn) {
1555 page = pfn_to_page(high_pfn);
1558 freepage = page;
1564 /* Isolate the page if available */
1565 if (page) {
1566 if (__isolate_free_page(page, order)) {
1567 set_page_private(page, order);
1572 list_add_tail(&page->lru, &cc->freepages);
1577 page = NULL;
1598 if (!page) {
1607 page = pfn_to_page(highest);
1611 page = pageblock_pfn_to_page(min_pfn,
1627 if (!page)
1630 low_pfn = page_to_pfn(page);
1641 struct page *page;
1675 * and free page scanners meet or enough free pages are isolated.
1690 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1692 if (!page) {
1703 if (!suitable_migration_target(cc, page))
1707 if (!isolation_suitable(cc, page))
1716 update_pageblock_skip(cc, page, block_start_pfn -
1908 struct page *freepage;
1952 * If fast scanning failed then use a cached entry for a page block
1972 struct page *page;
2016 page = pageblock_pfn_to_page(block_start_pfn,
2018 if (!page) {
2036 !fast_find_block && !isolation_suitable(cc, page))
2047 if (!suitable_migration_source(cc, page)) {
2226 /* Direct compactor: Is a suitable page free? */
2232 /* Job done if page is free of the right migratetype */
2478 * happen if a page cannot be isolated (dirty/writeback in
2534 * If an ASYNC or SYNC_LIGHT fails to migrate a page
2551 * any page failed to migrate. Even after
2561 /* Stop if a page has been captured */
2562 if (capc && capc->page) {
2621 struct page **capture)
2640 .page = NULL,
2646 * frees a page.
2655 * page pointer, otherwise an interrupt could free and capture a page
2659 *capture = READ_ONCE(capc.page);
2662 * the page is still captured out of luck(IRQ came and freed the page).
2679 * @capture: Pointer to free page created by compaction will be stored here
2681 * This is the main entry point for direct page compaction.
2685 enum compact_priority prio, struct page **capture)
2940 * With no special task, compact all zones so that a page of requested
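The matches at file lines 111-141 above (PageMovable(), __SetPageMovable(), __ClearPageMovable()) come from the kernel's compaction code, apparently mm/compaction.c, and show how a non-LRU movable page is tagged: the address of its struct movable_operations is stored in page->mapping with the low PAGE_MAPPING_MOVABLE bit set, so a single pointer field carries both the "this is movable" type tag and the ops pointer, and clearing later leaves only the tag behind. The userspace sketch below is a minimal model of that pointer-tagging scheme only; the struct layouts, the MOVABLE_TAG value, and the helper names (set_movable(), get_movable_ops(), ...) are stand-ins, not the kernel's definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types; only the pointer-tagging idea is modelled. */
struct movable_ops { const char *name; };
struct fake_page { void *mapping; };

#define MOVABLE_TAG 0x2UL       /* models PAGE_MAPPING_MOVABLE (a low tag bit) */

/* Models __SetPageMovable(): store the ops pointer with the tag bit set. */
static void set_movable(struct fake_page *page, const struct movable_ops *mops)
{
        /* The ops structure must be aligned so the tag bit is free. */
        assert(((uintptr_t)mops & MOVABLE_TAG) == 0);
        page->mapping = (void *)((uintptr_t)mops | MOVABLE_TAG);
}

/* Models __PageMovable(): is the movable type tag present? */
static int is_movable(const struct fake_page *page)
{
        return ((uintptr_t)page->mapping & MOVABLE_TAG) != 0;
}

/* Models page_movable_ops(): mask the tag bit off to recover the pointer. */
static const struct movable_ops *get_movable_ops(const struct fake_page *page)
{
        return (const struct movable_ops *)((uintptr_t)page->mapping & ~MOVABLE_TAG);
}

/* Models __ClearPageMovable(): keep the type tag, drop the ops pointer. */
static void clear_movable(struct fake_page *page)
{
        page->mapping = (void *)MOVABLE_TAG;
}

int main(void)
{
        static const struct movable_ops ops = { .name = "demo" };
        struct fake_page page = { .mapping = NULL };

        set_movable(&page, &ops);
        printf("movable=%d ops=%s\n", is_movable(&page), get_movable_ops(&page)->name);

        /*
         * After clearing, the page still reads as movable but has no usable
         * ops pointer, mirroring the comment at file line 138 above.
         */
        clear_movable(&page);
        printf("movable=%d ops=%p\n", is_movable(&page), (void *)get_movable_ops(&page));
        return 0;
}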
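The do/while fragment at file lines 306-366 above belongs to the pass that re-examines pageblocks whose skip hint is set: the block is probed at a stride of 1 << PAGE_ALLOC_COSTLY_ORDER pages, and the hint is cleared as soon as the migrate side sees an LRU page or the free side sees a buddy page. The sketch below keeps only that strided-probe shape, assuming an invented COSTLY_ORDER constant and a plain boolean array in place of the PageLRU()/PageBuddy() tests.

#include <stdbool.h>
#include <stdio.h>

#define COSTLY_ORDER 3          /* stand-in for PAGE_ALLOC_COSTLY_ORDER */

/*
 * Probe a block at a coarse stride and report whether anything useful was
 * seen.  In the kernel a hit means clear_pageblock_skip(); here it is just
 * a boolean result.  The probe only samples every 1 << COSTLY_ORDER pages,
 * deliberately trading accuracy for scan speed.
 */
static bool probe_block(const bool *useful, unsigned long nr_pages)
{
        unsigned long idx = 0;

        do {
                if (useful[idx])
                        return true;            /* kernel: clear the skip hint */
                idx += 1UL << COSTLY_ORDER;     /* page += (1 << ...) */
        } while (idx < nr_pages);

        return false;                           /* keep the skip hint set */
}

int main(void)
{
        bool block[512] = { false };

        block[264] = true;      /* pretend one sampled page is on the LRU */
        printf("clear skip hint: %s\n", probe_block(block, 512) ? "yes" : "no");
        return 0;
}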
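File lines 645-668 and 88-103 above show the two halves of free-page handling: the free scanner isolates a whole buddy page and stashes its order with set_page_private(), and the split path later reads that order back with page_private(), calls split_page(), and walks page++ to queue each resulting order-0 page. The sketch below is a minimal model of the second half under invented names (isolated_block, split_to_order0): one isolated block expands into 1 << order individual frame numbers.

#include <stdio.h>

/*
 * Invented model: an isolated free block is its first pfn plus the buddy
 * order that was stashed aside (the kernel keeps it in page_private()).
 */
struct isolated_block {
        unsigned long first_pfn;
        unsigned int order;
};

/*
 * Expand one block into its 1 << order order-0 "pages", mirroring the
 * split_page(page, order); ... page++ loop in the listing above.
 */
static unsigned long split_to_order0(const struct isolated_block *blk,
                                     unsigned long *out_pfns, unsigned long cap)
{
        unsigned long nr = 1UL << blk->order;
        unsigned long i;

        if (nr > cap)
                nr = cap;       /* the caller's buffer bounds this model */
        for (i = 0; i < nr; i++)
                out_pfns[i] = blk->first_pfn + i;       /* the page++ step */
        return nr;
}

int main(void)
{
        struct isolated_block blk = { .first_pfn = 0x1000, .order = 3 };
        unsigned long pfns[1UL << 3];
        unsigned long n = split_to_order0(&blk, pfns, sizeof(pfns) / sizeof(pfns[0]));

        printf("order-%u block -> %lu order-0 pages: %#lx..%#lx\n",
               blk.order, n, pfns[0], pfns[n - 1]);
        return 0;
}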
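The capc.page matches around file lines 2561-2685 above are the capture path of direct compaction: compact_zone() publishes a capture request with the wanted order, the page allocator's free path can drop a suitable just-freed page straight into capc->page, and the compactor stops as soon as that slot fills, reading it back with READ_ONCE(). The model below keeps only that hand-off shape; capture_slot and try_capture() are invented names, C11 atomics stand in for the kernel's locking and READ_ONCE(), and the IRQ and migratetype details are omitted.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in: a "page" is just a frame number plus its order. */
struct fake_page { unsigned long pfn; unsigned int order; };

/* Models capture_control: the wanted order plus a slot for the result. */
struct capture_slot {
        unsigned int order;
        _Atomic(struct fake_page *) page;
};

/* Free-path side: offer a just-freed block to a pending capture request. */
static int try_capture(struct capture_slot *capc, struct fake_page *freed)
{
        struct fake_page *expected = NULL;

        if (!capc || freed->order < capc->order)
                return 0;       /* not suitable; free it normally instead */
        /* The first suitable freed block wins the slot. */
        return atomic_compare_exchange_strong(&capc->page, &expected, freed);
}

/* Compactor side: poll the slot, like `if (capc && capc->page) break;`. */
static struct fake_page *check_captured(struct capture_slot *capc)
{
        return atomic_load(&capc->page);
}

int main(void)
{
        struct capture_slot capc = { .order = 4 };
        struct fake_page small = { .pfn = 0x100, .order = 2 };
        struct fake_page big   = { .pfn = 0x800, .order = 4 };

        atomic_init(&capc.page, NULL);
        printf("captured order-2 block: %d\n", try_capture(&capc, &small));
        printf("captured order-4 block: %d\n", try_capture(&capc, &big));
        printf("compactor sees pfn %#lx\n", check_captured(&capc)->pfn);
        return 0;
}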