Lines Matching defs:page
6 * this heavily depends upon page migration to do all the real heavy
20 #include <linux/page-isolation.h>
73 struct page *page, *next;
76 list_for_each_entry_safe(page, next, freelist, lru) {
77 unsigned long pfn = page_to_pfn(page);
78 list_del(&page->lru);
79 __free_page(page);
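Lines 73-79 are the heart of release_freepages(), which hands isolated but unused free pages back to the buddy allocator. A reconstruction consistent with the matched fragments (the high_pfn bookkeeping reports the highest freed PFN back to the caller so cached scanner positions can be updated); the _safe iterator is required because each entry is unlinked while the list is walked:

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);

		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}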
90 struct page *page, *next;
93 list_for_each_entry_safe(page, next, list, lru) {
94 list_del(&page->lru);
96 order = page_private(page);
99 post_alloc_hook(page, order, __GFP_MOVABLE);
101 split_page(page, order);
104 list_add(&page->lru, &tmp_list);
105 page++;
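The fragments at lines 90-105 belong to split_map_pages(). Each isolated free page carries its buddy order in page_private() (stored at isolation time, see line 632 below); this helper runs the normal post-allocation hook and splits every high-order page into order-0 pages that the migration allocator can hand out one at a time. A reconstruction matching the fragments:

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		/* Order was stashed in page_private() at isolation time */
		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		/* Queue each resulting order-0 page individually */
		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}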
114 int PageMovable(struct page *page)
118 VM_BUG_ON_PAGE(!PageLocked(page), page);
119 if (!__PageMovable(page))
122 mapping = page_mapping(page);
130 void __SetPageMovable(struct page *page, struct address_space *mapping)
132 VM_BUG_ON_PAGE(!PageLocked(page), page);
133 VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
134 page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
138 void __ClearPageMovable(struct page *page)
140 VM_BUG_ON_PAGE(!PageLocked(page), page);
141 VM_BUG_ON_PAGE(!PageMovable(page), page);
144 * flag so that the VM can catch up with a page released by the driver after isolation.
147 page->mapping = (void *)((unsigned long)page->mapping &
156 * Compaction is deferred when compaction fails to result in a page
223 struct page *page)
228 return !get_pageblock_skip(page);
244 static bool pageblock_skip_persistent(struct page *page)
246 if (!PageCompound(page))
249 page = compound_head(page);
251 if (compound_order(page) >= pageblock_order)
261 struct page *page = pfn_to_online_page(pfn);
262 struct page *block_page;
263 struct page *end_page;
266 if (!page)
268 if (zone != page_zone(page))
270 if (pageblock_skip_persistent(page))
277 if (check_source && check_target && !get_pageblock_skip(page))
285 get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
293 page = block_page;
306 * free page or an LRU page in the block. One or the other condition
311 if (check_source && PageLRU(page)) {
312 clear_pageblock_skip(page);
316 if (check_target && PageBuddy(page)) {
317 clear_pageblock_skip(page);
322 page += (1 << PAGE_ALLOC_COSTLY_ORDER);
324 } while (page <= end_page);
331 * should be skipped for page isolation when the migrate and free page scanners
405 static bool test_and_set_skip(struct compact_control *cc, struct page *page,
417 skip = get_pageblock_skip(page);
419 set_pageblock_skip(page);
446 struct page *page, unsigned long pfn)
453 if (!page)
456 set_pageblock_skip(page);
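update_pageblock_skip() (fragments at lines 446-456) records two things when the free scanner gives up on a block: the per-pageblock skip bit, and a cached restart PFN in the zone so later compaction passes resume below the failed block. A reconstruction consistent with the fragments:

static void update_pageblock_skip(struct compact_control *cc,
				  struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}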
464 struct page *page)
469 static inline bool pageblock_skip_persistent(struct page *page)
475 struct page *page, unsigned long pfn)
483 static bool test_and_set_skip(struct compact_control *cc, struct page *page,
561 struct page *cursor;
576 struct page *page = cursor;
598 if (PageCompound(page)) {
599 const unsigned int order = compound_order(page);
608 if (!PageBuddy(page))
622 /* Recheck this is a buddy page under lock */
623 if (!PageBuddy(page))
627 /* Found a free page, will break it into order-0 pages */
628 order = buddy_order(page);
629 isolated = __isolate_free_page(page, order);
632 set_page_private(page, order);
636 list_add_tail(&page->lru, freelist);
642 /* Advance to the end of split page */
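The buddy handling inside isolate_freepages_block() (fragments at lines 598-636) first tests PageBuddy() locklessly, then repeats the test under zone->lock before pulling the whole buddy off the free list. Joined and abridged (failure paths elided; isolate_fail is the loop's failure label in the real function):

/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
	goto isolate_fail;

/* Found a free page, will break it into order-0 pages */
order = buddy_order(page);
isolated = __isolate_free_page(page, order);
if (!isolated)
	break;

/* split_map_pages() reads this order back via page_private() */
set_page_private(page, order);
list_add_tail(&page->lru, freelist);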
695 * Otherwise, the function returns one-past-the-last PFN of the isolated page
697 * a free page).
749 * page may span two pageblocks).
792 * first page that was not scanned (which may be both less, equal to or more
808 struct page *page = NULL, *valid_page = NULL;
860 * a compound or a high-order buddy page in the
882 page = pfn_to_page(low_pfn);
891 if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
895 valid_page = page;
899 * Skip if free. We read page order here without zone lock
904 if (PageBuddy(page)) {
905 unsigned long freepage_order = buddy_order_unsafe(page);
909 * a valid page order. Consider only values in the
920 * an allocation much larger than the huge page size (e.g. CMA).
925 if (PageCompound(page) && !cc->alloc_contig) {
926 const unsigned int order = compound_order(page);
936 * Skip any other type of page
938 if (!PageLRU(page)) {
943 if (unlikely(__PageMovable(page)) &&
944 !PageIsolated(page)) {
951 if (!isolate_movable_page(page, isolate_mode))
959 * Migration will fail if an anonymous page is pinned in memory,
963 if (!page_mapping(page) &&
964 page_count(page) > page_mapcount(page))
971 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
982 if (test_and_set_skip(cc, page, low_pfn))
987 if (!PageLRU(page))
995 if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
996 low_pfn += compound_nr(page) - 1;
1001 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1003 /* Try isolate the page */
1004 if (__isolate_lru_page(page, isolate_mode) != 0)
1007 /* The whole page is taken off the LRU; skip the tail pages. */
1008 if (PageCompound(page))
1009 low_pfn += compound_nr(page) - 1;
1012 del_page_from_lru_list(page, lruvec, page_lru(page));
1013 mod_node_page_state(page_pgdat(page),
1014 NR_ISOLATED_ANON + page_is_file_lru(page),
1015 thp_nr_pages(page));
1018 list_add(&page->lru, &cc->migratepages);
1019 cc->nr_migratepages += compound_nr(page);
1020 nr_isolated += compound_nr(page);
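The fragments at lines 808-1020 all come from isolate_migratepages_block(). After the earlier filters have rejected buddy pages, unsuitable compound pages, pinned anonymous pages (those with page_count() > page_mapcount() and no mapping, lines 959-964) and non-LRU pages without movable ops, the successful path takes the page off its LRU list, accounts it as isolated and queues it for migrate_pages(). Joined from the fragments at lines 1001-1020 (error handling elided):

lruvec = mem_cgroup_page_lruvec(page, pgdat);

/* Try isolate the page */
if (__isolate_lru_page(page, isolate_mode) != 0)
	goto isolate_fail;

/* The whole page is taken off the LRU; skip the tail pages. */
if (PageCompound(page))
	low_pfn += compound_nr(page) - 1;

del_page_from_lru_list(page, lruvec, page_lru(page));
mod_node_page_state(page_pgdat(page),
		    NR_ISOLATED_ANON + page_is_file_lru(page),
		    thp_nr_pages(page));

/* Successfully isolated */
list_add(&page->lru, &cc->migratepages);
cc->nr_migratepages += compound_nr(page);
nr_isolated += compound_nr(page);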
1042 * page anyway.
1107 * Otherwise, the function returns one-past-the-last PFN of the isolated page
1108 * (which may be greater than end_pfn if end fell in the middle of a THP page).
1150 struct page *page)
1154 if (pageblock_skip_persistent(page))
1160 block_mt = get_pageblock_migratetype(page);
1168 /* Returns true if the page is within a block suitable for migration to */
1170 struct page *page)
1172 /* If the page is a large free page, then disallow migration */
1173 if (PageBuddy(page)) {
1179 if (buddy_order_unsafe(page) >= pageblock_order)
1187 if (is_migrate_movable(get_pageblock_migratetype(page)))
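suitable_migration_target() (fragments at lines 1168-1187) decides whether the free scanner may take pages from a block. A reconstruction consistent with the fragments:

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
				      struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * The order is read without zone->lock; the only risk of a
		 * stale value is skipping a potentially suitable pageblock.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}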
1218 move_freelist_head(struct list_head *freelist, struct page *freepage)
1236 move_freelist_tail(struct list_head *freelist, struct page *freepage)
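The listing only matches the signatures of move_freelist_head() and move_freelist_tail(), which reorder a buddy free list so that recently scanned entries are not revisited by the next fast search. A simplified sketch of the intent (the real helpers splice an entire run of entries with list_cut/list_splice rather than moving a single entry):

static void move_freelist_head(struct list_head *freelist,
			       struct page *freepage)
{
	/* Simplified: make freepage the first candidate considered */
	list_move(&freepage->lru, freelist);
}

static void move_freelist_tail(struct list_head *freelist,
			       struct page *freepage)
{
	/* Simplified: make freepage the last candidate considered */
	list_move_tail(&freepage->lru, freelist);
}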
1251 struct page *page;
1265 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1266 if (!page)
1273 set_pageblock_skip(page);
1304 struct page *page = NULL;
1339 !page && order >= 0;
1343 struct page *freepage;
1367 page = freepage;
1383 if (!page && high_pfn) {
1384 page = pfn_to_page(high_pfn);
1387 freepage = page;
1393 /* Isolate the page if available */
1394 if (page) {
1395 if (__isolate_free_page(page, order)) {
1396 set_page_private(page, order);
1399 list_add_tail(&page->lru, &cc->freepages);
1404 page = NULL;
1418 if (!page) {
1427 page = pfn_to_page(highest);
1431 page = pageblock_pfn_to_page(min_pfn,
1447 if (!page)
1450 low_pfn = page_to_pfn(page);
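The fragments at lines 1304-1450 come from the fast free-page search: walk the MIGRATE_MOVABLE free lists from high order down, prefer a free page above the migrate scanner, and fall back to the highest candidate seen at that order. A hypothetical, abridged distillation (fast_find_target is an illustrative name; the real code is inline in fast_isolate_freepages() and also maintains scan limits, cached search order and freelist reordering, all elided here):

static unsigned long fast_find_target(struct compact_control *cc,
				      unsigned long low_pfn)
{
	struct page *page = NULL;
	unsigned long flags;
	int order;

	spin_lock_irqsave(&cc->zone->lock, flags);

	for (order = cc->order - 1; order >= 0 && !page; order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct page *freepage;
		unsigned long high_pfn = 0;	/* best fallback at this order */

		if (!area->nr_free)
			continue;

		list_for_each_entry_reverse(freepage,
				&area->free_list[MIGRATE_MOVABLE], lru) {
			unsigned long pfn = page_to_pfn(freepage);

			if (pfn >= low_pfn) {
				page = freepage; /* above the migrate scanner */
				break;
			}
			if (pfn > high_pfn)
				high_pfn = pfn;
		}

		/* Use a minimum pfn if a preferred one was not found */
		if (!page && high_pfn)
			page = pfn_to_page(high_pfn);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				list_add_tail(&page->lru, &cc->freepages);
			} else {
				page = NULL;	/* retry at a lower order */
			}
		}
	}

	spin_unlock_irqrestore(&cc->zone->lock, flags);

	return page ? page_to_pfn(page) : 0;
}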
1462 struct page *page;
1496 * and free page scanners meet or enough free pages are isolated.
1511 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1513 if (!page)
1517 if (!suitable_migration_target(cc, page))
1521 if (!isolation_suitable(cc, page))
1530 update_pageblock_skip(cc, page, block_start_pfn);
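Inside isolate_freepages() (fragments at lines 1462-1530), each candidate pageblock passes through a short filter sequence before any pages are isolated from it. Joined and abridged (zone is the function's local alias for cc->zone; stride handling omitted):

page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone);
if (!page)
	continue;		/* no valid page in this block */

/* Check the block is suitable for storing free pages */
if (!suitable_migration_target(cc, page))
	continue;

/* If isolation recently failed, do not retry */
if (!isolation_suitable(cc, page))
	continue;

/* Found a block suitable for isolating free pages from. */
nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
				      block_end_pfn, freelist,
				      stride, false);

/* Update the skip hint if the full pageblock was scanned */
if (isolate_start_pfn == block_end_pfn)
	update_pageblock_skip(cc, page, block_start_pfn);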
1576 static struct page *compaction_alloc(struct page *migratepage,
1580 struct page *freepage;
1589 freepage = list_entry(cc->freepages.next, struct page, lru);
1601 static void compaction_free(struct page *page, unsigned long data)
1605 list_add(&page->lru, &cc->freepages);
1711 struct page *freepage;
1756 * If fast scanning failed then use a cached entry for a page block
1776 struct page *page;
1820 page = pageblock_pfn_to_page(block_start_pfn,
1822 if (!page)
1833 !fast_find_block && !isolation_suitable(cc, page))
1844 if (!suitable_migration_source(cc, page)) {
2006 /* Direct compactor: Is a suitable page free? */
2012 /* Job done if page is free of the right migratetype */
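The two comment fragments at lines 2006-2012 mark the success check in __compact_finished(): direct compaction is done as soon as a free page of the requested order and migratetype exists. Abridged sketch (migratetype is cc->migratetype; the fallback/stealing checks of the real function are elided):

/* Direct compactor: Is a suitable page free? */
for (order = cc->order; order < MAX_ORDER; order++) {
	struct free_area *area = &cc->zone->free_area[order];

	/* Job done if page is free of the right migratetype */
	if (!free_area_empty(area, migratetype))
		return COMPACT_SUCCESS;
}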
2279 * Avoid multiple rescans which can happen if a page cannot be
2336 * We failed to migrate at least one page in the current
2367 /* Stop if a page has been captured */
2368 if (capc && capc->page) {
2406 struct page **capture)
2425 .page = NULL,
2431 * frees a page.
2443 * page pointer, otherwise an interrupt could free and capture a page
2447 *capture = READ_ONCE(capc.page);
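The fragments at lines 2406-2447 are the page-capture handshake in compact_zone_order(): a capture_control is published via current->capture_control so that a suitable page freed during compaction can be captured directly instead of going back to the buddy allocator, then hidden again before the result is read. Reconstructed and abridged:

struct capture_control capc = {
	.cc = &cc,
	.page = NULL,
};

/*
 * Make sure the structs are really initialized before we expose the
 * capture control, in case we are interrupted and the interrupt handler
 * frees a page.
 */
barrier();
WRITE_ONCE(current->capture_control, &capc);

ret = compact_zone(&cc, &capc);

/*
 * Make sure we hide capture control first before we read the captured
 * page pointer, otherwise an interrupt could free and capture a page
 * and we would leak it.
 */
WRITE_ONCE(current->capture_control, NULL);
*capture = READ_ONCE(capc.page);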
2461 * @capture: Pointer where a free page created by compaction, if any, will be stored
2463 * This is the main entry point for direct page compaction.
2467 enum compact_priority prio, struct page **capture)
2691 * With no special task, compact all zones so that a page of requested