Lines Matching defs:page

18 #include <linux/page-isolation.h>
115 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
119 "Node not in page flags");
123 "Last cpupid not in page flags");
564 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
567 mm_zero_struct_page(page);
568 set_page_links(page, zone, nid, pfn);
569 init_page_count(page);
570 page_mapcount_reset(page);
571 page_cpupid_reset_last(page);
572 page_kasan_tag_reset(page);
574 INIT_LIST_HEAD(&page->lru);
578 set_page_address(page, __va(pfn << PAGE_SHIFT));
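
Read together, the matches at 564-578 trace the whole per-page initialization sequence. A condensed sketch of that sequence follows; the WANT_PAGE_VIRTUAL/highmem guard around set_page_address() is an assumption filled in around the matched lines, not something this listing shows:

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
					 unsigned long zone, int nid)
{
	mm_zero_struct_page(page);		/* clear the whole struct page        */
	set_page_links(page, zone, nid, pfn);	/* encode zone/node/section in flags  */
	init_page_count(page);			/* refcount starts at 1               */
	page_mapcount_reset(page);		/* _mapcount starts at -1             */
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);

#ifdef WANT_PAGE_VIRTUAL
	/* Assumed guard: only non-highmem pages get a kernel virtual address. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}
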
658 /* Returns true if the struct page for the pfn is initialised */
746 * sent to the buddy page allocator.
756 struct page *page = pfn_to_page(start_pfn);
761 INIT_LIST_HEAD(&page->lru);
765 * page is not visible yet so nobody should
768 __SetPageReserved(page);
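
The matches at 746-768 come from the path that marks memblock-reserved ranges so they are never handed to the buddy allocator. A minimal sketch of that walk, with the pfn_valid() check and the loop bounds reconstructed as assumptions and the deferred-init handling elided:

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/*
		 * Non-atomic ops are fine here: the struct page is not
		 * visible yet, so nobody else can touch it.
		 */
		INIT_LIST_HEAD(&page->lru);
		__SetPageReserved(page);
	}
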
810 * - zone and node links point to zone and node that span the page if the
854 struct page *page;
864 * ZONE_DEVICE page initialization until after we have released
891 page = pfn_to_page(pfn);
892 __init_single_page(page, pfn, zone, nid);
894 __SetPageReserved(page);
902 set_pageblock_migratetype(page, migratetype);
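
The matches at 854-902 sit in the main memmap initialization loop: every PFN in the range gets __init_single_page(). The hotplug-context reservation and the once-per-pageblock migratetype stamp below are reconstructed as assumptions around the matched lines:

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);

		/* Assumed: hotplugged ranges stay reserved until they are onlined. */
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/* Stamp the migratetype once per pageblock, not once per page. */
		if (pageblock_aligned(pfn)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
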
971 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
976 __init_single_page(page, pfn, zone_idx, nid);
979 * Mark page reserved as it will need to wait for onlining
985 __SetPageReserved(page);
989 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
992 page->pgmap = pgmap;
993 page->zone_device_data = NULL;
1006 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1011 * ZONE_DEVICE pages are released directly to the driver page allocator
1012 * which will set the page count to 1 when allocating the page.
1016 set_page_count(page, 0);
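
The matches at 971-1016 show how a ZONE_DEVICE page is initialized: it starts from __init_single_page() and then diverges. A condensed sketch; the guards the real code places around the migratetype stamp and the refcount drop are simplified away here:

static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{
	__init_single_page(page, pfn, zone_idx, nid);

	/* Reserved until the onlining phase fully associates it with a zone. */
	__SetPageReserved(page);

	/* The ->lru space is reused for the pgmap back pointer and driver data. */
	page->pgmap = pgmap;
	page->zone_device_data = NULL;

	set_pageblock_migratetype(page, MIGRATE_MOVABLE);

	/*
	 * The driver page allocator sets the count to 1 on allocation,
	 * so free device pages sit at a count of 0.
	 */
	set_page_count(page, 0);
}
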
1020 * With compound page geometry and when struct pages are stored in ram most
1033 return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
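
For scale: assuming PAGE_SIZE is 4 KiB, sizeof(struct page) is 64 bytes and VMEMMAP_RESERVE_NR is 2 (typical values, not something this listing shows), the expression at 1033 works out to 2 * (4096 / 64) = 128 struct pages to initialize per compound page, independent of how large the compound page itself is.
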
1036 static void __ref memmap_init_compound(struct page *head,
1047 struct page *page = pfn_to_page(pfn);
1049 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1051 set_page_count(page, 0);
1054 * The first tail page stores important compound page info.
1055 * Call prep_compound_head() after the first tail page has
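
The matches at 1036-1055 belong to the tail-page loop for compound ZONE_DEVICE pages. A condensed sketch of that loop; the head-flag setup and the order coming from pgmap->vmemmap_shift are reconstructed as assumptions:

	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < head_pfn + nr_pages; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);

		/*
		 * The first tail page stores important compound page info,
		 * so prep_compound_head() only runs once that tail page has
		 * been initialized, to avoid overwriting it.
		 */
		if (pfn == head_pfn + 1)
			prep_compound_head(head, pgmap->vmemmap_shift);
	}
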
1090 struct page *page = pfn_to_page(pfn);
1092 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1097 memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1163 * absent_pages_in_range - Return number of page frames in holes within a range
1175 /* Return the number of page frames in holes in a zone on a node */
1320 * populated regions may not be naturally aligned on page boundary.
1327 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
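
For scale, the expression at 1327 means the memmap costs sizeof(struct page) bytes per page of spanned memory: assuming a 64-byte struct page and 4 KiB pages, that is 64/4096, about 1.6%, or roughly 16 MiB of struct pages per 1 GiB of RAM (262144 pages * 64 bytes).
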
1480 * Assume the largest contiguous order of interest is a huge page.
1655 struct page *map;
1664 size = (end - start) * sizeof(struct page);
1691 * get_pfn_range_for_nid - Return the start and end page frames for a node
1696 * It returns the start and end page frame of a node based on information
1796 * Using the page ranges provided by memblock_set_node(), the size of each
1978 struct page *page;
1984 page = pfn_to_page(pfn);
1989 set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1990 __free_pages_core(page, MAX_ORDER);
1997 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1999 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2000 __free_pages_core(page, 0);
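
The matches at 1978-2000 are the two paths of the deferred free: a whole, naturally aligned MAX_ORDER block is stamped per pageblock and freed in a single call, anything else falls back to order-0 frees. A condensed sketch, with the alignment test on the fast path reconstructed as an assumption:

	page = pfn_to_page(pfn);

	/* Whole, naturally aligned max-order chunk: one buddy free call. */
	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
		__free_pages_core(page, MAX_ORDER);
		return;
	}

	/* Otherwise free page by page, stamping each pageblock boundary. */
	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if (pageblock_aligned(pfn))
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
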
2015 * Returns true if page needs to be initialized or freed to buddy allocator.
2052 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
2063 struct page *page = NULL;
2067 page = NULL;
2069 } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2070 page = pfn_to_page(pfn);
2072 page++;
2074 __init_single_page(page, pfn, zid, nid);
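
The matches at 2052-2074 show the lookup-avoidance trick spelled out in the comment at 2052: pfn_to_page() only runs when the walk crosses a MAX_ORDER-aligned boundary, or after an invalid PFN has reset the pointer; otherwise the struct page pointer is simply incremented, since the memmap is contiguous within a max-order block. A sketch of that loop; the deferred_pfn_valid() helper name (its doc comment appears to be the match at 2015) is filled in from context rather than shown by this search:

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			page = NULL;			/* force a fresh lookup next time   */
			continue;
		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
			page = pfn_to_page(pfn);	/* lookup only at block boundaries  */
		} else {
			page++;				/* contiguous within the block      */
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
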
2112 * struct page, then free to buddy allocator, because while we are
2114 * page in __free_one_page()).
2117 * broken along max page order boundaries. This way we will not cause
2118 * any issues with the buddy page computation.
2129 /* First we loop through and initialize the page values */
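
The comments at 2112-2129 carry the key ordering rule: within one max-order block, every struct page must be initialized before any of them is freed, because __free_one_page() reads the buddy's struct page while merging. A deliberately simplified sketch of that two-pass shape; init_block() and free_block() are hypothetical stand-ins, not functions from this file:

/* Hypothetical two-pass helper illustrating the init-then-free ordering. */
static unsigned long init_then_free(unsigned long spfn, unsigned long epfn)
{
	unsigned long nr_pages;

	/* Pass 1: make every struct page in the block valid. */
	nr_pages = init_block(spfn, epfn);	/* hypothetical */

	/* Pass 2: only now hand the pages to the buddy allocator. */
	free_block(spfn, epfn);			/* hypothetical */

	return nr_pages;
}
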
2223 * pre-grown prior to start of deferred page initialization.
2273 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2346 void __init init_cma_reserved_pageblock(struct page *page)
2349 struct page *p = page;
2356 set_pageblock_migratetype(page, MIGRATE_CMA);
2357 set_page_refcounted(page);
2358 __free_pages(page, pageblock_order);
2360 adjust_managed_page_count(page, pageblock_nr_pages);
2361 page_zone(page)->cma_pages += pageblock_nr_pages;
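
The matches at 2346-2361 cover the hand-over of a CMA-reserved pageblock to the buddy allocator. A condensed sketch; the per-page loop clearing PG_reserved is reconstructed as an assumption around the matched lines:

void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	/* Assumed loop: drop the bootmem reservation on every page in the block. */
	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);
	set_page_refcounted(page);
	__free_pages(page, pageblock_order);	/* whole pageblock in one free */

	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}
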
2405 * on-demand struct page initialization.
2424 /* Initialize page ext after all struct pages are initialized. */
2582 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2593 if (!kmsan_memblock_free_pages(page, order)) {
2597 __free_pages_core(page, order);
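
The matches at 2582-2597 are the early free path for memblock memory: KMSAN may claim a freed range for its metadata, and only ranges it declines reach the buddy allocator. A condensed sketch, with the deferred-init early return elided:

void __init memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order)
{
	if (!kmsan_memblock_free_pages(page, order)) {
		/* KMSAN will take care of these pages. */
		return;
	}
	__free_pages_core(page, order);
}
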
2638 * Page poisoning is debug page alloc for some arches. If
2687 * Any page debugging or hardening option also enables sanity checking