Lines matching refs:page (reference search over the kernel's memory hot(un)plug code, mm/memory_hotplug.c; each fragment below keeps its line number in that file)

29 #include <linux/page-isolation.h>
47 * online_page_callback contains a pointer to the current page onlining function.
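
The callback defaults to generic_online_page(); a driver can intercept onlining of hot-added pages by swapping it, as the Hyper-V and Xen balloon drivers do. A minimal sketch, assuming the 5.x-era API: my_online_page and my_driver_init/exit are hypothetical names, while set_online_page_callback(), restore_online_page_callback() and generic_online_page() are the real entry points.

    #include <linux/memory_hotplug.h>
    #include <linux/mm.h>

    /* Hypothetical driver callback: runs once per chunk of newly
     * onlined memory, in place of generic_online_page(). */
    static void my_online_page(struct page *page, unsigned int order)
    {
            /* Inspect or claim the chunk here; otherwise hand it to
             * the default, which frees it to the buddy allocator. */
            generic_online_page(page, order);
    }

    static int __init my_driver_init(void)
    {
            return set_online_page_callback(&my_online_page);
    }

    static void __exit my_driver_exit(void)
    {
            restore_online_page_callback(&my_online_page);
    }
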
145 void get_page_bootmem(unsigned long info, struct page *page,
148 page->freelist = (void *)type;
149 SetPagePrivate(page);
150 set_page_private(page, info);
151 page_ref_inc(page);
154 void put_page_bootmem(struct page *page)
158 type = (unsigned long) page->freelist;
162 if (page_ref_dec_return(page) == 1) {
163 page->freelist = NULL;
164 ClearPagePrivate(page);
165 set_page_private(page, 0);
166 INIT_LIST_HEAD(&page->lru);
167 free_reserved_page(page);
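
Taken together, lines 145-167 are the bootmem-info refcount protocol: get_page_bootmem() stamps a bootmem-allocated page with a type (kept in page->freelist in this kernel) plus an info word (section or node number) and takes a reference; put_page_bootmem() strips the marking and returns the page via free_reserved_page() once the last reference drops. A hedged sketch of the release side, condensed from the non-vmemmap free_map_bootmem() in mm/sparse.c (function name and loop bounds illustrative):

    /* Drop the references register_page_bootmem_info_section() took
     * on a section's memmap; the last put frees each page. */
    static void release_section_memmap(struct page *memmap)
    {
            unsigned long i, nr_pages;
            struct page *page = virt_to_page(memmap);

            nr_pages = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION)
                    >> PAGE_SHIFT;
            for (i = 0; i < nr_pages; i++, page++)
                    put_page_bootmem(page);
    }
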
177 struct page *page, *memmap;
187 * Get page for the memmap's phys address
190 page = virt_to_page(memmap);
191 mapsize = sizeof(struct page) * PAGES_PER_SECTION;
194 /* remember memmap's page */
195 for (i = 0; i < mapsize; i++, page++)
196 get_page_bootmem(section_nr, page, SECTION_INFO);
199 page = virt_to_page(usage);
203 for (i = 0; i < mapsize; i++, page++)
204 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
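
The fragments at lines 177-204 belong to the !CONFIG_SPARSEMEM_VMEMMAP variant of register_page_bootmem_info_section(). A hedged reconstruction of the whole function as this kernel has it (details may drift between versions): every page backing the section's memmap is tagged SECTION_INFO, while the pages backing ms->usage get MIX_SECTION_INFO because the usage map can share pages with other data.

    static void register_page_bootmem_info_section(unsigned long start_pfn)
    {
            unsigned long mapsize, section_nr, i;
            struct mem_section *ms;
            struct page *page, *memmap;
            struct mem_section_usage *usage;

            section_nr = pfn_to_section_nr(start_pfn);
            ms = __nr_to_section(section_nr);

            /* Get page for the memmap's phys address */
            memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
            page = virt_to_page(memmap);
            mapsize = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION)
                    >> PAGE_SHIFT;

            /* remember memmap's page */
            for (i = 0; i < mapsize; i++, page++)
                    get_page_bootmem(section_nr, page, SECTION_INFO);

            usage = ms->usage;
            page = virt_to_page(usage);
            mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

            for (i = 0; i < mapsize; i++, page++)
                    get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
    }
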
212 struct page *page, *memmap;
223 page = virt_to_page(usage);
227 for (i = 0; i < mapsize; i++, page++)
228 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
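
Lines 212-228 are the CONFIG_SPARSEMEM_VMEMMAP counterpart: the memmap itself is registered arch-side through register_page_bootmem_memmap(), so only the usage map is tagged here, hence MIX_SECTION_INFO alone. Hedged reconstruction:

    static void register_page_bootmem_info_section(unsigned long start_pfn)
    {
            unsigned long mapsize, section_nr, i;
            struct mem_section *ms;
            struct page *page, *memmap;
            struct mem_section_usage *usage;

            section_nr = pfn_to_section_nr(start_pfn);
            ms = __nr_to_section(section_nr);

            memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
            register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

            usage = ms->usage;
            page = virt_to_page(usage);
            mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

            for (i = 0; i < mapsize; i++, page++)
                    get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
    }
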
236 struct page *page;
239 page = virt_to_page(pgdat);
241 for (i = 0; i < nr_pages; i++, page++)
242 get_page_bootmem(node, page, NODE_INFO);
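
Lines 236-242 sit in register_page_bootmem_info_node(), which first marks the pages holding the pgdat itself (NODE_INFO) and then walks the node's sections. A condensed, hedged reconstruction:

    void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
    {
            unsigned long i, pfn, end_pfn, nr_pages;
            int node = pgdat->node_id;
            struct page *page;

            nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
            page = virt_to_page(pgdat);

            for (i = 0; i < nr_pages; i++, page++)
                    get_page_bootmem(node, page, NODE_INFO);

            /* Register every present section that really belongs to
             * this node (some platforms alias a pfn onto several). */
            for (pfn = pgdat->node_start_pfn, end_pfn = pgdat_end_pfn(pgdat);
                 pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                    if (pfn_valid(pfn) && early_pfn_to_nid(pfn) == node)
                            register_page_bootmem_info_section(pfn);
            }
    }
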
487 sizeof(struct page) * cur_nr_pages);
526 * @altmap: alternative device page map or %NULL if default memmap is used
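
Line 526 is from the kernel-doc of __add_pages(). An altmap asks __add_pages() to carve the memmap for the new range out of the range itself rather than out of regular memory, which is how ZONE_DEVICE/device-dax avoids spending RAM on struct pages. A hedged sketch: start and size are hypothetical locals, field use per this kernel's struct vmem_altmap and struct mhp_params.

    /* Back the new range's memmap with pages from the range itself. */
    struct vmem_altmap altmap = {
            .base_pfn = PHYS_PFN(start),
            .free     = PHYS_PFN(size),  /* pages usable for the memmap */
    };
    struct mhp_params params = {
            .altmap = &altmap,
            .pgprot = PAGE_KERNEL,
    };
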
593 void generic_online_page(struct page *page, unsigned int order)
596 * Freeing the page with debug_pagealloc enabled will try to unmap it,
598 * case in page freeing fast path.
601 kernel_map_pages(page, 1 << order, 1);
602 __free_pages_core(page, order);
605 if (PageHighMem(page))
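
Lines 593-605 are generic_online_page(): map the chunk first so debug_pagealloc does not trip over unmapped pages in the freeing path, free it to the buddy via __free_pages_core(), then bump totalram (and totalhigh for highmem pages). The caller loop, condensed from online_pages_range() as a hedged sketch:

    static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
    {
            const unsigned long end_pfn = start_pfn + nr_pages;
            unsigned long pfn;

            /* Hand the range to the current callback in MAX_ORDER - 1
             * aligned chunks; the callback may hold pages back rather
             * than exposing them all to the buddy. */
            for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES)
                    (*online_page_callback)(pfn_to_page(pfn), MAX_ORDER - 1);

            /* mark all involved sections as online */
            online_mem_sections(start_pfn, end_pfn);
    }
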
820 * This means the page allocator ignores this zone.
1198 struct page *page;
1218 page = pfn_to_page(pfn + i);
1219 if (zone && page_zone(page) != zone)
1221 zone = page_zone(page);
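
Lines 1198-1221 come from test_pages_in_a_zone(): offlining refuses ranges that straddle zones, so the walk remembers the first zone seen and fails on any mismatch. A simplified, hedged sketch (the real function walks at section/MAX_ORDER granularity and also checks zone_spans_pfn(); range_zone is an illustrative name):

    static struct zone *range_zone(unsigned long start_pfn, unsigned long end_pfn)
    {
            struct zone *zone = NULL;
            struct page *page;
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    if (!pfn_valid(pfn))
                            continue;            /* skip memory holes */
                    page = pfn_to_page(pfn);
                    if (zone && page_zone(page) != zone)
                            return NULL;         /* range spans two zones */
                    zone = page_zone(page);
            }
            return zone;                         /* NULL if all holes */
    }
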
1235 * 0 in case a movable page is found and movable_pfn was updated.
1236 * -ENOENT in case no movable page was found.
1237 * -EBUSY in case a definitely unmovable page was found.
1245 struct page *page, *head;
1250 page = pfn_to_page(pfn);
1251 if (PageLRU(page))
1253 if (__PageMovable(page))
1262 if (PageOffline(page) && page_count(page))
1265 if (!PageHuge(page))
1267 head = compound_head(page);
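
Lines 1235-1267 are scan_movable_pages() and its kernel-doc. LRU and __PageMovable pages count as movable; a PageOffline page that still holds references after MEM_GOING_OFFLINE proves the range can never be offlined, hence -EBUSY; huge pages are judged by their head and skipped as a unit. A condensed, hedged reconstruction (page_huge_active() is this kernel's test for a migratable hugepage):

    static int scan_movable_pages(unsigned long start, unsigned long end,
                                  unsigned long *movable_pfn)
    {
            unsigned long pfn;

            for (pfn = start; pfn < end; pfn++) {
                    struct page *page, *head;

                    if (!pfn_valid(pfn))
                            continue;
                    page = pfn_to_page(pfn);
                    if (PageLRU(page) || __PageMovable(page))
                            goto found;

                    /* Referenced PageOffline() pages are definitely
                     * unmovable: no point scanning any further. */
                    if (PageOffline(page) && page_count(page))
                            return -EBUSY;

                    if (!PageHuge(page))
                            continue;
                    head = compound_head(page);
                    if (page_huge_active(head))
                            goto found;
                    /* skip the rest of this hugepage */
                    pfn += compound_nr(head) - (page - head) - 1;
            }
            return -ENOENT;
    found:
            *movable_pfn = pfn;
            return 0;
    }
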
1283 struct page *page, *head;
1292 page = pfn_to_page(pfn);
1293 head = compound_head(page);
1295 if (PageHuge(page)) {
1299 } else if (PageTransHuge(page))
1300 pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
1305 * first place. Still try to unmap such a page in case it is still mapped
1309 if (PageHWPoison(page)) {
1310 if (WARN_ON(PageLRU(page)))
1311 isolate_lru_page(page);
1312 if (page_mapped(page))
1313 try_to_unmap(page, TTU_IGNORE_MLOCK);
1317 if (!get_page_unless_zero(page))
1323 if (PageLRU(page))
1324 ret = isolate_lru_page(page);
1326 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1328 list_add_tail(&page->lru, &source);
1329 if (!__PageMovable(page))
1330 inc_node_page_state(page, NR_ISOLATED_ANON +
1331 page_is_file_lru(page));
1336 dump_page(page, "isolation failed");
1339 put_page(page);
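
Lines 1283-1339 are the isolation half of do_migrate_range(). HWPoison pages are only unmapped, never migrated; everything else is pinned with get_page_unless_zero(), isolated from the LRU (or via isolate_movable_page() for non-LRU movable pages), queued on the local 'source' list, and the pin dropped. A hedged reconstruction of the core of that loop:

    if (!get_page_unless_zero(page))
            continue;               /* free page: nothing to migrate */
    if (PageLRU(page))
            ret = isolate_lru_page(page);
    else
            ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
    if (!ret) {                     /* success: queue for migration */
            list_add_tail(&page->lru, &source);
            if (!__PageMovable(page))
                    inc_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_lru(page));
    } else {
            pr_warn("failed to isolate pfn %lx\n", pfn);
            dump_page(page, "isolation failed");
    }
    put_page(page);                 /* drop the reference taken above */
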
1350 * we can use the nid of the first page for all the others.
1352 mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
1365 list_for_each_entry(page, &source, lru) {
1368 page_to_pfn(page), ret);
1369 dump_page(page, "migration failure");
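
Lines 1350-1369 are the migration half: the range was already verified to sit in one zone, so the nid of the first isolated page serves as the target for them all. A hedged sketch of the tail of do_migrate_range() ('source' and 'page' are the enclosing function's locals):

    struct migration_target_control mtc = {
            .nid = NUMA_NO_NODE,
            .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
    };
    int ret;

    mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
    ret = migrate_pages(&source, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
    if (ret) {
            list_for_each_entry(page, &source, lru) {
                    pr_warn("migrating pfn %lx failed ret:%d\n",
                            page_to_pfn(page), ret);
                    dump_page(page, "migration failure");
            }
            putback_movable_pages(&source);
    }
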
1548 reason = "unmovable page";
1808 struct page *page;
1816 page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
1817 if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
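
Lines 1808-1817 are from try_offline_memory_block(): before offline_and_remove_memory() tears a block down, it senses from the zone of the first online page how the block had been onlined, so that a failed removal can re-online it under the same policy. A hedged reconstruction of that probe:

    uint8_t online_type = MMOP_ONLINE_KERNEL;
    struct page *page;

    /* Sense the online_type via the zone of the memmap; offlining
     * rejects blocks spanning zones, so one page is representative. */
    page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
    if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
            online_type = MMOP_ONLINE_MOVABLE;
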