Lines Matching refs:page
21 * struct vmemmap_remap_walk - walk vmemmap page table
25 * @reuse_page: the page which is reused for the tail vmemmap pages.
26 * @reuse_addr: the virtual address of the @reuse_page page.
34 struct page *reuse_page;
44 struct page *head;
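
These first matches appear to come from the kernel's mm/hugetlb_vmemmap.c (HugeTLB Vmemmap Optimization, HVO): lines 21-34 are the walker's control structure, and line 44's local head page looks like it belongs to the helper that splits a vmemmap huge PMD before the PTE-level walk. A hedged sketch of the structure the two matched fields sit in; the remaining fields (callback pointer, walked-PTE counter, page list) are assumptions based on recent kernels:

struct vmemmap_remap_walk {
        /* callback applied to each walked PTE (assumed field) */
        void                    (*remap_pte)(pte_t *pte, unsigned long addr,
                                             struct vmemmap_remap_walk *walk);
        unsigned long           nr_walked;      /* PTEs visited so far (assumed field) */
        struct page             *reuse_page;    /* page reused for the tail vmemmap pages */
        unsigned long           reuse_addr;     /* virtual address of @reuse_page */
        struct list_head        *vmemmap_pages; /* pages to be freed or repopulated */
};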
206 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
211 static inline void free_vmemmap_page(struct page *page)
213 if (PageReserved(page))
214 free_bootmem_page(page);
216 __free_page(page);
222 struct page *page, *next;
224 list_for_each_entry_safe(page, next, list, lru)
225 free_vmemmap_page(page);
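
Lines 206-225 cover the freeing side: a vmemmap page may have been handed out by memblock at boot (and is therefore PageReserved) or by the buddy allocator, and each origin needs the matching free routine. A minimal reconstruction built around the matched lines; exact bodies may differ between kernel versions:

/* Free a vmemmap page, whichever allocator it originally came from. */
static inline void free_vmemmap_page(struct page *page)
{
        if (PageReserved(page))
                free_bootmem_page(page);        /* boot-time memblock allocation */
        else
                __free_page(page);              /* ordinary buddy-allocator page */
}

/* Free a list of vmemmap pages linked through page->lru. */
static void free_vmemmap_page_list(struct list_head *list)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, list, lru)
                free_vmemmap_page(page);
}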
236 struct page *page = pte_page(ptep_get(pte));
239 /* Remapping the head page requires r/w */
245 * Makes sure that preceding stores to the page contents from
253 list_add_tail(&page->lru, walk->vmemmap_pages);
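
Lines 236-253 are from the remap callback that performs the actual optimization: every PTE covering a hugepage's vmemmap is pointed at the single reuse page (read-only for tail pages, read-write for the head), and the old backing page is queued on walk->vmemmap_pages to be freed afterwards. A hedged sketch of that callback; the barrier and pgprot handling follow the matched comments, the rest is an assumption about the surrounding code:

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
                              struct vmemmap_remap_walk *walk)
{
        /* Remap tail pages read-only so stray writes to them fault. */
        pgprot_t pgprot = PAGE_KERNEL_RO;
        struct page *page = pte_page(ptep_get(pte));
        pte_t entry;

        /* Remapping the head page requires r/w */
        if (unlikely(addr == walk->reuse_addr)) {
                pgprot = PAGE_KERNEL;
                list_del(&walk->reuse_page->lru);

                /*
                 * Makes sure that preceding stores to the page contents
                 * become visible before the new PTE is installed.
                 */
                smp_wmb();
        }

        entry = mk_pte(walk->reuse_page, pgprot);
        list_add_tail(&page->lru, walk->vmemmap_pages);
        set_pte_at(&init_mm, addr, pte, entry);
}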
258 * How many struct page structs need to be reset. When we reuse the head
259 * struct page, the special metadata (e.g. page->flags or page->mapping)
260 * cannot be copied to the tail struct page structs. The invalid value will be
262 * of "corrupted mapping in tail page", we need to reset at least 3 (one
263 * head struct page struct and two tail struct page structs) struct page
268 static inline void reset_struct_pages(struct page *start)
270 struct page *from = start + NR_RESET_STRUCT_PAGE;
272 BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
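
The comment above explains why NR_RESET_STRUCT_PAGE is 3: when the head page is restored by copying the reuse page, head-only metadata (page->flags, page->mapping) must not survive in the first tail struct pages, or free_tail_page_prepare() would report a corrupted mapping. A hedged sketch of the reset, which copies known-clean struct pages from later in the same vmemmap page over the first NR_RESET_STRUCT_PAGE entries; the BUILD_BUG_ON guarantees source and destination both fit in one page:

#define NR_RESET_STRUCT_PAGE    3

static inline void reset_struct_pages(struct page *start)
{
        struct page *from = start + NR_RESET_STRUCT_PAGE;

        BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
        memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}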
280 struct page *page;
285 page = list_first_entry(walk->vmemmap_pages, struct page, lru);
286 list_del(&page->lru);
287 to = page_to_virt(page);
292 * Makes sure that preceding stores to the page contents become visible
296 set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
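
Lines 280-296 belong to the restore callback, the inverse of vmemmap_remap_pte: take a freshly allocated page off walk->vmemmap_pages, copy the shared reuse page's contents into it, reset the leading struct pages, and only then point the PTE at the new page. A hedged reconstruction:

static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
                                struct vmemmap_remap_walk *walk)
{
        pgprot_t pgprot = PAGE_KERNEL;
        struct page *page;
        void *to;

        BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);

        page = list_first_entry(walk->vmemmap_pages, struct page, lru);
        list_del(&page->lru);
        to = page_to_virt(page);
        copy_page(to, (void *)walk->reuse_addr);
        reset_struct_pages(to);

        /*
         * Makes sure that preceding stores to the page contents become visible
         * before the set_pte_at() write.
         */
        smp_wmb();
        set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}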
301 * to the page which @reuse is mapped to, then free vmemmap
321 int nid = page_to_nid((struct page *)start);
326 * Allocate a new head vmemmap page to avoid breaking a contiguous
327 * block of struct page memory when freeing it back to the page allocator
329 * struct page backing memory to be kept contiguous and allow for
331 * mapped head page should it fail to allocate.
342 * the routine of vmemmap page table walking has the following rules
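
Lines 301-342 are from the free-direction driver (vmemmap_remap_free() in recent kernels). The comment around line 326 describes allocating a brand-new head vmemmap page so that the old, likely contiguous block of struct page memory can be freed back whole, with a fallback to the currently mapped head page if the allocation fails; line 342 introduces the walking rules that require the reuse address to be the first page of the walked range. A hedged, abridged sketch of that step (names outside the matched lines are assumptions, and error handling is omitted):

static int vmemmap_remap_free(unsigned long start, unsigned long end,
                              unsigned long reuse)
{
        int nid = page_to_nid((struct page *)start);
        gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN;
        LIST_HEAD(vmemmap_pages);
        struct vmemmap_remap_walk walk = {
                .remap_pte      = vmemmap_remap_pte,
                .reuse_addr     = reuse,
                .vmemmap_pages  = &vmemmap_pages,
        };

        /*
         * Try to replace the head vmemmap page so the old block of struct
         * page memory stays contiguous when freed; fall back to the
         * currently mapped head page if the allocation fails.
         */
        walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
        if (walk.reuse_page) {
                copy_page(page_to_virt(walk.reuse_page), (void *)walk.reuse_addr);
                list_add(&walk.reuse_page->lru, &vmemmap_pages);
        }

        /* ... walk [start, end) and remap every PTE to walk.reuse_page ... */

        free_vmemmap_page_list(&vmemmap_pages);
        return 0;
}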
385 int nid = page_to_nid((struct page *)start);
386 struct page *page, *next;
389 page = alloc_pages_node(nid, gfp_mask, 0);
390 if (!page)
392 list_add_tail(&page->lru, list);
397 list_for_each_entry_safe(page, next, list, lru)
398 __free_page(page);
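
Lines 385-398 show the allocation path used when the discarded vmemmap pages must be brought back: one order-0 page per vmemmap page, allocated on the node that backs the struct pages, with full unwinding if any allocation fails. A hedged reconstruction (whether gfp_mask is computed locally or passed in varies by version; it is shown here as a local):

static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
                                   struct list_head *list)
{
        gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE;
        unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
        int nid = page_to_nid((struct page *)start);
        struct page *page, *next;

        while (nr_pages--) {
                page = alloc_pages_node(nid, gfp_mask, 0);
                if (!page)
                        goto out;
                list_add_tail(&page->lru, list);
        }

        return 0;
out:
        list_for_each_entry_safe(page, next, list, lru)
                __free_page(page);
        return -ENOMEM;
}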
404 * to the page which is from the @vmemmap_pages
448 * @head: the head page whose vmemmap pages will be restored.
453 int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
469 * the range is mapped to the page which @vmemmap_reuse is mapped to.
470 * When a HugeTLB page is freed to the buddy allocator, previously
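
hugetlb_vmemmap_restore() (line 453) is the entry point that undoes the optimization before a HugeTLB page can be dissolved or returned to the buddy allocator. A hedged sketch of its shape: bail out for pages that were never optimized, derive the vmemmap range from the hstate, and repopulate it with real pages via the allocation/restore walk (helper names beyond the matched lines are assumptions based on recent kernels):

int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
        int ret;
        unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
        unsigned long vmemmap_reuse;

        if (!HPageVmemmapOptimized(head))
                return 0;

        vmemmap_end    = vmemmap_start + hugetlb_vmemmap_size(h);
        vmemmap_reuse  = vmemmap_start;
        vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

        /*
         * The range [vmemmap_start, vmemmap_end) is currently backed by the
         * single page that vmemmap_reuse maps; allocate real vmemmap pages
         * and remap the range onto them again.
         */
        ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
        if (!ret)
                ClearHPageVmemmapOptimized(head);

        return ret;
}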
483 static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
493 struct page *vmemmap_page;
497 * Only the vmemmap page's vmemmap page can be self-hosted.
498 * Walk the page tables to find the backing page of the
499 * vmemmap page.
518 * page's vmemmap page if it is marked as VmemmapSelfHosted is
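
Lines 483-518 gate the optimization. In the memory-hotplug memmap_on_memory mode, a hot-added range hosts its own vmemmap inside itself; such self-hosted vmemmap pages must not be optimized away, so the check walks the kernel page tables to find the page backing @head's vmemmap entry and tests its VmemmapSelfHosted flag. A hedged sketch; the exact guards and helpers vary between kernel versions:

static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
        if (!READ_ONCE(vmemmap_optimize_enabled))
                return false;

        if (!hugetlb_vmemmap_optimizable(h))
                return false;

        if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
                pmd_t *pmdp, pmd;
                struct page *vmemmap_page;
                unsigned long vaddr = (unsigned long)head;

                /*
                 * Only the vmemmap page's vmemmap page can be self-hosted.
                 * Walk the page tables to find the backing page of the
                 * vmemmap page.
                 */
                pmdp = pmd_off_k(vaddr);
                pmd = READ_ONCE(*pmdp);
                if (pmd_leaf(pmd))
                        vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
                else
                        vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));

                /*
                 * Checking just this one vmemmap page is sufficient because
                 * of the alignment guarantees in the memmap_on_memory case.
                 */
                if (PageVmemmapSelfHosted(vmemmap_page))
                        return false;
        }

        return true;
}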
539 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
541 * @head: the head page whose vmemmap pages will be optimized.
548 void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
564 * to the page which @vmemmap_reuse is mapped to, then free the pages
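
hugetlb_vmemmap_optimize() (line 548) is the forward direction: once a hugepage passes vmemmap_should_optimize(), compute its vmemmap range, keep the first vmemmap page as the shared reuse page, free the rest through the remap walk, and mark the head page on success. A hedged sketch (static-key bookkeeping and error paths omitted):

void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
        unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
        unsigned long vmemmap_reuse;

        if (!vmemmap_should_optimize(h, head))
                return;

        vmemmap_end    = vmemmap_start + hugetlb_vmemmap_size(h);
        vmemmap_reuse  = vmemmap_start;
        vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

        /*
         * Remap [vmemmap_start, vmemmap_end) to the page that vmemmap_reuse
         * maps, then free the vmemmap pages backing that range.
         */
        if (!vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
                SetHPageVmemmapOptimized(head);
}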
589 BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);
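
The last match (line 589) is a compile-time check from the init path: the struct pages HugeTLB actually touches (__NR_USED_SUBPAGE of them) must fit inside the one vmemmap page that is kept, HUGETLB_VMEMMAP_RESERVE_SIZE. A runnable back-of-the-envelope calculation of the savings under assumed common x86-64 parameters (4 KiB pages, 64-byte struct page, 2 MiB hugepages):

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, struct_page_size = 64;
        unsigned long hugepage_size = 2UL << 20;

        unsigned long nr_struct_pages = hugepage_size / page_size;          /* 512 */
        unsigned long vmemmap_bytes   = nr_struct_pages * struct_page_size; /* 32 KiB */
        unsigned long vmemmap_pages   = vmemmap_bytes / page_size;          /* 8 */

        /* HVO keeps one vmemmap page per hugepage and frees the other 7. */
        printf("vmemmap pages per 2 MiB hugepage: %lu (freed by HVO: %lu)\n",
               vmemmap_pages, vmemmap_pages - 1);
        return 0;
}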