Lines Matching defs:page

268 long invalidate_inode_page(struct page *page);
296 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
299 static inline void set_page_refcounted(struct page *page)
301 VM_BUG_ON_PAGE(PageTail(page), page);
302 VM_BUG_ON_PAGE(page_ref_count(page), page);
303 set_page_count(page, 1);
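
A minimal usage sketch (hypothetical, not part of the file): set_page_refcounted() is only valid on a page whose _refcount is already zero, e.g. one just taken off the free lists, and it hands the caller the single initial reference.

static void example_hand_out_page(struct page *page)
{
	/* Assumes _refcount == 0, as the VM_BUG_ON_PAGE checks above enforce. */
	set_page_refcounted(page);	/* _refcount: 0 -> 1, caller owns the ref */
}
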
331 bool isolate_lru_page(struct page *page);
333 void putback_lru_page(struct page *page);
404 * This function returns the order of a free page in the buddy system. In
405 * general, page_zone(page)->lock must be held by the caller to prevent the
406 * page from being allocated in parallel and returning garbage as the order.
407 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
408 * page cannot be allocated or merged in parallel. Alternatively, it must
411 static inline unsigned int buddy_order(struct page *page)
414 return page_private(page);
424 * decide to remove the variable and inline the page_private(page) multiple
428 #define buddy_order_unsafe(page) READ_ONCE(page_private(page))
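
A hedged sketch of the locking rule described above (illustrative helper, not in the file): under page_zone(page)->lock the value read by buddy_order() is stable, while lockless readers must fall back to buddy_order_unsafe() and re-validate whatever snapshot they get.

static unsigned int example_read_free_page_order(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned int order = 0;

	spin_lock(&zone->lock);
	if (PageBuddy(page))
		order = buddy_order(page);	/* stable while zone->lock is held */
	spin_unlock(&zone->lock);

	return order;
}
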
431 * This function checks whether a page is free and is the buddy of @page;
432 * we can coalesce a page and its buddy if
435 * (c) a page and its buddy have the same order &&
436 * (d) a page and its buddy are in the same zone.
438 * For recording whether a page is in the buddy system, we set PageBuddy.
441 * For recording page's order, we use page_private(page).
443 static inline bool page_is_buddy(struct page *page, struct page *buddy,
456 if (page_zone_id(page) != page_zone_id(buddy))
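
A condensed, hypothetical restatement of checks (b)-(d) above; it parallels page_is_buddy() but omits guard-page handling and debug assertions.

static bool example_is_buddy(struct page *page, struct page *buddy,
			     unsigned int order)
{
	if (!PageBuddy(buddy))				/* (b) in the buddy system */
		return false;
	if (buddy_order(buddy) != order)		/* (c) same order */
		return false;
	if (page_zone_id(page) != page_zone_id(buddy))	/* (d) same zone */
		return false;
	return true;
}
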
465 * Locate the struct page for both the matching buddy in our
466 * pair (buddy1) and the combined O(n+1) page they form (page).
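
The buddy relation the comment above refers to reduces to a single bit flip of the pfn; this illustrative helper mirrors what __find_buddy_pfn() computes (clearing bit @order instead yields the pfn of the combined order+1 block).

static unsigned long example_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);	/* flip bit 'order' to reach the buddy */
}
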
488 * Find the buddy of @page and validate it.
489 * @page: The input page
490 * @pfn: The pfn of the page; it saves a call to page_to_pfn() when the
492 * @order: The order of the page
496 * The found buddy can be a non-PageBuddy page, out of @page's zone, or of an
497 * order different from @page's. Validation is necessary before using it.
499 * Return: the found buddy page or NULL if not found.
501 static inline struct page *find_buddy_page_pfn(struct page *page,
505 struct page *buddy;
507 buddy = page + (__buddy_pfn - pfn);
511 if (page_is_buddy(page, buddy, order))
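
A hypothetical caller-side sketch, assuming the full signature is find_buddy_page_pfn(page, pfn, order, &buddy_pfn) and that page_zone(page)->lock is held as the buddy_order() rules above require:

static bool example_can_merge(struct page *page, unsigned int order)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long buddy_pfn;

	/* Non-NULL only if a validated buddy of the same order exists. */
	return find_buddy_page_pfn(page, pfn, order, &buddy_pfn) != NULL;
}
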
516 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
519 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
535 extern int __isolate_free_page(struct page *page, unsigned int order);
536 extern void __putback_isolated_page(struct page *page, unsigned int order,
538 extern void memblock_free_pages(struct page *page, unsigned long pfn,
540 extern void __free_pages_core(struct page *page, unsigned int order);
559 static inline void prep_compound_head(struct page *page, unsigned int order)
561 struct folio *folio = (struct folio *)page;
569 static inline void prep_compound_tail(struct page *head, int tail_idx)
571 struct page *p = head + tail_idx;
578 extern void prep_compound_page(struct page *page, unsigned int order);
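
A rough, hypothetical sketch of how the two helpers above compose into a compound page of the given order (prep_compound_page() itself follows approximately this shape):

static void example_prep_compound(struct page *page, unsigned int order)
{
	int i;

	__SetPageHead(page);			/* mark the head page */
	for (i = 1; i < (1 << order); i++)
		prep_compound_tail(page, i);	/* link each tail to the head */
	prep_compound_head(page, order);	/* set folio/head metadata last */
}
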
580 extern void post_alloc_hook(struct page *page, unsigned int order,
584 extern void free_unref_page(struct page *page, unsigned int order);
600 int split_free_page(struct page *free_page,
622 * Acts as an in/out parameter to page isolation for migration.
656 * Used in direct compaction when a page should be taken from the freelists
661 struct page *page;
675 void init_cma_reserved_pageblock(struct page *page);
733 * under page table lock for the pte/pmd being added or removed.
741 * mapping of the THP head cannot be distinguished by the page alone.
802 * Return the start of the user virtual address of a page within a vma.
803 * Returns -EFAULT if all of the page is outside the range of the vma.
804 * If page is a compound head, the entire compound page is considered.
807 vma_address(struct page *page, struct vm_area_struct *vma)
809 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
810 return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
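
An illustrative reduction of the arithmetic behind vma_pgoff_address() for the simple in-range case (hypothetical helper; the bounds checks and -EFAULT handling mentioned above are omitted):

static unsigned long example_pgoff_to_address(pgoff_t pgoff,
					      struct vm_area_struct *vma)
{
	/* Translate a page offset into this vma's user virtual address space. */
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
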
844 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
934 extern int hwpoison_filter(struct page *p);
1027 static inline bool is_migrate_highatomic_page(struct page *page)
1029 return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
1052 pgprot_t prot, struct page **pages, unsigned int page_shift);
1060 pgprot_t prot, struct page **pages, unsigned int page_shift)
1068 struct page **pages, unsigned int page_shift);
1074 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
1077 void free_zone_device_page(struct page *page);
1078 int migrate_device_coherent_page(struct page *page);
1083 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
1084 int __must_check try_grab_page(struct page *page, unsigned int flags);
1089 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1094 /* mark page accessed */
1109 * Indicates for which pages that are write-protected in the page table,
1111 * GUP pin will remain consistent with the pages mapped into the page tables
1121 * Must be called with the (sub)page that's actually referenced via the
1122 * page table entry, which might not necessarily be the head page for a
1129 unsigned int flags, struct page *page)
1132 * FOLL_WRITE is implicitly handled correctly as the page table entry
1139 * Note: PageAnon(page) is stable until the page is actually getting
1142 if (!PageAnon(page)) {
1146 * changes through the process page tables.
1167 * During GUP-fast we might not get called on the head page for a
1168 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
1170 * head page. For hugetlb, PageAnonExclusive only applies on the head
1171 * page (as it cannot be partially COW-shared), so look up the head page.
1173 if (unlikely(!PageHead(page) && PageHuge(page)))
1174 page = compound_head(page);
1180 return !PageAnonExclusive(page);
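
A condensed, hypothetical restatement of the decision above (the real helper also has to consider FOLL_WRITE, FOLL_LONGTERM and non-anonymous COW mappings): only an R/O FOLL_PIN on an anonymous page that is not marked exclusive needs to trigger unsharing first.

static bool example_must_unshare(unsigned int flags, struct page *page)
{
	if (!(flags & FOLL_PIN))
		return false;			/* only pins need the consistency guarantee */
	if (!PageAnon(page))
		return false;			/* simplified: anonymous pages only */
	if (unlikely(!PageHead(page) && PageHuge(page)))
		page = compound_head(page);	/* hugetlb: the flag lives on the head page */
	return !PageAnonExclusive(page);	/* shared anon page -> unshare before pinning */
}
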