Lines Matching defs:page

182 struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
183 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);
186 * page_evictable - test whether a page is evictable
187 * @page: the page to test
189 * Test whether page is evictable--i.e., should be placed on active/inactive lru lists.
192 * Reasons page might not be evictable:
193 * (1) page's mapping marked unevictable
194 * (2) page is part of an mlocked VMA
197 static inline bool page_evictable(struct page *page)
203 ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
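The two fragments above belong to one inline helper; a minimal sketch of how they fit together follows. The rcu_read_lock() pair (which keeps the page's address_space from being freed while page_mapping() is inspected) is reconstructed here, since those lines do not contain the matched symbol and are not shown above.

static inline bool page_evictable(struct page *page)
{
        bool ret;

        /* keep the mapping (inode/swap address_space) stable while we look */
        rcu_read_lock();
        ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
        rcu_read_unlock();

        return ret;
}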
209 * Turn a non-refcounted page (->_refcount == 0) into refcounted with a count of one.
212 static inline void set_page_refcounted(struct page *page)
214 VM_BUG_ON_PAGE(PageTail(page), page);
215 VM_BUG_ON_PAGE(page_ref_count(page), page);
216 set_page_count(page, 1);
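A hypothetical caller, for illustration only (example_hand_out_page is not from the source): pages leave the buddy allocator with _refcount == 0 and are handed to their new owner holding exactly one reference, which the owner later drops with put_page().

static void example_hand_out_page(struct page *page)
{
        set_page_refcounted(page);      /* _refcount: 0 -> 1 */
        /* the new owner releases this reference with put_page() when done */
}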
233 extern int isolate_lru_page(struct page *page);
234 extern void putback_lru_page(struct page *page);
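A sketch of the usual pairing of these two helpers (the wrapper name is hypothetical): isolate_lru_page() returns 0 on success and -EBUSY if the page could not be taken off its LRU list, and every successfully isolated page must eventually be handed back with putback_lru_page().

static int example_isolate_then_putback(struct page *page)
{
        int ret = isolate_lru_page(page);

        if (ret)
                return ret;             /* -EBUSY: could not isolate */

        /* ... reclaim, migrate or otherwise operate on the page ... */

        putback_lru_page(page);         /* return it to the LRU */
        return 0;
}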
290 * Locate the struct page for both the matching buddy in our
291 * pair (buddy1) and the combined O(n+1) page they form (page).
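The comment fragment above describes the buddy lookup; the underlying pfn arithmetic is a single bit flip, sketched here in the spirit of __find_buddy_pfn(): the buddy of an order-n block differs from it only in bit n of the pfn, and the lower of the two pfns (page_pfn & buddy_pfn) is where the combined order-(n+1) page starts.

static inline unsigned long example_find_buddy_pfn(unsigned long page_pfn,
                                                   unsigned int order)
{
        /* flip bit 'order': selects the other half of the order-(n+1) block */
        return page_pfn ^ (1UL << order);
}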
312 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
315 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
324 extern int __isolate_free_page(struct page *page, unsigned int order);
325 extern void __putback_isolated_page(struct page *page, unsigned int order,
327 extern void memblock_free_pages(struct page *page, unsigned long pfn,
329 extern void __free_pages_core(struct page *page, unsigned int order);
330 extern void prep_compound_page(struct page *page, unsigned int order);
331 extern void post_alloc_hook(struct page *page, unsigned int order,
381 * Used in direct compaction when a page should be taken from the freelists
386 struct page *page;
401 * This function returns the order of a free page in the buddy system. In
402 * general, page_zone(page)->lock must be held by the caller to prevent the
403 * page from being allocated in parallel and returning garbage as the order.
404 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
405 * page cannot be allocated or merged in parallel. Alternatively, it must handle invalid values gracefully, and use buddy_order_unsafe() below.
408 static inline unsigned int buddy_order(struct page *page)
411 return page_private(page);
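A hypothetical reader honouring the locking rule spelled out above: take the zone lock so the page cannot be allocated or merged while its order is read, and only trust the order of pages that are actually free (PageBuddy).

static unsigned int example_read_free_page_order(struct zone *zone,
                                                 struct page *page)
{
        unsigned long flags;
        unsigned int order = 0;

        spin_lock_irqsave(&zone->lock, flags);
        if (PageBuddy(page))            /* only free pages carry a buddy order */
                order = buddy_order(page);
        spin_unlock_irqrestore(&zone->lock, flags);

        return order;
}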
421 * decide to remove the variable and inline the page_private(page) multiple times, potentially observing different values in the tests and the actual use of the result.
425 #define buddy_order_unsafe(page) READ_ONCE(page_private(page))
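For lockless readers, the comment above implies a read-once-then-validate pattern; here is a sketch in the style of the compaction scanners (the function name is hypothetical): the value read may be stale garbage, so it is range-checked before being used to skip past the free block.

static unsigned long example_skip_free_block(struct page *page,
                                             unsigned long pfn)
{
        if (PageBuddy(page)) {
                unsigned int order = buddy_order_unsafe(page);

                /* racy value: validate before acting on it */
                if (order > 0 && order < MAX_ORDER)
                        pfn += (1UL << order) - 1;
        }

        return pfn;
}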
479 * must be called with vma's mmap_lock held for read or write, and page locked.
481 extern void mlock_vma_page(struct page *page);
482 extern unsigned int munlock_vma_page(struct page *page);
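A hypothetical caller respecting the locking contract stated above (mmap_lock held for read or write, page locked):

static void example_mlock_one_page(struct vm_area_struct *vma,
                                   struct page *page)
{
        mmap_assert_locked(vma->vm_mm); /* caller holds mmap_lock, read or write */

        lock_page(page);
        mlock_vma_page(page);
        unlock_page(page);
}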
485 * Clear the page's PageMlocked(). This can be useful in a situation where
486 * we want to unconditionally remove a page from the pagecache -- e.g., on truncation or freeing.
489 * It is legal to call this function for any page, mlocked or not.
490 * If called for a page that is still mapped by mlocked vmas, all we do is revert to lazy LRU behaviour -- semantics are not broken.
493 extern void clear_page_mlock(struct page *page);
498 * to migrate the Mlocked page flag; update statistics.
500 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
502 if (TestClearPageMlocked(page)) {
503 int nr_pages = thp_nr_pages(page);
506 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
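Only the lines naming the old page match the search, so the transfer to newpage is missing above; this sketch shows the full shape implied by the fragments (the SetPageMlocked(newpage) and the matching NR_MLOCK increment are reconstructed, not quoted):

static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
        if (TestClearPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);

                /* move the flag and the per-zone NR_MLOCK accounting across */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                SetPageMlocked(newpage);
                __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
        }
}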
515 * At what user virtual address is page expected in vma?
516 * Returns -EFAULT if all of the page is outside the range of vma.
517 * If page is a compound head, the entire compound page is considered.
520 vma_address(struct page *page, struct vm_area_struct *vma)
525 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
526 pgoff = page_to_pgoff(page);
533 } else if (PageHead(page) &&
534 pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
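The arithmetic behind the fragments above, as a simplified sketch (the name is hypothetical; the compound-page corner cases of the real helper are omitted): translate the page's file offset into a user virtual address inside the vma, returning -EFAULT when the page falls entirely outside it.

static inline unsigned long example_vma_address(struct page *page,
                                                struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);
        unsigned long address;

        if (pgoff < vma->vm_pgoff)
                return -EFAULT;         /* page lies before the vma */

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (address >= vma->vm_end)
                return -EFAULT;         /* page lies after the vma */

        return address;
}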
544 * Then at what user virtual address will none of the page be found in vma?
546 * If page is a compound head, the entire compound page is considered.
549 vma_address_end(struct page *page, struct vm_area_struct *vma)
554 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
555 pgoff = page_to_pgoff(page) + compound_nr(page);
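The complementary end calculation, again as a simplified sketch: advance the offset past the whole (possibly compound) page, convert to a virtual address, and clamp the result to the vma.

static inline unsigned long example_vma_address_end(struct page *page,
                                                    struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page) + compound_nr(page);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        /* never report an end address outside the vma itself */
        if (address < vma->vm_start || address > vma->vm_end)
                address = vma->vm_end;

        return address;
}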
572 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
585 static inline void clear_page_mlock(struct page *page) { }
586 static inline void mlock_vma_page(struct page *page) { }
587 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
593 * the maximally aligned gigantic page 'base'. Handle any discontiguity in the mem_map at MAX_ORDER_NR_PAGES boundaries.
596 static inline struct page *mem_map_offset(struct page *base, int offset)
605 * page 'base'. Handle any discontiguity in the mem_map.
607 static inline struct page *mem_map_next(struct page *iter,
608 struct page *base, int offset)
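The discontiguity both helpers handle is that mem_map is only guaranteed contiguous within a MAX_ORDER-aligned block; a sketch of the offset variant, in the spirit of mem_map_offset() above: plain pointer arithmetic inside the block, a pfn round-trip once the offset can cross a block boundary.

static inline struct page *example_mem_map_offset(struct page *base, int offset)
{
        /* beyond one MAX_ORDER block the mem_map may not be contiguous,
         * so translate through the pfn instead of doing pointer math */
        if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                return pfn_to_page(page_to_pfn(base) + offset);

        return base + offset;
}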
684 extern int hwpoison_filter(struct page *p);
766 static inline bool is_migrate_highatomic_page(struct page *page)
768 return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;