Lines Matching refs:page

45 /* How many pages do we try to swap or page in/out together? */
79 static void __page_cache_release(struct page *page)
81 if (PageLRU(page)) {
82 pg_data_t *pgdat = page_pgdat(page);
87 lruvec = mem_cgroup_page_lruvec(page, pgdat);
88 VM_BUG_ON_PAGE(!PageLRU(page), page);
89 __ClearPageLRU(page);
90 del_page_from_lru_list(page, lruvec, page_off_lru(page));
93 __ClearPageWaiters(page);
96 static void __put_single_page(struct page *page)
98 __page_cache_release(page);
99 mem_cgroup_uncharge(page);
100 free_unref_page(page);
103 static void __put_compound_page(struct page *page)
107 hugetlb. This is because a hugetlb page never has PageLRU set
111 if (!PageHuge(page))
112 __page_cache_release(page);
113 destroy_compound_page(page);
116 void __put_page(struct page *page)
118 if (is_zone_device_page(page)) {
119 put_dev_pagemap(page->pgmap);
122 * The page belongs to the device that created pgmap. Do
123 * not return it to page allocator.
128 if (unlikely(PageCompound(page)))
129 __put_compound_page(page);
131 __put_single_page(page);
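The __put_page() lines above show the final-put dispatch: a zone-device page just drops its pgmap reference and stays with the device, a compound page is torn down as a unit, and anything else is released as a single page. The userspace C sketch below (hypothetical toy_* names, not kernel code) illustrates that dispatch-on-page-type pattern.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the relevant page-flag tests. */
    struct toy_page {
        bool is_zone_device;   /* backed by a device pgmap        */
        bool is_compound;      /* head of a multi-page allocation */
    };

    /* Mirrors the dispatch sketched above: device pages go back to their
     * owner, compound pages are destroyed as a unit, the rest are freed
     * as single pages. */
    static void toy_put_page(struct toy_page *page)
    {
        if (page->is_zone_device) {
            puts("drop pgmap reference; do not return the page to the allocator");
            return;
        }
        if (page->is_compound)
            puts("destroy compound page");
        else
            puts("release and free single page");
    }

    int main(void)
    {
        struct toy_page dev = { .is_zone_device = true };
        struct toy_page thp = { .is_compound = true };

        toy_put_page(&dev);
        toy_put_page(&thp);
        return 0;
    }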
137 * @pages: list of pages threaded on page->lru
139 Release a list of pages which are strung together on page->lru. Currently
145 struct page *victim;
164 * were pinned, returns -errno. Each page returned must be released
168 struct page **pages)
185 * get_kernel_page() - pin a kernel page in memory
188 * @pages: array that receives pointer to the page pinned.
191 * Returns 1 if page is pinned. If the page was not pinned, returns
192 * -errno. The page returned must be released with a put_page() call
195 int get_kernel_page(unsigned long start, int write, struct page **pages)
207 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
216 struct page *page = pvec->pages[i];
217 struct pglist_data *pagepgdat = page_pgdat(page);
226 lruvec = mem_cgroup_page_lruvec(page, pgdat);
227 (*move_fn)(page, lruvec, arg);
235 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
240 if (PageLRU(page) && !PageUnevictable(page)) {
241 del_page_from_lru_list(page, lruvec, page_lru(page));
242 ClearPageActive(page);
243 add_page_to_lru_list_tail(page, lruvec, page_lru(page));
244 (*pgmoved) += thp_nr_pages(page);
261 * Writeback is about to end against a page which has been marked for immediate
265 void rotate_reclaimable_page(struct page *page)
267 if (!PageLocked(page) && !PageDirty(page) &&
268 !PageUnevictable(page) && PageLRU(page)) {
272 get_page(page);
275 if (!pagevec_add(pvec, page) || PageCompound(page))
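rotate_reclaimable_page() above, and activate_page(), lru_cache_add(), deactivate_file_page(), deactivate_page() and mark_page_lazyfree() further down, all follow the same per-CPU batching idiom: take a reference, stash the page in a small pagevec, and drain the whole batch when pagevec_add() reports no space left or a compound page was queued. A minimal userspace sketch of that idiom, with hypothetical toy_* names and a print standing in for the real drain work:

    #include <stddef.h>
    #include <stdio.h>

    #define TOY_PAGEVEC_SIZE 15   /* the kernel's PAGEVEC_SIZE is similarly small */

    struct toy_page { int id; };

    struct toy_pagevec {
        unsigned int nr;
        struct toy_page *pages[TOY_PAGEVEC_SIZE];
    };

    /* Returns the remaining space, i.e. 0 when the vector just became full,
     * matching the pagevec_add() convention used in the fragments above. */
    static unsigned int toy_pagevec_add(struct toy_pagevec *pvec, struct toy_page *page)
    {
        pvec->pages[pvec->nr++] = page;
        return TOY_PAGEVEC_SIZE - pvec->nr;
    }

    /* Stand-in for the drain step: process the whole batch at once, then reset. */
    static void toy_pagevec_drain(struct toy_pagevec *pvec)
    {
        for (unsigned int i = 0; i < pvec->nr; i++)
            printf("move page %d\n", pvec->pages[i]->id);
        pvec->nr = 0;
    }

    int main(void)
    {
        struct toy_pagevec pvec = { 0 };
        struct toy_page pages[20];

        for (int i = 0; i < 20; i++) {
            pages[i].id = i;
            /* Drain once the batch is full, as the kernel does when
             * pagevec_add() returns 0 (or a compound page was queued). */
            if (!toy_pagevec_add(&pvec, &pages[i]))
                toy_pagevec_drain(&pvec);
        }
        toy_pagevec_drain(&pvec);   /* flush the partial batch */
        return 0;
    }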
312 void lru_note_cost_page(struct page *page)
315 if (page_is_file_lru(page)) {
316 lru_note_cost(&(page_pgdat(page)->__lruvec), 1, thp_nr_pages(page));
320 lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
321 page_is_file_lru(page), thp_nr_pages(page));
324 static void __activate_page(struct page *page, struct lruvec *lruvec,
327 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
328 int lru = page_lru_base_type(page);
329 int nr_pages = thp_nr_pages(page);
331 del_page_from_lru_list(page, lruvec, lru);
332 SetPageActive(page);
334 add_page_to_lru_list(page, lruvec, lru);
335 trace_mm_lru_activate(page);
357 static void activate_page(struct page *page)
359 page = compound_head(page);
360 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
365 get_page(page);
366 if (!pagevec_add(pvec, page) || PageCompound(page))
377 static void activate_page(struct page *page)
379 pg_data_t *pgdat = page_pgdat(page);
381 page = compound_head(page);
383 __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
388 static void __lru_cache_activate_page(struct page *page)
397 * Search backwards on the optimistic assumption that the page being
399 * the local pagevec is examined as a !PageLRU page could be in the
402 * a remote pagevec's page PageActive potentially hits a race where
403 * a page is marked PageActive just after it is added to the inactive
407 struct page *pagevec_page = pvec->pages[i];
409 if (pagevec_page == page) {
410 SetPageActive(page);
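__lru_cache_activate_page() walks the local pagevec newest-first, on the assumption that a page being activated was most likely the one just added; only the local CPU's batch is touched to avoid racing with remote drains. A small illustrative backward search in userspace C (hypothetical names, not the kernel function):

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_page { bool active; };

    /* Search the batch newest-first; the page we want was most likely the
     * last one queued. */
    static bool toy_activate_in_batch(struct toy_page **batch, size_t nr,
                                      struct toy_page *target)
    {
        for (size_t i = nr; i-- > 0; ) {
            if (batch[i] == target) {
                target->active = true;
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        struct toy_page a = { 0 }, b = { 0 };
        struct toy_page *batch[] = { &a, &b };

        toy_activate_in_batch(batch, 2, &b);
        return b.active ? 0 : 1;
    }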
419 * Mark a page as having seen activity.
425 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
426 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
428 void mark_page_accessed(struct page *page)
430 page = compound_head(page);
432 if (!PageReferenced(page)) {
433 SetPageReferenced(page);
434 } else if (PageUnevictable(page)) {
438 * evictable page accessed has no effect.
440 } else if (!PageActive(page)) {
442 * If the page is on the LRU, queue it for activation via
443 * lru_pvecs.activate_page. Otherwise, assume the page is on a
447 if (PageLRU(page))
448 activate_page(page);
450 __lru_cache_activate_page(page);
451 ClearPageReferenced(page);
452 workingset_activation(page);
454 if (page_is_idle(page))
455 clear_page_idle(page);
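mark_page_accessed() implements a two-step promotion: the first access only sets PageReferenced, and a second access on an already-referenced, inactive, evictable page activates it and clears the referenced bit. A compact userspace model of that state machine (toy_* names are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_page {
        bool referenced;
        bool active;
        bool unevictable;
    };

    /* Two-step promotion: the first touch only marks the page referenced,
     * the second promotes it to "active". Illustrative only. */
    static void toy_mark_accessed(struct toy_page *page)
    {
        if (!page->referenced) {
            page->referenced = true;          /* first touch */
        } else if (page->unevictable) {
            /* unevictable pages are never promoted */
        } else if (!page->active) {
            page->active = true;              /* second touch: activate */
            page->referenced = false;
        }
    }

    int main(void)
    {
        struct toy_page p = { 0 };

        toy_mark_accessed(&p);   /* referenced */
        toy_mark_accessed(&p);   /* activated  */
        printf("active=%d referenced=%d\n", p.active, p.referenced);
        return 0;
    }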
460 lru_cache_add - add a page to the LRU lists
461 * @page: the page to be added to the LRU.
463 * Queue the page for addition to the LRU via pagevec. The decision on whether
464 * to add the page to the [in]active [file|anon] list is deferred until the
466 * have the page added to the active list using mark_page_accessed().
468 void lru_cache_add(struct page *page)
472 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
473 VM_BUG_ON_PAGE(PageLRU(page), page);
475 get_page(page);
478 if (!pagevec_add(pvec, page) || PageCompound(page))
486 * @page: the page to be added to LRU
487 * @vma: vma in which page is mapped for determining reclaimability
489 * Place @page on the inactive or unevictable LRU list, depending on its
492 void lru_cache_add_inactive_or_unevictable(struct page *page,
497 VM_BUG_ON_PAGE(PageLRU(page), page);
500 if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
501 int nr_pages = thp_nr_pages(page);
507 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
510 lru_cache_add(page);
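lru_cache_add_inactive_or_unevictable() decides evictability up front: if the VMA is unevictable and the page was not already mlocked, the page is marked mlocked and accounted to NR_MLOCK once, then queued like any other page; the later drain files it on the right list. A rough userspace analogue (illustrative names, with a plain counter standing in for the vmstat update):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_page { bool mlocked; int nr_pages; };

    static long toy_nr_mlock;   /* stand-in for the NR_MLOCK counter */

    static void toy_lru_cache_add(struct toy_page *page)
    {
        printf("queued %d base page(s) for LRU insertion\n", page->nr_pages);
    }

    /* If the mapping is unevictable, mark and account the page as mlocked
     * exactly once, then queue it; evictability is resolved at drain time. */
    static void toy_add_inactive_or_unevictable(struct toy_page *page, bool unevictable)
    {
        if (unevictable && !page->mlocked) {
            page->mlocked = true;
            toy_nr_mlock += page->nr_pages;
        }
        toy_lru_cache_add(page);
    }

    int main(void)
    {
        struct toy_page p = { .nr_pages = 1 };

        toy_add_inactive_or_unevictable(&p, true);
        printf("NR_MLOCK=%ld\n", toy_nr_mlock);
        return 0;
    }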
514 If the page cannot be invalidated, it is moved to the
518 * effective than the single-page writeout from reclaim.
520 * If the page isn't page_mapped and dirty/writeback, the page
523 * 1. active, mapped page -> none
524 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
525 * 3. inactive, mapped page -> none
526 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
530 In 4, the page is moved to the head of the inactive list; the VM expects the page would
532 * than the single-page writeout from reclaim.
534 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
539 int nr_pages = thp_nr_pages(page);
541 if (!PageLRU(page))
544 if (PageUnevictable(page))
547 /* Some processes are using the page */
548 if (page_mapped(page))
551 active = PageActive(page);
552 lru = page_lru_base_type(page);
554 del_page_from_lru_list(page, lruvec, lru + active);
555 ClearPageActive(page);
556 ClearPageReferenced(page);
558 if (PageWriteback(page) || PageDirty(page)) {
564 add_page_to_lru_list(page, lruvec, lru);
565 SetPageReclaim(page);
568 The page's writeback has already completed while it sat in the pagevec.
569 We move the page to the tail of the inactive list.
571 add_page_to_lru_list_tail(page, lruvec, lru);
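Putting the comment's four cases together with the code: mapped or unevictable pages are left alone; a deactivated clean page goes straight to the tail of the inactive list; a dirty or under-writeback page goes to the head of the inactive list with PG_reclaim set, so that writeback completion rotates it to the tail. A toy userspace rendering of that policy (not the kernel function):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_page {
        bool mapped, unevictable, active, referenced;
        bool dirty, writeback, reclaim;
    };

    /* Returns a string describing where the page ends up; illustrative only. */
    static const char *toy_deactivate_file(struct toy_page *page)
    {
        if (page->unevictable)
            return "left alone (unevictable)";
        if (page->mapped)
            return "left alone (still mapped)";

        page->active = false;
        page->referenced = false;

        if (page->dirty || page->writeback) {
            page->reclaim = true;            /* rotate to the tail when I/O completes */
            return "inactive head, PG_reclaim set";
        }
        return "inactive tail";
    }

    int main(void)
    {
        struct toy_page dirty = { .active = true, .dirty = true };
        struct toy_page clean = { .active = true };

        printf("dirty page -> %s\n", toy_deactivate_file(&dirty));
        printf("clean page -> %s\n", toy_deactivate_file(&clean));
        return 0;
    }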
582 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
585 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
586 int lru = page_lru_base_type(page);
587 int nr_pages = thp_nr_pages(page);
589 del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
590 ClearPageActive(page);
591 ClearPageReferenced(page);
592 add_page_to_lru_list(page, lruvec, lru);
600 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
603 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
604 !PageSwapCache(page) && !PageUnevictable(page)) {
605 bool active = PageActive(page);
606 int nr_pages = thp_nr_pages(page);
608 del_page_from_lru_list(page, lruvec,
610 ClearPageActive(page);
611 ClearPageReferenced(page);
617 ClearPageSwapBacked(page);
618 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
665 * deactivate_file_page - forcefully deactivate a file page
666 * @page: page to deactivate
668 * This function hints the VM that @page is a good reclaim candidate,
669 * for example if its invalidation fails due to the page being dirty
672 void deactivate_file_page(struct page *page)
675 In a workload with many unevictable pages, such as one using mprotect,
676 deactivation of unevictable pages to accelerate reclaim is pointless.
678 if (PageUnevictable(page))
681 if (likely(get_page_unless_zero(page))) {
687 if (!pagevec_add(pvec, page) || PageCompound(page))
694 * deactivate_page - deactivate a page
695 * @page: page to deactivate
697 * deactivate_page() moves @page to the inactive list if @page was on the active
698 * list and was not an unevictable page. This is done to accelerate the reclaim
699 * of @page.
701 void deactivate_page(struct page *page)
703 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
708 get_page(page);
709 if (!pagevec_add(pvec, page) || PageCompound(page))
716 * mark_page_lazyfree - make an anon page lazyfree
717 * @page: page to deactivate
719 * mark_page_lazyfree() moves @page to the inactive file list.
720 * This is done to accelerate the reclaim of @page.
722 void mark_page_lazyfree(struct page *page)
724 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
725 !PageSwapCache(page) && !PageUnevictable(page)) {
730 get_page(page);
731 if (!pagevec_add(pvec, page) || PageCompound(page))
824 * below which drains the page vectors.
874 * fell to zero, remove the page from the LRU and free it.
876 void release_pages(struct page **pages, int nr)
886 struct page *page = pages[i];
898 page = compound_head(page);
899 if (is_huge_zero_page(page))
902 if (is_zone_device_page(page)) {
914 if (page_is_devmap_managed(page)) {
915 put_devmap_managed_page(page);
920 if (!put_page_testzero(page))
923 if (PageCompound(page)) {
928 __put_compound_page(page);
932 if (PageLRU(page)) {
933 struct pglist_data *pgdat = page_pgdat(page);
944 lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
945 VM_BUG_ON_PAGE(!PageLRU(page), page);
946 __ClearPageLRU(page);
947 del_page_from_lru_list(page, lruvec, page_off_lru(page));
950 __ClearPageWaiters(page);
952 list_add(&page->lru, &pages_to_free);
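release_pages() drops one reference per page; pages whose count reaches zero are detached from the LRU (under a lazily taken, batched node lock) and collected on a local list so they can be handed back to the allocator in one call. A simplified userspace sketch of the drop-and-batch part (toy_* names are hypothetical; the LRU and locking details are omitted):

    #include <stdio.h>

    struct toy_page { int refcount; int id; };

    /* Decrement and report whether the count hit zero, like put_page_testzero(). */
    static int toy_put_testzero(struct toy_page *page)
    {
        return --page->refcount == 0;
    }

    /* Drop one reference per page and batch the now-free pages so they can
     * be returned to the allocator in a single call. Fixed batch size is
     * for illustration only. */
    static void toy_release_pages(struct toy_page **pages, int nr)
    {
        struct toy_page *to_free[16];
        int nr_free = 0;

        for (int i = 0; i < nr; i++) {
            if (!toy_put_testzero(pages[i]))
                continue;                   /* someone else still holds it */
            to_free[nr_free++] = pages[i];  /* collect for a batched free  */
        }

        for (int i = 0; i < nr_free; i++)
            printf("freeing page %d\n", to_free[i]->id);
    }

    int main(void)
    {
        struct toy_page a = { .refcount = 1, .id = 0 };
        struct toy_page b = { .refcount = 2, .id = 1 };
        struct toy_page *pages[] = { &a, &b };

        toy_release_pages(pages, 2);        /* only page 0 is freed */
        return 0;
    }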
966 * cache-warm and we want to give them back to the page allocator ASAP.
985 void lru_add_page_tail(struct page *page, struct page *page_tail,
988 VM_BUG_ON_PAGE(!PageHead(page), page);
989 VM_BUG_ON_PAGE(PageCompound(page_tail), page);
990 VM_BUG_ON_PAGE(PageLRU(page_tail), page);
996 if (likely(PageLRU(page)))
997 list_add_tail(&page_tail->lru, &page->lru);
999 /* page reclaim is reclaiming a huge page */
1004 Head page has not yet been counted as an hpage,
1016 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
1020 int was_unevictable = TestClearPageUnevictable(page);
1021 int nr_pages = thp_nr_pages(page);
1023 VM_BUG_ON_PAGE(PageLRU(page), page);
1028 * 2) Before acquiring LRU lock to put the page to correct LRU and then
1045 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
1047 * the isolation of the page whose Mlocked bit is cleared (#0 is also
1048 * looking at the same page) and the evictable page will be stranded
1051 SetPageLRU(page);
1054 if (page_evictable(page)) {
1055 lru = page_lru(page);
1060 ClearPageActive(page);
1061 SetPageUnevictable(page);
1066 add_page_to_lru_list(page, lruvec, lru);
1067 trace_mm_lru_insertion(page, lru);
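The ordering comments in __pagevec_lru_add_fn() describe a store-buffering race: one side sets PageLRU and then tests evictability (Mlocked), while the mlock/munlock side updates Mlocked and then looks for the page on the LRU; without a full barrier after SetPageLRU, both sides could miss each other's store and strand the page on the wrong list. The sketch below is not the kernel code, just the classic pattern with C11 sequentially consistent atomics playing the role of smp_mb(): with full ordering, the two observers cannot both read the old values.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical flags modelling the two sides of the race. */
    static atomic_int page_lru, page_mlocked;
    static int saw_mlocked = -1, saw_lru = -1;

    static void *lru_side(void *arg)
    {
        (void)arg;
        atomic_store(&page_lru, 1);            /* "SetPageLRU()"            */
        saw_mlocked = atomic_load(&page_mlocked); /* then check evictability */
        return NULL;
    }

    static void *mlock_side(void *arg)
    {
        (void)arg;
        atomic_store(&page_mlocked, 1);        /* update the Mlocked state   */
        saw_lru = atomic_load(&page_lru);      /* then look for it on the LRU */
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, lru_side, NULL);
        pthread_create(&b, NULL, mlock_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* With full ordering, saw_mlocked == 0 && saw_lru == 0 is impossible. */
        printf("saw_mlocked=%d saw_lru=%d\n", saw_mlocked, saw_lru);
        return 0;
    }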
1120 * passed on to page-only pagevec operations.
1127 struct page *page = pvec->pages[i];
1128 if (!xa_is_value(page))
1129 pvec->pages[j++] = page;
1138 * @start: The starting page index
1139 * @end: The final page index
1148 * also update @start to index the next page for the traversal.
1201 void put_devmap_managed_page(struct page *page)
1205 if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
1208 count = page_ref_dec_return(page);
1211 * devmap page refcounts are 1-based, rather than 0-based: if
1212 * refcount is 1, then the page is free and the refcount is
1213 * stable because nobody holds a reference on the page.
1216 free_devmap_managed_page(page);
1218 __put_page(page);
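put_devmap_managed_page() relies on the 1-based refcount described in the comment above: when the count drops to 1 nobody is using the page, so it is handed back to the driver via free_devmap_managed_page(); only if it falls all the way to 0 does the ordinary __put_page() path run. An illustrative userspace version of that decrement-and-test (toy_* names are hypothetical):

    #include <stdio.h>

    struct toy_page { int refcount; };

    static void toy_free_devmap_page(struct toy_page *page)
    {
        (void)page;
        printf("page idle, hand it back to the driver\n");
    }

    /* 1-based scheme: a count of 1 means "no user holds the page", so the
     * driver gets it back; hitting 0 means the final reference went away too. */
    static void toy_put_devmap_page(struct toy_page *page)
    {
        int count = --page->refcount;

        if (count == 1)
            toy_free_devmap_page(page);
        else if (count == 0)
            printf("final put: release the page for good\n");
    }

    int main(void)
    {
        struct toy_page p = { .refcount = 2 };

        toy_put_devmap_page(&p);   /* drops to 1: driver reclaims it */
        toy_put_devmap_page(&p);   /* drops to 0: fully released     */
        return 0;
    }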