Lines Matching defs:folio
60 * The following folio batches are grouped together because they are protected
81 static void __page_cache_release(struct folio *folio)
83 if (folio_test_lru(folio)) {
87 lruvec = folio_lruvec_lock_irqsave(folio, &flags);
88 lruvec_del_folio(lruvec, folio);
89 __folio_clear_lru_flags(folio);
93 if (unlikely(folio_test_mlocked(folio))) {
94 long nr_pages = folio_nr_pages(folio);
96 __folio_clear_mlocked(folio);
97 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
102 static void __folio_put_small(struct folio *folio)
104 __page_cache_release(folio);
105 mem_cgroup_uncharge(folio);
106 free_unref_page(&folio->page, 0);
109 static void __folio_put_large(struct folio *folio)
117 if (!folio_test_hugetlb(folio))
118 __page_cache_release(folio);
119 destroy_large_folio(folio);
122 void __folio_put(struct folio *folio)
124 if (unlikely(folio_is_zone_device(folio)))
125 free_zone_device_page(&folio->page);
126 else if (unlikely(folio_test_large(folio)))
127 __folio_put_large(folio);
129 __folio_put_small(folio);
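
The matches at lines 122-129 cover __folio_put(): once the last reference is gone, the free is dispatched to a zone-device, large-folio or small-folio path. Below is a minimal userspace sketch of that put-then-dispatch pattern; every name in it (fake_folio, put_testzero, release) is illustrative, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum page_kind { PAGE_SMALL, PAGE_LARGE, PAGE_ZONE_DEVICE };

struct fake_folio {
	atomic_int refcount;
	enum page_kind kind;
};

/* Mirrors the idea of folio_put_testzero(): drop one reference and
 * report whether it was the last one. */
static bool put_testzero(struct fake_folio *f)
{
	return atomic_fetch_sub(&f->refcount, 1) == 1;
}

/* Type-specific release paths, like __folio_put()'s three branches. */
static void release(struct fake_folio *f)
{
	switch (f->kind) {
	case PAGE_ZONE_DEVICE:
		puts("device-managed free path");
		break;
	case PAGE_LARGE:
		puts("large/compound free path");
		break;
	default:
		puts("per-cpu allocator free path");
		break;
	}
	free(f);
}

int main(void)
{
	struct fake_folio *f = malloc(sizeof(*f));

	atomic_init(&f->refcount, 2);
	f->kind = PAGE_LARGE;

	if (put_testzero(f))	/* 2 -> 1: not the last reference */
		release(f);
	if (put_testzero(f))	/* 1 -> 0: last reference, free it */
		release(f);
	return 0;
}
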
141 struct folio *folio, *next;
143 list_for_each_entry_safe(folio, next, pages, lru) {
144 if (!folio_put_testzero(folio)) {
145 list_del(&folio->lru);
148 if (folio_test_large(folio)) {
149 list_del(&folio->lru);
150 __folio_put_large(folio);
161 typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
163 static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
165 int was_unevictable = folio_test_clear_unevictable(folio);
166 long nr_pages = folio_nr_pages(folio);
168 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
173 * of stranding an evictable folio on an unevictable LRU? I think
179 * folio_put_testzero() has excluded any other users of the folio.)
181 if (folio_evictable(folio)) {
185 folio_clear_active(folio);
186 folio_set_unevictable(folio);
188 * folio->mlock_count = !!folio_test_mlocked(folio)?
194 folio->mlock_count = 0;
199 lruvec_add_folio(lruvec, folio);
200 trace_mm_lru_insertion(folio);
210 struct folio *folio = fbatch->folios[i];
212 /* block memcg migration while the folio moves between lru */
213 if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
216 lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
217 move_fn(lruvec, folio);
219 folio_set_lru(folio);
229 struct folio *folio, move_fn_t move_fn)
231 if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
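
Lines 199-231 are the per-CPU batching machinery: folio_batch_add_and_move() queues a folio locally and only folio_batch_move_lru() takes the lruvec lock, applying move_fn to every queued entry in one critical section. A hedged userspace model of that lock-amortizing pattern follows; the names and the fixed batch size are stand-ins for the kernel's folio_batch/PAGEVEC_SIZE.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH_SIZE 15	/* illustrative; the kernel uses PAGEVEC_SIZE */

struct item { int id; };

typedef void (*move_fn_t)(struct item *item);

struct batch {
	unsigned int nr;
	struct item *items[BATCH_SIZE];
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* One lock round trip pays for up to BATCH_SIZE list operations. */
static void batch_flush(struct batch *b, move_fn_t move_fn)
{
	pthread_mutex_lock(&list_lock);
	for (unsigned int i = 0; i < b->nr; i++)
		move_fn(b->items[i]);
	pthread_mutex_unlock(&list_lock);
	b->nr = 0;
}

/* Returns true while there is still room, as folio_batch_add() does. */
static bool batch_add(struct batch *b, struct item *item)
{
	b->items[b->nr++] = item;
	return b->nr < BATCH_SIZE;
}

static void batch_add_and_flush(struct batch *b, struct item *item,
				move_fn_t move_fn)
{
	if (!batch_add(b, item))
		batch_flush(b, move_fn);
}

static void move_to_list(struct item *item)
{
	printf("moving item %d under the shared lock\n", item->id);
}

int main(void)
{
	static struct batch b;
	struct item items[40];

	for (int i = 0; i < 40; i++) {
		items[i].id = i;
		batch_add_and_flush(&b, &items[i], move_to_list);
	}
	batch_flush(&b, move_to_list);	/* drain the partial batch */
	return 0;
}
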
237 static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
239 if (!folio_test_unevictable(folio)) {
240 lruvec_del_folio(lruvec, folio);
241 folio_clear_active(folio);
242 lruvec_add_folio_tail(lruvec, folio);
243 __count_vm_events(PGROTATED, folio_nr_pages(folio));
248 * Writeback is about to end against a folio which has been marked for
254 void folio_rotate_reclaimable(struct folio *folio)
256 if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
257 !folio_test_unevictable(folio) && folio_test_lru(folio)) {
261 folio_get(folio);
264 folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
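
folio_rotate_reclaimable() (lines 254-264) and its helper lru_move_tail_fn() (lines 237-243) simply rotate a folio to the tail of the inactive list once writeback ends, so reclaim finds it first. A toy intrusive-list version of that rotate, assuming the usual kernel convention that reclaim scans from the tail; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
	int id;
	bool active;
};

/* Circular list head, in the spirit of the kernel's struct list_head. */
static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* The rotate: clear the active state and requeue at the tail so the
 * node is the next candidate for reclaim. */
static void move_tail(struct node *n, struct node *lru)
{
	list_del(n);
	n->active = false;
	list_add_tail(n, lru);
}

int main(void)
{
	struct node lru, a = { .id = 1 }, b = { .id = 2 };

	list_init(&lru);
	list_add_tail(&a, &lru);
	list_add_tail(&b, &lru);

	move_tail(&a, &lru);	/* a is now last in the walk below */

	for (struct node *n = lru.next; n != &lru; n = n->next)
		printf("node %d\n", n->id);
	return 0;
}
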
321 void lru_note_cost_refault(struct folio *folio)
324 if (page_is_file_lru(folio_page(folio, 0))) {
325 lru_note_cost(&(folio_pgdat(folio)->__lruvec), 1, folio_nr_pages(folio), 0);
330 lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
331 folio_nr_pages(folio), 0);
334 static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
336 if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
337 long nr_pages = folio_nr_pages(folio);
339 lruvec_del_folio(lruvec, folio);
340 folio_set_active(folio);
341 lruvec_add_folio(lruvec, folio);
342 trace_mm_lru_activate(folio);
359 void folio_activate(struct folio *folio)
361 if (folio_test_lru(folio) && !folio_test_active(folio) &&
362 !folio_test_unevictable(folio)) {
365 folio_get(folio);
368 folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
378 void folio_activate(struct folio *folio)
382 if (folio_test_clear_lru(folio)) {
383 lruvec = folio_lruvec_lock_irq(folio);
384 folio_activate_fn(lruvec, folio);
386 folio_set_lru(folio);
391 static void __lru_cache_activate_folio(struct folio *folio)
400 * Search backwards on the optimistic assumption that the folio being
402 * the local batch is examined as a !LRU folio could be in the
405 * a remote batch's folio active potentially hits a race where
406 * a folio is marked active just after it is added to the inactive
410 struct folio *batch_folio = fbatch->folios[i];
412 if (batch_folio == folio) {
413 folio_set_active(folio);
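
__lru_cache_activate_folio() (lines 391-413) only examines the local CPU's add batch, and scans it backwards because the folio being activated was most likely the one added last. A minimal sketch of that backwards scan; the types are stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define BATCH_SIZE 15

struct folio_stub { bool active; };

struct batch {
	unsigned int nr;
	struct folio_stub *slots[BATCH_SIZE];
};

/* Newest entries sit at the highest indices, so start from the end. */
static bool activate_if_batched(struct batch *b, struct folio_stub *folio)
{
	for (unsigned int i = b->nr; i-- > 0; ) {
		if (b->slots[i] == folio) {
			folio->active = true;
			return true;
		}
	}
	return false;
}

int main(void)
{
	static struct batch b;
	struct folio_stub f = { 0 };

	b.slots[b.nr++] = &f;
	printf("found=%d active=%d\n", activate_if_batched(&b, &f), f.active);
	return 0;
}
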
422 static void folio_inc_refs(struct folio *folio)
424 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
426 if (folio_test_unevictable(folio))
429 if (!folio_test_referenced(folio)) {
430 folio_set_referenced(folio);
434 if (!folio_test_workingset(folio)) {
435 folio_set_workingset(folio);
447 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
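
The folio_inc_refs() variant at lines 422-447 (the multi-gen LRU build) updates folio->flags with a try_cmpxchg() retry loop rather than a lock: compute the new flags word from a snapshot and retry if another CPU changed it in the meantime. A self-contained C11 sketch of that read-modify-write loop; the two flag bits are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_REFERENCED	(1UL << 0)
#define FLAG_WORKINGSET	(1UL << 1)

static _Atomic unsigned long flags;

static void mark_accessed(void)
{
	unsigned long old = atomic_load(&flags);
	unsigned long new;

	do {
		if (!(old & FLAG_REFERENCED))
			new = old | FLAG_REFERENCED;
		else
			new = old | FLAG_WORKINGSET;
		/* On failure, 'old' is refreshed with the current value
		 * and the new word is recomputed on the next pass. */
	} while (!atomic_compare_exchange_weak(&flags, &old, new));
}

int main(void)
{
	mark_accessed();
	mark_accessed();
	printf("flags = %#lx\n", atomic_load(&flags));
	return 0;
}
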
450 static void folio_inc_refs(struct folio *folio)
465 void folio_mark_accessed(struct folio *folio)
468 folio_inc_refs(folio);
472 if (!folio_test_referenced(folio)) {
473 folio_set_referenced(folio);
474 } else if (folio_test_unevictable(folio)) {
480 } else if (!folio_test_active(folio)) {
482 * If the folio is on the LRU, queue it for activation via
483 * cpu_fbatches.activate. Otherwise, assume the folio is in a
487 if (folio_test_lru(folio))
488 folio_activate(folio);
490 __lru_cache_activate_folio(folio);
491 folio_clear_referenced(folio);
492 workingset_activation(folio);
494 if (folio_test_idle(folio))
495 folio_clear_idle(folio);
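
folio_mark_accessed() (lines 465-495) implements the classic two-touch promotion: the first access only sets the referenced bit, a second access while the bit is still set promotes the folio to the active list and clears it. A tiny model of that state machine, with the kernel's unevictable, batching and idle-tracking details left out.

#include <stdbool.h>
#include <stdio.h>

struct page_state {
	bool referenced;
	bool active;
};

/* First touch marks the page referenced; a second touch while the bit
 * is still set promotes it and clears the bit again. */
static void mark_accessed(struct page_state *p)
{
	if (!p->referenced) {
		p->referenced = true;
	} else if (!p->active) {
		p->active = true;
		p->referenced = false;
	}
}

int main(void)
{
	struct page_state p = { 0 };

	mark_accessed(&p);	/* referenced */
	mark_accessed(&p);	/* promoted to active */
	printf("referenced=%d active=%d\n", p.referenced, p.active);
	return 0;
}
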
500 * folio_add_lru - Add a folio to an LRU list.
501 * @folio: The folio to be added to the LRU.
503 * Queue the folio for addition to the LRU. The decision on whether
506 * have the folio added to the active list using folio_mark_accessed().
508 void folio_add_lru(struct folio *folio)
512 VM_BUG_ON_FOLIO(folio_test_active(folio) &&
513 folio_test_unevictable(folio), folio);
514 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
517 if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
519 folio_set_active(folio);
521 folio_get(folio);
524 folio_batch_add_and_move(fbatch, folio, lru_add_fn);
530 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
531 * @folio: The folio to be added to the LRU.
532 * @vma: VMA in which the folio is mapped.
534 * If the VMA is mlocked, @folio is added to the unevictable list.
537 void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
539 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
542 mlock_new_folio(folio);
544 folio_add_lru(folio);
548 * If the folio cannot be invalidated, it is moved to the
554 * If the folio isn't mapped and dirty/writeback, the folio
557 * 1. active, mapped folio -> none
558 * 2. active, dirty/writeback folio -> inactive, head, reclaim
559 * 3. inactive, mapped folio -> none
560 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
564 * In 4, it moves to the head of the inactive list so the folio is
568 static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
570 bool active = folio_test_active(folio);
571 long nr_pages = folio_nr_pages(folio);
573 if (folio_test_unevictable(folio))
576 /* Some processes are using the folio */
577 if (folio_mapped(folio))
580 lruvec_del_folio(lruvec, folio);
581 folio_clear_active(folio);
582 folio_clear_referenced(folio);
584 if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
591 lruvec_add_folio(lruvec, folio);
592 folio_set_reclaim(folio);
595 * The folio's writeback ended while it was in the batch.
596 * We move that folio to the tail of the inactive list.
598 lruvec_add_folio_tail(lruvec, folio);
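
The four cases listed at lines 557-560 reduce to a small decision in lru_deactivate_file_fn(): mapped (or unevictable) folios are left alone, clean folios go to the tail of the inactive list, and dirty/writeback folios go to the head with the reclaim hint set so folio_rotate_reclaimable() can finish the job when writeback ends. A sketch of just that decision, with illustrative types.

#include <stdbool.h>
#include <stdio.h>

enum placement { LEAVE_ALONE, INACTIVE_TAIL, INACTIVE_HEAD_RECLAIM };

struct folio_hint {
	bool mapped;
	bool dirty_or_writeback;
	bool unevictable;
};

static enum placement deactivate_file(const struct folio_hint *f)
{
	if (f->unevictable || f->mapped)
		return LEAVE_ALONE;		/* cases 1 and 3 */
	if (f->dirty_or_writeback)
		return INACTIVE_HEAD_RECLAIM;	/* cases 2 and 4 */
	return INACTIVE_TAIL;			/* clean: reclaim next */
}

int main(void)
{
	struct folio_hint dirty = { .dirty_or_writeback = true };
	struct folio_hint clean = { 0 };

	printf("dirty -> %d, clean -> %d\n",
	       deactivate_file(&dirty), deactivate_file(&clean));
	return 0;
}
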
609 static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
611 if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
612 long nr_pages = folio_nr_pages(folio);
614 lruvec_del_folio(lruvec, folio);
615 folio_clear_active(folio);
616 folio_clear_referenced(folio);
617 lruvec_add_folio(lruvec, folio);
625 static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
627 if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
628 !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
629 long nr_pages = folio_nr_pages(folio);
631 lruvec_del_folio(lruvec, folio);
632 folio_clear_active(folio);
633 folio_clear_referenced(folio);
639 folio_clear_swapbacked(folio);
640 lruvec_add_folio(lruvec, folio);
688 * deactivate_file_folio() - Deactivate a file folio.
689 * @folio: Folio to deactivate.
691 * This function hints to the VM that @folio is a good reclaim candidate,
692 * for example if its invalidation fails due to the folio being dirty
695 * Context: Caller holds a reference on the folio.
697 void deactivate_file_folio(struct folio *folio)
701 /* Deactivating an unevictable folio will not accelerate reclaim */
702 if (folio_test_unevictable(folio))
705 folio_get(folio);
708 folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
713 * folio_deactivate - deactivate a folio
714 * @folio: folio to deactivate
716 * folio_deactivate() moves @folio to the inactive list if @folio was on the
718 * reclaim of @folio.
720 void folio_deactivate(struct folio *folio)
722 if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
723 (folio_test_active(folio) || lru_gen_enabled())) {
726 folio_get(folio);
729 folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
735 * folio_mark_lazyfree - make an anon folio lazyfree
736 * @folio: folio to deactivate
738 * folio_mark_lazyfree() moves @folio to the inactive file list.
739 * This is done to accelerate the reclaim of @folio.
741 void folio_mark_lazyfree(struct folio *folio)
743 if (folio_test_lru(folio) && folio_test_anon(folio) &&
744 folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
745 !folio_test_unevictable(folio)) {
748 folio_get(folio);
751 folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
964 * or folio pointers. We ignore any encoded bits, and turn any of
965 * them into just a folio that gets free'd.
977 struct folio *folio;
979 /* Turn any of the argument types into a folio */
980 folio = page_folio(encoded_page_ptr(encoded[i]));
992 if (is_huge_zero_page(&folio->page))
995 if (folio_is_zone_device(folio)) {
1000 if (put_devmap_managed_page(&folio->page))
1002 if (folio_put_testzero(folio))
1003 free_zone_device_page(&folio->page);
1007 if (!folio_put_testzero(folio))
1010 if (folio_test_large(folio)) {
1015 __folio_put_large(folio);
1019 if (folio_test_lru(folio)) {
1022 lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
1027 lruvec_del_folio(lruvec, folio);
1028 __folio_clear_lru_flags(folio);
1037 if (unlikely(folio_test_mlocked(folio))) {
1038 __folio_clear_mlocked(folio);
1039 zone_stat_sub_folio(folio, NR_MLOCK);
1043 list_add(&folio->lru, &pages_to_free);
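
release_pages() (lines 964-1043) drops one reference per entry, skips anything whose refcount stays above zero, and collects the rest on a private list for a single batched free at the end. A userspace sketch of that skip-or-collect loop, with illustrative names standing in for folio_put_testzero() and free_unref_page_list().

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	int id;
};

/* Stand-in for the batched free: one call handles the whole list. */
static void free_batch(struct obj **batch, int nr)
{
	printf("freeing %d objects in one call\n", nr);
	for (int i = 0; i < nr; i++)
		free(batch[i]);
}

static void release_objs(struct obj **objs, int nr)
{
	struct obj **to_free = calloc(nr, sizeof(*to_free));
	int nr_free = 0;

	for (int i = 0; i < nr; i++) {
		/* Only the holder of the last reference may free. */
		if (atomic_fetch_sub(&objs[i]->refcount, 1) != 1)
			continue;
		to_free[nr_free++] = objs[i];
	}
	free_batch(to_free, nr_free);
	free(to_free);
}

int main(void)
{
	struct obj *objs[3];

	for (int i = 0; i < 3; i++) {
		objs[i] = malloc(sizeof(*objs[i]));
		objs[i]->id = i;
		/* Object 1 holds an extra reference and must survive. */
		atomic_init(&objs[i]->refcount, i == 1 ? 2 : 1);
	}
	release_objs(objs, 3);
	free(objs[1]);	/* release the survivor for the demo */
	return 0;
}
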
1079 * entries. This function prunes all the non-folio entries from @fbatch
1080 * without leaving holes, so that it can be passed on to folio-only batch
1088 struct folio *folio = fbatch->folios[i];
1089 if (!xa_is_value(folio))
1090 fbatch->folios[j++] = folio;
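
folio_batch_remove_exceptionals() (lines 1079-1090) compacts the batch in place with a read index and a write index, so pruning the value entries leaves no holes. The same two-index filter in plain C; keeping even numbers plays the role of the kernel's !xa_is_value() test.

#include <stdio.h>

#define BATCH_SIZE 15

struct batch {
	unsigned int nr;
	int slots[BATCH_SIZE];
};

/* i reads every slot, j writes back only the slots we keep. */
static void remove_odd(struct batch *b)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < b->nr; i++) {
		int v = b->slots[i];

		if (v % 2 == 0)
			b->slots[j++] = v;
	}
	b->nr = j;
}

int main(void)
{
	struct batch b = { .nr = 5, .slots = { 2, 3, 4, 7, 8 } };

	remove_odd(&b);
	for (unsigned int i = 0; i < b.nr; i++)
		printf("%d ", b.slots[i]);
	putchar('\n');
	return 0;
}
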