Lines Matching defs:folio
85 struct folio *prev; \
1057 static inline int is_page_cache_freeable(struct folio *folio)
1060 * A freeable page cache folio is referenced only by the caller
1061 * that isolated the folio, the page cache and optional filesystem
1062 * private data at folio->private.
1064 return folio_ref_count(folio) - folio_test_private(folio) ==
1065 1 + folio_nr_pages(folio);
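The check above is pure refcount arithmetic: the only references a freeable folio may hold are the isolating caller's (1), one per page taken by the page cache (folio_nr_pages), and at most one pinned by filesystem-private data, which subtracting folio_test_private() cancels out. A minimal user-space sketch of the same arithmetic, with hypothetical fake_folio / fake_is_page_cache_freeable stand-ins for the folio accessors:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the folio accessors used above; in the kernel these read
 * folio->_refcount, the private flag and the folio size. */
struct fake_folio {
	int ref_count;		/* folio_ref_count()    */
	bool has_private;	/* folio_test_private() */
	int nr_pages;		/* folio_nr_pages()     */
};

/* Same arithmetic as is_page_cache_freeable(): the only references left
 * must be the isolating caller (1), one per page held by the page cache
 * (nr_pages), plus optionally one pinned by fs-private data, which the
 * subtraction of has_private cancels out. */
static bool fake_is_page_cache_freeable(const struct fake_folio *f)
{
	return f->ref_count - (f->has_private ? 1 : 0) == 1 + f->nr_pages;
}

int main(void)
{
	/* A 4-page folio in the page cache with buffer heads attached,
	 * isolated by reclaim: 4 (cache) + 1 (caller) + 1 (private) = 6. */
	struct fake_folio f = { .ref_count = 6, .has_private = true, .nr_pages = 4 };

	printf("freeable: %d\n", fake_is_page_cache_freeable(&f));	/* 1 */

	f.ref_count++;	/* e.g. a racing extra reference -> not freeable */
	printf("freeable: %d\n", fake_is_page_cache_freeable(&f));	/* 0 */
	return 0;
}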
1069 * We detected a synchronous write error writing a folio out. Probably
1074 * prevents it from being freed up. But we have a ref on the folio and once
1075 * that folio is locked, the mapping is pinned.
1081 struct folio *folio, int error)
1083 folio_lock(folio);
1084 if (folio_mapping(folio) == mapping)
1086 folio_unlock(folio);
1196 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
1201 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
1219 /* failed to write folio out, folio is locked */
1221 /* move folio to the active list, folio is locked */
1223 /* folio has been sent to the disk successfully, folio is unlocked */
1225 /* folio is clean and locked */
1230 * pageout is called by shrink_folio_list() for each dirty folio.
1233 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
1237 * If the folio is dirty, only perform writeback if that write
1244 * this folio's queue, we can perform writeback even if that
1247 * If the folio is swapcache, write it back even if that would
1252 if (!is_page_cache_freeable(folio))
1257 * folio->mapping == NULL while being dirty with clean buffers.
1259 if (folio_test_private(folio)) {
1260 if (try_to_free_buffers(folio)) {
1261 folio_clear_dirty(folio);
1262 pr_info("%s: orphaned folio\n", __func__);
1271 if (folio_clear_dirty_for_io(folio)) {
1282 folio_set_reclaim(folio);
1283 res = mapping->a_ops->writepage(&folio->page, &wbc);
1285 handle_write_error(mapping, folio, res);
1287 folio_clear_reclaim(folio);
1291 if (!folio_test_writeback(folio)) {
1293 folio_clear_reclaim(folio);
1295 trace_mm_vmscan_write_folio(folio);
1296 node_stat_add_folio(folio, NR_VMSCAN_WRITE);
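The four comments above spell out the pageout_t contract that shrink_folio_list() later dispatches on (see the switch (pageout(...)) hits further down). A hedged user-space sketch of that contract, using hypothetical fake_* names in place of the kernel's PAGE_KEEP / PAGE_ACTIVATE / PAGE_SUCCESS / PAGE_CLEAN:

#include <stdio.h>

/* Hypothetical mirror of the pageout_t values described in the comments. */
enum fake_pageout_result {
	FAKE_KEEP,	/* failed to write the folio out, folio is locked    */
	FAKE_ACTIVATE,	/* move the folio to the active list, folio locked   */
	FAKE_SUCCESS,	/* folio sent to the disk successfully, folio unlocked */
	FAKE_CLEAN,	/* folio is clean and locked                          */
};

/* Sketch of how a reclaim loop reacts to each result; compare the
 * switch (pageout(folio, mapping, &plug)) arms in shrink_folio_list(). */
static const char *fake_react(enum fake_pageout_result res)
{
	switch (res) {
	case FAKE_KEEP:		return "keep the locked folio on the list, retry later";
	case FAKE_ACTIVATE:	return "put the folio back on the active list";
	case FAKE_SUCCESS:	return "I/O started; reclaim now only if it is already clean and not under writeback";
	case FAKE_CLEAN:	return "someone else cleaned it; try to free the folio below";
	}
	return "?";
}

int main(void)
{
	for (int r = FAKE_KEEP; r <= FAKE_CLEAN; r++)
		printf("%d: %s\n", r, fake_react((enum fake_pageout_result)r));
	return 0;
}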
1304 * Same as remove_mapping, but if the folio is removed from the mapping, it
1307 static int __remove_mapping(struct address_space *mapping, struct folio *folio,
1313 BUG_ON(!folio_test_locked(folio));
1314 BUG_ON(mapping != folio_mapping(folio));
1316 if (!folio_test_swapcache(folio))
1320 * The non racy check for a busy folio.
1323 * a ref to the folio, it may be possible that they dirty it then
1330 * !folio_test_dirty(folio) [good]
1331 * folio_set_dirty(folio);
1332 * folio_put(folio);
1333 * !refcount(folio) [good, discard it]
1338 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
1339 * load is not satisfied before that of folio->_refcount.
1344 refcount = 1 + folio_nr_pages(folio);
1345 if (!folio_ref_freeze(folio, refcount))
1348 if (unlikely(folio_test_dirty(folio))) {
1349 folio_ref_unfreeze(folio, refcount);
1353 if (folio_test_swapcache(folio)) {
1354 swp_entry_t swap = folio->swap;
1357 shadow = workingset_eviction(folio, target_memcg);
1358 __delete_from_swap_cache(folio, swap, shadow);
1359 mem_cgroup_swapout(folio, swap);
1361 put_swap_folio(folio, swap);
1363 void (*free_folio)(struct folio *);
1382 if (reclaimed && folio_is_file_lru(folio) &&
1384 shadow = workingset_eviction(folio, target_memcg);
1385 __filemap_remove_folio(folio, shadow);
1392 free_folio(folio);
1399 if (!folio_test_swapcache(folio))
1405 * remove_mapping() - Attempt to remove a folio from its mapping.
1407 * @folio: The folio to remove.
1409 * If the folio is dirty, under writeback or if someone else has a ref
1411 * Return: The number of pages removed from the mapping. 0 if the folio
1413 * Context: The caller should have a single refcount on the folio and
1416 long remove_mapping(struct address_space *mapping, struct folio *folio)
1418 if (__remove_mapping(mapping, folio, false, NULL)) {
1424 folio_ref_unfreeze(folio, 1);
1425 return folio_nr_pages(folio);
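remove_mapping() relies on folio_ref_freeze(): drop _refcount to zero only if it is exactly the expected 1 + folio_nr_pages, so any concurrent reference holder makes the removal bail out; on success the count is restored to the caller's single reference. A minimal C11 sketch of that pattern under the assumptions in the kerneldoc above (hypothetical fake_* names and plain atomics instead of the kernel's folio_ref_* helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for folio->_refcount plus the state we test. */
struct fake_folio {
	atomic_int refcount;
	int nr_pages;
	bool dirty;
};

/* Roughly what folio_ref_freeze() does: drop the count to 0 only if it is
 * exactly the expected value, so a concurrent folio_get() makes the freeze
 * fail and the folio is left alone. */
static bool fake_ref_freeze(struct fake_folio *f, int expected)
{
	return atomic_compare_exchange_strong(&f->refcount, &expected, 0);
}

static void fake_ref_unfreeze(struct fake_folio *f, int count)
{
	atomic_store_explicit(&f->refcount, count, memory_order_release);
}

/* Sketch of the remove_mapping() flow shown above: freeze, re-check dirty,
 * delete from the cache, then hand the folio back with a single reference. */
static bool fake_remove_mapping(struct fake_folio *f)
{
	int refcount = 1 + f->nr_pages;

	if (!fake_ref_freeze(f, refcount))
		return false;		/* someone else holds a reference */

	if (f->dirty) {			/* raced with a late dirtying */
		fake_ref_unfreeze(f, refcount);
		return false;
	}

	/* ...delete from the (swap) cache, record the shadow entry... */
	fake_ref_unfreeze(f, 1);	/* caller keeps its single reference */
	return true;
}

int main(void)
{
	struct fake_folio f = { .nr_pages = 1, .dirty = false };

	atomic_init(&f.refcount, 2);	/* caller + page cache */
	printf("removed: %d\n", fake_remove_mapping(&f));	/* 1 */
	return 0;
}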
1431 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
1432 * @folio: Folio to be returned to an LRU list.
1434 * Add previously isolated @folio to appropriate LRU list.
1435 * The folio may still be unevictable for other reasons.
1439 void folio_putback_lru(struct folio *folio)
1441 folio_add_lru(folio);
1442 folio_put(folio); /* drop ref from isolate */
1453 static enum folio_references folio_check_references(struct folio *folio,
1459 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
1461 referenced_folio = folio_test_clear_referenced(folio);
1464 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
1465 * Let the folio, now marked Mlocked, be moved to the unevictable list.
1484 * to look twice if a mapped file/anon folio is used more
1495 folio_set_referenced(folio);
1503 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
1510 if (referenced_folio && folio_is_file_lru(folio))
1516 /* Check if a folio is dirty or under writeback */
1517 static void folio_check_dirty_writeback(struct folio *folio,
1529 if (!folio_is_file_lru(folio) ||
1530 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
1536 /* By default assume that the folio flags are accurate */
1537 *dirty = folio_test_dirty(folio);
1538 *writeback = folio_test_writeback(folio);
1541 if (!folio_test_private(folio))
1544 mapping = folio_mapping(folio);
1546 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
1549 static struct folio *alloc_demote_folio(struct folio *src,
1552 struct folio *dst;
1621 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
1625 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
1634 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
1659 struct folio *folio;
1666 folio = lru_to_folio(folio_list);
1667 list_del(&folio->lru);
1669 if (!folio_trylock(folio))
1672 VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
1674 nr_pages = folio_nr_pages(folio);
1679 if (unlikely(!folio_evictable(folio)))
1682 if (!sc->may_unmap && folio_mapped(folio))
1687 folio_mapped(folio) && folio_test_referenced(folio))
1695 folio_check_dirty_writeback(folio, &dirty, &writeback);
1703 * Treat this folio as congested if folios are cycling
1708 if (writeback && folio_test_reclaim(folio))
1712 * If a folio at the tail of the LRU is under writeback, there
1716 * of folios under writeback and this folio has both
1720 * can complete. Waiting on the folio itself risks an
1722 * the folio due to I/O error or disconnected storage
1724 * quickly and the caller can stall after the folio
1727 * 2) Global or new memcg reclaim encounters a folio that is
1730 * not to fs). In this case mark the folio for immediate
1735 * enter reclaim, and deadlock if it waits on a folio for
1740 * 3) Legacy memcg encounters a folio that already has the
1741 * reclaim flag set. memcg does not have any dirty folio
1755 if (folio_test_writeback(folio)) {
1758 folio_test_reclaim(folio) &&
1765 !folio_test_reclaim(folio) ||
1766 !may_enter_fs(folio, sc->gfp_mask)) {
1774 * What we do want is for this folio to
1781 folio_set_reclaim(folio);
1787 folio_unlock(folio);
1788 folio_wait_writeback(folio);
1789 /* then go back and try same folio again */
1790 list_add_tail(&folio->lru, folio_list);
1796 references = folio_check_references(folio, sc);
1807 ; /* try to reclaim the folio below */
1811 * Before reclaiming the folio, try to relocate
1815 (thp_migration_supported() || !folio_test_large(folio))) {
1816 list_add(&folio->lru, &demote_folios);
1817 folio_unlock(folio);
1824 * Lazyfree folio could be freed directly
1826 if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
1827 if (!folio_test_swapcache(folio) && references != FOLIOREF_RECLAIM_PURGEABLE) {
1830 if (folio_maybe_dma_pinned(folio))
1832 if (folio_test_large(folio)) {
1833 /* cannot split folio, skip it */
1834 if (!can_split_folio(folio, NULL))
1841 if (!folio_entire_mapcount(folio) &&
1842 split_folio_to_list(folio,
1846 if (!add_to_swap(folio)) {
1847 if (!folio_test_large(folio))
1850 if (split_folio_to_list(folio,
1856 if (!add_to_swap(folio))
1860 } else if (folio_test_swapbacked(folio) &&
1861 folio_test_large(folio)) {
1862 /* Split shmem folio */
1863 if (split_folio_to_list(folio, folio_list))
1868 * If the folio was split above, the tail pages will make
1872 if ((nr_pages > 1) && !folio_test_large(folio)) {
1878 * The folio is mapped into the page tables of one or more
1881 if (folio_mapped(folio)) {
1883 bool was_swapbacked = folio_test_swapbacked(folio);
1885 if (folio_test_pmd_mappable(folio))
1888 try_to_unmap(folio, flags);
1889 if (folio_mapped(folio)) {
1892 folio_test_swapbacked(folio))
1900 * No point in trying to reclaim folio if it is pinned.
1902 * if the folio is pinned and thus potentially modified by the
1905 if (folio_maybe_dma_pinned(folio))
1908 mapping = folio_mapping(folio);
1909 if (folio_test_dirty(folio) && references != FOLIOREF_RECLAIM_PURGEABLE) {
1913 * injecting inefficient single-folio I/O into
1921 if (folio_is_file_lru(folio) &&
1923 !folio_test_reclaim(folio) ||
1928 * except we already have the folio isolated
1931 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
1933 folio_set_reclaim(folio);
1940 if (!may_enter_fs(folio, sc->gfp_mask))
1951 switch (pageout(folio, mapping, &plug)) {
1959 if (folio_test_writeback(folio))
1961 if (folio_test_dirty(folio))
1966 * ahead and try to reclaim the folio.
1968 if (!folio_trylock(folio))
1970 if (folio_test_dirty(folio) ||
1971 folio_test_writeback(folio))
1973 mapping = folio_mapping(folio);
1976 ; /* try to free the folio below */
1981 * If the folio has buffers, try to free the buffer
1982 * mappings associated with this folio. If we succeed
1983 * we try to free the folio as well.
1985 * We do this even if the folio is dirty.
1987 * is possible for a folio to have the dirty flag set,
1993 * and mark the folio clean - it can be freed.
1999 * folio is no longer mapped into process address space
2001 * the folio on the LRU so it is swappable.
2003 if (folio_needs_release(folio)) {
2004 if (!filemap_release_folio(folio, sc->gfp_mask))
2006 if (!mapping && folio_ref_count(folio) == 1) {
2007 folio_unlock(folio);
2008 if (folio_put_testzero(folio))
2014 * this folio shortly, so we may
2024 if (folio_test_anon(folio) && (!folio_test_swapbacked(folio) || references == FOLIOREF_RECLAIM_PURGEABLE)) {
2026 if (!folio_ref_freeze(folio, 1))
2030 * The folio has only one reference left, which is
2032 * folio back on the lru and drops the reference, the
2033 * folio will be freed anyway. It doesn't matter
2038 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
2039 } else if (!mapping || !__remove_mapping(mapping, folio, true,
2043 folio_unlock(folio);
2055 if (unlikely(folio_test_large(folio)))
2056 destroy_large_folio(folio);
2058 list_add(&folio->lru, &free_folios);
2072 if (folio_test_swapcache(folio) &&
2073 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
2074 folio_free_swap(folio);
2075 VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
2076 if (!folio_test_mlocked(folio)) {
2077 int type = folio_is_file_lru(folio);
2078 folio_set_active(folio);
2080 count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
2083 folio_unlock(folio);
2085 list_add(&folio->lru, &ret_folios);
2086 VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
2087 folio_test_unevictable(folio), folio);
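folio_check_references() feeds the dispatch hinted at by the "; /* try to reclaim the folio below */" hit above: only the FOLIOREF_RECLAIM* values fall through to reclaim, while FOLIOREF_ACTIVATE and FOLIOREF_KEEP send the folio back to an LRU list (this tree also tests FOLIOREF_RECLAIM_PURGEABLE on the reclaim side). A small illustrative sketch with hypothetical fake_* names:

#include <stdio.h>

/* Hypothetical mirror of the folio_references values referenced above. */
enum fake_folio_references {
	FAKE_RECLAIM,
	FAKE_RECLAIM_CLEAN,
	FAKE_RECLAIM_PURGEABLE,
	FAKE_KEEP,
	FAKE_ACTIVATE,
};

/* Sketch of the dispatch on folio_check_references() in shrink_folio_list(). */
static const char *fake_dispatch(enum fake_folio_references refs)
{
	switch (refs) {
	case FAKE_ACTIVATE:
		return "activate_locked: rotate onto the active list";
	case FAKE_KEEP:
		return "keep_locked: leave on the inactive list for now";
	default:
		return "try to reclaim the folio below";
	}
}

int main(void)
{
	printf("%s\n", fake_dispatch(FAKE_ACTIVATE));
	printf("%s\n", fake_dispatch(FAKE_RECLAIM_CLEAN));
	return 0;
}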
2143 struct folio *folio, *next;
2147 list_for_each_entry_safe(folio, next, folio_list, lru) {
2148 if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
2149 !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
2150 !folio_test_unevictable(folio)) {
2151 folio_clear_active(folio);
2152 list_move(&folio->lru, &clean_folios);
2207 static bool skip_cma(struct folio *folio, struct scan_control *sc)
2211 get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
2214 static bool skip_cma(struct folio *folio, struct scan_control *sc)
2258 struct folio *folio;
2260 folio = lru_to_folio(src);
2261 prefetchw_prev_lru_folio(folio, src, flags);
2263 nr_pages = folio_nr_pages(folio);
2266 if (folio_zonenum(folio) > sc->reclaim_idx ||
2267 skip_cma(folio, sc)) {
2268 nr_skipped[folio_zonenum(folio)] += nr_pages;
2278 * Account all pages in a folio.
2282 if (!folio_test_lru(folio))
2284 if (!sc->may_unmap && folio_mapped(folio))
2289 * sure the folio is not being freed elsewhere -- the
2290 * folio release code relies on it.
2292 if (unlikely(!folio_try_get(folio)))
2295 if (!folio_test_clear_lru(folio)) {
2296 /* Another thread is already isolating this folio */
2297 folio_put(folio);
2302 nr_zone_taken[folio_zonenum(folio)] += nr_pages;
2305 list_move(&folio->lru, move_to);
2336 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
2337 * @folio: Folio to isolate from its LRU list.
2339 * Isolate a @folio from an LRU list and adjust the vmstat statistic
2340 * corresponding to whatever LRU list the folio was on.
2342 * The folio will have its LRU flag cleared. If it was found on the
2349 * (1) Must be called with an elevated refcount on the folio. This is a
2355 * Return: true if the folio was removed from an LRU list.
2356 * false if the folio was not on an LRU list.
2358 bool folio_isolate_lru(struct folio *folio)
2362 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
2364 if (folio_test_clear_lru(folio)) {
2367 folio_get(folio);
2368 lruvec = folio_lruvec_lock_irq(folio);
2369 lruvec_del_folio(lruvec, folio);
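Both isolate_lru_folios() and folio_isolate_lru() use the same claim-then-own pattern: hold (or take) a reference so the folio cannot be freed underneath us, then atomically clear the LRU flag with folio_test_clear_lru(); only the thread that actually clears the flag may unlink the folio from its list. A user-space C11 sketch of that pattern in the isolate_lru_folios() order (reference first, flag second), with hypothetical fake_* names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_PG_LRU (1UL << 0)

/* Hypothetical stand-in for the folio flag word and refcount. */
struct fake_folio {
	atomic_ulong flags;
	atomic_int refcount;
};

/* Like folio_test_clear_lru(): atomically clear the LRU bit and report
 * whether we were the thread that cleared it. */
static bool fake_test_clear_lru(struct fake_folio *f)
{
	return atomic_fetch_and(&f->flags, ~FAKE_PG_LRU) & FAKE_PG_LRU;
}

/* Like folio_try_get(): take a reference only if the folio is not already
 * on its way to being freed. */
static bool fake_try_get(struct fake_folio *f)
{
	int old = atomic_load(&f->refcount);

	while (old > 0)
		if (atomic_compare_exchange_weak(&f->refcount, &old, old + 1))
			return true;
	return false;
}

/* Sketch of the isolation pattern: only after both the reference and the
 * cleared LRU bit are held may the caller take the lruvec lock and delete
 * the folio from its list. */
static bool fake_isolate(struct fake_folio *f)
{
	if (!fake_try_get(f))
		return false;
	if (!fake_test_clear_lru(f)) {
		/* another thread is already isolating this folio */
		atomic_fetch_sub(&f->refcount, 1);
		return false;
	}
	/* ...lock the lruvec and unlink the folio here... */
	return true;
}

int main(void)
{
	struct fake_folio f;

	atomic_init(&f.flags, FAKE_PG_LRU);
	atomic_init(&f.refcount, 1);
	printf("isolated: %d\n", fake_isolate(&f));		/* 1 */
	printf("isolated again: %d\n", fake_isolate(&f));	/* 0, LRU bit gone */
	return 0;
}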
2438 struct folio *folio = lru_to_folio(list);
2440 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
2441 list_del(&folio->lru);
2442 if (unlikely(!folio_evictable(folio))) {
2444 folio_putback_lru(folio);
2457 * list_add(&folio->lru,)
2458 * list_add(&folio->lru,)
2460 folio_set_lru(folio);
2462 if (unlikely(folio_put_testzero(folio))) {
2463 __folio_clear_lru_flags(folio);
2465 if (unlikely(folio_test_large(folio))) {
2467 destroy_large_folio(folio);
2470 list_add(&folio->lru, &folios_to_free);
2479 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
2480 lruvec_add_folio(lruvec, folio);
2481 nr_pages = folio_nr_pages(folio);
2484 if (folio_test_active(folio)) {
2485 prot = is_prot_page(folio_page(folio, 0));
2486 file = page_is_file_lru(folio_page(folio, 0));
2488 lruvec = folio_lruvec(folio);
2497 if (folio_test_active(folio))
2640 * We move them the other way if the folio is referenced by one or more
2646 * we should drop lru_lock around each folio. It's impossible to balance
2649 * because nobody will play with that bit on a non-LRU folio.
2651 * The downside is that we have to touch folio->_refcount against each folio.
2652 * But we had to alter folio->flags anyway.
2686 struct folio *folio;
2689 folio = lru_to_folio(&l_hold);
2690 list_del(&folio->lru);
2692 if (unlikely(!folio_evictable(folio))) {
2693 folio_putback_lru(folio);
2698 if (folio_needs_release(folio) &&
2699 folio_trylock(folio)) {
2700 filemap_release_folio(folio, 0);
2701 folio_unlock(folio);
2706 if (folio_referenced(folio, 0, sc->target_mem_cgroup,
2717 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
2718 nr_rotated += folio_nr_pages(folio);
2719 list_add(&folio->lru, &l_active);
2724 folio_clear_active(folio); /* we are de-activating */
2725 folio_set_workingset(folio);
2726 list_add(&folio->lru, &l_inactive);
2758 struct folio *folio;
2769 folio = lru_to_folio(folio_list);
2770 list_del(&folio->lru);
2771 folio_putback_lru(folio);
2791 struct folio *folio = lru_to_folio(folio_list);
2793 if (nid == folio_nid(folio)) {
2794 folio_clear_active(folio);
2795 list_move(&folio->lru, &node_folio_list);
2833 * folio has a chance to be referenced again before it is reclaimed.
3732 static int folio_update_gen(struct folio *folio, int gen)
3734 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
3749 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
3755 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
3757 int type = folio_is_file_lru(folio);
3760 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
3762 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
3777 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
3779 lru_gen_update_size(lruvec, folio, old_gen, new_gen);
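folio_update_gen() and folio_inc_gen() keep the multi-gen LRU generation in a bitfield of folio->flags and update it with a try_cmpxchg loop, rebuilding the word from the latest observed value so concurrent updates to other flag bits are never lost (the real code also offsets the stored value so that zero can mean "not tracked"). A user-space sketch of that loop with a hypothetical 3-bit FAKE_GEN_MASK in place of LRU_GEN_MASK:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical layout: a small generation counter in the low bits of an
 * atomic flags word; the real mask and shift values differ. */
#define FAKE_GEN_WIDTH	3
#define FAKE_GEN_MASK	((1UL << FAKE_GEN_WIDTH) - 1)

/* Mirrors the try_cmpxchg loop above: recompute new_flags from the latest
 * old_flags on every retry.  Returns the previous generation, or -1 if no
 * generation was stored yet. */
static int fake_update_gen(atomic_ulong *flags, int new_gen)
{
	unsigned long old_flags = atomic_load(flags);
	unsigned long new_flags;

	do {
		new_flags = (old_flags & ~FAKE_GEN_MASK) |
			    ((unsigned long)(new_gen + 1) & FAKE_GEN_MASK);
	} while (!atomic_compare_exchange_weak(flags, &old_flags, new_flags));

	return (int)(old_flags & FAKE_GEN_MASK) - 1;
}

int main(void)
{
	atomic_ulong flags;

	atomic_init(&flags, 0);		/* no generation stored yet */
	printf("old gen: %d\n", fake_update_gen(&flags, 2));	/* -1 */
	printf("old gen: %d\n", fake_update_gen(&flags, 3));	/*  2 */
	return 0;
}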
3784 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
3787 int type = folio_is_file_lru(folio);
3788 int zone = folio_zonenum(folio);
3789 int delta = folio_nr_pages(folio);
3931 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
3934 struct folio *folio;
3940 folio = pfn_folio(pfn);
3941 if (folio_nid(folio) != pgdat->node_id)
3944 if (folio_memcg_rcu(folio) != memcg)
3948 if (!folio_is_file_lru(folio) && !can_swap)
3951 return folio;
3988 struct folio *folio;
4003 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
4004 if (!folio)
4013 if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
4014 !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
4015 !folio_test_swapcache(folio)))
4016 folio_mark_dirty(folio);
4018 old_gen = folio_update_gen(folio, new_gen);
4020 update_batch_size(walk, folio, old_gen, new_gen);
4069 struct folio *folio;
4084 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
4085 if (!folio)
4093 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
4094 !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
4095 !folio_test_swapcache(folio)))
4096 folio_mark_dirty(folio);
4098 old_gen = folio_update_gen(folio, new_gen);
4100 update_batch_size(walk, folio, old_gen, new_gen);
4333 struct folio *folio = lru_to_folio(head);
4335 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
4336 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
4337 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
4338 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
4340 new_gen = folio_inc_gen(lruvec, folio, false);
4341 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
4619 struct folio *folio = pfn_folio(pvmw->pfn);
4620 bool can_swap = !folio_is_file_lru(folio);
4621 struct mem_cgroup *memcg = folio_memcg(folio);
4622 struct pglist_data *pgdat = folio_pgdat(folio);
4628 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
4673 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap);
4674 if (!folio)
4682 if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
4683 !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
4684 !folio_test_swapcache(folio)))
4685 folio_mark_dirty(folio);
4688 old_gen = folio_update_gen(folio, new_gen);
4690 update_batch_size(walk, folio, old_gen, new_gen);
4695 old_gen = folio_lru_gen(folio);
4697 folio_set_referenced(folio);
4699 folio_activate(folio);
4860 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
4864 int gen = folio_lru_gen(folio);
4865 int type = folio_is_file_lru(folio);
4866 int zone = folio_zonenum(folio);
4867 int delta = folio_nr_pages(folio);
4868 int refs = folio_lru_refs(folio);
4872 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
4875 if (!folio_evictable(folio)) {
4876 success = lru_gen_del_folio(lruvec, folio, true);
4877 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4878 folio_set_unevictable(folio);
4879 lruvec_add_folio(lruvec, folio);
4885 if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) {
4886 success = lru_gen_del_folio(lruvec, folio, true);
4887 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4888 folio_set_swapbacked(folio);
4889 lruvec_add_folio_tail(lruvec, folio);
4895 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4903 gen = folio_inc_gen(lruvec, folio, false);
4904 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4912 if (zone > sc->reclaim_idx || skip_cma(folio, sc)) {
4913 gen = folio_inc_gen(lruvec, folio, false);
4914 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4919 if (folio_test_locked(folio) || folio_test_writeback(folio) ||
4920 (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
4921 gen = folio_inc_gen(lruvec, folio, true);
4922 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4929 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
4935 (folio_test_dirty(folio) ||
4936 (folio_test_anon(folio) && !folio_test_swapcache(folio))))
4940 if (!folio_try_get(folio))
4944 if (!folio_test_clear_lru(folio)) {
4945 folio_put(folio);
4950 if (!folio_test_referenced(folio))
4951 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
4954 folio_clear_reclaim(folio);
4955 folio_clear_referenced(folio);
4957 success = lru_gen_del_folio(lruvec, folio, true);
4958 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4990 struct folio *folio = lru_to_folio(head);
4991 int delta = folio_nr_pages(folio);
4993 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
4994 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
4995 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
4996 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
5000 if (sort_folio(lruvec, folio, sc, tier))
5002 else if (isolate_folio(lruvec, folio, sc)) {
5003 list_add(&folio->lru, list);
5006 list_move(&folio->lru, &moved);
5136 struct folio *folio;
5137 struct folio *next;
5162 list_for_each_entry_safe_reverse(folio, next, &list, lru) {
5163 if (!folio_evictable(folio)) {
5164 list_del(&folio->lru);
5165 folio_putback_lru(folio);
5169 if (folio_test_reclaim(folio) &&
5170 (folio_test_dirty(folio) || folio_test_writeback(folio))) {
5172 if (folio_test_workingset(folio))
5173 folio_set_referenced(folio);
5177 if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) ||
5178 folio_mapped(folio) || folio_test_locked(folio) ||
5179 folio_test_dirty(folio) || folio_test_writeback(folio)) {
5181 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
5187 list_move(&folio->lru, &clean);
5188 sc->nr_scanned -= folio_nr_pages(folio);
5636 struct folio *folio = lru_to_folio(head);
5638 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5639 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio);
5640 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5641 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio);
5643 lruvec_del_folio(lruvec, folio);
5644 success = lru_gen_add_folio(lruvec, folio, false);
5665 struct folio *folio = lru_to_folio(head);
5667 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5668 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
5669 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5670 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
5672 success = lru_gen_del_folio(lruvec, folio, false);
5674 lruvec_add_folio(lruvec, folio);
8153 * Checks folios for evictability, if an evictable folio is in the unevictable
8165 struct folio *folio = fbatch->folios[i];
8166 int nr_pages = folio_nr_pages(folio);
8170 /* block memcg migration while the folio moves between lrus */
8171 if (!folio_test_clear_lru(folio))
8174 lruvec = folio_lruvec_relock_irq(folio, lruvec);
8175 if (folio_evictable(folio) && folio_test_unevictable(folio)) {
8176 lruvec_del_folio(lruvec, folio);
8177 folio_clear_unevictable(folio);
8178 lruvec_add_folio(lruvec, folio);
8181 folio_set_lru(folio);