Lines Matching defs:folio
(Identifier search results; each entry is shown with its line number in the source file, which appears to be the kernel's mm/migrate.c.)
62 struct folio *folio = folio_get_nontail_page(page);
74 if (!folio)
77 if (unlikely(folio_test_slab(folio)))
86 if (unlikely(!__folio_test_movable(folio)))
90 if (unlikely(folio_test_slab(folio)))
104 if (unlikely(!folio_trylock(folio)))
107 if (!folio_test_movable(folio) || folio_test_isolated(folio))
110 mops = folio_movable_ops(folio);
111 VM_BUG_ON_FOLIO(!mops, folio);
113 if (!mops->isolate_page(&folio->page, mode))
117 WARN_ON_ONCE(folio_test_isolated(folio));
118 folio_set_isolated(folio);
119 folio_unlock(folio);
124 folio_unlock(folio);
126 folio_put(folio);
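The isolation path above (lines 62-126) only succeeds when the page's owner has published a struct movable_operations for it. For orientation, the sketch below shows the owner-side contract, assuming the movable_operations layout from include/linux/migrate.h; the balloon_* names are placeholders, not code from this listing.

#include <linux/migrate.h>

static bool balloon_isolate(struct page *page, isolate_mode_t mode)
{
	/* Detach the page from the driver's own bookkeeping; returning
	 * true lets isolate_movable_page() set the isolated flag. */
	return true;
}

static int balloon_migrate(struct page *dst, struct page *src,
			   enum migrate_mode mode)
{
	/* Move driver state from src to dst; 0 means MIGRATEPAGE_SUCCESS. */
	return 0;
}

static void balloon_putback(struct page *page)
{
	/* Undo balloon_isolate(); invoked via putback_movable_folio(). */
}

static const struct movable_operations balloon_mops = {
	.isolate_page	= balloon_isolate,
	.migrate_page	= balloon_migrate,
	.putback_page	= balloon_putback,
};

/* Run once per page when the driver allocates it: */
static void balloon_mark_movable(struct page *page)
{
	__SetPageMovable(page, &balloon_mops);
}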
131 static void putback_movable_folio(struct folio *folio)
133 const struct movable_operations *mops = folio_movable_ops(folio);
135 mops->putback_page(&folio->page);
136 folio_clear_isolated(folio);
149 struct folio *folio;
150 struct folio *folio2;
152 list_for_each_entry_safe(folio, folio2, l, lru) {
153 if (unlikely(folio_test_hugetlb(folio))) {
154 folio_putback_active_hugetlb(folio);
157 list_del(&folio->lru);
159 * We isolated non-lru movable folio so here we can use
160 * __PageMovable because LRU folio's mapping cannot have
163 if (unlikely(__folio_test_movable(folio))) {
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 folio_lock(folio);
166 if (folio_test_movable(folio))
167 putback_movable_folio(folio);
169 folio_clear_isolated(folio);
170 folio_unlock(folio);
171 folio_put(folio);
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 folio_is_file_lru(folio), -folio_nr_pages(folio));
175 folio_putback_lru(folio);
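Callers typically pair the two paths above roughly as follows. This is an illustrative sketch, assuming the bool-returning isolate_movable_page() of recent kernels and a caller-private pagelist; it is not taken from this file.

LIST_HEAD(pagelist);

/* Isolation phase: pull the page off its list (LRU or driver-owned). */
if (isolate_movable_page(page, ISOLATE_UNEVICTABLE))
	list_add_tail(&page_folio(page)->lru, &pagelist);

/* ... hand &pagelist to migrate_pages() ... */

/* If migration is abandoned or fails permanently, give everything back: */
putback_movable_pages(&pagelist);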
183 static bool remove_migration_pte(struct folio *folio,
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
199 new = folio_page(folio, idx);
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
211 folio_get(folio);
220 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
227 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
245 if (folio_test_hugetlb(folio)) {
251 if (folio_test_anon(folio))
261 if (folio_test_anon(folio))
285 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
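remove_migration_pte() above is an rmap callback; remove_migration_ptes() at line 285 is essentially a thin wrapper that walks dst's reverse mappings with it. The body below is a reconstruction for context, assuming the usual rmap_walk_control plumbing; exact details may differ by kernel version.

void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,	/* the folio the migration entries refer to */
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}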
379 struct folio *folio)
385 refs += folio_nr_pages(folio);
386 if (folio_test_private(folio))
401 struct folio *newfolio, struct folio *folio, int extra_count)
403 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
406 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
407 long nr = folio_nr_pages(folio);
412 if (folio_ref_count(folio) != expected_count)
416 newfolio->index = folio->index;
417 newfolio->mapping = folio->mapping;
418 if (folio_test_swapbacked(folio))
424 oldzone = folio_zone(folio);
428 if (!folio_ref_freeze(folio, expected_count)) {
434 * Now we know that no one else is looking at the folio:
437 newfolio->index = folio->index;
438 newfolio->mapping = folio->mapping;
440 if (folio_test_swapbacked(folio)) {
442 if (folio_test_swapcache(folio)) {
444 newfolio->private = folio_get_private(folio);
448 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
453 dirty = folio_test_dirty(folio);
455 folio_clear_dirty(folio);
470 folio_ref_unfreeze(folio, expected_count - nr);
489 memcg = folio_memcg(folio);
495 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
499 if (folio_test_pmd_mappable(folio)) {
505 if (folio_test_swapcache(folio)) {
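The refcount check in folio_migrate_mapping() (lines 406-412, fed by the helper at lines 379-386) reduces to a small piece of arithmetic. The function below is an illustrative reconstruction of that calculation, not a verbatim copy of the helper:

static int expected_refs_sketch(struct address_space *mapping,
				struct folio *folio, int extra_count)
{
	int refs = 1;			/* the migration code's own reference */

	if (mapping) {
		/* The page cache holds one reference per base page... */
		refs += folio_nr_pages(folio);
		/* ...plus one more if fs-private data (e.g. buffer heads)
		 * is attached. */
		if (folio_test_private(folio))
			refs++;
	}
	return refs + extra_count;	/* caller-supplied extras, line 406 */
}

/* folio_migrate_mapping() bails out with -EAGAIN unless
 * folio_ref_count(folio) equals this value, then freezes the count with
 * folio_ref_freeze() before switching the mapping over. */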
528 struct folio *dst, struct folio *src)
557 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
561 if (folio_test_error(folio))
563 if (folio_test_referenced(folio))
565 if (folio_test_uptodate(folio))
567 if (folio_test_clear_active(folio)) {
568 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
570 } else if (folio_test_clear_unevictable(folio))
572 if (folio_test_workingset(folio))
574 if (folio_test_checked(folio))
582 if (folio_test_mappedtodisk(folio))
586 if (folio_test_dirty(folio))
589 if (folio_test_young(folio))
591 if (folio_test_idle(folio))
598 cpupid = page_cpupid_xchg_last(&folio->page, -1);
605 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
613 folio_migrate_ksm(newfolio, folio);
618 if (folio_test_swapcache(folio))
619 folio_clear_swapcache(folio);
620 folio_clear_private(folio);
623 if (!folio_test_hugetlb(folio))
624 folio->private = NULL;
638 if (folio_test_readahead(folio))
641 folio_copy_owner(newfolio, folio);
643 if (!folio_test_hugetlb(folio))
644 mem_cgroup_migrate(folio, newfolio);
648 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
650 folio_copy(newfolio, folio);
651 folio_migrate_flags(newfolio, folio);
659 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
660 struct folio *src, enum migrate_mode mode, int extra_count)
679 * migrate_folio() - Simple folio migration.
680 * @mapping: The address_space containing the folio.
681 * @dst: The folio to migrate the data to.
682 * @src: The folio containing the current data.
685 * Common logic to directly migrate a single LRU folio suitable for
690 int migrate_folio(struct address_space *mapping, struct folio *dst,
691 struct folio *src, enum migrate_mode mode)
732 struct folio *dst, struct folio *src, enum migrate_mode mode,
811 * @dst: The folio to migrate to.
812 * @src: The folio to migrate from.
813 * @mode: How to migrate the folio.
817 * heads are accessed only under the folio lock. If your filesystem cannot
824 struct folio *dst, struct folio *src, enum migrate_mode mode)
833 * @dst: The folio to migrate to.
834 * @src: The folio to migrate from.
835 * @mode: How to migrate the folio.
845 struct folio *dst, struct folio *src, enum migrate_mode mode)
853 struct folio *dst, struct folio *src, enum migrate_mode mode)
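Usage note (not shown in this file): filesystems opt into these helpers through their address_space_operations. A filesystem whose folios carry no private state can point .migrate_folio straight at migrate_folio(), while block-backed filesystems that attach buffer heads use buffer_migrate_folio() (or the _norefs variant, used e.g. by the block device mapping). The initializer below is a hedged sketch with a made-up examplefs name:

static const struct address_space_operations examplefs_aops = {
	/* read/write callbacks omitted */
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,	/* or buffer_migrate_folio */
#endif
};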
873 * Writeback a folio to clean the dirty state
875 static int writeout(struct address_space *mapping, struct folio *folio)
890 if (!folio_clear_dirty_for_io(folio))
895 * A dirty folio may imply that the underlying filesystem has
896 * the folio on some queue. So the folio must be clean for
898 * folio state is no longer what we checked for earlier.
902 remove_migration_ptes(folio, folio, false);
904 rc = mapping->a_ops->writepage(&folio->page, &wbc);
908 folio_lock(folio);
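writeout() above (lines 873-908) handles dirty folios whose filesystem provides no migrate_folio method: it clears the dirty bit, restores the migration PTEs to point back at the folio itself, and hands the folio to ->writepage. The writeback_control passed at line 904 is roughly the following reconstruction (field values assumed from the usual implementation and may differ by version):

struct writeback_control wbc = {
	.sync_mode	= WB_SYNC_NONE,	/* opportunistic, not data-integrity */
	.nr_to_write	= 1,		/* just this folio */
	.range_start	= 0,
	.range_end	= LLONG_MAX,
	.for_reclaim	= 1,		/* tell the fs this is reclaim-driven */
};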
917 struct folio *dst, struct folio *src, enum migrate_mode mode)
952 static int move_to_new_folio(struct folio *dst, struct folio *src,
1030 * field of struct folio of the newly allocated destination folio.
1039 static void __migrate_folio_record(struct folio *dst,
1046 static void __migrate_folio_extract(struct folio *dst,
1057 /* Restore the source folio to the original state upon failure */
1058 static void migrate_folio_undo_src(struct folio *src,
1075 /* Restore the destination folio to the original state upon failure */
1076 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1087 /* Cleanup src folio upon migration success */
1088 static void migrate_folio_done(struct folio *src,
1108 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1111 struct folio *dst;
1254 * A folio that has not been unmapped will be restored to
1267 /* Migrate the folio to the newly allocated folio in dst. */
1269 struct folio *src, struct folio *dst,
1317 * A folio that has been migrated has all references removed
1330 * A folio that has not been migrated will be restored to
1366 struct folio *src, int force, enum migrate_mode mode,
1369 struct folio *dst;
1478 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1482 folio_lock(folio);
1483 rc = split_folio_to_list(folio, split_folios);
1484 folio_unlock(folio);
1486 list_move_tail(&folio->lru, split_folios);
1528 struct folio *folio, *folio2;
1535 list_for_each_entry_safe(folio, folio2, from, lru) {
1536 if (!folio_test_hugetlb(folio))
1539 nr_pages = folio_nr_pages(folio);
1550 if (!hugepage_migration_supported(folio_hstate(folio))) {
1553 list_move_tail(&folio->lru, ret_folios);
1559 folio, pass > 2, mode,
1563 * Success: hugetlb folio will be put back
1586 * unlike -EAGAIN case, the failed folio is
1587 * removed from migration folio list and not
1612 * lock or bit when we have locked more than one folio. Which may cause
1628 struct folio *folio, *folio2, *dst = NULL, *dst2;
1642 list_for_each_entry_safe(folio, folio2, from, lru) {
1643 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1644 nr_pages = folio_nr_pages(folio);
1649 * Large folio migration might be unsupported or
1651 * on the same folio with the large folio split
1661 if (!try_split_folio(folio, split_folios)) {
1666 list_move_tail(&folio->lru, ret_folios);
1671 private, folio, &dst, mode, reason,
1675 * Success: folio will be freed
1676 * Unmap: folio will be put on unmap_folios list,
1677 * dst folio put on dst_folios list
1690 /* Large folio NUMA faulting doesn't split to retry. */
1691 if (folio_test_large(folio) && !nosplit) {
1692 int ret = try_split_folio(folio, split_folios);
1700 * Try again to split large folio to
1731 list_move_tail(&folio->lru, &unmap_folios);
1737 * unlike -EAGAIN case, the failed folio is
1738 * removed from migration folio list and not
1761 dst = list_first_entry(&dst_folios, struct folio, lru);
1763 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1764 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1765 nr_pages = folio_nr_pages(folio);
1770 folio, dst, mode,
1774 * Success: folio will be freed
1805 dst = list_first_entry(&dst_folios, struct folio, lru);
1807 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1812 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1875 * as the target of the folio migration.
1880 * folio migration, if any.
1881 * @reason: The reason for folio migration.
1890 * Returns the number of {normal folio, large folio, hugetlb} that were not
1891 * migrated, or an error code. The number of large folio splits will be
1892 * considered as the number of non-migrated large folio, no matter how many
1893 * split folios of the large folio are migrated successfully.
1901 struct folio *folio, *folio2;
1918 list_for_each_entry_safe(folio, folio2, from, lru) {
1920 if (folio_test_hugetlb(folio)) {
1921 list_move_tail(&folio->lru, &ret_folios);
1925 nr_pages += folio_nr_pages(folio);
1950 * Failure isn't counted since all split folios of a large folio
1964 * Put the permanent failure folio back to migration list, they
1991 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2487 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
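Closing usage note: alloc_migration_target() (line 1991) is the stock destination allocator handed to migrate_pages() by in-kernel callers such as memory hot-remove. The call pattern below is a sketch assuming the seven-argument migrate_pages() and the migration_target_control fields (nid, nmask, gfp_mask) from mm/internal.h; target_nid and pagelist are placeholders supplied by the caller:

struct migration_target_control mtc = {
	.nid		= target_nid,
	.gfp_mask	= GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
int err;

err = migrate_pages(&pagelist, alloc_migration_target, NULL,
		    (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG,
		    NULL);
if (err)
	putback_movable_pages(&pagelist);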