Lines matching references to src (Linux kernel page migration, mm/migrate.c):

285 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
289 .arg = src,
528 struct folio *dst, struct folio *src)
530 XA_STATE(xas, &mapping->i_pages, folio_index(src));
534 expected_count = 2 + folio_has_private(src);
535 if (!folio_ref_freeze(src, expected_count)) {
540 dst->index = src->index;
541 dst->mapping = src->mapping;
547 folio_ref_unfreeze(src, expected_count - 1);
660 struct folio *src, enum migrate_mode mode, int extra_count)
664 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
666 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
672 folio_migrate_copy(dst, src);
674 folio_migrate_flags(dst, src);
682 * @src: The folio containing the current data.
691 struct folio *src, enum migrate_mode mode)
693 return migrate_folio_extra(mapping, dst, src, mode, 0);
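
migrate_folio() (lines 682-693 above) is the exported helper for mappings whose folios carry no fs-private state; a filesystem opts in by pointing ->migrate_folio at it from its address_space_operations, as simple mappings such as shmem do. The sketch below is illustrative only: "examplefs" and the omitted methods are hypothetical.

#include <linux/fs.h>
#include <linux/migrate.h>

/*
 * Sketch only: "examplefs" is hypothetical. The ->migrate_folio field of
 * struct address_space_operations is available when CONFIG_MIGRATION is set.
 */
static const struct address_space_operations examplefs_aops = {
	/* read/write/dirty methods omitted */
	.migrate_folio	= migrate_folio,	/* move the mapping, then copy data and flags */
};
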
732 struct folio *dst, struct folio *src, enum migrate_mode mode,
739 head = folio_buffers(src);
741 return migrate_folio(mapping, dst, src, mode);
744 expected_count = folio_expected_refs(mapping, src);
745 if (folio_ref_count(src) != expected_count)
778 rc = folio_migrate_mapping(mapping, dst, src, 0);
782 folio_attach_private(dst, folio_detach_private(src));
791 folio_migrate_copy(dst, src);
793 folio_migrate_flags(dst, src);
810 * @mapping: The address space containing @src.
812 * @src: The folio to migrate from.
816 * that no other references to @src exist. For example attached buffer
824 struct folio *dst, struct folio *src, enum migrate_mode mode)
826 return __buffer_migrate_folio(mapping, dst, src, mode, false);
832 * @mapping: The address space containing @src.
834 * @src: The folio to migrate from.
845 struct folio *dst, struct folio *src, enum migrate_mode mode)
847 return __buffer_migrate_folio(mapping, dst, src, mode, true);
853 struct folio *dst, struct folio *src, enum migrate_mode mode)
857 ret = folio_migrate_mapping(mapping, dst, src, 0);
861 if (folio_get_private(src))
862 folio_attach_private(dst, folio_detach_private(src));
865 folio_migrate_copy(dst, src);
867 folio_migrate_flags(dst, src);
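
The three exported variants listed above differ only in how they treat a folio's private data: buffer_migrate_folio() and buffer_migrate_folio_norefs() re-attach buffer heads (the _norefs form additionally rejects folios whose buffers hold extra references, since block-device buffers can be referenced without the folio lock), while filemap_migrate_folio() simply transfers folio->private for filesystems that keep their own cookie there. The assignments below are a hedged sketch with hypothetical aops names; for reference, in current trees ext4 uses buffer_migrate_folio, the block-device mapping uses buffer_migrate_folio_norefs, and iomap-based filesystems such as XFS use filemap_migrate_folio.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/migrate.h>

/* Illustrative ->migrate_folio choices only; the aops names are hypothetical. */
static const struct address_space_operations bh_backed_aops = {
	.migrate_folio	= buffer_migrate_folio,		/* folios carry buffer heads */
};

static const struct address_space_operations blockdev_like_aops = {
	.migrate_folio	= buffer_migrate_folio_norefs,	/* buffers may be referenced without the folio lock */
};

static const struct address_space_operations private_cookie_aops = {
	.migrate_folio	= filemap_migrate_folio,	/* fs keeps its own value in folio->private */
};
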
917 struct folio *dst, struct folio *src, enum migrate_mode mode)
919 if (folio_test_dirty(src)) {
928 return writeout(mapping, src);
935 if (!filemap_release_folio(src, GFP_KERNEL))
938 return migrate_folio(mapping, dst, src, mode);
952 static int move_to_new_folio(struct folio *dst, struct folio *src,
956 bool is_lru = !__PageMovable(&src->page);
958 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
962 struct address_space *mapping = folio_mapping(src);
965 rc = migrate_folio(mapping, dst, src, mode);
974 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
977 rc = fallback_migrate_folio(mapping, dst, src, mode);
985 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
986 if (!folio_test_movable(src)) {
988 folio_clear_isolated(src);
992 mops = folio_movable_ops(src);
993 rc = mops->migrate_page(&dst->page, &src->page, mode);
995 !folio_test_isolated(src));
999 * When successful, old pagecache src->mapping must be cleared before
1000 * src is freed; but stats require that PageAnon be left as PageAnon.
1003 if (__PageMovable(&src->page)) {
1004 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1010 folio_clear_isolated(src);
1014 * Anonymous and movable src->mapping will be cleared by
1018 if (!folio_mapping_flags(src))
1019 src->mapping = NULL;
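
For non-LRU movable pages, move_to_new_folio() hands control to the owning driver through folio_movable_ops() (the mops->migrate_page() call at line 993). The callbacks come from struct movable_operations in include/linux/migrate.h and are registered on the page with __SetPageMovable(). Below is a minimal, hypothetical driver-side sketch; real users such as balloon_compaction and zsmalloc do considerably more bookkeeping.

#include <linux/migrate.h>

/* Hypothetical driver callbacks; only the interface shape is real. */
static bool exdrv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* pin/lock driver metadata so the page cannot go away meanwhile */
	return true;
}

static int exdrv_migrate_page(struct page *dst, struct page *src,
			      enum migrate_mode mode)
{
	/* copy contents and repoint driver metadata from src to dst */
	return MIGRATEPAGE_SUCCESS;
}

static void exdrv_putback_page(struct page *page)
{
	/* migration aborted: undo exdrv_isolate_page() */
}

static const struct movable_operations exdrv_mops = {
	.isolate_page	= exdrv_isolate_page,
	.migrate_page	= exdrv_migrate_page,
	.putback_page	= exdrv_putback_page,
};

/* at allocation time the driver would call: __SetPageMovable(page, &exdrv_mops); */
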
1058 static void migrate_folio_undo_src(struct folio *src,
1065 remove_migration_ptes(src, src, false);
1070 folio_unlock(src);
1072 list_move_tail(&src->lru, ret);
1087 /* Cleanup src folio upon migration success */
1088 static void migrate_folio_done(struct folio *src,
1096 if (likely(!__folio_test_movable(src)))
1097 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1098 folio_is_file_lru(src), -folio_nr_pages(src));
1102 folio_put(src);
1108 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1115 bool is_lru = !__PageMovable(&src->page);
1119 if (folio_ref_count(src) == 1) {
1121 folio_clear_active(src);
1122 folio_clear_unevictable(src);
1124 list_del(&src->lru);
1125 migrate_folio_done(src, reason);
1129 dst = get_new_folio(src, private);
1136 if (!folio_trylock(src)) {
1161 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1164 folio_lock(src);
1167 if (folio_test_mlocked(src))
1170 if (folio_test_writeback(src)) {
1185 folio_wait_writeback(src);
1189 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1202 if (folio_test_anon(src) && !folio_test_ksm(src))
1203 anon_vma = folio_get_anon_vma(src);
1226 * Calling try_to_unmap() against a src->mapping==NULL page will
1234 if (!src->mapping) {
1235 if (folio_test_private(src)) {
1236 try_to_free_buffers(src);
1239 } else if (folio_mapped(src)) {
1241 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1242 !folio_test_ksm(src) && !anon_vma, src);
1243 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1247 if (!folio_mapped(src)) {
1260 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1269 struct folio *src, struct folio *dst,
1276 bool is_lru = !__PageMovable(&src->page);
1283 rc = move_to_new_folio(dst, src, mode);
1304 remove_migration_ptes(src, dst, false);
1320 list_del(&src->lru);
1324 folio_unlock(src);
1325 migrate_folio_done(src, reason);
1339 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1366 struct folio *src, int force, enum migrate_mode mode,
1375 if (folio_ref_count(src) == 1) {
1377 folio_putback_active_hugetlb(src);
1381 dst = get_new_folio(src, private);
1385 if (!folio_trylock(src)) {
1395 folio_lock(src);
1403 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1408 if (folio_test_anon(src))
1409 anon_vma = folio_get_anon_vma(src);
1414 if (folio_mapped(src)) {
1417 if (!folio_test_anon(src)) {
1424 mapping = hugetlb_page_mapping_lock_write(&src->page);
1431 try_to_migrate(src, ttu);
1438 if (!folio_mapped(src))
1439 rc = move_to_new_folio(dst, src, mode);
1442 remove_migration_ptes(src,
1443 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1453 move_hugetlb_state(src, dst, reason);
1458 folio_unlock(src);
1461 folio_putback_active_hugetlb(src);
1463 list_move_tail(&src->lru, ret);
1991 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2003 nid = folio_nid(src);
2005 if (folio_test_hugetlb(src)) {
2006 struct hstate *h = folio_hstate(src);
2013 if (folio_test_large(src)) {
2020 order = folio_order(src);
2022 zidx = zone_idx(folio_zone(src));
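
alloc_migration_target() (lines 1991-2022 above) is the stock new_folio_t callback for migrate_pages(); callers pass a struct migration_target_control through the opaque private argument. That struct lives in mm/internal.h, so the sketch below is mm-internal code modeled on in-tree callers such as the move_pages() syscall path; the gfp flags, MIGRATE_SYNC mode and MR_SYSCALL reason are illustrative, not prescriptive.

#include <linux/migrate.h>
#include <linux/gfp.h>
#include "internal.h"	/* struct migration_target_control (mm-internal) */

/* Sketch: move an already-isolated list of folios to node @nid. */
static int example_migrate_list_to_node(struct list_head *folio_list, int nid)
{
	struct migration_target_control mtc = {
		.nid		= nid,
		.gfp_mask	= GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	/* returns the number of folios that could not be migrated, or an errno */
	return migrate_pages(folio_list, alloc_migration_target, NULL,
			     (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
			     NULL);
}
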
2487 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2491 int order = folio_order(src);