Lines matching defs:folio — lines of mm/rmap.c (Linux kernel reverse-mapping code) that match the query defs:folio; the leading number on each line is its line number in rmap.c.
495 struct anon_vma *folio_get_anon_vma(struct folio *folio)
501 anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
504 if (!folio_mapped(folio))
514 * If this folio is still mapped, then its anon_vma cannot have been
520 if (!folio_mapped(folio)) {
539 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
547 anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
550 if (!folio_mapped(folio))
557 * If the folio is still mapped, then this anon_vma is still
561 if (!folio_mapped(folio)) {
580 if (!folio_mapped(folio)) {
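folio_get_anon_vma() and folio_lock_anon_vma_read() above share one pattern: read folio->mapping under RCU, re-check folio_mapped(), and only then pin or lock the anon_vma. A hedged sketch of a typical caller, loosely modelled on the migration path; migrate_one_anon_folio() is an illustrative name, not a kernel function:

#include <linux/rmap.h>

/* Sketch: keep the anon_vma alive while the folio is temporarily unmapped. */
static int migrate_one_anon_folio(struct folio *src)
{
	struct anon_vma *anon_vma = NULL;

	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);	/* may return NULL */

	/* ... unmap, copy and remap the folio here ... */

	if (anon_vma)
		put_anon_vma(anon_vma);		/* drop the reference taken above */
	return 0;
}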
748 struct folio *folio = page_folio(page);
749 if (folio_test_anon(folio)) {
750 struct anon_vma *page__anon_vma = folio_anon_vma(folio);
760 } else if (vma->vm_file->f_mapping != folio->mapping) {
805 static bool folio_referenced_one(struct folio *folio,
809 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
820 (!folio_test_large(folio) || !pvmw.pte)) {
822 mlock_vma_folio(folio, vma, !pvmw.pte);
843 /* unexpected pmd-mapped folio? */
851 folio_clear_idle(folio);
852 if (folio_test_clear_young(folio))
876 * folio has been used in another mapping, we will catch it; if this
878 * referenced flag or activated the folio in zap_pte_range().
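folio_referenced_one() above, like every other rmap_one callback in this file, is built on the DEFINE_FOLIO_VMA_WALK() / page_vma_mapped_walk() pair, which visits each PTE or PMD in a given VMA that maps the folio. A minimal sketch of that loop shape; my_rmap_one() is an illustrative name:

static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* pvmw.pte points at one PTE mapping part of the folio */
		} else {
			/* pvmw.pte == NULL: the folio is mapped by pvmw.pmd (THP) */
		}
	}
	return true;	/* true: keep walking the remaining VMAs */
}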
894 * folio_referenced() - Test if the folio was referenced.
895 * @folio: The folio to test.
896 * @is_locked: Caller holds lock on the folio.
898 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
900 * Quick test_and_clear_referenced for all mappings of a folio,
902 * Return: The number of mappings which referenced the folio. Return -1 if
905 int folio_referenced(struct folio *folio, int is_locked,
910 .mapcount = folio_mapcount(folio),
928 if (!folio_raw_mapping(folio))
931 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
932 we_locked = folio_trylock(folio);
937 rmap_walk(folio, &rwc);
941 folio_unlock(folio);
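A hedged sketch of how folio_referenced() is typically consumed, assuming the usual four-argument form (folio, is_locked, memcg, vm_flags); the helper name and the contended-walk policy are illustrative, loosely modelled on reclaim:

#include <linux/rmap.h>

/* Sketch: did any mapping touch this locked folio since we last asked? */
static bool folio_recently_used(struct folio *folio, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced = folio_referenced(folio, /* is_locked */ 1, memcg, &vm_flags);

	if (referenced == -1)
		return true;	/* rmap lock contention: assume referenced (illustrative) */
	if (vm_flags & VM_EXEC)
		return true;	/* vm_flags is the OR of all referencing VMAs' flags */
	return referenced > 0;
}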
955 * the folio can not be freed from this function.
994 /* unexpected pmd-mapped folio? */
1008 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1011 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1027 int folio_mkclean(struct folio *folio)
1037 BUG_ON(!folio_test_locked(folio));
1039 if (!folio_mapped(folio))
1042 mapping = folio_mapping(folio);
1046 rmap_walk(folio, &rwc);
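folio_mkclean() write-protects and cleans every PTE mapping the folio so that the next write faults and re-dirties it. A hedged sketch of the usual call-site shape, mirroring the clear-dirty-for-writeback pattern; start_writeback_on() is an illustrative name:

#include <linux/rmap.h>

/* Sketch: before writeback, make future writes to the mapped folio fault again. */
static void start_writeback_on(struct folio *folio)
{
	/* folio_mkclean() BUG()s if the folio is not locked, per line 1037 above. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (folio_mkclean(folio))
		folio_mark_dirty(folio);	/* some PTE was writable or dirty: re-mark the folio dirty */
}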
1084 int folio_total_mapcount(struct folio *folio)
1086 int mapcount = folio_entire_mapcount(folio);
1091 if (folio_nr_pages_mapped(folio) == 0)
1098 nr_pages = folio_nr_pages(folio);
1100 mapcount += atomic_read(&folio_page(folio, i)->_mapcount);
1120 struct folio *folio = page_folio(page);
1122 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1131 WRITE_ONCE(folio->mapping, anon_vma);
1137 * @folio: Folio which contains page.
1143 static void __page_set_anon_rmap(struct folio *folio, struct page *page,
1150 if (folio_test_anon(folio))
1162 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
1168 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
1169 folio->index = linear_page_index(vma, address);
1177 * @folio: The folio containing @page.
1182 static void __page_check_anon_rmap(struct folio *folio, struct page *page,
1196 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1197 folio);
1217 struct folio *folio = page_folio(page);
1218 atomic_t *mapped = &folio->_nr_pages_mapped;
1227 if (first && folio_test_large(folio)) {
1231 } else if (folio_test_pmd_mappable(folio)) {
1234 first = atomic_inc_and_test(&folio->_entire_mapcount);
1238 nr_pmdmapped = folio_nr_pages(folio);
1254 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
1256 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
1258 if (likely(!folio_test_ksm(folio))) {
1261 __page_set_anon_rmap(folio, page, vma, address,
1264 __page_check_anon_rmap(folio, page, vma, address);
1267 mlock_vma_folio(folio, vma, compound);
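page_add_anon_rmap() (whose body appears above; its signature line does not match the filter) is used when a page that already carries anon rmap state is mapped again, for example on swap-in. A hedged sketch assuming the (page, vma, address, rmap_t flags) signature; the helper and its locals are illustrative:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: reinstall the reverse mapping, then the PTE, for a swapped-in page. */
static void map_swapped_in_page(struct page *page, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep, pte_t entry)
{
	/* RMAP_EXCLUSIVE would additionally mark the page anon-exclusive */
	page_add_anon_rmap(page, vma, addr, RMAP_NONE);
	set_pte_at(vma->vm_mm, addr, ptep, entry);
}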
1271 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1272 * @folio: The folio to add the mapping to.
1278 * The folio does not have to be locked.
1280 * If the folio is large, it is accounted as a THP. As the folio
1283 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
1289 __folio_set_swapbacked(folio);
1291 if (likely(!folio_test_pmd_mappable(folio))) {
1293 atomic_set(&folio->_mapcount, 0);
1297 atomic_set(&folio->_entire_mapcount, 0);
1298 atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
1299 nr = folio_nr_pages(folio);
1300 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
1303 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
1304 __page_set_anon_rmap(folio, &folio->page, vma, address, 1);
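A hedged sketch of the anonymous-fault pattern folio_add_new_anon_rmap() is written for, assuming its third parameter is the faulting address (only the first two are visible above); the helper and its locals are illustrative:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

/* Sketch: wire up a freshly allocated, exclusively owned anonymous folio. */
static void map_new_anon_folio(struct folio *folio, struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep, pte_t entry)
{
	folio_add_new_anon_rmap(folio, vma, addr);	/* sets the anon rmap fields and initial mapcount */
	folio_add_lru_vma(folio, vma);			/* make it visible to reclaim */
	set_pte_at(vma->vm_mm, addr, ptep, entry);
}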
1308 * folio_add_file_rmap_range - add pte mapping to page range of a folio
1309 * @folio: The folio to add the mapping to
1315 * The page range of folio is defined by [first_page, first_page + nr_pages)
1319 void folio_add_file_rmap_range(struct folio *folio, struct page *page,
1323 atomic_t *mapped = &folio->_nr_pages_mapped;
1327 VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
1333 if (first && folio_test_large(folio)) {
1341 } else if (folio_test_pmd_mappable(folio)) {
1344 first = atomic_inc_and_test(&folio->_entire_mapcount);
1348 nr_pmdmapped = folio_nr_pages(folio);
1361 __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
1364 __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
1366 mlock_vma_folio(folio, vma, compound);
1380 struct folio *folio = page_folio(page);
1388 nr_pages = folio_nr_pages(folio);
1390 folio_add_file_rmap_range(folio, page, nr_pages, vma, compound);
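folio_add_file_rmap_range() lets one call account several consecutive PTE mappings into the same file folio, and page_add_file_rmap() above is the single-page/compound wrapper over it. A hedged sketch assuming the remaining parameters are (nr_pages, vma, compound); the helper names are illustrative:

#include <linux/rmap.h>

/* Sketch: account nr PTEs that map pages [page, page + nr) of a file folio. */
static void account_file_ptes(struct folio *folio, struct page *page,
			      unsigned int nr, struct vm_area_struct *vma)
{
	folio_add_file_rmap_range(folio, page, nr, vma, false);	/* false: PTE maps */
}

/* Sketch: account a single PMD mapping of the whole folio. */
static void account_file_pmd(struct folio *folio, struct vm_area_struct *vma)
{
	folio_add_file_rmap_range(folio, &folio->page, folio_nr_pages(folio),
				  vma, true);			/* true: compound map */
}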
1404 struct folio *folio = page_folio(page);
1405 atomic_t *mapped = &folio->_nr_pages_mapped;
1413 if (unlikely(folio_test_hugetlb(folio))) {
1415 atomic_dec(&folio->_entire_mapcount);
1423 if (last && folio_test_large(folio)) {
1427 } else if (folio_test_pmd_mappable(folio)) {
1430 last = atomic_add_negative(-1, &folio->_entire_mapcount);
1434 nr_pmdmapped = folio_nr_pages(folio);
1447 if (folio_test_anon(folio))
1449 else if (folio_test_swapbacked(folio))
1453 __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
1456 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
1457 __lruvec_stat_mod_folio(folio, idx, -nr);
1461 * page of the folio is unmapped and at least one page
1464 if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
1466 deferred_split_folio(folio);
1477 munlock_vma_folio(folio, vma, compound);
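page_remove_rmap() is the mirror of the add paths: it drops one PTE (or, with compound == true, one PMD/hugetlb) mapping, updates NR_ANON_MAPPED/NR_FILE_MAPPED, and may queue a now partially mapped THP for deferred split. A hedged sketch of the zap-side pattern, mirroring the call visible at line 1792 below; zap_one_pte() is an illustrative name:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: tear down one PTE mapping of @page after clearing the PTE. */
static void zap_one_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep, struct page *page)
{
	pte_t pteval = ptep_get_and_clear(mm, addr, ptep);

	if (pte_dirty(pteval))
		folio_mark_dirty(page_folio(page));
	page_remove_rmap(page, vma, false);	/* false: a PTE, not a PMD, went away */
}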
1483 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1487 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1506 split_huge_pmd_address(vma, address, false, folio);
1513 * Note that the folio can not be freed in this function as call of
1514 * try_to_unmap() must hold a reference on the folio.
1519 if (folio_test_hugetlb(folio)) {
1534 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1544 * If the folio is in an mlock()d vma, we must not swap it out.
1549 mlock_vma_folio(folio, vma, false);
1556 subpage = folio_page(folio, pfn - folio_pfn(folio));
1558 anon_exclusive = folio_test_anon(folio) &&
1561 if (folio_test_hugetlb(folio)) {
1562 bool anon = folio_test_anon(folio);
1621 * a remote CPU could still be writing to the folio.
1642 /* Set the dirty flag on the folio now the pte is gone. */
1644 folio_mark_dirty(folio);
1651 if (folio_test_hugetlb(folio)) {
1652 hugetlb_count_sub(folio_nr_pages(folio), mm);
1656 dec_mm_counter(mm, mm_counter(&folio->page));
1681 dec_mm_counter(mm, mm_counter(&folio->page));
1682 } else if (folio_test_anon(folio)) {
1689 if (unlikely(folio_test_swapbacked(folio) !=
1690 folio_test_swapcache(folio))) {
1698 if (!folio_test_swapbacked(folio)) {
1708 ref_count = folio_ref_count(folio);
1709 map_count = folio_mapcount(folio);
1722 !folio_test_dirty(folio)) {
1728 * If the folio was redirtied, it cannot be
1732 folio_set_swapbacked(folio);
1779 * This is a locked file-backed folio,
1781 * cache and replaced by a new folio before
1784 * to point at a new folio while a device is
1785 * still using this folio.
1789 dec_mm_counter(mm, mm_counter_file(&folio->page));
1792 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1795 folio_put(folio);
1808 static int folio_not_mapped(struct folio *folio)
1810 return !folio_mapped(folio);
1814 * try_to_unmap - Try to remove all page table mappings to a folio.
1815 * @folio: The folio to unmap.
1819 * folio. It is the caller's responsibility to check if the folio is
1822 * Context: Caller must hold the folio lock.
1824 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1834 rmap_walk_locked(folio, &rwc);
1836 rmap_walk(folio, &rwc);
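try_to_unmap() returns void, so callers check success with folio_mapped() afterwards, exactly as the comment at line 1819 says. A hedged sketch loosely modelled on reclaim; the helper name and flag choice are illustrative:

#include <linux/rmap.h>

/* Sketch: try to drop every mapping of a locked folio before reclaiming it. */
static bool unmap_for_reclaim(struct folio *folio)
{
	enum ttu_flags flags = TTU_BATCH_FLUSH;

	if (folio_test_pmd_mappable(folio))
		flags |= TTU_SPLIT_HUGE_PMD;	/* turn the PMD map into PTE maps first */

	try_to_unmap(folio, flags);		/* caller holds the folio lock */
	return !folio_mapped(folio);		/* re-check, as the kernel-doc requires */
}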
1845 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1849 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1872 split_huge_pmd_address(vma, address, true, folio);
1885 if (folio_test_hugetlb(folio)) {
1902 subpage = folio_page(folio,
1903 pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1904 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1905 !folio_test_pmd_mappable(folio), folio);
1917 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1921 if (folio_is_zone_device(folio)) {
1932 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1933 subpage = &folio->page;
1935 subpage = folio_page(folio, pfn - folio_pfn(folio));
1938 anon_exclusive = folio_test_anon(folio) &&
1941 if (folio_test_hugetlb(folio)) {
1942 bool anon = folio_test_anon(folio);
1998 * a remote CPU could still be writing to the folio.
2012 /* Set the dirty flag on the folio now the pte is gone. */
2014 folio_mark_dirty(folio);
2019 if (folio_is_device_private(folio)) {
2020 unsigned long pfn = folio_pfn(folio);
2051 compound_order(&folio->page));
2058 if (folio_test_hugetlb(folio)) {
2059 hugetlb_count_sub(folio_nr_pages(folio), mm);
2063 dec_mm_counter(mm, mm_counter(&folio->page));
2078 dec_mm_counter(mm, mm_counter(&folio->page));
2084 if (folio_test_hugetlb(folio))
2093 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2099 if (folio_test_hugetlb(folio))
2132 if (folio_test_hugetlb(folio))
2138 compound_order(&folio->page));
2145 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2148 folio_put(folio);
2158 * @folio: the folio to replace page table entries for
2161 * Tries to remove all the page table entries which are mapping this folio and
2162 * replace them with special swap entries. Caller must hold the folio lock.
2164 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2181 if (folio_is_zone_device(folio) &&
2182 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2193 if (!folio_test_ksm(folio) && folio_test_anon(folio))
2197 rmap_walk_locked(folio, &rwc);
2199 rmap_walk(folio, &rwc);
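try_to_migrate() likewise reports success only indirectly: the migration core calls it and then tests folio_mapped(). Note that it accepts only a subset of the TTU flags (TTU_RMAP_LOCKED, TTU_SPLIT_HUGE_PMD, TTU_SYNC, TTU_BATCH_FLUSH), so this hedged sketch passes at most TTU_BATCH_FLUSH; the helper name is illustrative:

#include <linux/rmap.h>

/* Sketch: convert all PTEs of @src into migration entries before copying it. */
static bool unmap_for_migration(struct folio *src, bool async)
{
	try_to_migrate(src, async ? TTU_BATCH_FLUSH : 0);	/* folio lock held */
	return !folio_mapped(src);	/* any remaining mapping means retry or abort */
}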
2210 static bool page_make_device_exclusive_one(struct folio *folio,
2214 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2226 address + folio_size(folio)),
2232 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2241 subpage = folio_page(folio,
2242 pte_pfn(ptent) - folio_pfn(folio));
2249 /* Set the dirty flag on the folio now the pte is gone. */
2251 folio_mark_dirty(folio);
2293 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2294 * @folio: The folio to replace page table entries for.
2295 * @mm: The mm_struct where the folio is expected to be mapped.
2296 * @address: Address where the folio is expected to be mapped.
2300 * folio and replace them with special device exclusive swap entries to
2301 * grant a device exclusive access to the folio.
2303 * Context: Caller must hold the folio lock.
2307 static bool folio_make_device_exclusive(struct folio *folio,
2327 if (!folio_test_anon(folio))
2330 rmap_walk(folio, &rwc);
2332 return args.valid && !folio_mapcount(folio);
2370 struct folio *folio = page_folio(pages[i]);
2371 if (PageTail(pages[i]) || !folio_trylock(folio)) {
2372 folio_put(folio);
2377 if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2378 folio_unlock(folio);
2379 folio_put(folio);
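The loop above is the body of make_device_exclusive_range(), the exported entry point drivers call; entries that could not be made exclusive come back as NULL in the pages array, while successful pages are returned locked and referenced. A hedged sketch of a single-page driver-side caller; the MMU-notifier retry logic a real driver needs is omitted, and the helper name is illustrative:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: request device-exclusive access to the page backing one user address. */
static struct page *grab_exclusive_page(struct mm_struct *mm,
					unsigned long addr, void *owner)
{
	struct page *page = NULL;
	int ret;

	mmap_read_lock(mm);		/* the GUP call inside needs the mmap lock */
	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					  &page, owner);
	mmap_read_unlock(mm);

	if (ret != 1 || !page)
		return NULL;		/* not present, or could not be made exclusive */
	return page;			/* caller must unlock_page() and put_page() later */
}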
2398 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2404 return rwc->anon_lock(folio, rwc);
2412 anon_vma = folio_anon_vma(folio);
2433 * @folio: the folio to be handled
2437 * Find all the mappings of a folio using the mapping pointer and the vma
2440 static void rmap_walk_anon(struct folio *folio,
2448 anon_vma = folio_anon_vma(folio);
2450 VM_BUG_ON_FOLIO(!anon_vma, folio);
2452 anon_vma = rmap_walk_anon_lock(folio, rwc);
2457 pgoff_start = folio_pgoff(folio);
2458 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2462 unsigned long address = vma_address(&folio->page, vma);
2470 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2472 if (rwc->done && rwc->done(folio))
2482 * @folio: the folio to be handled
2486 * Find all the mappings of a folio using the mapping pointer and the vma chains
2489 static void rmap_walk_file(struct folio *folio,
2492 struct address_space *mapping = folio_mapping(folio);
2502 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2507 pgoff_start = folio_pgoff(folio);
2508 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2523 unsigned long address = vma_address(&folio->page, vma);
2531 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2533 if (rwc->done && rwc->done(folio))
2542 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2544 if (unlikely(folio_test_ksm(folio)))
2545 rmap_walk_ksm(folio, rwc);
2546 else if (folio_test_anon(folio))
2547 rmap_walk_anon(folio, rwc, false);
2549 rmap_walk_file(folio, rwc, false);
2553 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2556 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2557 if (folio_test_anon(folio))
2558 rmap_walk_anon(folio, rwc, true);
2560 rmap_walk_file(folio, rwc, true);
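Everything above funnels into rmap_walk()/rmap_walk_locked(): callers package a per-VMA callback into a struct rmap_walk_control and the walk dispatches to the KSM, anon or file variant. A hedged sketch of a minimal walker, reusing the illustrative my_rmap_one() callback shape sketched near folio_referenced_one() above:

#include <linux/rmap.h>

/* Sketch: run a callback for every VMA that currently maps @folio. */
static void walk_all_mappings(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = my_rmap_one,	/* called once per mapping VMA */
		.arg = NULL,
		/* .done, .anon_lock and .invalid_vma are optional hooks */
	};

	rmap_walk(folio, &rwc);	/* takes the anon_vma / i_mmap lock itself */
}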
2574 struct folio *folio = page_folio(page);
2578 BUG_ON(!folio_test_locked(folio));
2581 first = atomic_inc_and_test(&folio->_entire_mapcount);
2585 __page_set_anon_rmap(folio, page, vma, address,
2589 void hugepage_add_new_anon_rmap(struct folio *folio,
2594 atomic_set(&folio->_entire_mapcount, 0);
2595 folio_clear_hugetlb_restore_reserve(folio);
2596 __page_set_anon_rmap(folio, &folio->page, vma, address, 1);