Lines Matching defs:folio in mm/shmem.c

149 struct folio **foliop, enum sgp_type sgp,
619 struct folio *folio;
672 folio = filemap_get_folio(inode->i_mapping, index);
673 if (IS_ERR(folio))
677 if (!folio_test_large(folio)) {
678 folio_put(folio);
689 if (!folio_trylock(folio)) {
690 folio_put(folio);
694 ret = split_folio(folio);
695 folio_unlock(folio);
696 folio_put(folio);
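
A minimal sketch of the lookup-and-split pattern in lines 672-696 above, assuming kernel context; the function name and the -EBUSY convention are illustrative, not shmem's:

#include <linux/pagemap.h>
#include <linux/huge_mm.h>

/* Illustrative: split a large folio cached at @index, if one is present. */
static int try_split_cached_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;
	int ret;

	/* filemap_get_folio() returns ERR_PTR(-ENOENT) on a cache miss. */
	folio = filemap_get_folio(mapping, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!folio_test_large(folio)) {
		folio_put(folio);
		return 0;		/* nothing to split */
	}

	/* split_folio() needs the folio lock; don't sleep waiting for it. */
	if (!folio_trylock(folio)) {
		folio_put(folio);
		return -EBUSY;
	}

	ret = split_folio(folio);
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}
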
761 static int shmem_add_to_page_cache(struct folio *folio,
766 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
767 long nr = folio_nr_pages(folio);
770 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
771 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
772 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
773 VM_BUG_ON(expected && folio_test_large(folio));
775 folio_ref_add(folio, nr);
776 folio->mapping = mapping;
777 folio->index = index;
779 if (!folio_test_swapcache(folio)) {
780 error = mem_cgroup_charge(folio, charge_mm, gfp);
782 if (folio_test_pmd_mappable(folio)) {
789 folio_throttle_swaprate(folio, gfp);
801 xas_store(&xas, folio);
804 if (folio_test_pmd_mappable(folio)) {
806 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
809 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
810 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
822 folio->mapping = NULL;
823 folio_ref_sub(folio, nr);
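
A pared-down sketch of the xarray insertion idiom behind shmem_add_to_page_cache() (lines 761-823), assuming kernel context; the conflict check, memcg charge and vmstat accounting of the real function are omitted, and the name is illustrative:

#include <linux/pagemap.h>
#include <linux/xarray.h>

static int add_folio_to_cache(struct address_space *mapping,
			      struct folio *folio, pgoff_t index, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);

	/* Caller passes the folio locked; take one cache reference per page. */
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, folio);
		if (!xas_error(&xas))
			mapping->nrpages += nr;
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		/* Undo what the success path published. */
		folio->mapping = NULL;
		folio_ref_sub(folio, nr);
		return xas_error(&xas);
	}
	return 0;
}
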
828 * Like delete_from_page_cache, but substitutes swap for @folio.
830 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
832 struct address_space *mapping = folio->mapping;
833 long nr = folio_nr_pages(folio);
837 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
838 folio->mapping = NULL;
840 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
841 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
843 folio_put(folio);
949 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
951 struct folio *folio;
957 folio = filemap_get_entry(inode->i_mapping, index);
958 if (!folio)
959 return folio;
960 if (!xa_is_value(folio)) {
961 folio_lock(folio);
962 if (folio->mapping == inode->i_mapping)
963 return folio;
964 /* The folio has been swapped out */
965 folio_unlock(folio);
966 folio_put(folio);
969 * But read a folio back from swap if any of it is within i_size
972 folio = NULL;
973 shmem_get_folio(inode, index, &folio, SGP_READ);
974 return folio;
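
A sketch of the lookup pattern used by shmem_get_partial_folio() (lines 949-974), assuming kernel context: distinguish a swap/shadow value entry from a present folio, then recheck ->mapping after locking to catch a racing truncation; the name is illustrative:

#include <linux/pagemap.h>

static struct folio *get_cached_folio_locked(struct address_space *mapping,
					     pgoff_t index)
{
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio || xa_is_value(folio))
		return NULL;		/* hole, or a swap/shadow value entry */

	folio_lock(folio);
	if (folio->mapping == mapping)
		return folio;		/* still attached: hand it back locked */

	/* The folio was truncated or swapped out under us; drop it. */
	folio_unlock(folio);
	folio_put(folio);
	return NULL;
}

The real function then falls back to shmem_get_folio(..., SGP_READ) (line 973) to read the folio back from swap when any of it is still within i_size.
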
990 struct folio *folio;
1007 folio = fbatch.folios[i];
1009 if (xa_is_value(folio)) {
1013 indices[i], folio);
1017 if (!unfalloc || !folio_test_uptodate(folio))
1018 truncate_inode_folio(mapping, folio);
1019 folio_unlock(folio);
1027 * When undoing a failed fallocate, we want none of the partial folio
1029 * folio when !uptodate indicates that it was added by this fallocate,
1030 * even when [lstart, lend] covers only a part of the folio.
1036 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1037 if (folio) {
1038 same_folio = lend < folio_pos(folio) + folio_size(folio);
1039 folio_mark_dirty(folio);
1040 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1041 start = folio_next_index(folio);
1043 end = folio->index;
1045 folio_unlock(folio);
1046 folio_put(folio);
1047 folio = NULL;
1051 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1052 if (folio) {
1053 folio_mark_dirty(folio);
1054 if (!truncate_inode_partial_folio(folio, lstart, lend))
1055 end = folio->index;
1056 folio_unlock(folio);
1057 folio_put(folio);
1076 folio = fbatch.folios[i];
1078 if (xa_is_value(folio)) {
1081 if (shmem_free_swap(mapping, indices[i], folio)) {
1090 folio_lock(folio);
1092 if (!unfalloc || !folio_test_uptodate(folio)) {
1093 if (folio_mapping(folio) != mapping) {
1095 folio_unlock(folio);
1099 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1100 folio);
1102 if (!folio_test_large(folio)) {
1103 truncate_inode_folio(mapping, folio);
1104 } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1113 if (!folio_test_large(folio)) {
1114 folio_unlock(folio);
1120 folio_unlock(folio);
1296 struct folio *folio;
1300 xas_for_each(&xas, folio, ULONG_MAX) {
1301 if (xas_retry(&xas, folio))
1304 if (!xa_is_value(folio))
1307 entry = radix_to_swp_entry(folio);
1316 if (!folio_batch_add(fbatch, folio))
1342 struct folio *folio = fbatch->folios[i];
1344 if (!xa_is_value(folio))
1347 &folio, SGP_CACHE,
1351 folio_unlock(folio);
1352 folio_put(folio);
1441 struct folio *folio = page_folio(page);
1442 struct address_space *mapping = folio->mapping;
1470 if (folio_test_large(folio)) {
1472 folio_test_set_dirty(folio);
1475 folio = page_folio(page);
1476 folio_clear_dirty(folio);
1479 index = folio->index;
1484 * fallocated folio arriving here is now to initialize it and write it.
1486 * That's okay for a folio already fallocated earlier, but if we have
1488 * of this folio in case we have to undo it, and (b) it may not be a
1490 * reactivate the folio, and let shmem_fallocate() quit when too many.
1492 if (!folio_test_uptodate(folio)) {
1508 folio_zero_range(folio, 0, folio_size(folio));
1509 flush_dcache_folio(folio);
1510 folio_mark_uptodate(folio);
1513 swap = folio_alloc_swap(folio);
1519 * if it's not already there. Do it now before the folio is
1529 if (add_to_swap_cache(folio, swap,
1534 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1537 BUG_ON(folio_mapped(folio));
1538 swap_writepage(&folio->page, wbc);
1543 put_swap_folio(folio, swap);
1545 folio_mark_dirty(folio);
1547 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1548 folio_unlock(folio);
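
Lines 1492-1510 handle a folio that was fallocated but never written: it must be zero-filled and marked uptodate before shmem_writepage() can push it to swap. A minimal sketch of that step, assuming kernel context:

#include <linux/highmem.h>
#include <linux/cacheflush.h>

/* Illustrative: make a hole folio readable before it goes to swap. */
static void zero_fill_hole_folio(struct folio *folio)
{
	if (folio_test_uptodate(folio))
		return;
	folio_zero_range(folio, 0, folio_size(folio));
	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);
}
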
1605 static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1647 static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1653 struct folio *folio;
1661 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1663 if (!folio)
1665 return folio;
1668 static struct folio *shmem_alloc_folio(gfp_t gfp,
1672 struct folio *folio;
1675 folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1678 return folio;
1681 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1685 struct folio *folio;
1698 folio = shmem_alloc_hugefolio(gfp, info, index);
1700 folio = shmem_alloc_folio(gfp, info, index);
1701 if (folio) {
1702 __folio_set_locked(folio);
1703 __folio_set_swapbacked(folio);
1704 return folio;
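
A sketch of the allocate-then-initialise pattern in lines 1647-1704, assuming CONFIG_TRANSPARENT_HUGEPAGE for the huge branch; the real code builds a pseudo-vma carrying the inode's mempolicy, here a caller-supplied vma stands in for it and the name is illustrative:

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/pagemap.h>

static struct folio *alloc_shmem_like_folio(gfp_t gfp, struct vm_area_struct *vma,
					    unsigned long addr, bool huge)
{
	struct folio *folio;

	if (huge)
		folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, addr, true);
	else
		folio = vma_alloc_folio(gfp, 0, vma, addr, false);
	if (!folio)
		return NULL;

	/* Brand new and unmapped, so the non-atomic flag setters are safe. */
	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);
	return folio;
}
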
1725 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1727 return folio_zonenum(folio) > gfp_zone(gfp);
1730 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1733 struct folio *old, *new;
1800 struct folio *folio, swp_entry_t swap)
1813 folio_wait_writeback(folio);
1814 delete_from_swap_cache(folio);
1816 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1825 * Swap in the folio pointed to by *foliop.
1826 * Caller has to make sure that *foliop contains a valid swapped folio.
1827 * Returns 0 and the folio in foliop if success. On failure, returns the
1831 struct folio **foliop, enum sgp_type sgp,
1839 struct folio *folio = NULL;
1859 folio = swap_cache_get_folio(swap, NULL, 0);
1860 if (!folio) {
1868 folio = shmem_swapin(swap, gfp, info, index);
1869 if (!folio) {
1875 /* We have to do this with folio locked to prevent races */
1876 folio_lock(folio);
1877 if (!folio_test_swapcache(folio) ||
1878 folio->swap.val != swap.val ||
1883 if (!folio_test_uptodate(folio)) {
1887 folio_wait_writeback(folio);
1891 * folio after reading from swap.
1893 arch_swap_restore(swap, folio);
1895 if (shmem_should_replace_folio(folio, gfp)) {
1896 error = shmem_replace_folio(&folio, gfp, info, index);
1901 error = shmem_add_to_page_cache(folio, mapping, index,
1910 folio_mark_accessed(folio);
1912 delete_from_swap_cache(folio);
1913 folio_mark_dirty(folio);
1917 *foliop = folio;
1923 shmem_set_folio_swapin_error(inode, index, folio, swap);
1925 if (folio) {
1926 folio_unlock(folio);
1927 folio_put(folio);
1945 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1953 struct folio *folio;
1971 folio = filemap_get_entry(mapping, index);
1972 if (folio && vma && userfaultfd_minor(vma)) {
1973 if (!xa_is_value(folio))
1974 folio_put(folio);
1979 if (xa_is_value(folio)) {
1980 error = shmem_swapin_folio(inode, index, &folio,
1985 *foliop = folio;
1989 if (folio) {
1990 folio_lock(folio);
1992 /* Has the folio been truncated or swapped out? */
1993 if (unlikely(folio->mapping != mapping)) {
1994 folio_unlock(folio);
1995 folio_put(folio);
1999 folio_mark_accessed(folio);
2000 if (folio_test_uptodate(folio))
2002 /* fallocated folio */
2005 folio_unlock(folio);
2006 folio_put(folio);
2010 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2011 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2034 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
2035 if (IS_ERR(folio)) {
2037 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
2039 if (IS_ERR(folio)) {
2042 error = PTR_ERR(folio);
2043 folio = NULL;
2047 * Try to reclaim some space by splitting a large folio
2062 hindex = round_down(index, folio_nr_pages(folio));
2065 __folio_set_referenced(folio);
2067 error = shmem_add_to_page_cache(folio, mapping, hindex,
2073 folio_add_lru(folio);
2074 shmem_recalc_inode(inode, folio_nr_pages(folio), 0);
2077 if (folio_test_pmd_mappable(folio) &&
2079 folio_next_index(folio) - 1) {
2081 * Part of the large folio is beyond i_size: subject
2098 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2104 * Let SGP_WRITE caller clear ends if write does not fill folio;
2105 * but SGP_FALLOC on a folio fallocated earlier must initialize
2108 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2109 long i, n = folio_nr_pages(folio);
2112 clear_highpage(folio_page(folio, i));
2113 flush_dcache_folio(folio);
2114 folio_mark_uptodate(folio);
2121 folio_clear_dirty(folio);
2122 filemap_remove_folio(folio);
2129 *foliop = folio;
2136 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2138 if (folio_test_large(folio)) {
2139 folio_unlock(folio);
2140 folio_put(folio);
2144 if (folio) {
2145 folio_unlock(folio);
2146 folio_put(folio);
2157 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2181 struct folio *folio = NULL;
2244 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2248 if (folio)
2249 vmf->page = folio_file_page(folio, vmf->pgoff);
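
shmem_get_folio() (line 2157) returns with the folio locked and referenced, or with *foliop NULL when SGP_READ hits a hole. A hedged sketch of the caller-side pattern, with an illustrative helper name:

#include <linux/shmem_fs.h>
#include <linux/highmem.h>

/* Illustrative: read a single byte from a shmem inode. */
static int peek_shmem_byte(struct inode *inode, loff_t pos, unsigned char *out)
{
	struct folio *folio = NULL;
	unsigned char *kaddr;
	int error;

	error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio, SGP_READ);
	if (error)
		return error;
	if (!folio) {
		*out = 0;	/* SGP_READ reports a hole as a NULL folio */
		return 0;
	}

	/* Comes back locked; drop the lock, keep the reference while reading. */
	folio_unlock(folio);
	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
	*out = *kaddr;
	kunmap_local(kaddr);
	folio_put(folio);
	return 0;
}
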
2573 struct folio **foliop)
2581 struct folio *folio;
2600 folio = shmem_alloc_folio(gfp, info, pgoff);
2601 if (!folio)
2605 page_kaddr = kmap_local_folio(folio, 0);
2630 *foliop = folio;
2636 flush_dcache_folio(folio);
2638 clear_user_highpage(&folio->page, dst_addr);
2641 folio = *foliop;
2642 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2646 VM_BUG_ON(folio_test_locked(folio));
2647 VM_BUG_ON(folio_test_swapbacked(folio));
2648 __folio_set_locked(folio);
2649 __folio_set_swapbacked(folio);
2650 __folio_mark_uptodate(folio);
2657 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
2663 &folio->page, true, flags);
2668 folio_unlock(folio);
2671 filemap_remove_folio(folio);
2673 folio_unlock(folio);
2674 folio_put(folio);
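
Lines 2600-2663 allocate a folio, copy userspace data into it with page faults disabled (the mmap_lock is held), and only then mark it uptodate and publish it. A simplified sketch of the copy step; the page-cache insertion and PTE install are omitted and the name is illustrative:

#include <linux/highmem.h>
#include <linux/cacheflush.h>
#include <linux/uaccess.h>

static int fill_folio_from_user(struct folio *folio, unsigned long src_addr)
{
	void *kaddr = kmap_local_folio(folio, 0);
	unsigned long left;

	/* Faults stay disabled because the caller holds mmap_lock for read. */
	pagefault_disable();
	left = copy_from_user(kaddr, (const void __user *)src_addr, PAGE_SIZE);
	pagefault_enable();
	kunmap_local(kaddr);

	if (left)
		return -EFAULT;	/* shmem returns -ENOENT so the caller can retry */

	flush_dcache_folio(folio);
	/* Still private to us, so the non-atomic variant is fine. */
	__folio_mark_uptodate(folio);
	return 0;
}
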
2693 struct folio *folio;
2705 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2710 *pagep = folio_file_page(folio, index);
2712 folio_unlock(folio);
2713 folio_put(folio);
2726 struct folio *folio = page_folio(page);
2732 if (!folio_test_uptodate(folio)) {
2733 if (copied < folio_size(folio)) {
2734 size_t from = offset_in_folio(folio, pos);
2735 folio_zero_segments(folio, 0, from,
2736 from + copied, folio_size(folio));
2738 folio_mark_uptodate(folio);
2740 folio_mark_dirty(folio);
2741 folio_unlock(folio);
2742 folio_put(folio);
2762 struct folio *folio = NULL;
2777 error = shmem_get_folio(inode, index, &folio, SGP_READ);
2783 if (folio) {
2784 folio_unlock(folio);
2786 page = folio_file_page(folio, index);
2788 folio_put(folio);
2804 if (folio)
2805 folio_put(folio);
2811 if (folio) {
2823 folio_mark_accessed(folio);
2829 folio_put(folio);
2939 struct folio *folio = NULL;
2953 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2960 if (folio) {
2961 folio_unlock(folio);
2963 if (folio_test_hwpoison(folio) ||
2964 (folio_test_large(folio) &&
2965 folio_test_has_hwpoisoned(folio))) {
2984 if (folio) {
2991 flush_dcache_folio(folio);
2992 folio_mark_accessed(folio);
2997 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
2998 folio_put(folio);
2999 folio = NULL;
3016 if (folio)
3017 folio_put(folio);
3129 struct folio *folio;
3140 error = shmem_get_folio(inode, index, &folio,
3155 * a second SGP_FALLOC on the same large folio will clear it,
3158 index = folio_next_index(folio);
3167 if (!folio_test_uptodate(folio))
3178 folio_mark_dirty(folio);
3179 folio_unlock(folio);
3180 folio_put(folio);
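
A sketch of the preallocation loop around lines 3129-3180, assuming kernel context; the FALLOC_FL_KEEP_SIZE handling, size bookkeeping and undo path of shmem_fallocate() are stripped out, and the name is illustrative:

#include <linux/shmem_fs.h>
#include <linux/sched.h>

/* Illustrative: instantiate folios for indices [start, end) of a shmem inode. */
static int prealloc_shmem_range(struct inode *inode, pgoff_t start, pgoff_t end)
{
	pgoff_t index = start;

	while (index < end) {
		struct folio *folio;
		int error = shmem_get_folio(inode, index, &folio, SGP_FALLOC);

		if (error)
			return error;	/* the real code undoes earlier work here */

		/* A large folio covers several indices; step past all of them. */
		index = folio_next_index(folio);
		folio_mark_dirty(folio);
		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
	return 0;
}
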
3465 struct folio *folio;
3496 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3501 memcpy(folio_address(folio), symname, len);
3502 folio_mark_uptodate(folio);
3503 folio_mark_dirty(folio);
3504 folio_unlock(folio);
3505 folio_put(folio);
3531 struct folio *folio = NULL;
3535 folio = filemap_get_folio(inode->i_mapping, 0);
3536 if (IS_ERR(folio))
3538 if (PageHWPoison(folio_page(folio, 0)) ||
3539 !folio_test_uptodate(folio)) {
3540 folio_put(folio);
3544 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3547 if (!folio)
3549 if (PageHWPoison(folio_page(folio, 0))) {
3550 folio_unlock(folio);
3551 folio_put(folio);
3554 folio_unlock(folio);
3556 set_delayed_call(done, shmem_put_link, folio);
3557 return folio_address(folio);
4894 * @mapping: the folio's address_space
4895 * @index: the folio index
4907 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4912 struct folio *folio;
4916 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4921 folio_unlock(folio);
4922 return folio;
4935 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4938 if (IS_ERR(folio))
4939 return &folio->page;
4941 page = folio_file_page(folio, index);
4943 folio_put(folio);
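
shmem_read_folio_gfp() (line 4907) and the shmem_read_mapping_page_gfp() wrapper above it are the exported entry points drivers use to fault shmem-backed pages in without a vma; the folio comes back uptodate, unlocked and referenced. A hedged usage sketch with an illustrative caller:

#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative: copy the first @len bytes of a shmem object into @buf. */
static int read_shmem_head(struct file *shmem_file, void *buf, size_t len)
{
	struct address_space *mapping = shmem_file->f_mapping;
	struct folio *folio;
	void *kaddr;

	if (len > PAGE_SIZE)
		return -EINVAL;

	folio = shmem_read_folio_gfp(mapping, 0, mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	kaddr = kmap_local_folio(folio, 0);
	memcpy(buf, kaddr, len);
	kunmap_local(kaddr);
	folio_put(folio);
	return 0;
}
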