Lines Matching defs:folio
86 int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
91 XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
92 unsigned long i, nr = folio_nr_pages(folio);
97 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
98 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
99 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
101 folio_ref_add(folio, nr);
102 folio_set_swapcache(folio);
103 folio->swap = entry;
111 VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
117 xas_store(&xas, folio);
121 __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
122 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
130 folio_clear_swapcache(folio);
131 folio_ref_sub(folio, nr);
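The VM_BUG_ON_FOLIO() assertions above state the calling contract: the folio must be locked, swap-backed, and not yet in the swap cache, and on failure the caller still owns the swap entry. A minimal sketch of a conforming call, modeled on the add_to_swap() caller further down in this listing (the shadow out-parameter is optional and may be NULL):

	void *shadow = NULL;

	if (add_to_swap_cache(folio, entry,
			      __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			      &shadow)) {
		/* xarray insertion failed (e.g. -ENOMEM): release the entry */
		put_swap_folio(folio, entry);
	}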
139 void __delete_from_swap_cache(struct folio *folio,
144 long nr = folio_nr_pages(folio);
150 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
151 VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
152 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
156 VM_BUG_ON_PAGE(entry != folio, entry);
159 folio->swap.val = 0;
160 folio_clear_swapcache(folio);
162 __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
163 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
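__delete_from_swap_cache() writes to the i_pages xarray directly, so it must run with xa_lock_irq() held on the swap address space; the delete_from_swap_cache() wrapper below supplies that locking (its lock lines do not contain "folio" and so are filtered out of this listing). A sketch of the expected pattern:

	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);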
167  * add_to_swap - allocate swap space for a folio
168  * @folio: folio we want to move to swap
170  * Allocate swap space for the folio and add the folio to the
171  * swap cache.
173  * Context: Caller needs to hold the folio lock.
174  * Return: Whether the folio was added to the swap cache.
176 bool add_to_swap(struct folio *folio)
181 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
182 VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
184 entry = folio_alloc_swap(folio);
199 err = add_to_swap_cache(folio, entry,
208  * Normally the folio will be dirtied in unmap because its
209  * pte should be dirty. A special case is MADV_FREE page. The
210  * page's pte could have its dirty bit cleared but the folio's
211  * SwapBacked flag is still set because clearing the dirty bit
212  * and SwapBacked flag is not protected by a lock. For such a folio,
213  * unmap will not set the dirty bit for it, so folio reclaim will
214  * not write the folio out. This can cause data corruption when
215  * the folio is swapped in later. Always setting the dirty flag
216  * for the folio solves the problem.
218 folio_mark_dirty(folio);
223 put_swap_folio(folio, entry);
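add_to_swap() is the reclaim-side entry point: with the folio lock held it allocates a swap entry, inserts the folio into the swap cache, and dirties it so reclaim will write it out. A condensed, hypothetical sketch of shrink_folio_list()-style usage (the label is a placeholder):

	if (folio_test_anon(folio) && !folio_test_swapcache(folio)) {
		if (!add_to_swap(folio))
			goto activate_locked;	/* no swap space: keep folio in memory */
		/* folio is now dirty and in the swap cache; pageout() may write it */
	}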
230  * It will never put the folio into the free list;
231  * the caller has a reference on the folio.
233 void delete_from_swap_cache(struct folio *folio)
235 swp_entry_t entry = folio->swap;
239 __delete_from_swap_cache(folio, entry, NULL);
242 put_swap_folio(folio, entry);
243 folio_ref_sub(folio, folio_nr_pages(folio));
279  * It's ok to check the swapcache flag without the folio lock
286 struct folio *folio = page_folio(page);
288 if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
289 folio_trylock(folio)) {
290 folio_free_swap(folio);
291 folio_unlock(folio);
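free_swap_cache() is meant for hot release paths, which is why it only trylocks: if the folio is contended it simply leaves the swap cache entry for someone else to reclaim, consistent with the lock-free flag check explained above. A hedged, abbreviated sketch of the usual free_page_and_swap_cache()-style caller (the real function has an extra huge-zero-page check):

	/* drop a possible swap cache reference before the final put */
	free_swap_cache(page);
	put_page(page);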
324  * Lookup a swap entry in the swap cache. A found folio will be returned
325  * unlocked and with its refcount incremented - we rely on the kernel
326  * lock to keep page table operations atomic even if we drop the folio
327  * lock before returning.
331 struct folio *swap_cache_get_folio(swp_entry_t entry,
334 struct folio *folio;
336 folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
337 if (!IS_ERR(folio)) {
345 if (unlikely(folio_test_large(folio)))
346 return folio;
348 readahead = folio_test_clear_readahead(folio);
368 folio = NULL;
371 return folio;
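A found folio comes back unlocked with an elevated refcount, so a caller that needs stable contents must lock it and drop the reference when done. A condensed sketch of do_swap_page()-style usage (error handling omitted; vma and addr stand in for the fault context):

	folio = swap_cache_get_folio(entry, vma, addr);
	if (folio) {
		folio_lock(folio);
		/* ... map the page into the faulting pte ... */
		folio_unlock(folio);
		folio_put(folio);
	}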
375  * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
379  * This differs from filemap_get_folio() in that it will also look for the
380  * folio in the swap cache.
382  * Return: The found folio or %NULL.
384 struct folio *filemap_get_incore_folio(struct address_space *mapping,
389 struct folio *folio = filemap_get_entry(mapping, index);
391 if (!folio)
393 if (!xa_is_value(folio))
394 return folio;
398 swp = radix_to_swp_entry(folio);
407 folio = filemap_get_folio(swap_address_space(swp), index);
409 return folio;
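Because shmem stores swapped-out entries as xarray values in its page cache, this helper decodes such an entry (radix_to_swp_entry() above) and retries the lookup in the matching swap address space. A sketch of mincore()-style usage that only asks whether the folio is resident (the helper name is hypothetical):

static bool folio_is_incore(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_incore_folio(mapping, index);
	bool present = false;

	if (folio) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}
	return present;
}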
417 struct folio *folio;
433 folio = filemap_get_folio(swap_address_space(entry),
435 if (!IS_ERR(folio)) {
436 page = folio_file_page(folio, swp_offset(entry));
456 folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
457 if (!folio)
467 folio_put(folio);
485 __folio_set_locked(folio);
486 __folio_set_swapbacked(folio);
488 if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
492 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
498 workingset_refault(folio, shadow);
500 /* Caller will initiate read into locked folio */
501 folio_add_lru(folio);
503 page = &folio->page;
509 put_swap_folio(folio, entry);
510 folio_unlock(folio);
511 folio_put(folio);
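Taken together, the fragments above show the allocation path of __read_swap_cache_async(): allocate a folio, charge it to the memcg, insert it into the swap cache (folding any shadow entry back in via workingset_refault()), put it on the LRU, and hand it back locked so the caller can start the read. A condensed sketch of the read_swap_cache_async()-style wrapper that drives it, assuming the five-argument form used in this kernel version (a NULL plug means unplugged, synchronous submission is still false):

	bool page_allocated;
	struct page *page = __read_swap_cache_async(entry, gfp_mask, vma,
						    addr, &page_allocated);

	if (page_allocated)
		swap_readpage(page, false, NULL);	/* kick off the actual I/O */
	return page;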
525 * swap cache folio lock.