Lines matching defs:folio (mm/truncate.c)
83 struct folio *folio = fbatch->folios[i];
86 if (!xa_is_value(folio)) {
87 fbatch->folios[j++] = folio;
96 __clear_shadow_entry(mapping, index, folio);
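The matches above come from the batch handler that strips shadow entries: a page cache slot can hold either a real folio pointer or a small xarray value entry left behind by reclaim, and xa_is_value() tells the two apart so the batch can be compacted in place. A minimal sketch of that pattern, assuming the folio_batch layout from linux/pagevec.h (the helper name is hypothetical):

#include <linux/pagevec.h>
#include <linux/xarray.h>

/* Hypothetical helper: keep real folios, drop shadow (value) entries. */
static unsigned int keep_only_folios(struct folio_batch *fbatch)
{
	unsigned int i, j = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (xa_is_value(folio))
			continue;	/* shadow entry, not a folio */
		fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;	/* shrink the batch to the kept entries */
	return j;
}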
139 * folio_invalidate - Invalidate part or all of a folio.
140 * @folio: The folio which is affected.
144 * folio_invalidate() is called when all or part of the folio has become
153 void folio_invalidate(struct folio *folio, size_t offset, size_t length)
155 const struct address_space_operations *aops = folio->mapping->a_ops;
158 aops->invalidate_folio(folio, offset, length);
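folio_invalidate() is a thin dispatcher: it looks up the owning address_space's a_ops and calls ->invalidate_folio so the filesystem can drop per-folio private state for the affected byte range. A sketch of how a buffer-head-based filesystem might wire this up; myfs_aops is hypothetical, while block_invalidate_folio() is the stock buffer-head implementation from linux/buffer_head.h:

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Hypothetical aops table routing invalidation to the buffer-head code. */
static const struct address_space_operations myfs_aops = {
	.invalidate_folio	= block_invalidate_folio,
	/* ...read_folio, writepages, etc. omitted... */
};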
172 static void truncate_cleanup_folio(struct folio *folio)
174 if (folio_mapped(folio))
175 unmap_mapping_folio(folio);
177 if (folio_has_private(folio))
178 folio_invalidate(folio, 0, folio_size(folio));
185 folio_cancel_dirty(folio);
186 folio_clear_mappedtodisk(folio);
189 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
191 if (folio->mapping != mapping)
194 truncate_cleanup_folio(folio);
195 filemap_remove_folio(folio);
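truncate_inode_folio() bails out if folio->mapping no longer matches, because truncation can race with reclaim or a concurrent truncate; callers therefore hold the folio lock across the check and the removal. A hedged sketch of that calling convention (the helper is hypothetical, and truncate_inode_folio() itself is mm-internal, declared in mm/internal.h):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include "internal.h"	/* truncate_inode_folio() is mm-internal */

/* Hypothetical caller showing the lock/recheck/truncate sequence. */
static void evict_one_folio(struct address_space *mapping, struct folio *folio)
{
	folio_lock(folio);
	if (folio->mapping == mapping) {	/* did someone race with us? */
		folio_wait_writeback(folio);	/* let writeback finish first */
		truncate_inode_folio(mapping, folio);
	}
	folio_unlock(folio);
}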
200 * Handle partial folios. The folio may be entirely within the
202 * folio that's within the [start, end] range, and then split the folio if
208 * discarding the entire folio which is stubbornly unsplit.
210 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
212 loff_t pos = folio_pos(folio);
219 length = folio_size(folio);
225 folio_wait_writeback(folio);
226 if (length == folio_size(folio)) {
227 truncate_inode_folio(folio->mapping, folio);
236 folio_zero_range(folio, offset, length);
238 if (folio_has_private(folio))
239 folio_invalidate(folio, offset, length);
240 if (!folio_test_large(folio))
242 if (split_folio(folio) == 0)
244 if (folio_test_dirty(folio))
246 truncate_inode_folio(folio->mapping, folio);
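The heart of truncate_inode_partial_folio() is clamping the byte range [start, end] to the slice that falls inside this folio, zeroing that slice, and only then trying to split a large folio. A sketch of the clamping arithmetic, assuming the folio overlaps the range (the helper name is hypothetical; the logic restates the function's opening lines):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical restatement of the offset/length clamping. */
static void folio_slice_of_range(struct folio *folio, loff_t start, loff_t end,
				 size_t *offset, size_t *length)
{
	loff_t pos = folio_pos(folio);		/* file offset of this folio */
	size_t off = pos < start ? start - pos : 0;
	size_t len = folio_size(folio);

	if (pos + len <= (u64)end)
		len -= off;			/* folio ends inside [start, end] */
	else
		len = end + 1 - pos - off;	/* range ends inside the folio */

	*offset = off;
	*length = len;
}

If the folio is small or splits cleanly the job is done; a stubbornly unsplit folio is discarded whole when clean, and only a dirty unsplittable folio makes the function return false.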
270 struct folio *folio)
272 if (folio_test_dirty(folio) || folio_test_writeback(folio))
274 /* The refcount will be elevated if any page in the folio is mapped */
275 if (folio_ref_count(folio) >
276 folio_nr_pages(folio) + folio_has_private(folio) + 1)
278 if (!filemap_release_folio(folio, 0))
281 return remove_mapping(mapping, folio);
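The refcount test above encodes who may legitimately hold the folio: one reference per page cache slot, one more if private data (e.g. buffer heads) is attached, plus the caller's own reference; anything beyond that means a mapping or an in-flight user still has it. A hypothetical restatement with a worked example:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * For an order-2 folio (folio_nr_pages() == 4) with buffer heads
 * attached (folio_has_private() == 1), held by one caller, the
 * ceiling is 4 + 1 + 1 = 6; a higher count blocks eviction.
 */
static bool evictable_refcount(struct folio *folio)
{
	return folio_ref_count(folio) <=
		folio_nr_pages(folio) + folio_has_private(folio) + 1;
}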
296 struct folio *folio = page_folio(page);
297 struct address_space *mapping = folio_mapping(folio);
302 return mapping_evict_folio(mapping, folio);
338 struct folio *folio;
376 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
377 if (!IS_ERR(folio)) {
378 same_folio = lend < folio_pos(folio) + folio_size(folio);
379 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
380 start = folio_next_index(folio);
382 end = folio->index;
384 folio_unlock(folio);
385 folio_put(folio);
386 folio = NULL;
390 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
392 if (!IS_ERR(folio)) {
393 if (!truncate_inode_partial_folio(folio, lstart, lend))
394 end = folio->index;
395 folio_unlock(folio);
396 folio_put(folio);
414 struct folio *folio = fbatch.folios[i];
418 if (xa_is_value(folio))
421 folio_lock(folio);
422 VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
423 folio_wait_writeback(folio);
424 truncate_inode_folio(mapping, folio);
425 folio_unlock(folio);
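Within truncate_inode_pages_range(), the two __filemap_get_folio() probes handle a lstart or lend that lands mid-folio: those edge folios are partially truncated in place, and the batch loop then removes only whole folios in between, locking each, waiting out writeback, and truncating it. Filesystems usually reach this code through truncate_pagecache() from their setattr path; a sketch of that entry point (myfs_setsize() is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical setattr-path helper shrinking a file to newsize. */
static void myfs_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	/* Unmaps the dropped range, then calls truncate_inode_pages(). */
	truncate_pagecache(inode, newsize);
}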
492 * @nr_failed: How many folio invalidations failed
510 struct folio *folio = fbatch.folios[i];
512 /* We rely upon deletion not changing folio->index */
514 if (xa_is_value(folio)) {
516 indices[i], folio);
520 ret = mapping_evict_folio(mapping, folio);
521 folio_unlock(folio);
523 * Invalidation is a hint that the folio is no longer
527 deactivate_file_folio(folio);
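This loop is the best-effort pass behind invalidate_mapping_pages(): shadow entries are cleared, clean unmapped folios are evicted, and anything that cannot go (dirty, under writeback, mapped) is counted as failed and merely deactivated so reclaim will prefer it. A sketch of a typical byte-range caller, in the spirit of the POSIX_FADV_DONTNEED path (the wrapper name is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical wrapper: best-effort drop of a cached byte range. */
static void drop_cached_range(struct address_space *mapping,
			      loff_t start, loff_t len)
{
	invalidate_mapping_pages(mapping, start >> PAGE_SHIFT,
				 (start + len - 1) >> PAGE_SHIFT);
}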
570 struct folio *folio)
572 if (folio->mapping != mapping)
575 if (!filemap_release_folio(folio, GFP_KERNEL))
580 if (folio_test_dirty(folio))
583 BUG_ON(folio_has_private(folio));
584 __filemap_remove_folio(folio, NULL);
590 filemap_free_folio(mapping, folio);
598 static int folio_launder(struct address_space *mapping, struct folio *folio)
600 if (!folio_test_dirty(folio))
602 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
604 return mapping->a_ops->launder_folio(folio);
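folio_launder() gives the filesystem a final chance to write a dirty folio back synchronously before invalidate_inode_pages2 discards it; most filesystems leave ->launder_folio NULL, in which case a dirty folio simply cannot be invalidated here. A rough sketch of an implementation, with both names hypothetical (myfs_write_one_folio() standing in for a synchronous single-folio writeback):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical ->launder_folio: invoked with the folio locked. */
static int myfs_launder_folio(struct folio *folio)
{
	if (!folio_clear_dirty_for_io(folio))
		return 0;	/* raced clean, nothing to write */
	return myfs_write_one_folio(folio);	/* hypothetical writer */
}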
636 struct folio *folio = fbatch.folios[i];
638 /* We rely upon deletion not changing folio->index */
640 if (xa_is_value(folio)) {
642 indices[i], folio))
647 if (!did_range_unmap && folio_mapped(folio)) {
649 * If folio is mapped, before taking its lock,
657 folio_lock(folio);
658 if (unlikely(folio->mapping != mapping)) {
659 folio_unlock(folio);
662 VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
663 folio_wait_writeback(folio);
665 if (folio_mapped(folio))
666 unmap_mapping_folio(folio);
667 BUG_ON(folio_mapped(folio));
669 ret2 = folio_launder(mapping, folio);
671 if (!invalidate_complete_folio2(mapping, folio))
676 folio_unlock(folio);
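The classic caller of this final loop is direct I/O, which must shoot down any cached folios overlapping a just-written byte range; a -EBUSY return means some folio survived laundering and invalidation. A sketch of such a caller (the wrapper name is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical wrapper: invalidate the folios backing [pos, pos + count). */
static int shoot_down_range(struct address_space *mapping,
			    loff_t pos, size_t count)
{
	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
}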