Lines Matching defs:folio
28 * Structure allocated for each folio to track per-block uptodate, dirty state
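
The listing opens with the comment above the per-folio state structure. As a rough userspace model of the idea (the field names and layout below are illustrative assumptions, not the kernel's definition), the state can be pictured as one bitmap holding an uptodate bit and a dirty bit per filesystem block in the folio:

/*
 * Illustrative userspace model of a per-folio block-state bitmap.
 * First nblocks bits: per-block uptodate; next nblocks bits: per-block dirty.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

struct folio_state {
	unsigned int nblocks;	/* filesystem blocks in this folio */
	unsigned long state[];	/* [0,n): uptodate bits, [n,2n): dirty bits */
};

static struct folio_state *fs_alloc(unsigned int nblocks)
{
	size_t words = (2UL * nblocks + BITS_PER_LONG - 1) / BITS_PER_LONG;
	struct folio_state *fs;

	fs = calloc(1, sizeof(*fs) + words * sizeof(unsigned long));
	if (fs)
		fs->nblocks = nblocks;
	return fs;
}

static void fs_set_bit(struct folio_state *fs, unsigned int bit)
{
	fs->state[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static bool fs_test_bit(const struct folio_state *fs, unsigned int bit)
{
	return fs->state[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
}

/* Analogue of ifs_is_fully_uptodate(): every uptodate bit must be set. */
static bool fs_fully_uptodate(const struct folio_state *fs)
{
	for (unsigned int i = 0; i < fs->nblocks; i++)
		if (!fs_test_bit(fs, i))
			return false;
	return true;
}

int main(void)
{
	struct folio_state *fs = fs_alloc(4);	/* e.g. 16k folio, 4k blocks */

	if (!fs)
		return 1;
	fs_set_bit(fs, 0);			/* block 0 uptodate */
	fs_set_bit(fs, 0 + fs->nblocks);	/* block 0 dirty */
	printf("fully uptodate: %d\n", fs_fully_uptodate(fs));
	free(fs);
	return 0;
}
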
46 static inline bool ifs_is_fully_uptodate(struct folio *folio,
49 struct inode *inode = folio->mapping->host;
51 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
60 static void ifs_set_range_uptodate(struct folio *folio,
63 struct inode *inode = folio->mapping->host;
71 if (ifs_is_fully_uptodate(folio, ifs))
72 folio_mark_uptodate(folio);
76 static void iomap_set_range_uptodate(struct folio *folio, size_t off,
79 struct iomap_folio_state *ifs = folio->private;
82 ifs_set_range_uptodate(folio, ifs, off, len);
84 folio_mark_uptodate(folio);
87 static inline bool ifs_block_is_dirty(struct folio *folio,
90 struct inode *inode = folio->mapping->host;
91 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
96 static void ifs_clear_range_dirty(struct folio *folio,
99 struct inode *inode = folio->mapping->host;
100 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
111 static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
113 struct iomap_folio_state *ifs = folio->private;
116 ifs_clear_range_dirty(folio, ifs, off, len);
119 static void ifs_set_range_dirty(struct folio *folio,
122 struct inode *inode = folio->mapping->host;
123 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
134 static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
136 struct iomap_folio_state *ifs = folio->private;
139 ifs_set_range_dirty(folio, ifs, off, len);
143 struct folio *folio, unsigned int flags)
145 struct iomap_folio_state *ifs = folio->private;
146 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
159 * filesystem block size is smaller than the folio size.
169 if (folio_test_uptodate(folio))
171 if (folio_test_dirty(folio))
173 folio_attach_private(folio, ifs);
178 static void ifs_free(struct folio *folio)
180 struct iomap_folio_state *ifs = folio_detach_private(folio);
186 WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
187 folio_test_uptodate(folio));
192 * Calculate the range inside the folio that we actually need to read.
194 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
197 struct iomap_folio_state *ifs = folio->private;
202 size_t poff = offset_in_folio(folio, *pos);
203 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
241 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
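
iomap_adjust_read_range() trims the byte range that actually needs I/O by consulting the per-block uptodate bits. A hedged standalone model of that trimming follows; the block size, bitmap, and exact trimming policy are simplifications for illustration, not the kernel helpers:

/* Hedged sketch: trim a read range against per-block uptodate state. */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_SIZE	1024u

static void adjust_read_range(const bool *uptodate, size_t *poff, size_t *plen)
{
	unsigned int first = (unsigned int)(*poff / BLOCK_SIZE);
	unsigned int last = (unsigned int)((*poff + *plen - 1) / BLOCK_SIZE);
	unsigned int i;

	/* Skip already-uptodate blocks at the front of the range. */
	for (i = first; i <= last && uptodate[i]; i++) {
		*poff += BLOCK_SIZE;
		*plen -= BLOCK_SIZE;
	}
	first = i;

	/* End the range at the first uptodate block after the gap. */
	for (i = first; i <= last; i++) {
		if (uptodate[i]) {
			*plen = (size_t)i * BLOCK_SIZE - *poff;
			break;
		}
	}
}

int main(void)
{
	bool uptodate[4] = { true, false, false, true };
	size_t poff = 0, plen = 4 * BLOCK_SIZE;

	adjust_read_range(uptodate, &poff, &plen);
	printf("read %zu bytes at offset %zu\n", plen, poff);	/* 2048 @ 1024 */
	return 0;
}
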
251 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
254 struct iomap_folio_state *ifs = folio->private;
257 folio_clear_uptodate(folio);
258 folio_set_error(folio);
260 iomap_set_range_uptodate(folio, offset, len);
264 folio_unlock(folio);
273 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
278 struct folio *cur_folio;
287 * @folio: folio to copy to
289 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
294 struct folio *folio)
299 size_t offset = offset_in_folio(folio, iomap->offset);
302 if (folio_test_uptodate(folio))
313 ifs_alloc(iter->inode, folio, iter->flags);
315 addr = kmap_local_folio(folio, offset);
319 iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
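
iomap_read_inline_data() copies the inline data into the folio, zeroes the remainder, and then marks the range uptodate. A short standalone sketch of the copy-then-zero step, with illustrative buffer names:

/* Hedged sketch of the copy-inline-then-zero-tail step. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096u

static void read_inline(char *page, const char *inline_data, size_t size)
{
	memcpy(page, inline_data, size);
	memset(page + size, 0, PAGE_SIZE - size);
	/* ...the real code would now mark the copied range uptodate. */
}

int main(void)
{
	static char page[PAGE_SIZE];
	const char data[] = "tiny file kept in the inode";

	read_inline(page, data, sizeof(data) - 1);
	printf("%.27s\n", page);
	return 0;
}
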
339 struct folio *folio = ctx->cur_folio;
346 return iomap_read_inline_data(iter, folio);
349 ifs = ifs_alloc(iter->inode, folio, iter->flags);
350 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
355 folio_zero_range(folio, poff, plen);
356 iomap_set_range_uptodate(folio, poff, plen);
367 !bio_add_folio(ctx->bio, folio, plen, poff)) {
368 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
392 bio_add_folio_nofail(ctx->bio, folio, plen, poff);
405 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
408 .inode = folio->mapping->host,
409 .pos = folio_pos(folio),
410 .len = folio_size(folio),
413 .cur_folio = folio,
423 folio_set_error(folio);
430 folio_unlock(folio);
435 * return 0 and just set the folio error flag on errors. This
508 * iomap_is_partially_uptodate checks whether blocks within a folio are
512 * of the folio are uptodate.
514 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
516 struct iomap_folio_state *ifs = folio->private;
517 struct inode *inode = folio->mapping->host;
523 /* Caller's range may extend past the end of this folio */
524 count = min(folio_size(folio) - from, count);
526 /* First and last blocks in range within folio */
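
iomap_is_partially_uptodate() clamps the caller's range to the folio and then requires every covered block to be uptodate. A hedged model of that check, with the block size and bitmap as stand-ins:

/* Hedged sketch of a per-block "is this sub-range uptodate" check. */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_SIZE	1024u
#define FOLIO_SIZE	4096u

static bool range_is_uptodate(const bool *uptodate, size_t from, size_t count)
{
	size_t first, last, i;

	/* Caller's range may extend past the end of this folio. */
	if (count > FOLIO_SIZE - from)
		count = FOLIO_SIZE - from;

	/* First and last blocks in the range within the folio. */
	first = from / BLOCK_SIZE;
	last = (from + count - 1) / BLOCK_SIZE;

	for (i = first; i <= last; i++)
		if (!uptodate[i])
			return false;
	return true;
}

int main(void)
{
	bool uptodate[4] = { true, true, false, true };

	printf("%d\n", range_is_uptodate(uptodate, 0, 2048));	/* 1 */
	printf("%d\n", range_is_uptodate(uptodate, 0, 9999));	/* 0: block 2 not uptodate */
	return 0;
}
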
538 * iomap_get_folio - get a folio reference for writing
541 * @len: Suggested size of folio to create.
543 * Returns a locked reference to the folio at @pos, or an error pointer if the
544 * folio could not be obtained.
546 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
559 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
561 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
562 folio_size(folio));
565 * If the folio is dirty, we refuse to release our metadata because
569 if (folio_test_dirty(folio))
571 ifs_free(folio);
576 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
578 trace_iomap_invalidate_folio(folio->mapping->host,
579 folio_pos(folio) + offset, len);
582 * If we're invalidating the entire folio, clear the dirty state
585 if (offset == 0 && len == folio_size(folio)) {
586 WARN_ON_ONCE(folio_test_writeback(folio));
587 folio_cancel_dirty(folio);
588 ifs_free(folio);
593 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
596 size_t len = folio_size(folio);
598 ifs_alloc(inode, folio, 0);
599 iomap_set_range_dirty(folio, 0, len);
600 return filemap_dirty_folio(mapping, folio);
618 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
626 bio_add_folio_nofail(&bio, folio, plen, poff);
631 size_t len, struct folio *folio)
638 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
639 size_t from = offset_in_folio(folio, pos), to = from + len;
643 * If the write or zeroing completely overlaps the current folio, then
644 * the entire folio will be dirtied so there is no need for

645 * per-block state tracking structures to be attached to this folio.
649 if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
650 pos + len >= folio_pos(folio) + folio_size(folio))
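
__iomap_write_begin() skips allocating per-block state when the write or zeroing fully covers the folio, since every block will be dirtied anyway; the IOMAP_UNSHARE case is excluded from this shortcut, as the listed condition shows. A minimal predicate modeling that test (types simplified, not the kernel signature):

/* Hedged sketch of the "does this write cover the whole folio" test. */
#include <stdbool.h>
#include <stdio.h>

static bool write_covers_folio(long long pos, size_t len,
			       long long folio_pos, size_t folio_size,
			       bool unshare)
{
	return !unshare &&
	       pos <= folio_pos &&
	       pos + (long long)len >= folio_pos + (long long)folio_size;
}

int main(void)
{
	/* A 4096-byte write aligned to a 4096-byte folio needs no tracking. */
	printf("%d\n", write_covers_folio(8192, 4096, 8192, 4096, false));	/* 1 */
	/* A partial write does. */
	printf("%d\n", write_covers_folio(8192, 512, 8192, 4096, false));	/* 0 */
	return 0;
}
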
653 ifs = ifs_alloc(iter->inode, folio, iter->flags);
657 if (folio_test_uptodate(folio))
659 folio_clear_error(folio);
662 iomap_adjust_read_range(iter->inode, folio, &block_start,
675 folio_zero_segments(folio, poff, from, to, poff + plen);
682 status = iomap_read_folio_sync(block_start, folio,
687 iomap_set_range_uptodate(folio, poff, plen);
693 static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
705 struct folio *folio)
710 folio_ops->put_folio(iter->inode, pos, ret, folio);
712 folio_unlock(folio);
713 folio_put(folio);
718 struct folio *folio)
723 return iomap_read_inline_data(iter, folio);
727 size_t len, struct folio **foliop)
731 struct folio *folio;
744 folio = __iomap_get_folio(iter, pos, len);
745 if (IS_ERR(folio))
746 return PTR_ERR(folio);
749 * Now we have a locked folio, before we do anything with it we need to
768 if (pos + len > folio_pos(folio) + folio_size(folio))
769 len = folio_pos(folio) + folio_size(folio) - pos;
772 status = iomap_write_begin_inline(iter, folio);
774 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
776 status = __iomap_write_begin(iter, pos, len, folio);
781 *foliop = folio;
785 __iomap_put_folio(iter, pos, 0, folio);
792 size_t copied, struct folio *folio)
794 flush_dcache_folio(folio);
807 if (unlikely(copied < len && !folio_test_uptodate(folio)))
809 iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
810 iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
811 filemap_dirty_folio(inode->i_mapping, folio);
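
__iomap_write_end() only accepts a short copy when the folio was already uptodate; otherwise the partially copied contents cannot be trusted and the write must be retried. A hedged model of that rule:

/* Hedged sketch of the short-copy rule: a partial copy into a
 * non-uptodate folio counts as zero bytes written. */
#include <stdbool.h>
#include <stdio.h>

static size_t accept_copied(size_t len, size_t copied, bool folio_uptodate)
{
	if (copied < len && !folio_uptodate)
		return 0;	/* retry: page-cache contents are undefined */
	return copied;
}

int main(void)
{
	printf("%zu\n", accept_copied(4096, 4096, false));	/* 4096 */
	printf("%zu\n", accept_copied(4096, 1000, true));	/* 1000 */
	printf("%zu\n", accept_copied(4096, 1000, false));	/* 0 */
	return 0;
}
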
816 struct folio *folio, loff_t pos, size_t copied)
821 WARN_ON_ONCE(!folio_test_uptodate(folio));
824 flush_dcache_folio(folio);
825 addr = kmap_local_folio(folio, pos);
835 size_t copied, struct folio *folio)
842 ret = iomap_write_end_inline(iter, folio, pos, copied);
845 copied, &folio->page, NULL);
847 ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
859 __iomap_put_folio(iter, pos, ret, folio);
879 struct folio *folio;
880 size_t offset; /* Offset into folio */
881 size_t bytes; /* Bytes to write to folio */
911 status = iomap_write_begin(iter, pos, bytes, &folio);
917 offset = offset_in_folio(folio, pos);
918 if (bytes > folio_size(folio) - offset)
919 bytes = folio_size(folio) - offset;
922 flush_dcache_folio(folio);
924 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
925 status = iomap_write_end(iter, pos, bytes, copied, folio);
985 struct folio *folio, loff_t start_byte, loff_t end_byte,
996 * blocks within a folio which are marked uptodate
1000 ifs = folio->private;
1005 folio_pos(folio) + folio_size(folio) - 1);
1006 first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1007 last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1009 if (!ifs_block_is_dirty(folio, ifs, i)) {
1010 ret = punch(inode, folio_pos(folio) + (i << blkbits),
1021 static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1027 if (!folio_test_dirty(folio))
1038 /* Punch non-dirty blocks within folio */
1039 ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
1046 * the end of this data range, not the end of the folio.
1049 folio_pos(folio) + folio_size(folio));
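
iomap_write_delalloc_ifs_punch() walks the blocks of the folio that overlap the failed range and invokes the punch callback for every block that is not dirty, so only dirty data is left to be written back. A hedged standalone sketch of that walk; the punch callback and block geometry are illustrative stand-ins:

/* Hedged sketch of punching delalloc for clean blocks within a folio. */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_SIZE	1024u

typedef int (*punch_fn)(long long offset, size_t len);

static int punch_clean_blocks(const bool *dirty, long long folio_pos,
			      long long start_byte, long long end_byte,
			      punch_fn punch)
{
	unsigned int first = (start_byte - folio_pos) / BLOCK_SIZE;
	unsigned int last = (end_byte - 1 - folio_pos) / BLOCK_SIZE;
	unsigned int i;
	int ret;

	for (i = first; i <= last; i++) {
		if (dirty[i])
			continue;	/* dirty data will be written back */
		ret = punch(folio_pos + (long long)i * BLOCK_SIZE, BLOCK_SIZE);
		if (ret)
			return ret;
	}
	return 0;
}

static int print_punch(long long offset, size_t len)
{
	printf("punch %zu bytes at %lld\n", len, offset);
	return 0;
}

int main(void)
{
	bool dirty[4] = { false, true, true, false };

	/* Blocks 0 and 3 of the folio at offset 4096 get punched out. */
	return punch_clean_blocks(dirty, 4096, 4096, 4096 + 4 * BLOCK_SIZE,
				  print_punch);
}
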
1056 * dirty folio, punch out the preceding range and update the offset from which
1076 struct folio *folio;
1080 folio = filemap_lock_folio(inode->i_mapping,
1082 if (IS_ERR(folio)) {
1088 ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1091 folio_unlock(folio);
1092 folio_put(folio);
1096 /* move offset to start of next folio in range */
1097 start_byte = folio_next_index(folio) << PAGE_SHIFT;
1098 folio_unlock(folio);
1099 folio_put(folio);
1111 * start and end of data ranges correctly even for sub-folio block sizes. This
1114 * the data range lies within a folio, if they lie within the same folio or even
1115 * if there are multiple discontiguous data ranges within the folio.
1124 * the folio is dirty. This is incorrect - we always need to punch out delalloc
1216 * The punch() callback may be called with a folio locked to prevent writeback
1218 * The locked folio may or may not cover the range being punched, so it is not
1280 struct folio *folio;
1285 status = iomap_write_begin(iter, pos, bytes, &folio);
1291 offset = offset_in_folio(folio, pos);
1292 if (bytes > folio_size(folio) - offset)
1293 bytes = folio_size(folio) - offset;
1295 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
1341 struct folio *folio;
1346 status = iomap_write_begin(iter, pos, bytes, &folio);
1352 offset = offset_in_folio(folio, pos);
1353 if (bytes > folio_size(folio) - offset)
1354 bytes = folio_size(folio) - offset;
1356 folio_zero_range(folio, offset, bytes);
1357 folio_mark_accessed(folio);
1359 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
1406 struct folio *folio)
1412 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1416 block_commit_write(&folio->page, 0, length);
1418 WARN_ON_ONCE(!folio_test_uptodate(folio));
1419 folio_mark_dirty(folio);
1431 struct folio *folio = page_folio(vmf->page);
1434 folio_lock(folio);
1435 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1438 iter.pos = folio_pos(folio);
1441 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1445 folio_wait_stable(folio);
1448 folio_unlock(folio);
1453 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1456 struct iomap_folio_state *ifs = folio->private;
1459 folio_set_error(folio);
1463 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1467 folio_end_writeback(folio);
1500 iomap_finish_folio_write(inode, fi.folio, fi.length,
1732 iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
1738 size_t poff = offset_in_folio(folio, pos);
1746 if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1748 bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
1754 wbc_account_cgroup_owner(wbc, &folio->page, len);
1776 struct folio *folio, u64 end_pos)
1778 struct iomap_folio_state *ifs = folio->private;
1781 unsigned nblocks = i_blocks_per_folio(inode, folio);
1782 u64 pos = folio_pos(folio);
1789 ifs = ifs_alloc(inode, folio, 0);
1790 iomap_set_range_dirty(folio, 0, end_pos - pos);
1796 * Walk through the folio to find areas to write back. If we
1801 if (ifs && !ifs_block_is_dirty(folio, ifs, i))
1812 iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
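
The writeback path walks the folio's blocks and only adds the dirty ones to the ioend, skipping clean blocks entirely. A hedged sketch of that walk, coalescing runs of dirty blocks into contiguous ranges; the emit callback here is an illustrative stand-in for building an ioend:

/* Hedged sketch of the writeback walk over per-block dirty state. */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_SIZE	1024u

static void writeback_folio(const bool *dirty, unsigned int nblocks,
			    long long folio_pos,
			    void (*emit)(long long pos, size_t len))
{
	unsigned int i = 0;

	while (i < nblocks) {
		unsigned int start;

		/* Skip blocks that are not dirty. */
		while (i < nblocks && !dirty[i])
			i++;
		start = i;
		while (i < nblocks && dirty[i])
			i++;
		if (i > start)
			emit(folio_pos + (long long)start * BLOCK_SIZE,
			     (size_t)(i - start) * BLOCK_SIZE);
	}
}

static void print_range(long long pos, size_t len)
{
	printf("write back %zu bytes at %lld\n", len, pos);
}

int main(void)
{
	bool dirty[4] = { true, false, true, true };

	writeback_folio(dirty, 4, 0, print_range);	/* [0,1024) and [2048,4096) */
	return 0;
}
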
1820 WARN_ON_ONCE(!folio_test_locked(folio));
1821 WARN_ON_ONCE(folio_test_writeback(folio));
1822 WARN_ON_ONCE(folio_test_dirty(folio));
1836 wpc->ops->discard_folio(folio, pos);
1841 * while mapping the last partial folio. Hence it's better to clear
1842 * all the dirty bits in the folio here.
1844 iomap_clear_range_dirty(folio, 0, folio_size(folio));
1851 folio_unlock(folio);
1855 folio_start_writeback(folio);
1856 folio_unlock(folio);
1877 folio_end_writeback(folio);
1890 static int iomap_do_writepage(struct folio *folio,
1894 struct inode *inode = folio->mapping->host;
1897 trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1900 * Refuse to write the folio out if we're called from reclaim context.
1914 * Is this folio beyond the end of the file?
1916 * The folio index is less than the end_index, adjust the end_pos
1917 * to the highest offset that this folio should represent.
1927 end_pos = folio_pos(folio) + folio_size(folio);
1940 size_t poff = offset_in_folio(folio, isize);
1960 if (folio->index > end_index ||
1961 (folio->index == end_index && poff == 0))
1972 folio_zero_segment(folio, poff, folio_size(folio));
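
When the folio straddles EOF, the writepage path zeroes the tail of the folio past i_size so stale data beyond the file never reaches disk, and folios wholly past EOF are skipped. A hedged model of that decision, with the folio geometry simplified:

/* Hedged sketch of end-of-file handling before writeback. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define FOLIO_SIZE	4096u

/* Returns true if the folio should be written back at all. */
static bool prepare_folio_for_writeback(char *folio, long long folio_pos,
					long long isize)
{
	if (folio_pos >= isize)
		return false;				/* wholly past EOF */

	if (isize < folio_pos + FOLIO_SIZE) {
		size_t poff = (size_t)(isize - folio_pos);

		/* Zero the part of the folio beyond EOF. */
		memset(folio + poff, 0, FOLIO_SIZE - poff);
	}
	return true;
}

int main(void)
{
	static char folio[FOLIO_SIZE];
	long long isize = 4096 + 100;			/* file ends mid-folio */

	memset(folio, 'x', sizeof(folio));
	printf("write: %d\n", prepare_folio_for_writeback(folio, 4096, isize));
	printf("byte at 100: %d\n", folio[100]);	/* 0: zeroed past EOF */
	return 0;
}
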
1976 return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1979 folio_redirty_for_writepage(wbc, folio);
1981 folio_unlock(folio);