Lines Matching defs:folio (fs/buffer.c)

84  * Returns if the folio has dirty or writeback buffers. If all the buffers
88 void buffer_check_dirty_writeback(struct folio *folio,
95 BUG_ON(!folio_test_locked(folio));
97 head = folio_buffers(folio);
101 if (folio_test_writeback(folio))
198 struct folio *folio;
203 folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204 if (IS_ERR(folio))
208 head = folio_buffers(folio);
240 folio_put(folio);
250 struct folio *folio;
255 folio = bh->b_folio;
261 folio_set_error(folio);
269 first = folio_buffers(folio);
290 folio_mark_uptodate(folio);
291 folio_unlock(folio);
318 struct folio *folio = bh->b_folio;
319 struct inode *inode = folio->mapping->host;
323 folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
389 struct folio *folio;
393 folio = bh->b_folio;
400 folio_set_error(folio);
403 first = folio_buffers(folio);
417 folio_end_writeback(folio);
722 bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
728 head = folio_buffers(folio);
741 folio_memcg_lock(folio);
742 newly_dirty = !folio_test_set_dirty(folio);
746 __folio_mark_dirty(folio, mapping, 1);
748 folio_memcg_unlock(folio);
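
block_dirty_folio() (lines 722-748 above) is written to be used directly as the dirty_folio hook in a buffer-head based filesystem's address_space_operations; block_invalidate_folio() and block_is_partially_uptodate(), which appear further down this listing, slot into the same table. A minimal sketch, assuming a hypothetical filesystem "myfs" (the name and the table are illustrative, not taken from this file):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* Hypothetical: wire the fs/buffer.c folio helpers into an aops table. */
    static const struct address_space_operations myfs_aops = {
            .dirty_folio            = block_dirty_folio,
            .invalidate_folio       = block_invalidate_folio,
            .is_partially_uptodate  = block_is_partially_uptodate,
            /* the read/write entries (read_folio, writepage, write_begin/end)
             * are sketched after the relevant excerpts below */
    };
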
909 * Create the appropriate buffers when given a folio for data area and
917 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
928 /* The folio lock pins the memcg */
929 memcg = folio_memcg(folio);
933 offset = folio_size(folio);
945 /* Link the buffer to its folio */
946 folio_set_bh(bh, folio, offset);
974 static inline void link_dev_buffers(struct folio *folio,
985 folio_attach_private(folio, head);
1001 * Initialise the state of a blockdev folio's buffers.
1003 static sector_t folio_init_buffers(struct folio *folio,
1006 struct buffer_head *head = folio_buffers(folio);
1008 bool uptodate = folio_test_uptodate(folio);
1042 struct folio *folio;
1058 folio = __filemap_get_folio(inode->i_mapping, index,
1061 bh = folio_buffers(folio);
1064 end_block = folio_init_buffers(folio, bdev,
1068 if (!try_to_free_buffers(folio))
1072 bh = folio_alloc_buffers(folio, size, true);
1075 * Link the folio to the buffers and initialise them. Take the
1077 * run under the folio lock.
1080 link_dev_buffers(folio, bh);
1081 end_block = folio_init_buffers(folio, bdev,
1087 folio_unlock(folio);
1088 folio_put(folio);
1170 * block_read_full_folio() against that folio will discover all the uptodate
1171 * buffers, will set the folio uptodate and will perform no I/O.
1205 struct folio *folio = bh->b_folio;
1208 folio_memcg_lock(folio);
1209 if (!folio_test_set_dirty(folio)) {
1210 mapping = folio->mapping;
1212 __folio_mark_dirty(folio, mapping, 0);
1214 folio_memcg_unlock(folio);
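
Lines 1205-1214 are the folio half of mark_buffer_dirty(): dirtying a buffer also dirties the folio that backs it, so writeback later finds the change. A typical metadata-update pattern that relies on this; the names, block number and on-disk layout are hypothetical:

    #include <linux/buffer_head.h>

    /* Hypothetical: read a metadata block, bump a counter stored at offset 0,
     * and let mark_buffer_dirty() propagate the dirty state to the folio. */
    static int myfs_bump_counter(struct super_block *sb, sector_t block)
    {
            struct buffer_head *bh = sb_bread(sb, block);

            if (!bh)
                    return -EIO;
            le32_add_cpu((__le32 *)bh->b_data, 1);
            mark_buffer_dirty(bh);
            brelse(bh);
            return 0;
    }
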
1536 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1539 bh->b_folio = folio;
1540 BUG_ON(offset >= folio_size(folio));
1541 if (folio_test_highmem(folio))
1547 bh->b_data = folio_address(folio) + offset;
1575 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1576 * @folio: The folio which is affected.
1580 * block_invalidate_folio() is called when all or part of the folio has been
1589 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1595 BUG_ON(!folio_test_locked(folio));
1600 BUG_ON(stop > folio_size(folio) || stop < length);
1602 head = folio_buffers(folio);
1627 * We release buffers only if the entire folio is being invalidated.
1631 if (length == folio_size(folio))
1632 filemap_release_folio(folio, 0);
1641 * is already excluded via the folio lock.
1643 void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
1648 head = folio_alloc_buffers(folio, blocksize, true);
1657 spin_lock(&folio->mapping->private_lock);
1658 if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1661 if (folio_test_dirty(folio))
1663 if (folio_test_uptodate(folio))
1668 folio_attach_private(folio, head);
1669 spin_unlock(&folio->mapping->private_lock);
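
folio_create_empty_buffers() attaches a fresh, unmapped buffer ring to a locked folio that has none; callers normally guard it with a folio_buffers() check, as block_truncate_page() does at lines 2678-2681 further down. A sketch of that pattern as a helper (the helper name is hypothetical):

    /* Hypothetical helper: the folio must be locked by the caller. */
    static struct buffer_head *myfs_folio_buffers(struct folio *folio,
                                                  struct inode *inode)
    {
            struct buffer_head *bh = folio_buffers(folio);

            if (!bh) {
                    folio_create_empty_buffers(folio, 1 << inode->i_blkbits, 0);
                    bh = folio_buffers(folio);
            }
            return bh;
    }
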
1716 struct folio *folio = fbatch.folios[i];
1718 if (!folio_buffers(folio))
1721 * We use folio lock instead of bd_mapping->private_lock
1725 folio_lock(folio);
1726 /* Recheck when the folio is locked which pins bhs */
1727 head = folio_buffers(folio);
1743 folio_unlock(folio);
1767 static struct buffer_head *folio_create_buffers(struct folio *folio,
1771 BUG_ON(!folio_test_locked(folio));
1773 if (!folio_buffers(folio))
1774 folio_create_empty_buffers(folio,
1777 return folio_buffers(folio);
1809 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1821 head = folio_create_buffers(folio, inode,
1828 * then we just miss that fact, and the folio stays dirty.
1838 block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1849 * this folio can be outside i_size when there is a
1879 * lock the buffer then redirty the folio. Note that this can
1887 folio_redirty_for_writepage(wbc, folio);
1898 * The folio and its buffers are protected by the writeback flag,
1901 BUG_ON(folio_test_writeback(folio));
1902 folio_start_writeback(folio);
1912 folio_unlock(folio);
1918 * The folio was marked dirty, but the buffers were
1922 folio_end_writeback(folio);
1925 * The folio and buffer_heads can be released at any time from
1936 * The folio is currently locked and not marked for writeback
1948 * attachment to a dirty folio.
1953 folio_set_error(folio);
1954 BUG_ON(folio_test_writeback(folio));
1955 mapping_set_error(folio->mapping, err);
1956 folio_start_writeback(folio);
1966 folio_unlock(folio);
1972 * If a folio has any new buffers, zero them out here, and mark them uptodate
1976 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1981 BUG_ON(!folio_test_locked(folio));
1982 head = folio_buffers(folio);
1993 if (!folio_test_uptodate(folio)) {
1999 folio_zero_segment(folio, start, xend);
2082 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2087 struct inode *inode = folio->mapping->host;
2094 BUG_ON(!folio_test_locked(folio));
2099 head = folio_create_buffers(folio, inode, 0);
2103 block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2109 if (folio_test_uptodate(folio)) {
2128 if (folio_test_uptodate(folio)) {
2135 folio_zero_segments(folio,
2141 if (folio_test_uptodate(folio)) {
2162 folio_zero_new_buffers(folio, from, to);
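
__block_write_begin_int() is the engine behind block_write_begin(): it attaches buffers, maps the blocks under the write with the caller's get_block and reads any partially overwritten, not-yet-uptodate blocks. A filesystem's write_begin typically just delegates to it; a sketch against the write_begin signature of this kernel version, with myfs_get_block standing in for the filesystem's real get_block_t callback:

    /* Hypothetical get_block_t callback: maps iblock of inode to a disk block. */
    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len,
                                struct page **pagep, void **fsdata)
    {
            /* grabs and locks the page, then runs __block_write_begin_int() */
            return block_write_begin(mapping, pos, len, pagep, myfs_get_block);
    }
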
2174 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2181 bh = head = folio_buffers(folio);
2204 * the next read(). Here we 'discover' whether the folio went
2208 folio_mark_uptodate(folio);
2244 struct folio *folio = page_folio(page);
2245 size_t start = pos - folio_pos(folio);
2257 * non uptodate folio as a zero-length write, and force the
2260 if (!folio_test_uptodate(folio))
2263 folio_zero_new_buffers(folio, start+copied, start+len);
2265 flush_dcache_folio(folio);
2268 __block_commit_write(folio, start, start + copied);
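
Lines 2244-2268 are from block_write_end(), which marks the just-copied buffers dirty and uptodate via __block_commit_write(). Most filesystems reach it through generic_write_end(), which also updates i_size and unlocks the page; a sketch, with the same hypothetical names as the write_begin sketch above:

    static int myfs_write_end(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned copied,
                              struct page *page, void *fsdata)
    {
            /* generic_write_end() calls block_write_end() and then finishes
             * the i_size update, unlock and put of the page */
            return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
    }
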
2314 * block_is_partially_uptodate checks whether buffers within a folio are
2318 * of the folio are uptodate.
2320 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2327 head = folio_buffers(folio);
2331 to = min_t(unsigned, folio_size(folio) - from, count);
2333 if (from < blocksize && to > folio_size(folio) - blocksize)
2359 * Reads the folio asynchronously --- the unlock_buffer() and
2361 * folio once IO has completed.
2363 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2365 struct inode *inode = folio->mapping->host;
2378 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2380 head = folio_create_buffers(folio, inode, 0);
2384 iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2402 folio_set_error(folio);
2407 folio_zero_range(folio, i * blocksize,
2424 folio_set_mappedtodisk(folio);
2428 * All buffers are uptodate - we can set the folio uptodate
2432 folio_mark_uptodate(folio);
2433 folio_unlock(folio);
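
block_read_full_folio() is the generic read path for get_block based filesystems; the aops read_folio hook only needs a thin wrapper to supply the callback. This mirrors how the block device mapping uses it; the myfs names are hypothetical:

    static int myfs_read_folio(struct file *file, struct folio *folio)
    {
            return block_read_full_folio(folio, myfs_get_block);
    }
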
2591 struct folio *folio = page_folio(page);
2592 __block_commit_write(folio, from, to);
2617 struct folio *folio = page_folio(vmf->page);
2623 folio_lock(folio);
2625 if ((folio->mapping != inode->i_mapping) ||
2626 (folio_pos(folio) >= size)) {
2632 end = folio_size(folio);
2633 /* folio is wholly or partially inside EOF */
2634 if (folio_pos(folio) + end > size)
2635 end = size - folio_pos(folio);
2637 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2641 __block_commit_write(folio, 0, end);
2643 folio_mark_dirty(folio);
2644 folio_wait_stable(folio);
2647 folio_unlock(folio);
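
Lines 2617-2647 are from block_page_mkwrite(), which prepares and dirties the faulted folio so a write through a shared mapping hits allocated, uptodate buffers. A page_mkwrite handler typically wraps it and converts the errno with block_page_mkwrite_return(); a sketch (a real handler also takes freeze protection, e.g. sb_start_pagefault()):

    static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
    {
            int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);

            return block_page_mkwrite_return(err);
    }
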
2660 struct folio *folio;
2674 folio = filemap_grab_folio(mapping, index);
2675 if (IS_ERR(folio))
2676 return PTR_ERR(folio);
2678 bh = folio_buffers(folio);
2680 folio_create_empty_buffers(folio, blocksize, 0);
2681 bh = folio_buffers(folio);
2685 offset = offset_in_folio(folio, from);
2704 if (folio_test_uptodate(folio))
2714 folio_zero_range(folio, offset, length);
2718 folio_unlock(folio);
2719 folio_put(folio);
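
block_truncate_page() (lines 2660-2719) zeroes the tail of the block that straddles the new end of file, reading it first if necessary. Filesystems call it from their truncate path before shrinking i_size; a sketch with hypothetical names and the block freeing elided:

    static int myfs_truncate(struct inode *inode, loff_t newsize)
    {
            int err;

            /* zero the partial block containing the new EOF */
            err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
            if (err)
                    return err;
            truncate_setsize(inode, newsize);
            /* ... release the on-disk blocks beyond newsize ... */
            return 0;
    }
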
2731 struct folio *folio = page_folio(page);
2732 struct inode * const inode = folio->mapping->host;
2735 /* Is the folio fully inside i_size? */
2736 if (folio_pos(folio) + folio_size(folio) <= i_size)
2737 return __block_write_full_folio(inode, folio, get_block, wbc,
2740 /* Is the folio fully outside i_size? (truncate in progress) */
2741 if (folio_pos(folio) >= i_size) {
2742 folio_unlock(folio);
2747 * The folio straddles i_size. It must be zeroed out on each and every
2753 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2754 folio_size(folio));
2755 return __block_write_full_folio(inode, folio, get_block, wbc,
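
block_write_full_page() (lines 2731-2755) handles the fully-inside, fully-outside and straddling-i_size cases and then hands the folio to __block_write_full_folio(). In this kernel version a filesystem's writepage hook is typically just a wrapper around it, again with a hypothetical get_block callback:

    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            return block_write_full_page(page, myfs_get_block, wbc);
    }
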
2887 * try_to_free_buffers() checks if all the buffers on this particular folio
2891 * locking the folio or by holding its mapping's private_lock.
2893 * If the folio is dirty but all the buffers are clean then we need to
2894 * be sure to mark the folio clean as well. This is because the folio
2896 * to a dirty folio will set *all* buffers dirty. Which would corrupt
2900 * clean then we set the folio clean and proceed. To do that, we require
2913 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2915 struct buffer_head *head = folio_buffers(folio);
2933 folio_detach_private(folio);
2939 bool try_to_free_buffers(struct folio *folio)
2941 struct address_space * const mapping = folio->mapping;
2945 BUG_ON(!folio_test_locked(folio));
2946 if (folio_test_writeback(folio))
2950 ret = drop_buffers(folio, &buffers_to_free);
2955 ret = drop_buffers(folio, &buffers_to_free);
2959 * then we can have clean buffers against a dirty folio. We
2960 * clean the folio here; otherwise the VM will never notice
2964 * the folio's buffers clean. We discover that here and clean
2965 * the folio also.
2972 folio_cancel_dirty(folio);
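
try_to_free_buffers() is where the release_folio path ends up for buffer-head folios: it drops clean, unpinned buffers and, as the comment above explains, also cleans a folio whose buffers were all clean. A filesystem with no private state of its own can simply forward to it from its release_folio hook; one with journaled or otherwise pinned buffers must check that first. A minimal sketch:

    static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
    {
            /* a real implementation would first refuse if fs-private state
             * (e.g. a journal) still pins these buffers */
            return try_to_free_buffers(folio);
    }
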