Lines Matching defs:folio

976  * Helper for handling dirtying of journalled data. We also mark the folio as
1014 static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
1019 struct inode *inode = folio->mapping->host;
1029 BUG_ON(!folio_test_locked(folio));
1034 head = folio_buffers(folio);
1036 create_empty_buffers(&folio->page, blocksize, 0);
1037 head = folio_buffers(folio);
1040 block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1046 if (folio_test_uptodate(folio)) {
1059 if (folio_test_uptodate(folio)) {
1066 folio_zero_segments(folio, to,
1072 if (folio_test_uptodate(folio)) {
1092 folio_zero_new_buffers(folio, from, to);
1097 err2 = fscrypt_decrypt_pagecache_blocks(folio,
1125 struct folio *folio;
1153 * system is thrashing due to memory pressure, or if the folio
1156 * the folio (if needed) without using GFP_NOFS.
1159 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
1161 if (IS_ERR(folio))
1162 return PTR_ERR(folio);
1167 if (!folio_buffers(folio))
1168 create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);
1170 folio_unlock(folio);
1175 folio_put(folio);
1179 folio_lock(folio);
1180 if (folio->mapping != mapping) {
1181 /* The folio got truncated from under us */
1182 folio_unlock(folio);
1183 folio_put(folio);
1187 /* In case writeback began while the folio was unlocked */
1188 folio_wait_stable(folio);
1192 ret = ext4_block_write_begin(folio, pos, len,
1195 ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
1198 ret = __block_write_begin(&folio->page, pos, len,
1201 ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
1205 folio_buffers(folio), from, to,
1213 folio_unlock(folio);
1241 folio_put(folio);
1244 *pagep = &folio->page;
1274 struct folio *folio = page_folio(page);
1287 folio);
1291 * it's important to update i_size while still holding folio lock:
1299 folio_unlock(folio);
1300 folio_put(folio);
1305 * Don't mark the inode dirty under folio lock. First, it unnecessarily
1306 * makes the holding time of folio lock longer. Second, it forces lock
1307 * ordering of folio lock and transaction start for journaling
1345 struct folio *folio,
1351 bh = head = folio_buffers(folio);
1356 if (!folio_test_uptodate(folio)) {
1362 folio_zero_range(folio, start, size);
1378 struct folio *folio = page_folio(page);
1396 folio);
1398 if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
1400 ext4_journalled_zero_new_buffers(handle, inode, folio,
1404 ext4_journalled_zero_new_buffers(handle, inode, folio,
1407 folio_buffers(folio),
1411 folio_mark_uptodate(folio);
1416 folio_unlock(folio);
1417 folio_put(folio);
1580 struct folio *folio = fbatch.folios[i];
1582 if (folio->index < mpd->first_page)
1584 if (folio_next_index(folio) - 1 > end)
1586 BUG_ON(!folio_test_locked(folio));
1587 BUG_ON(folio_test_writeback(folio));
1589 if (folio_mapped(folio))
1590 folio_clear_dirty_for_io(folio);
1591 block_invalidate_folio(folio, 0,
1592 folio_size(folio));
1593 folio_clear_uptodate(folio);
1595 folio_unlock(folio);
1845 static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
1847 mpd->first_page += folio_nr_pages(folio);
1848 folio_unlock(folio);
1851 static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
1857 BUG_ON(folio->index != mpd->first_page);
1858 folio_clear_dirty_for_io(folio);
1865 * written to again until we release folio lock. So only after
1873 len = folio_size(folio);
1874 if (folio_pos(folio) + len > size &&
1877 err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
2003 * mpage_process_folio - update folio buffers corresponding to changed extent
2006 * @folio: Contains these buffers.
2012 * Scan given folio buffers corresponding to changed extent and update buffer
2015 * If the given folio is not fully mapped, we update @mpd to the next extent in
2016 * the given folio that needs mapping & return @map_bh as true.
2018 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2031 bh = head = folio_buffers(folio);
2038 * Find next buffer in the folio to map.
2111 struct folio *folio = fbatch.folios[i];
2113 err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2123 err = mpage_submit_folio(mpd, folio);
2126 mpage_folio_done(mpd, folio);
2311 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2314 struct buffer_head *page_bufs = folio_buffers(folio);
2315 struct inode *inode = folio->mapping->host;
2324 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2334 struct folio *folio)
2338 size_t len = folio_size(folio);
2340 folio_clear_checked(folio);
2343 if (folio_pos(folio) + len > size &&
2345 len = size - folio_pos(folio);
2347 return ext4_journal_folio_buffers(handle, folio, len);
2406 struct folio *folio = fbatch.folios[i];
2422 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2432 folio_lock(folio);
2440 if (!folio_test_dirty(folio) ||
2441 (folio_test_writeback(folio) &&
2443 unlikely(folio->mapping != mapping)) {
2444 folio_unlock(folio);
2448 folio_wait_writeback(folio);
2449 BUG_ON(folio_test_writeback(folio));
2460 if (!folio_buffers(folio)) {
2461 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2462 folio_clear_dirty(folio);
2463 folio_unlock(folio);
2468 mpd->first_page = folio->index;
2469 mpd->next_page = folio_next_index(folio);
2481 err = mpage_submit_folio(mpd, folio);
2485 if (folio_test_checked(folio)) {
2487 mpd, folio);
2492 mpage_folio_done(mpd, folio);
2495 lblk = ((ext4_lblk_t)folio->index) <<
2497 head = folio_buffers(folio);
2866 struct folio *folio;
2893 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2895 if (IS_ERR(folio))
2896 return PTR_ERR(folio);
2898 /* In case writeback began while the folio was unlocked */
2899 folio_wait_stable(folio);
2902 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2904 ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2907 folio_unlock(folio);
2908 folio_put(folio);
2923 *pagep = &folio->page;
2931 static int ext4_da_should_update_i_disksize(struct folio *folio,
2935 struct inode *inode = folio->mapping->host;
2939 bh = folio_buffers(folio);
3018 struct folio *folio = page_folio(page);
3022 len, copied, &folio->page, fsdata);
3030 folio);
3035 return ext4_da_do_write_end(mapping, pos, len, copied, &folio->page);
3126 static int ext4_read_folio(struct file *file, struct folio *folio)
3129 struct inode *inode = folio->mapping->host;
3131 trace_ext4_read_folio(inode, folio);
3134 ret = ext4_readpage_inline(inode, folio);
3137 return ext4_mpage_readpages(inode, NULL, folio);
3153 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3156 trace_ext4_invalidate_folio(folio, offset, length);
3159 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3161 block_invalidate_folio(folio, offset, length);
3164 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3167 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3169 trace_ext4_journalled_invalidate_folio(folio, offset, length);
3174 if (offset == 0 && length == folio_size(folio))
3175 folio_clear_checked(folio);
3177 return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3181 static void ext4_journalled_invalidate_folio(struct folio *folio,
3185 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3188 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3190 struct inode *inode = folio->mapping->host;
3193 trace_ext4_release_folio(inode, folio);
3196 if (folio_test_checked(folio))
3199 return jbd2_journal_try_to_free_buffers(journal, folio);
3201 return try_to_free_buffers(folio);
3519 * For data=journal mode, folio should be marked dirty only when it was
3527 * folio and leave attached buffers clean, because the buffers' dirty state is
3529 * the journalling code will explode. So what we do is to mark the folio
3534 struct folio *folio)
3536 WARN_ON_ONCE(!folio_buffers(folio));
3537 if (folio_maybe_dma_pinned(folio))
3538 folio_set_checked(folio);
3539 return filemap_dirty_folio(mapping, folio);
3542 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3544 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3545 WARN_ON_ONCE(!folio_buffers(folio));
3546 return block_dirty_folio(mapping, folio);
3644 struct folio *folio;
3647 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3650 if (IS_ERR(folio))
3651 return PTR_ERR(folio);
3657 bh = folio_buffers(folio);
3659 create_empty_buffers(&folio->page, blocksize, 0);
3660 bh = folio_buffers(folio);
3685 if (folio_test_uptodate(folio))
3695 err = fscrypt_decrypt_pagecache_blocks(folio,
3711 folio_zero_range(folio, offset, length);
3725 folio_unlock(folio);
3726 folio_put(folio);
5230 * buffers that are attached to a folio straddling i_size and are undergoing
5242 * If the folio is fully truncated, we don't need to wait for any commit
5244 * strip all buffers from the folio but keep the folio dirty which can then
5245 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
5247 * the folio remain valid. This is most beneficial for the common case of
5253 struct folio *folio = filemap_lock_folio(inode->i_mapping,
5255 if (IS_ERR(folio))
5257 ret = __ext4_journalled_invalidate_folio(folio, offset,
5258 folio_size(folio) - offset);
5259 folio_unlock(folio);
5260 folio_put(folio);
6070 struct folio *folio = page_folio(vmf->page);
6114 folio_lock(folio);
6117 if (folio->mapping != mapping || folio_pos(folio) > size) {
6118 folio_unlock(folio);
6123 len = folio_size(folio);
6124 if (folio_pos(folio) + len > size)
6125 len = size - folio_pos(folio);
6133 if (folio_buffers(folio)) {
6134 if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6138 folio_wait_stable(folio);
6143 folio_unlock(folio);
6164 folio_lock(folio);
6167 if (folio->mapping != mapping || folio_pos(folio) > size) {
6172 len = folio_size(folio);
6173 if (folio_pos(folio) + len > size)
6174 len = size - folio_pos(folio);
6176 err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
6179 if (ext4_journal_folio_buffers(handle, folio, len))
6182 folio_unlock(folio);
6195 folio_unlock(folio);
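
Many of the matches above come from ext4's write paths (ext4_write_begin(), ext4_da_write_begin(), ext4_block_zero_page_range(), ext4_page_mkwrite()), and they repeat one folio-handling sequence: look up a locked folio with __filemap_get_folio(..., FGP_WRITEBEGIN, ...), bail out on the ERR_PTR() return, wait for in-flight writeback with folio_wait_stable(), and on failure drop the lock and reference with folio_unlock()/folio_put(). The sketch below is a minimal illustration of that sequence, not ext4 code; the helper names example_get_locked_folio() and example_relock_folio() are made up for this note.

#include <linux/pagemap.h>
#include <linux/err.h>

/* Grab a locked, referenced folio and wait for any stable-page writeback. */
static struct folio *example_get_locked_folio(struct address_space *mapping,
					      pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return folio;		/* ERR_PTR(); caller propagates it */

	/* In case writeback began while the folio was unlocked. */
	folio_wait_stable(folio);
	return folio;
}

/*
 * If the folio lock had to be dropped (e.g. to start a transaction),
 * re-take it and check that the folio was not truncated in the meantime,
 * as the "The folio got truncated from under us" match shows.
 */
static int example_relock_folio(struct folio *folio,
				struct address_space *mapping)
{
	folio_lock(folio);
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		folio_put(folio);
		return -EAGAIN;		/* hypothetical: caller retries the lookup */
	}
	return 0;
}

The explicit folio_wait_stable() call mirrors the comment "In case writeback began while the folio was unlocked" that appears twice in the matches above.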