Lines matching refs:inode in linux/fs/ext4/inode.c (each matching line is shown prefixed with its line number in that file)
3 * linux/fs/ext4/inode.c
12 * linux/fs/minix/inode.c
52 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
55 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
67 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
78 EXT4_INODE_SIZE(inode->i_sb) - offset);
84 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
89 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
91 !ext4_has_metadata_csum(inode->i_sb))
95 calculated = ext4_inode_csum(inode, raw, ei);
96 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
105 void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
110 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
112 !ext4_has_metadata_csum(inode->i_sb))
115 csum = ext4_inode_csum(inode, raw, ei);
117 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
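The fragments above (source lines 52-117) are ext4's per-inode metadata checksum helpers: the checksum is computed over the raw on-disk inode with the stored checksum bytes treated as zero, and the 32-bit result is split between a low 16-bit field and, for inodes larger than the good-old 128-byte format, a high 16-bit field. A minimal user-space sketch of that pattern follows; it is not the kernel code, crc32c() below is a local stand-in, and struct raw_inode is hypothetical:

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in bitwise CRC-32C; the kernel uses its own crc32c() helpers. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
        const unsigned char *p = buf;

        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
        }
        return ~crc;
    }

    /* Hypothetical raw inode: only the checksum fields matter here. */
    struct raw_inode {
        uint16_t      checksum_lo;   /* always present                   */
        uint16_t      checksum_hi;   /* only valid for "large" inodes    */
        unsigned char body[252];     /* rest of the on-disk inode        */
    };

    /* Checksum the raw inode with the stored checksum bytes zeroed out;
     * verification recomputes this and compares with the stored value. */
    static uint32_t inode_csum(uint32_t seed, const struct raw_inode *raw,
                               int has_hi)
    {
        struct raw_inode tmp = *raw;

        tmp.checksum_lo = 0;
        if (has_hi)
            tmp.checksum_hi = 0;
        return crc32c(seed, &tmp, sizeof(tmp));
    }

    static void inode_csum_set(uint32_t seed, struct raw_inode *raw, int has_hi)
    {
        uint32_t csum = inode_csum(seed, raw, has_hi);

        raw->checksum_lo = (uint16_t)(csum & 0xffff);
        if (has_hi)
            raw->checksum_hi = (uint16_t)(csum >> 16);
    }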
122 static inline int ext4_begin_ordered_truncate(struct inode *inode,
125 trace_ext4_begin_ordered_truncate(inode, new_size);
132 if (!EXT4_I(inode)->jinode)
134 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
135 EXT4_I(inode)->jinode,
139 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
143 * Test whether an inode is a fast symlink.
146 int ext4_inode_is_fast_symlink(struct inode *inode)
148 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
149 int ea_blocks = EXT4_I(inode)->i_file_acl ?
150 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
152 if (ext4_has_inline_data(inode))
155 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
157 return S_ISLNK(inode->i_mode) && inode->i_size &&
158 (inode->i_size < EXT4_N_BLOCKS * 4);
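A fast symlink (source lines 143-158 above) is one whose target string fits inside the inode's i_data[] array, which is EXT4_N_BLOCKS * 4 = 15 * 4 = 60 bytes, so the symlink needs no data block. A hypothetical sketch of just the size test, with illustrative names:

    /* i_data[] offers 15 * 4 = 60 bytes; shorter targets are stored inline. */
    #define N_BLOCKS    15
    #define INLINE_MAX  (N_BLOCKS * 4)

    static int is_fast_symlink(unsigned long long target_len)
    {
        return target_len > 0 && target_len < INLINE_MAX;
    }

For example, a 10-byte target is stored inline and the symlink reports zero data blocks, while a 200-byte target has to go into a separate block.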
164 void ext4_evict_inode(struct inode *inode)
169 * Credits for final inode cleanup and freeing:
170 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
171 * (xattr block freeing), bitmap, group descriptor (inode freeing)
177 trace_ext4_evict_inode(inode);
179 if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
180 ext4_evict_ea_inode(inode);
181 if (inode->i_nlink) {
182 truncate_inode_pages_final(&inode->i_data);
187 if (is_bad_inode(inode))
189 dquot_initialize(inode);
191 if (ext4_should_order_data(inode))
192 ext4_begin_ordered_truncate(inode, 0);
193 truncate_inode_pages_final(&inode->i_data);
197 * dirtied the inode. And for inodes with dioread_nolock, unwritten
199 * the inode. Flush worker is ignoring it because of I_FREEING flag but
200 * we still need to remove the inode from the writeback lists.
202 if (!list_empty_careful(&inode->i_io_list))
203 inode_io_list_del(inode);
212 sb_start_intwrite(inode->i_sb);
216 if (!IS_NOQUOTA(inode))
217 extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
220 * Block bitmap, group descriptor, and inode are accounted in both
223 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
224 ext4_blocks_for_truncate(inode) + extra_credits - 3);
226 ext4_std_error(inode->i_sb, PTR_ERR(handle));
232 ext4_orphan_del(NULL, inode);
234 sb_end_intwrite(inode->i_sb);
238 if (IS_SYNC(inode))
242 * Set inode->i_size to 0 before calling ext4_truncate(). We need
248 if (ext4_inode_is_fast_symlink(inode))
249 memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
250 inode->i_size = 0;
251 err = ext4_mark_inode_dirty(handle, inode);
253 ext4_warning(inode->i_sb,
254 "couldn't mark inode dirty (err %d)", err);
257 if (inode->i_blocks) {
258 err = ext4_truncate(inode);
260 ext4_error_err(inode->i_sb, -err,
261 "couldn't truncate inode %lu (err %d)",
262 inode->i_ino, err);
268 err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
271 ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
274 ext4_orphan_del(NULL, inode);
276 sb_end_intwrite(inode->i_sb);
289 ext4_orphan_del(handle, inode);
290 EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();
296 * having errors), but we can't free the inode if the mark_dirty
299 if (ext4_mark_inode_dirty(handle, inode))
300 /* If that failed, just do the required in-core inode clear. */
301 ext4_clear_inode(inode);
303 ext4_free_inode(handle, inode);
306 sb_end_intwrite(inode->i_sb);
311 * Check that nowhere else has accidentally dirtied the evicting inode,
312 * which could otherwise cause inode use-after-free issues later.
314 WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
316 if (!list_empty(&EXT4_I(inode)->i_fc_list))
317 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
318 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
322 qsize_t *ext4_get_reserved_space(struct inode *inode)
324 return &EXT4_I(inode)->i_reserved_quota;
332 void ext4_da_update_reserve_space(struct inode *inode,
335 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
336 struct ext4_inode_info *ei = EXT4_I(inode);
339 trace_ext4_da_update_reserve_space(inode, used, quota_claim);
341 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
343 __func__, inode->i_ino, used,
349 /* Update per-inode reservations */
357 dquot_claim_block(inode, EXT4_C2B(sbi, used));
364 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
369 * there aren't any writers on the inode, we can discard the
370 * inode's preallocations.
373 !inode_is_open_for_write(inode))
374 ext4_discard_preallocations(inode, 0);
377 static int __check_block_validity(struct inode *inode, const char *func,
381 if (ext4_has_feature_journal(inode->i_sb) &&
382 (inode->i_ino ==
383 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
385 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
386 ext4_error_inode(inode, func, line, map->m_pblk,
395 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
400 if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
401 return fscrypt_zeroout_range(inode, lblk, pblk, len);
403 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
410 #define check_block_validity(inode, map) \
411 __check_block_validity((inode), __func__, __LINE__, (map))
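The macro at source lines 410-411 follows a common kernel pattern: the wrapper captures the caller's __func__ and __LINE__ and hands them to a worker that performs the check and reports where it was invoked from. A small stand-alone sketch of the same pattern (names are illustrative):

    #include <stdio.h>

    static int __check_range(const char *func, unsigned int line,
                             unsigned long start, unsigned long len,
                             unsigned long limit)
    {
        if (len > limit || start > limit - len) {
            fprintf(stderr, "%s:%u: range [%lu, +%lu) exceeds limit %lu\n",
                    func, line, start, len, limit);
            return -1;
        }
        return 0;
    }

    #define check_range(start, len, limit) \
        __check_range(__func__, __LINE__, (start), (len), (limit))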
415 struct inode *inode,
430 down_read(&EXT4_I(inode)->i_data_sem);
431 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
432 retval = ext4_ext_map_blocks(handle, inode, map, 0);
434 retval = ext4_ind_map_blocks(handle, inode, map, 0);
436 up_read((&EXT4_I(inode)->i_data_sem));
445 printk("ES cache assertion failed for inode: %lu "
448 inode->i_ino, es_map->m_lblk, es_map->m_len,
478 int ext4_map_blocks(handle_t *handle, struct inode *inode,
491 ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
505 if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
506 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
530 ext4_map_blocks_es_recheck(handle, inode, map,
546 down_read(&EXT4_I(inode)->i_data_sem);
547 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
548 retval = ext4_ext_map_blocks(handle, inode, map, 0);
550 retval = ext4_ind_map_blocks(handle, inode, map, 0);
556 ext4_warning(inode->i_sb,
557 "ES len assertion failed for inode "
559 inode->i_ino, retval, map->m_len);
567 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
570 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
573 up_read((&EXT4_I(inode)->i_data_sem));
577 ret = check_block_validity(inode, map);
614 down_write(&EXT4_I(inode)->i_data_sem);
618 * could have changed the inode type in between
620 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
621 retval = ext4_ext_map_blocks(handle, inode, map, flags);
623 retval = ext4_ind_map_blocks(handle, inode, map, flags);
631 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
639 ext4_warning(inode->i_sb,
640 "ES len assertion failed for inode "
642 inode->i_ino, retval, map->m_len);
656 ret = ext4_issue_zeroout(inode, map->m_lblk,
669 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
677 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
680 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
685 up_write((&EXT4_I(inode)->i_data_sem));
687 ret = check_block_validity(inode, map);
699 !ext4_is_quota_file(inode) &&
700 ext4_should_order_data(inode)) {
702 (loff_t)map->m_lblk << inode->i_blkbits;
703 loff_t length = (loff_t)map->m_len << inode->i_blkbits;
706 ret = ext4_jbd2_inode_add_wait(handle, inode,
709 ret = ext4_jbd2_inode_add_write(handle, inode,
717 ext4_fc_track_range(handle, inode, map->m_lblk,
720 ext_debug(inode, "failed with err %d\n", retval);
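The ext4_map_blocks() fragments above (source lines 478-720) reflect its calling convention: the caller fills m_lblk and m_len, and the function returns the number of contiguous blocks it mapped (filling m_pblk and m_flags), 0 if the range starts in a hole, or a negative error. The following is only a toy user-space model of that contract, with a hard-coded extent table and hypothetical names:

    #include <stddef.h>

    struct map_blocks {
        unsigned int  lblk;   /* first logical block (input)            */
        unsigned int  len;    /* blocks requested (in) / mapped (out)   */
        unsigned long pblk;   /* first physical block (output)          */
        unsigned int  flags;  /* e.g. MAP_MAPPED (output)               */
    };

    struct extent { unsigned int lblk, len; unsigned long pblk; };

    static const struct extent extents[] = {
        {  0, 8, 1000 },   /* logical 0..7   -> physical 1000..1007 */
        { 16, 4, 2048 },   /* logical 16..19 -> physical 2048..2051 */
    };

    #define MAP_MAPPED 0x1u

    /* Return how many contiguous blocks are mapped starting at map->lblk,
     * or 0 when that block falls in a hole. */
    static int toy_map_blocks(struct map_blocks *map)
    {
        for (size_t i = 0; i < sizeof(extents) / sizeof(extents[0]); i++) {
            const struct extent *ex = &extents[i];

            if (map->lblk >= ex->lblk && map->lblk < ex->lblk + ex->len) {
                unsigned int avail = ex->lblk + ex->len - map->lblk;

                map->pblk = ex->pblk + (map->lblk - ex->lblk);
                if (avail < map->len)
                    map->len = avail;
                map->flags = MAP_MAPPED;
                return (int)map->len;
            }
        }
        return 0;
    }

Asking this table for lblk = 2, len = 10 returns 6 with pblk = 1002; asking for lblk = 10 returns 0 because that block is a hole.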
751 static int _ext4_get_block(struct inode *inode, sector_t iblock,
757 if (ext4_has_inline_data(inode))
761 map.m_len = bh->b_size >> inode->i_blkbits;
763 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
766 map_bh(bh, inode->i_sb, map.m_pblk);
768 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
772 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
777 int ext4_get_block(struct inode *inode, sector_t iblock,
780 return _ext4_get_block(inode, iblock, bh,
789 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
794 ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
795 inode->i_ino, create);
796 ret = _ext4_get_block(inode, iblock, bh_result,
816 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
825 ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
831 err = ext4_map_blocks(handle, inode, &map, map_flags);
839 return sb_find_get_block(inode->i_sb, map.m_pblk);
841 bh = sb_getblk(inode->i_sb, map.m_pblk);
846 ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
858 err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
865 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
870 err = ext4_handle_dirty_metadata(handle, inode, bh);
881 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
887 bh = ext4_getblk(handle, inode, block, map_flags);
902 int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
908 bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
944 int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
949 int (*fn)(handle_t *handle, struct inode *inode,
968 err = (*fn)(handle, inode, bh);
977 * dirty so that writeback code knows about this page (and inode) contains
987 int do_journal_get_write_access(handle_t *handle, struct inode *inode,
1006 ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
1019 struct inode *inode = folio->mapping->host;
1023 unsigned blocksize = inode->i_sb->s_blocksize;
1055 err = get_block(inode, block, bh, 1);
1093 } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
1121 struct inode *inode = mapping->host;
1129 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
1132 trace_ext4_write_begin(inode, pos, len);
1137 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1142 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1143 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1168 create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);
1173 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1191 if (ext4_should_dioread_nolock(inode))
1197 if (ext4_should_dioread_nolock(inode))
1203 if (!ret && ext4_should_journal_data(inode)) {
1204 ret = ext4_walk_page_buffers(handle, inode,
1210 bool extended = (pos + len > inode->i_size) &&
1211 !ext4_verity_in_progress(inode);
1219 * Add inode to orphan list in case we crash before
1222 if (extended && ext4_can_truncate(inode))
1223 ext4_orphan_add(handle, inode);
1227 ext4_truncate_failed_write(inode);
1229 * If truncate failed early the inode might
1231 * make sure the inode is removed from the
1234 if (inode->i_nlink)
1235 ext4_orphan_del(NULL, inode);
1239 ext4_should_retry_alloc(inode->i_sb, &retries))
1249 static int write_end_fn(handle_t *handle, struct inode *inode,
1263 * We need to pick up the new inode size which generic_commit_write gave us
1266 * ext4 never places buffers on inode->i_mapping->private_list. metadata
1276 struct inode *inode = mapping->host;
1277 loff_t old_size = inode->i_size;
1280 bool verity = ext4_verity_in_progress(inode);
1282 trace_ext4_write_end(inode, pos, len, copied);
1284 if (ext4_has_inline_data(inode) &&
1285 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
1286 return ext4_write_inline_data_end(inode, pos, len, copied,
1294 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1298 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1303 pagecache_isize_extended(inode, old_size, pos);
1305 * Don't mark the inode dirty under folio lock. First, it unnecessarily
1311 ret = ext4_mark_inode_dirty(handle, inode);
1313 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1316 * inode->i_size. So truncate them
1318 ext4_orphan_add(handle, inode);
1324 if (pos + len > inode->i_size && !verity) {
1325 ext4_truncate_failed_write(inode);
1327 * If truncate failed early the inode might still be
1328 * on the orphan list; we need to make sure the inode
1331 if (inode->i_nlink)
1332 ext4_orphan_del(NULL, inode);
1344 struct inode *inode,
1363 write_end_fn(handle, inode, bh);
1380 struct inode *inode = mapping->host;
1381 loff_t old_size = inode->i_size;
1386 bool verity = ext4_verity_in_progress(inode);
1388 trace_ext4_journalled_write_end(inode, pos, len, copied);
1394 if (ext4_has_inline_data(inode))
1395 return ext4_write_inline_data_end(inode, pos, len, copied,
1400 ext4_journalled_zero_new_buffers(handle, inode, folio,
1404 ext4_journalled_zero_new_buffers(handle, inode, folio,
1406 ret = ext4_walk_page_buffers(handle, inode,
1414 size_changed = ext4_update_inode_size(inode, pos + copied);
1415 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1420 pagecache_isize_extended(inode, old_size, pos);
1423 ret2 = ext4_mark_inode_dirty(handle, inode);
1428 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1431 * inode->i_size. So truncate them
1433 ext4_orphan_add(handle, inode);
1438 if (pos + len > inode->i_size && !verity) {
1439 ext4_truncate_failed_write(inode);
1441 * If truncate failed early the inode might still be
1442 * on the orphan list; we need to make sure the inode
1445 if (inode->i_nlink)
1446 ext4_orphan_del(NULL, inode);
1455 static int ext4_da_reserve_space(struct inode *inode)
1457 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1458 struct ext4_inode_info *ei = EXT4_I(inode);
1466 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1473 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1477 trace_ext4_da_reserve_space(inode);
1483 void ext4_da_release_space(struct inode *inode, int to_free)
1485 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1486 struct ext4_inode_info *ei = EXT4_I(inode);
1491 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1493 trace_ext4_da_release_space(inode, to_free);
1501 ext4_warning(inode->i_sb, "ext4_da_release_space: "
1503 "data blocks", inode->i_ino, to_free,
1513 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1515 dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
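The reservation helpers above (ext4_da_reserve_space at source line 1455, ext4_da_release_space at 1483, and ext4_da_update_reserve_space back at 332) do the delayed-allocation bookkeeping: a cluster is reserved when a delalloc write dirties pagecache, and the reservation is later either claimed, when real blocks are allocated at writeback, or released, when the dirty data is dropped. Below is a deliberately simplified model of those counters, with hypothetical names and none of the quota, percpu-counter, or locking details:

    #include <assert.h>

    struct toy_inode {
        unsigned int reserved_data_blocks;   /* i_reserved_data_blocks-ish */
    };

    static int da_reserve_space(struct toy_inode *ei, unsigned int *free_clusters)
    {
        if (*free_clusters == 0)
            return -1;                       /* -ENOSPC in the real code */
        (*free_clusters)--;
        ei->reserved_data_blocks++;
        return 0;
    }

    /* Writeback allocated 'used' clusters: they stop being "reserved". */
    static void da_claim_space(struct toy_inode *ei, unsigned int used)
    {
        assert(used <= ei->reserved_data_blocks);
        ei->reserved_data_blocks -= used;
    }

    /* Dirty data dropped without allocation: hand the reservation back. */
    static void da_release_space(struct toy_inode *ei, unsigned int to_free,
                                 unsigned int *free_clusters)
    {
        assert(to_free <= ei->reserved_data_blocks);
        ei->reserved_data_blocks -= to_free;
        *free_clusters += to_free;
    }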
1524 struct inode *inode;
1550 struct inode *inode = mpd->inode;
1551 struct address_space *mapping = inode->i_mapping;
1562 start = index << (PAGE_SHIFT - inode->i_blkbits);
1563 last = end << (PAGE_SHIFT - inode->i_blkbits);
1569 down_write(&EXT4_I(inode)->i_data_sem);
1570 ext4_es_remove_extent(inode, start, last - start + 1);
1571 up_write(&EXT4_I(inode)->i_data_sem);
1601 static void ext4_print_free_blocks(struct inode *inode)
1603 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1604 struct super_block *sb = inode->i_sb;
1605 struct ext4_inode_info *ei = EXT4_I(inode);
1608 EXT4_C2B(EXT4_SB(inode->i_sb),
1629 * @inode - file containing the newly added block
1634 static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1636 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1652 ret = ext4_da_reserve_space(inode);
1656 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1657 if (!ext4_es_scan_clu(inode,
1659 ret = ext4_clu_mapped(inode,
1664 ret = ext4_da_reserve_space(inode);
1676 ext4_es_insert_delayed_block(inode, lblk, allocated);
1686 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1699 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1703 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1707 if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1710 down_read(&EXT4_I(inode)->i_data_sem);
1719 map_bh(bh, inode->i_sb, invalid_block);
1738 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1747 down_read(&EXT4_I(inode)->i_data_sem);
1748 if (ext4_has_inline_data(inode))
1750 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1751 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1753 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1764 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1770 map_bh(bh, inode->i_sb, invalid_block);
1777 ext4_warning(inode->i_sb,
1778 "ES len assertion failed for inode "
1780 inode->i_ino, retval, map->m_len);
1786 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1791 up_read((&EXT4_I(inode)->i_data_sem));
1808 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1815 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1825 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1829 map_bh(bh, inode->i_sb, map.m_pblk);
1872 size = i_size_read(mpd->inode);
1875 !ext4_verity_in_progress(mpd->inode))
1966 struct inode *inode = mpd->inode;
1968 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
1969 >> inode->i_blkbits;
1971 if (ext4_verity_in_progress(inode))
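The expression at source lines 1968-1969 rounds i_size up to whole filesystem blocks. For example, assuming 4096-byte blocks (i_blkbits == 12) and i_size == 8193, blocks == (8193 + 4096 - 1) >> 12 == 12288 >> 12 == 3, so the file's data ends within logical block 2 and three blocks are covered.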
2027 int blkbits = mpd->inode->i_blkbits;
2092 struct inode *inode = mpd->inode;
2093 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2107 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2141 struct inode *inode = mpd->inode;
2146 trace_ext4_da_write_pages_extent(inode, map);
2165 dioread_nolock = ext4_should_dioread_nolock(inode);
2171 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2180 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2211 struct inode *inode = mpd->inode;
2222 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2226 struct super_block *sb = inode->i_sb;
2243 "inode %lu at logical offset %llu with"
2245 inode->i_ino,
2252 ext4_print_free_blocks(inode);
2273 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2277 down_write(&EXT4_I(inode)->i_data_sem);
2278 i_size = i_size_read(inode);
2281 if (disksize > EXT4_I(inode)->i_disksize)
2282 EXT4_I(inode)->i_disksize = disksize;
2283 up_write(&EXT4_I(inode)->i_data_sem);
2284 err2 = ext4_mark_inode_dirty(handle, inode);
2286 ext4_error_err(inode->i_sb, -err2,
2287 "Failed to mark inode %lu dirty",
2288 inode->i_ino);
2303 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2305 int bpp = ext4_journal_blocks_per_page(inode);
2307 return ext4_meta_trans_blocks(inode,
2315 struct inode *inode = folio->mapping->host;
2318 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2320 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2324 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2327 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2336 struct inode *inode = mpd->inode;
2337 loff_t size = i_size_read(inode);
2344 !ext4_verity_in_progress(inode))
2372 struct address_space *mapping = mpd->inode->i_mapping;
2379 int blkbits = mpd->inode->i_blkbits;
2383 int bpp = ext4_journal_blocks_per_page(mpd->inode);
2392 if (ext4_should_journal_data(mpd->inode)) {
2393 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2435 * longer corresponds to inode we are writing (which
2461 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2527 struct inode *inode = mpd->inode;
2528 struct address_space *mapping = inode->i_mapping;
2534 trace_ext4_writepages(inode, wbc);
2538 * a transaction for special inodes like journal inode on last iput()
2564 if (ext4_has_inline_data(inode)) {
2565 /* Just inode will be modified... */
2566 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2571 BUG_ON(ext4_test_inode_state(inode,
2573 ext4_destroy_inline_data(handle, inode);
2589 if (ext4_should_journal_data(inode)) {
2593 EXT4_I(inode)->i_datasync_tid);
2597 if (ext4_should_dioread_nolock(inode)) {
2600 * the page and we may dirty the inode.
2602 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2603 PAGE_SIZE >> inode->i_blkbits);
2635 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2652 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2666 BUG_ON(ext4_should_journal_data(inode));
2667 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2670 handle = ext4_journal_start_with_reserve(inode,
2674 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2676 wbc->nr_to_write, inode->i_ino, ret);
2684 trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2755 trace_ext4_writepages_result(inode, wbc, ret,
2765 .inode = mapping->host,
2798 .inode = jinode->i_vfs_inode,
2810 struct inode *inode = mapping->host;
2813 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2816 alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2817 trace_ext4_writepages(inode, wbc);
2820 EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2821 trace_ext4_writepages_result(inode, wbc, ret,
2823 ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2868 struct inode *inode = mapping->host;
2870 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2875 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2881 trace_ext4_da_write_begin(inode, pos, len);
2883 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2884 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2912 * i_size_read because we hold inode lock.
2914 if (pos + len > inode->i_size)
2915 ext4_truncate_failed_write(inode);
2918 ext4_should_retry_alloc(inode->i_sb, &retries))
2935 struct inode *inode = folio->mapping->host;
2940 idx = offset >> inode->i_blkbits;
2954 struct inode *inode = mapping->host;
2955 loff_t old_size = inode->i_size;
2960 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
2971 * Since we are holding inode lock, we are sure i_disksize <=
2981 if (new_i_size > inode->i_size) {
2984 i_size_write(inode, new_i_size);
2987 ext4_update_i_disksize(inode, new_i_size);
2996 pagecache_isize_extended(inode, old_size, pos);
3001 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3004 ext4_mark_inode_dirty(handle, inode);
3016 struct inode *inode = mapping->host;
3024 trace_ext4_da_write_end(inode, pos, len, copied);
3027 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3028 ext4_has_inline_data(inode))
3029 return ext4_write_inline_data_end(inode, pos, len, copied,
3039 * Force all delayed allocation blocks to be allocated for a given inode.
3041 int ext4_alloc_da_blocks(struct inode *inode)
3043 trace_ext4_alloc_da_blocks(inode);
3045 if (!EXT4_I(inode)->i_reserved_data_blocks)
3079 return filemap_flush(inode->i_mapping);
3098 struct inode *inode = mapping->host;
3101 inode_lock_shared(inode);
3105 if (ext4_has_inline_data(inode))
3109 (test_opt(inode->i_sb, DELALLOC) ||
3110 ext4_should_journal_data(inode))) {
3122 inode_unlock_shared(inode);
3129 struct inode *inode = folio->mapping->host;
3131 trace_ext4_read_folio(inode, folio);
3133 if (ext4_has_inline_data(inode))
3134 ret = ext4_readpage_inline(inode, folio);
3137 return ext4_mpage_readpages(inode, NULL, folio);
3144 struct inode *inode = rac->mapping->host;
3147 if (ext4_has_inline_data(inode))
3150 ext4_mpage_readpages(inode, rac, NULL);
3190 struct inode *inode = folio->mapping->host;
3191 journal_t *journal = EXT4_JOURNAL(inode);
3193 trace_ext4_release_folio(inode, folio);
3204 static bool ext4_inode_datasync_dirty(struct inode *inode)
3206 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3210 EXT4_I(inode)->i_datasync_tid))
3212 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3213 return !list_empty(&EXT4_I(inode)->i_fc_list);
3218 if (!list_empty(&inode->i_mapping->private_list))
3220 return inode->i_state & I_DIRTY_DATASYNC;
3223 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3227 u8 blkbits = inode->i_blkbits;
3235 if (ext4_inode_datasync_dirty(inode) ||
3236 offset + length > i_size_read(inode))
3243 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3245 iomap->bdev = inode->i_sb->s_bdev;
3250 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3266 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3271 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3278 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3282 u8 blkbits = inode->i_blkbits;
3291 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3300 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3317 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3319 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3322 ret = ext4_map_blocks(handle, inode, map, m_flags);
3333 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3340 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3345 u8 blkbits = inode->i_blkbits;
3350 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3367 if (offset + length <= i_size_read(inode)) {
3368 ret = ext4_map_blocks(NULL, inode, &map, 0);
3372 ret = ext4_iomap_alloc(inode, &map, flags);
3374 ret = ext4_map_blocks(NULL, inode, &map, 0);
3385 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3387 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3392 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3403 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3408 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3434 static bool ext4_iomap_is_delalloc(struct inode *inode,
3440 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3457 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3464 u8 blkbits = inode->i_blkbits;
3469 if (ext4_has_inline_data(inode)) {
3470 ret = ext4_inline_data_iomap(inode, iomap);
3491 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3492 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3500 ret = ext4_map_blocks(NULL, inode, &map, 0);
3504 delalloc = ext4_iomap_is_delalloc(inode, &map);
3507 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3615 void ext4_set_aops(struct inode *inode)
3617 switch (ext4_inode_journal_mode(inode)) {
3622 inode->i_mapping->a_ops = &ext4_journalled_aops;
3627 if (IS_DAX(inode))
3628 inode->i_mapping->a_ops = &ext4_dax_aops;
3629 else if (test_opt(inode->i_sb, DELALLOC))
3630 inode->i_mapping->a_ops = &ext4_da_aops;
3632 inode->i_mapping->a_ops = &ext4_aops;
3642 struct inode *inode = mapping->host;
3653 blocksize = inode->i_sb->s_blocksize;
3655 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3676 ext4_get_block(inode, iblock, bh, 0);
3692 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3694 BUG_ON(!fscrypt_has_encryption_key(inode));
3704 if (ext4_should_journal_data(inode)) {
3706 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3714 if (ext4_should_journal_data(inode)) {
3719 if (ext4_should_order_data(inode))
3720 err = ext4_jbd2_inode_add_write(handle, inode, from,
3740 struct inode *inode = mapping->host;
3742 unsigned blocksize = inode->i_sb->s_blocksize;
3752 if (IS_DAX(inode)) {
3753 return dax_zero_range(inode, from, length, NULL,
3771 struct inode *inode = mapping->host;
3773 /* If we are processing an encrypted inode during orphan list handling */
3774 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3777 blocksize = inode->i_sb->s_blocksize;
3783 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3786 struct super_block *sb = inode->i_sb;
3787 struct address_space *mapping = inode->i_mapping;
3821 int ext4_can_truncate(struct inode *inode)
3823 if (S_ISREG(inode->i_mode))
3825 if (S_ISDIR(inode->i_mode))
3827 if (S_ISLNK(inode->i_mode))
3828 return !ext4_inode_is_fast_symlink(inode);
3838 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3844 loff_t size = i_size_read(inode);
3846 WARN_ON(!inode_is_locked(inode));
3850 if (EXT4_I(inode)->i_disksize >= size)
3853 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3856 ext4_update_i_disksize(inode, size);
3857 ret = ext4_mark_inode_dirty(handle, inode);
3863 static void ext4_wait_dax_page(struct inode *inode)
3865 filemap_invalidate_unlock(inode->i_mapping);
3867 filemap_invalidate_lock(inode->i_mapping);
3870 int ext4_break_layouts(struct inode *inode)
3875 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3879 page = dax_layout_busy_page(inode->i_mapping);
3886 ext4_wait_dax_page(inode));
3896 * @inode: File inode
3905 struct inode *inode = file_inode(file);
3906 struct super_block *sb = inode->i_sb;
3908 struct address_space *mapping = inode->i_mapping;
3910 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3915 trace_ext4_punch_hole(inode, offset, length, 0);
3928 inode_lock(inode);
3931 if (offset >= inode->i_size)
3938 if (offset + length > inode->i_size) {
3939 length = inode->i_size +
3940 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3948 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
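The statement at source lines 3938-3940 (its final line subtracts the hole's starting offset and is not shown above because it does not reference the inode) extends a hole that reaches past EOF so that it ends on the page boundary just after i_size, roughly length = i_size + PAGE_SIZE - (i_size & (PAGE_SIZE - 1)) - offset. As a worked example with 4096-byte pages, i_size == 10000 and offset == 4096: i_size & (PAGE_SIZE - 1) == 1808, so length becomes 10000 + 4096 - 1808 - 4096 == 8192 and the punched range covers bytes 4096..12287, up to the end of the page that contains EOF.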
3955 * Attach jinode to inode for jbd2 if we do any zeroing of
3958 ret = ext4_inode_attach_jinode(inode);
3965 inode_dio_wait(inode);
3977 ret = ext4_break_layouts(inode);
3986 ret = ext4_update_disksize_before_punch(inode, offset, length);
3989 truncate_pagecache_range(inode, first_block_offset,
3993 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3994 credits = ext4_writepage_trans_blocks(inode);
3996 credits = ext4_blocks_for_truncate(inode);
3997 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4004 ret = ext4_zero_partial_blocks(handle, inode, offset,
4016 down_write(&EXT4_I(inode)->i_data_sem);
4017 ext4_discard_preallocations(inode, 0);
4019 ext4_es_remove_extent(inode, first_block,
4022 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4023 ret = ext4_ext_remove_space(inode, first_block,
4026 ret = ext4_ind_remove_space(handle, inode, first_block,
4029 up_write(&EXT4_I(inode)->i_data_sem);
4031 ext4_fc_track_range(handle, inode, first_block, stop_block);
4032 if (IS_SYNC(inode))
4035 inode->i_mtime = inode_set_ctime_current(inode);
4036 ret2 = ext4_mark_inode_dirty(handle, inode);
4040 ext4_update_inode_fsync_trans(handle, inode, 1);
4046 inode_unlock(inode);
4050 int ext4_inode_attach_jinode(struct inode *inode)
4052 struct ext4_inode_info *ei = EXT4_I(inode);
4055 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4059 spin_lock(&inode->i_lock);
4062 spin_unlock(&inode->i_lock);
4066 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4069 spin_unlock(&inode->i_lock);
4080 * simultaneously on behalf of the same inode.
4093 * truncate against the orphan inode list.
4095 * The committed inode has the new, desired i_size (which is the same as
4097 * that this inode's truncate did not complete and it will again call
4100 * that's fine - as long as they are linked from the inode, the post-crash
4103 int ext4_truncate(struct inode *inode)
4105 struct ext4_inode_info *ei = EXT4_I(inode);
4109 struct address_space *mapping = inode->i_mapping;
4112 * There is a possibility that we're either freeing the inode
4113 * or it's a completely new inode. In those cases we might not
4116 if (!(inode->i_state & (I_NEW|I_FREEING)))
4117 WARN_ON(!inode_is_locked(inode));
4118 trace_ext4_truncate_enter(inode);
4120 if (!ext4_can_truncate(inode))
4123 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4124 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4126 if (ext4_has_inline_data(inode)) {
4129 err = ext4_inline_data_truncate(inode, &has_inline);
4135 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4136 err = ext4_inode_attach_jinode(inode);
4141 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4142 credits = ext4_writepage_trans_blocks(inode);
4144 credits = ext4_blocks_for_truncate(inode);
4146 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4152 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4153 ext4_block_truncate_page(handle, mapping, inode->i_size);
4156 * We add the inode to the orphan list, so that if this
4159 * marks the inode dirty, to catch the new size.
4164 err = ext4_orphan_add(handle, inode);
4168 down_write(&EXT4_I(inode)->i_data_sem);
4170 ext4_discard_preallocations(inode, 0);
4172 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4173 err = ext4_ext_truncate(handle, inode);
4175 ext4_ind_truncate(handle, inode);
4181 if (IS_SYNC(inode))
4192 if (inode->i_nlink)
4193 ext4_orphan_del(handle, inode);
4195 inode->i_mtime = inode_set_ctime_current(inode);
4196 err2 = ext4_mark_inode_dirty(handle, inode);
4202 trace_ext4_truncate_exit(inode);
4206 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4208 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4209 return inode_peek_iversion_raw(inode);
4211 return inode_peek_iversion(inode);
4217 struct inode *inode = &(ei->vfs_inode);
4218 u64 i_blocks = READ_ONCE(inode->i_blocks);
4219 struct super_block *sb = inode->i_sb;
4228 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4247 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4249 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4251 i_blocks = i_blocks >> (inode->i_blkbits - 9);
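The shift at source line 4251, together with its inverse at source line 4613 later in this listing (return i_blocks << (inode->i_blkbits - 9)), switches the unit in which the block count is stored. i_blocks is normally kept in 512-byte sectors; when the count is too large for the on-disk field, the EXT4_INODE_HUGE_FILE flag is set and the value is stored in filesystem-block units instead. With 4096-byte blocks, i_blkbits - 9 == 3, so block units let the same field describe eight times as many bytes, and reading the inode back simply reverses the shift.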
4258 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4260 struct ext4_inode_info *ei = EXT4_I(inode);
4269 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4270 i_uid = i_uid_read(inode);
4271 i_gid = i_gid_read(inode);
4273 if (!(test_opt(inode->i_sb, NO_UID32))) {
4296 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4298 EXT4_INODE_SET_CTIME(inode, raw_inode);
4299 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4300 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4305 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4311 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4312 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4313 if (old_valid_dev(inode->i_rdev)) {
4315 cpu_to_le32(old_encode_dev(inode->i_rdev));
4320 cpu_to_le32(new_encode_dev(inode->i_rdev));
4323 } else if (!ext4_has_inline_data(inode)) {
4328 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4329 u64 ivers = ext4_inode_peek_iversion(inode);
4342 !ext4_has_feature_project(inode->i_sb))
4345 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4349 ext4_inode_csum_set(inode, raw_inode, ei);
4354 * ext4_get_inode_loc returns with an extra refcount against the inode's
4355 * underlying buffer_head on success. If we pass 'inode' and it does not
4356 * have in-inode xattr, we have all inode data in memory that is needed
4357 * to recreate the on-disk version of this inode.
4360 struct inode *inode, struct ext4_iloc *iloc,
4380 * Figure out the offset within the block group inode table
4390 ext4_error(sb, "Invalid inode table block %llu in "
4410 * If we have all information of the inode in memory and this
4411 * is the only valid inode in the block, we need not read the
4414 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4420 /* Is the inode bitmap in cache? */
4426 * If the inode bitmap isn't in cache then the
4447 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4448 ext4_fill_raw_inode(inode, raw_inode);
4458 * blocks from the inode table.
4483 * There are other valid inodes in the buffer, this inode
4484 * has in-inode xattrs, or we don't have this inode in memory.
4503 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4509 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4513 ext4_error_inode_block(inode, err_blk, EIO,
4519 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4524 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4528 ext4_error_inode_block(inode, err_blk, EIO,
4541 static bool ext4_should_enable_dax(struct inode *inode)
4543 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4545 if (test_opt2(inode->i_sb, DAX_NEVER))
4547 if (!S_ISREG(inode->i_mode))
4549 if (ext4_should_journal_data(inode))
4551 if (ext4_has_inline_data(inode))
4553 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4555 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4559 if (test_opt(inode->i_sb, DAX_ALWAYS))
4562 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4565 void ext4_set_inode_flags(struct inode *inode, bool init)
4567 unsigned int flags = EXT4_I(inode)->i_flags;
4570 WARN_ON_ONCE(IS_DAX(inode) && init);
4585 new_fl |= (inode->i_flags & S_DAX);
4586 if (init && ext4_should_enable_dax(inode))
4595 inode_set_flags(inode, new_fl,
4604 struct inode *inode = &(ei->vfs_inode);
4605 struct super_block *sb = inode->i_sb;
4611 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4613 return i_blocks << (inode->i_blkbits - 9);
4622 static inline int ext4_iget_extra_inode(struct inode *inode,
4629 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4633 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4634 err = ext4_find_inline_data_nolock(inode);
4635 if (!err && ext4_has_inline_data(inode))
4636 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4639 EXT4_I(inode)->i_inline_off = 0;
4643 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4645 if (!ext4_has_feature_project(inode->i_sb))
4647 *projid = EXT4_I(inode)->i_projid;
4653 * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag
4656 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4658 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4659 inode_set_iversion_raw(inode, val);
4661 inode_set_iversion_queried(inode, val);
4664 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4668 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4670 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4671 EXT4_I(inode)->i_file_acl)
4674 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4677 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4678 return "unexpected bad inode w/o EXT4_IGET_BAD";
4682 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4690 struct inode *inode;
4711 "inode #%lu: comm %s: iget: illegal inode #",
4716 inode = iget_locked(sb, ino);
4717 if (!inode)
4719 if (!(inode->i_state & I_NEW)) {
4720 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4721 ext4_error_inode(inode, function, line, 0, err_str);
4722 iput(inode);
4725 return inode;
4728 ei = EXT4_I(inode);
4731 ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4742 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4745 EXT4_INODE_SIZE(inode->i_sb) ||
4747 ext4_error_inode(inode, function, line, 0,
4749 "(inode size %u)",
4751 EXT4_INODE_SIZE(inode->i_sb));
4758 /* Precompute checksum seed for inode metadata */
4760 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4762 __le32 inum = cpu_to_le32(inode->i_ino);
4770 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4773 ext4_error_inode_err(inode, function, line, 0,
4779 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4789 if (!(test_opt(inode->i_sb, NO_UID32))) {
4793 i_uid_write(inode, i_uid);
4794 i_gid_write(inode, i_gid);
4796 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4802 /* We now have enough fields to check if the inode was active or not.
4807 if (inode->i_nlink == 0) {
4808 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4809 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4811 /* this inode is deleted or unallocated */
4813 ext4_error_inode(inode, function, line, 0,
4814 "iget: special inode unallocated");
4828 ext4_set_inode_flags(inode, true);
4829 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4834 inode->i_size = ext4_isize(sb, raw_inode);
4835 if ((size = i_size_read(inode)) < 0) {
4836 ext4_error_inode(inode, function, line, 0,
4847 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4848 ext4_error_inode(inode, function, line, 0,
4853 ei->i_disksize = inode->i_size;
4857 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4861 * NOTE! The in-memory inode i_data array is in little-endian order
4872 * as we cannot be sure that the inode or some of its metadata isn't
4873 * part of the transaction - the inode could have been reclaimed and
4894 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4901 ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4907 EXT4_INODE_GET_CTIME(inode, raw_inode);
4908 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4909 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4912 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4915 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4920 ext4_inode_set_iversion_queried(inode, ivers);
4925 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4926 ext4_error_inode(inode, function, line, 0,
4931 } else if (!ext4_has_inline_data(inode)) {
4932 /* validate the block references in the inode */
4934 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4935 (S_ISLNK(inode->i_mode) &&
4936 !ext4_inode_is_fast_symlink(inode)))) {
4937 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4938 ret = ext4_ext_check_inode(inode);
4940 ret = ext4_ind_check_inode(inode);
4946 if (S_ISREG(inode->i_mode)) {
4947 inode->i_op = &ext4_file_inode_operations;
4948 inode->i_fop = &ext4_file_operations;
4949 ext4_set_aops(inode);
4950 } else if (S_ISDIR(inode->i_mode)) {
4951 inode->i_op = &ext4_dir_inode_operations;
4952 inode->i_fop = &ext4_dir_operations;
4953 } else if (S_ISLNK(inode->i_mode)) {
4955 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4956 ext4_error_inode(inode, function, line, 0,
4962 if (IS_ENCRYPTED(inode)) {
4963 inode->i_op = &ext4_encrypted_symlink_inode_operations;
4964 } else if (ext4_inode_is_fast_symlink(inode)) {
4965 inode->i_link = (char *)ei->i_data;
4966 inode->i_op = &ext4_fast_symlink_inode_operations;
4967 nd_terminate_link(ei->i_data, inode->i_size,
4970 inode->i_op = &ext4_symlink_inode_operations;
4972 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4973 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4974 inode->i_op = &ext4_special_inode_operations;
4976 init_special_inode(inode, inode->i_mode,
4979 init_special_inode(inode, inode->i_mode,
4982 make_bad_inode(inode);
4985 ext4_error_inode(inode, function, line, 0,
4986 "iget: bogus i_mode (%o)", inode->i_mode);
4989 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
4990 ext4_error_inode(inode, function, line, 0,
4995 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4996 ext4_error_inode(inode, function, line, 0, err_str);
5002 unlock_new_inode(inode);
5003 return inode;
5007 iget_failed(inode);
5016 struct inode *inode;
5018 inode = find_inode_by_ino_rcu(sb, ino);
5019 if (!inode)
5022 if (!inode_is_dirtytime_only(inode))
5025 spin_lock(&inode->i_lock);
5026 if (inode_is_dirtytime_only(inode)) {
5027 struct ext4_inode_info *ei = EXT4_I(inode);
5029 inode->i_state &= ~I_DIRTY_TIME;
5030 spin_unlock(&inode->i_lock);
5033 EXT4_INODE_SET_CTIME(inode, raw_inode);
5034 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5035 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5036 ext4_inode_csum_set(inode, raw_inode, ei);
5038 trace_ext4_other_inode_update_time(inode, orig_ino);
5041 spin_unlock(&inode->i_lock);
5046 * the same inode table block.
5056 * Calculate the first inode in the inode table block. Inode
5057 * numbers are one-based. That is, the first inode in a block
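As a worked example of that comment (the statement itself does not appear in this listing, so the formula here is the usual calculation rather than a quote): with inodes_per_block a power of two, the first inode number in the table block holding inode ino is ((ino - 1) & ~(inodes_per_block - 1)) + 1. With 16 inodes per block, inode 35 lives in the block holding inodes 33..48, and ((35 - 1) & ~15) + 1 == 33.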
5072 * Post the struct inode info into an on-disk inode location in the
5074 * buffer_head in the inode location struct.
5079 struct inode *inode,
5083 struct ext4_inode_info *ei = EXT4_I(inode);
5085 struct super_block *sb = inode->i_sb;
5092 * For fields not tracked in the in-memory inode, initialise them
5095 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5096 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5098 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5106 err = ext4_fill_raw_inode(inode, raw_inode);
5109 EXT4_ERROR_INODE(inode, "corrupted inode contents");
5113 if (inode->i_sb->s_flags & SB_LAZYTIME)
5114 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5121 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5137 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5139 ext4_std_error(inode->i_sb, err);
5161 * because the inode has been copied into a raw inode buffer in
5165 * Note that we are absolutely dependent upon all inode dirtiers doing the
5171 * mark_inode_dirty(inode)
5173 * inode->i_size = expr;
5176 * and the new i_size will be lost. Plus the inode will no longer be on the
5177 * superblock's dirty inode list.
5179 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5186 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5189 if (EXT4_SB(inode->i_sb)->s_journal) {
5204 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5205 EXT4_I(inode)->i_sync_tid);
5209 err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5214 * it here separately for each inode.
5219 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5220 "IO error syncing inode");
5233 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5236 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5240 offset = inode->i_size & (PAGE_SIZE - 1);
5250 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5253 struct folio *folio = filemap_lock_folio(inode->i_mapping,
5254 inode->i_size >> PAGE_SHIFT);
5280 * shrinks i_size, we put the inode on the orphan list and modify
5284 * disk. (On recovery, the inode will get truncated and the blocks will
5289 * and inode is still attached to the committing transaction, we must
5295 * Called with inode->i_rwsem down.
5300 struct inode *inode = d_inode(dentry);
5306 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5309 if (unlikely(IS_IMMUTABLE(inode)))
5312 if (unlikely(IS_APPEND(inode) &&
5329 if (is_quota_modification(idmap, inode, attr)) {
5330 error = dquot_initialize(inode);
5335 if (i_uid_needs_update(idmap, attr, inode) ||
5336 i_gid_needs_update(idmap, attr, inode)) {
5339 /* (user+group)*(old+new) structure, inode write (sb,
5340 * inode block, ? - but truncate inode update has it) */
5341 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5342 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5343 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5350 * counts xattr inode references.
5352 down_read(&EXT4_I(inode)->xattr_sem);
5353 error = dquot_transfer(idmap, inode, attr);
5354 up_read(&EXT4_I(inode)->xattr_sem);
5360 /* Update corresponding info in inode so that everything is in
5362 i_uid_update(idmap, attr, inode);
5363 i_gid_update(idmap, attr, inode);
5364 error = ext4_mark_inode_dirty(handle, inode);
5373 loff_t oldsize = inode->i_size;
5375 int shrink = (attr->ia_size < inode->i_size);
5377 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5378 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5384 if (!S_ISREG(inode->i_mode)) {
5388 if (attr->ia_size == inode->i_size)
5392 if (ext4_should_order_data(inode)) {
5393 error = ext4_begin_ordered_truncate(inode,
5399 * Blocks are going to be removed from the inode. Wait
5402 inode_dio_wait(inode);
5405 filemap_invalidate_lock(inode->i_mapping);
5407 rc = ext4_break_layouts(inode);
5409 filemap_invalidate_unlock(inode->i_mapping);
5413 if (attr->ia_size != inode->i_size) {
5414 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5420 error = ext4_orphan_add(handle, inode);
5428 inode->i_mtime = inode_set_ctime_current(inode);
5431 ext4_fc_track_range(handle, inode,
5433 inode->i_sb->s_blocksize_bits,
5437 handle, inode,
5439 inode->i_sb->s_blocksize_bits,
5441 inode->i_sb->s_blocksize_bits);
5443 down_write(&EXT4_I(inode)->i_data_sem);
5444 old_disksize = EXT4_I(inode)->i_disksize;
5445 EXT4_I(inode)->i_disksize = attr->ia_size;
5446 rc = ext4_mark_inode_dirty(handle, inode);
5455 i_size_write(inode, attr->ia_size);
5457 EXT4_I(inode)->i_disksize = old_disksize;
5458 up_write(&EXT4_I(inode)->i_data_sem);
5463 pagecache_isize_extended(inode, oldsize,
5464 inode->i_size);
5465 } else if (ext4_should_journal_data(inode)) {
5466 ext4_wait_for_tail_page_commit(inode);
5474 truncate_pagecache(inode, inode->i_size);
5480 rc = ext4_truncate(inode);
5485 filemap_invalidate_unlock(inode->i_mapping);
5490 inode_inc_iversion(inode);
5491 setattr_copy(idmap, inode, attr);
5492 mark_inode_dirty(inode);
5499 if (orphan && inode->i_nlink)
5500 ext4_orphan_del(NULL, inode);
5503 rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5507 ext4_std_error(inode->i_sb, error);
5513 u32 ext4_dio_alignment(struct inode *inode)
5515 if (fsverity_active(inode))
5517 if (ext4_should_journal_data(inode))
5519 if (ext4_has_inline_data(inode))
5521 if (IS_ENCRYPTED(inode)) {
5522 if (!fscrypt_dio_supported(inode))
5524 return i_blocksize(inode);
5532 struct inode *inode = d_inode(path->dentry);
5534 struct ext4_inode_info *ei = EXT4_I(inode);
5549 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5550 u32 dio_align = ext4_dio_alignment(inode);
5554 struct block_device *bdev = inode->i_sb->s_bdev;
5586 generic_fillattr(idmap, request_mask, inode, stat);
5594 struct inode *inode = d_inode(path->dentry);
5600 * If there is inline data in the inode, the inode will normally not
5605 if (unlikely(ext4_has_inline_data(inode)))
5618 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5619 EXT4_I(inode)->i_reserved_data_blocks);
5620 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5624 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5627 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5628 return ext4_ind_trans_blocks(inode, lblocks);
5629 return ext4_ext_index_trans_blocks(inode, pextents);
5641 * Also account for superblock, inode, quota and xattr blocks
5643 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5646 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5655 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5667 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5668 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5673 /* Blocks for super block, inode, quota and xattr blocks */
5674 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5689 int ext4_writepage_trans_blocks(struct inode *inode)
5691 int bpp = ext4_journal_blocks_per_page(inode);
5694 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5697 if (ext4_should_journal_data(inode))
5711 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5713 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5721 struct inode *inode, struct ext4_iloc *iloc)
5725 if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5729 ext4_fc_track_inode(handle, inode);
5735 err = ext4_do_update_inode(handle, inode, iloc);
5746 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5751 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5754 err = ext4_get_inode_loc(inode, iloc);
5757 err = ext4_journal_get_write_access(handle, inode->i_sb,
5764 ext4_std_error(inode->i_sb, err);
5768 static int __ext4_expand_extra_isize(struct inode *inode,
5775 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5776 struct ext4_inode_info *ei = EXT4_I(inode);
5782 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5784 EXT4_INODE_SIZE(inode->i_sb));
5794 header = IHDR(inode, raw_inode);
5797 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5800 EXT4_I(inode)->i_extra_isize, 0,
5801 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5802 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5811 if (dquot_initialize_needed(inode))
5815 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5828 * Expand an inode by new_extra_isize bytes.
5831 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5839 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5844 * the inode. When journaled, we first need to obtain extra
5847 * only result in a minor loss of functionality for that inode.
5852 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5855 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5858 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5860 ext4_write_unlock_xattr(inode, &no_expand);
5865 int ext4_expand_extra_isize(struct inode *inode,
5873 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5878 handle = ext4_journal_start(inode, EXT4_HT_INODE,
5879 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5886 ext4_write_lock_xattr(inode, &no_expand);
5889 error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5896 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5899 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5904 ext4_write_unlock_xattr(inode, &no_expand);
5910 * What we do here is to mark the in-core inode as clean with respect to inode
5912 * This means that the in-core inode may be reaped by prune_icache
5918 * inode out, but prune_icache isn't a user-visible syncing function.
5922 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5926 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5930 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5931 err = ext4_reserve_inode_write(handle, inode, &iloc);
5935 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5936 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5939 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5942 ext4_error_inode_err(inode, func, line, 0, err,
5952 * to include the updated inode in the current transaction.
5954 * Also, dquot_alloc_block() will always dirty the inode when blocks
5957 * If the inode is marked synchronous, we don't honour that here - doing
5961 void ext4_dirty_inode(struct inode *inode, int flags)
5965 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5968 ext4_mark_inode_dirty(handle, inode);
5972 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5989 journal = EXT4_JOURNAL(inode);
5996 inode_dio_wait(inode);
5999 * Before flushing the journal and switching inode's aops, we have
6000 * to flush all dirty data the inode has. There can be outstanding
6007 filemap_invalidate_lock(inode->i_mapping);
6008 err = filemap_write_and_wait(inode->i_mapping);
6010 filemap_invalidate_unlock(inode->i_mapping);
6015 alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6023 * the inode's in-core data-journaling state flag now.
6027 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6032 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6035 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6037 ext4_set_aops(inode);
6040 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6043 filemap_invalidate_unlock(inode->i_mapping);
6045 /* Finally we can mark the inode as dirty. */
6047 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6051 ext4_fc_mark_ineligible(inode->i_sb,
6053 err = ext4_mark_inode_dirty(handle, inode);
6056 ext4_std_error(inode->i_sb, err);
6061 static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6076 struct inode *inode = file_inode(file);
6077 struct address_space *mapping = inode->i_mapping;
6082 if (unlikely(IS_IMMUTABLE(inode)))
6085 sb_start_pagefault(inode->i_sb);
6090 err = ext4_convert_inline_data(inode);
6100 if (ext4_should_journal_data(inode))
6104 if (test_opt(inode->i_sb, DELALLOC) &&
6105 !ext4_nonda_switch(inode->i_sb)) {
6110 ext4_should_retry_alloc(inode->i_sb, &retries));
6115 size = i_size_read(inode);
6131 * inode to the transaction's list to writeprotect pages on commit.
6134 if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6145 if (ext4_should_dioread_nolock(inode))
6150 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6151 ext4_writepage_trans_blocks(inode));
6161 if (!ext4_should_journal_data(inode)) {
6165 size = i_size_read(inode);
6186 if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6192 sb_end_pagefault(inode->i_sb);