Lines Matching defs:inode
3 * linux/fs/ext4/inode.c
12 * linux/fs/minix/inode.c
51 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
54 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
66 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
77 EXT4_INODE_SIZE(inode->i_sb) - offset);
83 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
88 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
90 !ext4_has_metadata_csum(inode->i_sb))
94 calculated = ext4_inode_csum(inode, raw, ei);
95 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
104 void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
109 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
111 !ext4_has_metadata_csum(inode->i_sb))
114 csum = ext4_inode_csum(inode, raw, ei);
116 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
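
The three checksum helpers listed above (ext4_inode_csum, ext4_inode_csum_verify, ext4_inode_csum_set) store a 32-bit checksum split into a low 16-bit field that is always present and a high 16-bit field that only exists when the on-disk inode is larger than EXT4_GOOD_OLD_INODE_SIZE. A minimal user-space sketch of that split-and-verify scheme, with made-up field names rather than the real struct ext4_inode layout:

    /* Assumed layout: two 16-bit fields standing in for the real csum_lo/csum_hi. */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_raw_inode {
        uint16_t csum_lo;   /* always stored */
        uint16_t csum_hi;   /* only stored for inodes larger than 128 bytes */
    };

    static void csum_set(struct fake_raw_inode *raw, uint32_t csum, int has_hi)
    {
        raw->csum_lo = (uint16_t)(csum & 0xffff);
        if (has_hi)
            raw->csum_hi = (uint16_t)(csum >> 16);
    }

    static int csum_verify(const struct fake_raw_inode *raw, uint32_t calculated, int has_hi)
    {
        uint32_t provided = raw->csum_lo;

        if (has_hi)
            provided |= (uint32_t)raw->csum_hi << 16;
        else
            calculated &= 0xffff;   /* small inodes only keep the low half */
        return provided == calculated;
    }

    int main(void)
    {
        struct fake_raw_inode raw = { 0, 0 };
        uint32_t csum = 0xdeadbeef;

        csum_set(&raw, csum, 1);
        printf("verify: %d\n", csum_verify(&raw, csum, 1));
        return 0;
    }
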
121 static inline int ext4_begin_ordered_truncate(struct inode *inode,
124 trace_ext4_begin_ordered_truncate(inode, new_size);
131 if (!EXT4_I(inode)->jinode)
133 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
134 EXT4_I(inode)->jinode,
142 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
146 * Test whether an inode is a fast symlink.
149 int ext4_inode_is_fast_symlink(struct inode *inode)
151 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
152 int ea_blocks = EXT4_I(inode)->i_file_acl ?
153 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
155 if (ext4_has_inline_data(inode))
158 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
160 return S_ISLNK(inode->i_mode) && inode->i_size &&
161 (inode->i_size < EXT4_N_BLOCKS * 4);
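
ext4_inode_is_fast_symlink() decides whether a symlink target is stored directly in the inode's i_data: the size must fit in the 60 bytes available (EXT4_N_BLOCKS * 4) and the inode must own no data blocks beyond a possible external xattr block. A simplified stand-alone sketch of that predicate; the struct and constants are stand-ins, not kernel definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define FAKE_N_BLOCKS 15    /* EXT4_N_BLOCKS */

    struct fake_inode {
        bool is_symlink;
        unsigned long size;        /* i_size: length of the target string */
        unsigned long blocks;      /* i_blocks, in 512-byte sectors */
        unsigned long ea_sectors;  /* sectors taken by an external xattr block */
    };

    static bool is_fast_symlink(const struct fake_inode *ino)
    {
        if (!ino->is_symlink)
            return false;
        /* target fits inside i_data and no real data blocks are allocated */
        return ino->size && ino->size < FAKE_N_BLOCKS * 4 &&
               ino->blocks - ino->ea_sectors == 0;
    }

    int main(void)
    {
        struct fake_inode fast = { true, 20, 0, 0 };
        struct fake_inode slow = { true, 200, 8, 0 };

        printf("fast? %d  slow? %d\n", is_fast_symlink(&fast), is_fast_symlink(&slow));
        return 0;
    }
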
167 void ext4_evict_inode(struct inode *inode)
172 * Credits for final inode cleanup and freeing:
173 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
174 * (xattr block freeing), bitmap, group descriptor (inode freeing)
180 trace_ext4_evict_inode(inode);
182 if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
183 ext4_evict_ea_inode(inode);
184 if (inode->i_nlink) {
188 * ready for reaping the inode might still have some pages to
193 * buffers, we would have no way to find them after the inode
198 * containing inode's data.
203 if (inode->i_ino != EXT4_JOURNAL_INO &&
204 ext4_should_journal_data(inode) &&
205 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
206 inode->i_data.nrpages) {
207 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
208 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
211 filemap_write_and_wait(&inode->i_data);
213 truncate_inode_pages_final(&inode->i_data);
218 if (is_bad_inode(inode))
220 dquot_initialize(inode);
222 if (ext4_should_order_data(inode))
223 ext4_begin_ordered_truncate(inode, 0);
224 truncate_inode_pages_final(&inode->i_data);
228 * dirtied the inode. And for inodes with dioread_nolock, unwritten
230 * the inode. Flush worker is ignoring it because of I_FREEING flag but
231 * we still need to remove the inode from the writeback lists.
233 if (!list_empty_careful(&inode->i_io_list))
234 inode_io_list_del(inode);
243 sb_start_intwrite(inode->i_sb);
247 if (!IS_NOQUOTA(inode))
248 extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
251 * Block bitmap, group descriptor, and inode are accounted in both
254 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
255 ext4_blocks_for_truncate(inode) + extra_credits - 3);
257 ext4_std_error(inode->i_sb, PTR_ERR(handle));
263 ext4_orphan_del(NULL, inode);
265 sb_end_intwrite(inode->i_sb);
269 if (IS_SYNC(inode))
273 * Set inode->i_size to 0 before calling ext4_truncate(). We need
279 if (ext4_inode_is_fast_symlink(inode))
280 memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
281 inode->i_size = 0;
282 err = ext4_mark_inode_dirty(handle, inode);
284 ext4_warning(inode->i_sb,
285 "couldn't mark inode dirty (err %d)", err);
288 if (inode->i_blocks) {
289 err = ext4_truncate(inode);
291 ext4_error_err(inode->i_sb, -err,
292 "couldn't truncate inode %lu (err %d)",
293 inode->i_ino, err);
299 err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
302 ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
305 ext4_orphan_del(NULL, inode);
307 sb_end_intwrite(inode->i_sb);
320 ext4_orphan_del(handle, inode);
321 EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();
327 * having errors), but we can't free the inode if the mark_dirty
330 if (ext4_mark_inode_dirty(handle, inode))
331 /* If that failed, just do the required in-core inode clear. */
332 ext4_clear_inode(inode);
334 ext4_free_inode(handle, inode);
337 sb_end_intwrite(inode->i_sb);
342 * Catch cases where something else accidentally dirties the evicting inode,
343 * which could cause inode use-after-free issues later.
345 WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
347 if (!list_empty(&EXT4_I(inode)->i_fc_list))
348 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
349 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
353 qsize_t *ext4_get_reserved_space(struct inode *inode)
355 return &EXT4_I(inode)->i_reserved_quota;
363 void ext4_da_update_reserve_space(struct inode *inode,
366 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
367 struct ext4_inode_info *ei = EXT4_I(inode);
370 trace_ext4_da_update_reserve_space(inode, used, quota_claim);
372 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
374 __func__, inode->i_ino, used,
380 /* Update per-inode reservations */
384 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
388 dquot_claim_block(inode, EXT4_C2B(sbi, used));
395 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
400 * there aren't any writers on the inode, we can discard the
401 * inode's preallocations.
404 !inode_is_open_for_write(inode))
405 ext4_discard_preallocations(inode, 0);
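
ext4_da_update_reserve_space() moves clusters from "reserved by delayed allocation" to "actually allocated": the per-inode reservation shrinks by the number of clusters used, and the quota either claims them (quota_claim) or simply drops the reservation. A rough user-space model of that bookkeeping; the field names and the claim/release split are illustrative only:

    #include <stdio.h>

    struct fake_resv {
        long reserved_clusters;   /* i_reserved_data_blocks */
        long quota_reserved;
        long quota_claimed;
    };

    static void update_reserve_space(struct fake_resv *r, long used, int quota_claim)
    {
        if (used > r->reserved_clusters)   /* the warning path in the listing */
            used = r->reserved_clusters;

        r->reserved_clusters -= used;      /* per-inode reservation shrinks */
        r->quota_reserved -= used;
        if (quota_claim)
            r->quota_claimed += used;      /* roughly what dquot_claim_block() does */
        /* otherwise the quota reservation is simply released */
    }

    int main(void)
    {
        struct fake_resv r = { 8, 8, 0 };

        update_reserve_space(&r, 3, 1);
        printf("reserved=%ld claimed=%ld\n", r.reserved_clusters, r.quota_claimed);
        return 0;
    }
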
408 static int __check_block_validity(struct inode *inode, const char *func,
412 if (ext4_has_feature_journal(inode->i_sb) &&
413 (inode->i_ino ==
414 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
416 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
417 ext4_error_inode(inode, func, line, map->m_pblk,
426 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
431 if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
432 return fscrypt_zeroout_range(inode, lblk, pblk, len);
434 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
441 #define check_block_validity(inode, map) \
442 __check_block_validity((inode), __func__, __LINE__, (map))
446 struct inode *inode,
461 down_read(&EXT4_I(inode)->i_data_sem);
462 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
463 retval = ext4_ext_map_blocks(handle, inode, map, 0);
465 retval = ext4_ind_map_blocks(handle, inode, map, 0);
467 up_read((&EXT4_I(inode)->i_data_sem));
476 printk("ES cache assertion failed for inode: %lu "
479 inode->i_ino, es_map->m_lblk, es_map->m_len,
509 int ext4_map_blocks(handle_t *handle, struct inode *inode,
522 ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
536 if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
537 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
558 ext4_map_blocks_es_recheck(handle, inode, map,
568 down_read(&EXT4_I(inode)->i_data_sem);
569 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
570 retval = ext4_ext_map_blocks(handle, inode, map, 0);
572 retval = ext4_ind_map_blocks(handle, inode, map, 0);
578 ext4_warning(inode->i_sb,
579 "ES len assertion failed for inode "
581 inode->i_ino, retval, map->m_len);
589 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
592 ret = ext4_es_insert_extent(inode, map->m_lblk,
597 up_read((&EXT4_I(inode)->i_data_sem));
601 ret = check_block_validity(inode, map);
638 down_write(&EXT4_I(inode)->i_data_sem);
642 * could have changed the inode type in between
644 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
645 retval = ext4_ext_map_blocks(handle, inode, map, flags);
647 retval = ext4_ind_map_blocks(handle, inode, map, flags);
655 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
663 ext4_warning(inode->i_sb,
664 "ES len assertion failed for inode "
666 inode->i_ino, retval, map->m_len);
680 ret = ext4_issue_zeroout(inode, map->m_lblk,
693 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
701 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
704 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
713 up_write((&EXT4_I(inode)->i_data_sem));
715 ret = check_block_validity(inode, map);
727 !ext4_is_quota_file(inode) &&
728 ext4_should_order_data(inode)) {
730 (loff_t)map->m_lblk << inode->i_blkbits;
731 loff_t length = (loff_t)map->m_len << inode->i_blkbits;
734 ret = ext4_jbd2_inode_add_wait(handle, inode,
737 ret = ext4_jbd2_inode_add_write(handle, inode,
745 ext4_fc_track_range(handle, inode, map->m_lblk,
748 ext_debug(inode, "failed with err %d\n", retval);
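
ext4_map_blocks() follows a lookup-then-allocate pattern: consult the extent status cache first, do the authoritative lookup with i_data_sem held for read, and only take the semaphore for write when new blocks must be allocated. A generic user-space illustration of that shape using a pthread rwlock; it is not the kernel's locking rules, just the pattern:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int cached_block = -1;          /* stand-in for the extent status cache */
    static int mapped_block = -1;          /* stand-in for the on-disk mapping */
    static int next_free_block = 100;

    static int map_block(int want_alloc)
    {
        int blk;

        if (cached_block >= 0)             /* fast path: cache hit */
            return cached_block;

        pthread_rwlock_rdlock(&data_sem);  /* read-only lookup */
        blk = mapped_block;
        pthread_rwlock_unlock(&data_sem);
        if (blk >= 0 || !want_alloc)
            return blk;

        pthread_rwlock_wrlock(&data_sem);  /* allocation path */
        if (mapped_block < 0)              /* re-check: someone may have raced us */
            mapped_block = next_free_block++;
        blk = mapped_block;
        cached_block = blk;                /* populate the cache */
        pthread_rwlock_unlock(&data_sem);
        return blk;
    }

    int main(void)
    {
        printf("lookup: %d\n", map_block(0));
        printf("create: %d\n", map_block(1));
        printf("lookup: %d\n", map_block(0));
        return 0;
    }
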
780 static int _ext4_get_block(struct inode *inode, sector_t iblock,
786 if (ext4_has_inline_data(inode))
790 map.m_len = bh->b_size >> inode->i_blkbits;
792 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
795 map_bh(bh, inode->i_sb, map.m_pblk);
797 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
801 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
806 int ext4_get_block(struct inode *inode, sector_t iblock,
809 return _ext4_get_block(inode, iblock, bh,
818 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
821 ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
822 inode->i_ino, create);
823 return _ext4_get_block(inode, iblock, bh_result,
833 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
841 J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
846 err = ext4_map_blocks(handle, inode, &map, map_flags);
853 bh = sb_getblk(inode->i_sb, map.m_pblk);
858 J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
876 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
881 err = ext4_handle_dirty_metadata(handle, inode, bh);
892 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
898 bh = ext4_getblk(handle, inode, block, map_flags);
913 int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
919 bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
1041 struct inode *inode = page->mapping->host;
1045 unsigned blocksize = inode->i_sb->s_blocksize;
1076 err = get_block(inode, block, bh, 1);
1114 } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
1135 struct inode *inode = mapping->host;
1143 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1146 trace_ext4_write_begin(inode, pos, len, flags);
1151 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1156 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1157 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1181 create_empty_buffers(page, inode->i_sb->s_blocksize, 0);
1186 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1204 if (ext4_should_dioread_nolock(inode))
1211 if (ext4_should_dioread_nolock(inode))
1217 if (!ret && ext4_should_journal_data(inode)) {
1224 bool extended = (pos + len > inode->i_size) &&
1225 !ext4_verity_in_progress(inode);
1233 * Add inode to orphan list in case we crash before
1236 if (extended && ext4_can_truncate(inode))
1237 ext4_orphan_add(handle, inode);
1241 ext4_truncate_failed_write(inode);
1243 * If truncate failed early the inode might
1245 * make sure the inode is removed from the
1248 if (inode->i_nlink)
1249 ext4_orphan_del(NULL, inode);
1253 ext4_should_retry_alloc(inode->i_sb, &retries))
1276 * We need to pick up the new inode size which generic_commit_write gave us
1279 * ext4 never places buffers on inode->i_mapping->private_list. metadata
1288 struct inode *inode = mapping->host;
1289 loff_t old_size = inode->i_size;
1292 bool verity = ext4_verity_in_progress(inode);
1294 trace_ext4_write_end(inode, pos, len, copied);
1296 if (ext4_has_inline_data(inode) &&
1297 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
1298 return ext4_write_inline_data_end(inode, pos, len, copied, page);
1305 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1309 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1314 pagecache_isize_extended(inode, old_size, pos);
1316 * Don't mark the inode dirty under page lock. First, it unnecessarily
1322 ret = ext4_mark_inode_dirty(handle, inode);
1324 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1327 * inode->i_size. So truncate them
1329 ext4_orphan_add(handle, inode);
1335 if (pos + len > inode->i_size && !verity) {
1336 ext4_truncate_failed_write(inode);
1338 * If truncate failed early the inode might still be
1339 * on the orphan list; we need to make sure the inode
1342 if (inode->i_nlink)
1343 ext4_orphan_del(NULL, inode);
1389 struct inode *inode = mapping->host;
1390 loff_t old_size = inode->i_size;
1395 bool verity = ext4_verity_in_progress(inode);
1397 trace_ext4_journalled_write_end(inode, pos, len, copied);
1403 if (ext4_has_inline_data(inode))
1404 return ext4_write_inline_data_end(inode, pos, len, copied, page);
1420 size_changed = ext4_update_inode_size(inode, pos + copied);
1421 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1422 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1427 pagecache_isize_extended(inode, old_size, pos);
1430 ret2 = ext4_mark_inode_dirty(handle, inode);
1435 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1438 * inode->i_size. So truncate them
1440 ext4_orphan_add(handle, inode);
1445 if (pos + len > inode->i_size && !verity) {
1446 ext4_truncate_failed_write(inode);
1448 * If truncate failed early the inode might still be
1449 * on the orphan list; we need to make sure the inode
1452 if (inode->i_nlink)
1453 ext4_orphan_del(NULL, inode);
1462 static int ext4_da_reserve_space(struct inode *inode)
1464 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1465 struct ext4_inode_info *ei = EXT4_I(inode);
1473 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1480 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1484 trace_ext4_da_reserve_space(inode);
1490 void ext4_da_release_space(struct inode *inode, int to_free)
1492 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1493 struct ext4_inode_info *ei = EXT4_I(inode);
1498 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1500 trace_ext4_da_release_space(inode, to_free);
1508 ext4_warning(inode->i_sb, "ext4_da_release_space: "
1510 "data blocks", inode->i_ino, to_free,
1520 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1522 dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1530 struct inode *inode;
1553 struct inode *inode = mpd->inode;
1554 struct address_space *mapping = inode->i_mapping;
1565 start = index << (PAGE_SHIFT - inode->i_blkbits);
1566 last = end << (PAGE_SHIFT - inode->i_blkbits);
1572 down_write(&EXT4_I(inode)->i_data_sem);
1573 ext4_es_remove_extent(inode, start, last - start + 1);
1574 up_write(&EXT4_I(inode)->i_data_sem);
1599 static void ext4_print_free_blocks(struct inode *inode)
1601 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1602 struct super_block *sb = inode->i_sb;
1603 struct ext4_inode_info *ei = EXT4_I(inode);
1606 EXT4_C2B(EXT4_SB(inode->i_sb),
1632 * @inode - file containing the newly added block
1637 static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1639 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1656 ret = ext4_da_reserve_space(inode);
1661 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1662 if (!ext4_es_scan_clu(inode,
1664 ret = ext4_clu_mapped(inode,
1669 ret = ext4_da_reserve_space(inode);
1682 ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
1684 ext4_da_release_space(inode, 1);
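
ext4_insert_delayed_block() only charges a new cluster reservation when the block's cluster contains neither a delayed block nor an already-mapped block; later blocks in the same cluster ride on the existing reservation. A toy sketch of that decision with hypothetical helpers, not the extent-status-tree API:

    #include <stdbool.h>
    #include <stdio.h>

    #define CLUSTER_BITS 4   /* 16 blocks per cluster, illustrative */

    static bool cluster_has_delayed(unsigned long cluster);
    static bool cluster_has_mapped(unsigned long cluster);

    static long reserved_clusters;

    static void insert_delayed_block(unsigned long lblk)
    {
        unsigned long cluster = lblk >> CLUSTER_BITS;

        /* only the first delayed, unmapped block in a cluster costs a reservation */
        if (!cluster_has_delayed(cluster) && !cluster_has_mapped(cluster))
            reserved_clusters++;
        /* ...the block would then be recorded as delayed... */
    }

    /* toy backing state so the sketch actually runs */
    static bool delayed[8], mapped[8];
    static bool cluster_has_delayed(unsigned long c) { return delayed[c]; }
    static bool cluster_has_mapped(unsigned long c)  { return mapped[c]; }

    int main(void)
    {
        insert_delayed_block(3);    /* first block in cluster 0: reserves */
        delayed[0] = true;
        insert_delayed_block(5);    /* same cluster: no new reservation */
        printf("reserved_clusters=%ld\n", reserved_clusters);
        return 0;
    }
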
1696 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1709 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1713 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1717 if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1720 down_read(&EXT4_I(inode)->i_data_sem);
1729 map_bh(bh, inode->i_sb, invalid_block);
1748 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1757 down_read(&EXT4_I(inode)->i_data_sem);
1758 if (ext4_has_inline_data(inode))
1760 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1761 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1763 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1774 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1780 map_bh(bh, inode->i_sb, invalid_block);
1788 ext4_warning(inode->i_sb,
1789 "ES len assertion failed for inode "
1791 inode->i_ino, retval, map->m_len);
1797 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1804 up_read((&EXT4_I(inode)->i_data_sem));
1821 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1828 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1838 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1842 map_bh(bh, inode->i_sb, map.m_pblk);
1874 struct inode *inode = mapping->host;
1878 int inline_data = ext4_has_inline_data(inode);
1885 BUG_ON(len > ext4_get_max_inline_size(inode));
1886 inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1906 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1907 ext4_writepage_trans_blocks(inode));
1925 ret = ext4_mark_inode_dirty(handle, inode);
1935 err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
1938 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1943 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1971 * need to file the inode to the transaction's list in ordered mode because if
1972 * we are writing back data added by write(), the inode is already there and if
2016 struct inode *inode = page->mapping->host;
2020 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2021 inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
2033 size = i_size_read(inode);
2035 !ext4_verity_in_progress(inode))
2042 ext4_warning_inode(inode,
2071 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2085 if (PageChecked(page) && ext4_should_journal_data(inode))
2093 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2127 size = i_size_read(mpd->inode);
2129 !ext4_verity_in_progress(mpd->inode))
2223 struct inode *inode = mpd->inode;
2225 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2226 >> inode->i_blkbits;
2228 if (ext4_verity_in_progress(inode))
2282 int blkbits = mpd->inode->i_blkbits;
2349 struct inode *inode = mpd->inode;
2350 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2364 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
2398 struct inode *inode = mpd->inode;
2403 trace_ext4_da_write_pages_extent(inode, map);
2422 dioread_nolock = ext4_should_dioread_nolock(inode);
2428 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2437 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2468 struct inode *inode = mpd->inode;
2479 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2483 struct super_block *sb = inode->i_sb;
2501 "inode %lu at logical offset %llu with"
2503 inode->i_ino,
2510 ext4_print_free_blocks(inode);
2531 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2535 down_write(&EXT4_I(inode)->i_data_sem);
2536 i_size = i_size_read(inode);
2539 if (disksize > EXT4_I(inode)->i_disksize)
2540 EXT4_I(inode)->i_disksize = disksize;
2541 up_write(&EXT4_I(inode)->i_data_sem);
2542 err2 = ext4_mark_inode_dirty(handle, inode);
2544 ext4_error_err(inode->i_sb, -err2,
2545 "Failed to mark inode %lu dirty",
2546 inode->i_ino);
2561 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2563 int bpp = ext4_journal_blocks_per_page(inode);
2565 return ext4_meta_trans_blocks(inode,
2589 struct address_space *mapping = mpd->inode->i_mapping;
2597 int blkbits = mpd->inode->i_blkbits;
2636 * longer corresponds to inode we are writing (which
2668 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
2706 struct inode *inode = mapping->host;
2712 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2716 trace_ext4_writepages(inode, wbc);
2720 * a transaction for special inodes like journal inode on last iput()
2726 if (ext4_should_journal_data(inode)) {
2742 ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
2752 if (ext4_has_inline_data(inode)) {
2753 /* Just inode will be modified... */
2754 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2759 BUG_ON(ext4_test_inode_state(inode,
2761 ext4_destroy_inline_data(handle, inode);
2765 if (ext4_should_dioread_nolock(inode)) {
2768 * the page and we may dirty the inode.
2770 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2771 PAGE_SIZE >> inode->i_blkbits);
2788 mpd.inode = inode;
2804 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2821 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2834 BUG_ON(ext4_should_journal_data(inode));
2835 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2838 handle = ext4_journal_start_with_reserve(inode,
2842 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2844 wbc->nr_to_write, inode->i_ino, ret);
2852 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2923 trace_ext4_writepages_result(inode, wbc, ret,
2934 struct inode *inode = mapping->host;
2937 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2941 trace_ext4_writepages(inode, wbc);
2944 trace_ext4_writepages_result(inode, wbc, ret,
2991 struct inode *inode = mapping->host;
2993 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2998 if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
2999 ext4_verity_in_progress(inode)) {
3005 trace_ext4_da_write_begin(inode, pos, len, flags);
3007 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3008 ret = ext4_da_write_inline_data_begin(mapping, inode,
3037 * i_size_read because we hold inode lock.
3039 if (pos + len > inode->i_size)
3040 ext4_truncate_failed_write(inode);
3043 ext4_should_retry_alloc(inode->i_sb, &retries))
3060 struct inode *inode = page->mapping->host;
3065 idx = offset >> inode->i_blkbits;
3080 struct inode *inode = mapping->host;
3089 trace_ext4_da_write_end(inode, pos, len, copied);
3092 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3093 ext4_has_inline_data(inode))
3094 return ext4_write_inline_data_end(inode, pos, len, copied, page);
3100 * Since we are holding inode lock, we are sure i_disksize <=
3111 * Note that we defer inode dirtying to generic_write_end() /
3115 if (copied && new_i_size > inode->i_size &&
3117 ext4_update_i_disksize(inode, new_i_size);
3123 * Force all delayed allocation blocks to be allocated for a given inode.
3125 int ext4_alloc_da_blocks(struct inode *inode)
3127 trace_ext4_alloc_da_blocks(inode);
3129 if (!EXT4_I(inode)->i_reserved_data_blocks)
3163 return filemap_flush(inode->i_mapping);
3182 struct inode *inode = mapping->host;
3187 inode_lock_shared(inode);
3191 if (ext4_has_inline_data(inode))
3195 test_opt(inode->i_sb, DELALLOC)) {
3204 if (EXT4_JOURNAL(inode) &&
3205 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3224 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3225 journal = EXT4_JOURNAL(inode);
3237 inode_unlock_shared(inode);
3244 struct inode *inode = page->mapping->host;
3248 if (ext4_has_inline_data(inode))
3249 ret = ext4_readpage_inline(inode, page);
3252 return ext4_mpage_readpages(inode, NULL, page);
3259 struct inode *inode = rac->mapping->host;
3262 if (ext4_has_inline_data(inode))
3265 ext4_mpage_readpages(inode, rac, NULL);
3319 static bool ext4_inode_datasync_dirty(struct inode *inode)
3321 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3325 EXT4_I(inode)->i_datasync_tid))
3327 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3328 return !list_empty(&EXT4_I(inode)->i_fc_list);
3333 if (!list_empty(&inode->i_mapping->private_list))
3335 return inode->i_state & I_DIRTY_DATASYNC;
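
ext4_inode_datasync_dirty() essentially asks whether the transaction that last touched the inode's data (i_datasync_tid) has already committed in the journal. A toy version of that comparison using plain sequence numbers as tid_t stand-ins; jbd2 does this with its own wrap-safe helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* committed if the journal's commit sequence has reached the tid (wrap-safe) */
    static bool tid_committed(unsigned int commit_seq, unsigned int tid)
    {
        return (int)(commit_seq - tid) >= 0;
    }

    int main(void)
    {
        unsigned int commit_seq = 100;

        printf("tid  99 still dirty? %d\n", !tid_committed(commit_seq, 99));
        printf("tid 101 still dirty? %d\n", !tid_committed(commit_seq, 101));
        return 0;
    }
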
3338 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3342 u8 blkbits = inode->i_blkbits;
3350 if (ext4_inode_datasync_dirty(inode) ||
3351 offset + length > i_size_read(inode))
3357 iomap->bdev = inode->i_sb->s_bdev;
3358 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3363 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3387 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3391 u8 blkbits = inode->i_blkbits;
3400 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3409 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3417 WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
3418 if (IS_DAX(inode))
3426 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3428 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3431 ret = ext4_map_blocks(handle, inode, map, m_flags);
3442 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3449 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3454 u8 blkbits = inode->i_blkbits;
3459 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3476 if (offset + length <= i_size_read(inode)) {
3477 ret = ext4_map_blocks(NULL, inode, &map, 0);
3481 ret = ext4_iomap_alloc(inode, &map, flags);
3483 ret = ext4_map_blocks(NULL, inode, &map, 0);
3489 ext4_set_iomap(inode, iomap, &map, offset, length);
3494 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3505 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3510 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3536 static bool ext4_iomap_is_delalloc(struct inode *inode,
3542 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3559 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3566 u8 blkbits = inode->i_blkbits;
3571 if (ext4_has_inline_data(inode)) {
3572 ret = ext4_inline_data_iomap(inode, iomap);
3593 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3594 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3602 ret = ext4_map_blocks(NULL, inode, &map, 0);
3606 delalloc = ext4_iomap_is_delalloc(inode, &map);
3609 ext4_set_iomap(inode, iomap, &map, offset, length);
3715 void ext4_set_aops(struct inode *inode)
3717 switch (ext4_inode_journal_mode(inode)) {
3722 inode->i_mapping->a_ops = &ext4_journalled_aops;
3727 if (IS_DAX(inode))
3728 inode->i_mapping->a_ops = &ext4_dax_aops;
3729 else if (test_opt(inode->i_sb, DELALLOC))
3730 inode->i_mapping->a_ops = &ext4_da_aops;
3732 inode->i_mapping->a_ops = &ext4_aops;
3742 struct inode *inode = mapping->host;
3752 blocksize = inode->i_sb->s_blocksize;
3754 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3773 ext4_get_block(inode, iblock, bh, 0);
3789 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3791 BUG_ON(!fscrypt_has_encryption_key(inode));
3800 if (ext4_should_journal_data(inode)) {
3809 if (ext4_should_journal_data(inode)) {
3810 err = ext4_handle_dirty_metadata(handle, inode, bh);
3814 if (ext4_should_order_data(inode))
3815 err = ext4_jbd2_inode_add_write(handle, inode, from,
3835 struct inode *inode = mapping->host;
3837 unsigned blocksize = inode->i_sb->s_blocksize;
3847 if (IS_DAX(inode)) {
3848 return iomap_zero_range(inode, from, length, NULL,
3866 struct inode *inode = mapping->host;
3868 /* If we are processing an encrypted inode during orphan list handling */
3869 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3872 blocksize = inode->i_sb->s_blocksize;
3878 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3881 struct super_block *sb = inode->i_sb;
3882 struct address_space *mapping = inode->i_mapping;
3916 int ext4_can_truncate(struct inode *inode)
3918 if (S_ISREG(inode->i_mode))
3920 if (S_ISDIR(inode->i_mode))
3922 if (S_ISLNK(inode->i_mode))
3923 return !ext4_inode_is_fast_symlink(inode);
3933 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3939 loff_t size = i_size_read(inode);
3941 WARN_ON(!inode_is_locked(inode));
3945 if (EXT4_I(inode)->i_disksize >= size)
3948 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3951 ext4_update_i_disksize(inode, size);
3952 ret = ext4_mark_inode_dirty(handle, inode);
3965 int ext4_break_layouts(struct inode *inode)
3967 struct ext4_inode_info *ei = EXT4_I(inode);
3975 page = dax_layout_busy_page(inode->i_mapping);
3992 * @inode: File inode
4001 struct inode *inode = file_inode(file);
4002 struct super_block *sb = inode->i_sb;
4004 struct address_space *mapping = inode->i_mapping;
4006 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4011 trace_ext4_punch_hole(inode, offset, length, 0);
4024 inode_lock(inode);
4027 if (offset >= inode->i_size)
4034 if (offset + length > inode->i_size) {
4035 length = inode->i_size +
4036 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4044 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
4051 * Attach jinode to inode for jbd2 if we do any zeroing of
4054 ret = ext4_inode_attach_jinode(inode);
4061 inode_dio_wait(inode);
4071 down_write(&EXT4_I(inode)->i_mmap_sem);
4073 ret = ext4_break_layouts(inode);
4082 ret = ext4_update_disksize_before_punch(inode, offset, length);
4085 truncate_pagecache_range(inode, first_block_offset,
4089 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4090 credits = ext4_writepage_trans_blocks(inode);
4092 credits = ext4_blocks_for_truncate(inode);
4093 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4100 ret = ext4_zero_partial_blocks(handle, inode, offset,
4112 down_write(&EXT4_I(inode)->i_data_sem);
4113 ext4_discard_preallocations(inode, 0);
4115 ret = ext4_es_remove_extent(inode, first_block,
4118 up_write(&EXT4_I(inode)->i_data_sem);
4122 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4123 ret = ext4_ext_remove_space(inode, first_block,
4126 ret = ext4_ind_remove_space(handle, inode, first_block,
4129 up_write(&EXT4_I(inode)->i_data_sem);
4131 ext4_fc_track_range(handle, inode, first_block, stop_block);
4132 if (IS_SYNC(inode))
4135 inode->i_mtime = inode->i_ctime = current_time(inode);
4136 ret2 = ext4_mark_inode_dirty(handle, inode);
4140 ext4_update_inode_fsync_trans(handle, inode, 1);
4144 up_write(&EXT4_I(inode)->i_mmap_sem);
4146 inode_unlock(inode);
4150 int ext4_inode_attach_jinode(struct inode *inode)
4152 struct ext4_inode_info *ei = EXT4_I(inode);
4155 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4159 spin_lock(&inode->i_lock);
4162 spin_unlock(&inode->i_lock);
4166 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4169 spin_unlock(&inode->i_lock);
4180 * simultaneously on behalf of the same inode.
4193 * truncate against the orphan inode list.
4195 * The committed inode has the new, desired i_size (which is the same as
4197 * that this inode's truncate did not complete and it will again call
4200 * that's fine - as long as they are linked from the inode, the post-crash
4203 int ext4_truncate(struct inode *inode)
4205 struct ext4_inode_info *ei = EXT4_I(inode);
4209 struct address_space *mapping = inode->i_mapping;
4212 * There is a possibility that we're either freeing the inode
4213 * or it's a completely new inode. In those cases we might not
4216 if (!(inode->i_state & (I_NEW|I_FREEING)))
4217 WARN_ON(!inode_is_locked(inode));
4218 trace_ext4_truncate_enter(inode);
4220 if (!ext4_can_truncate(inode))
4223 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4224 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4226 if (ext4_has_inline_data(inode)) {
4229 err = ext4_inline_data_truncate(inode, &has_inline);
4235 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4236 err = ext4_inode_attach_jinode(inode);
4241 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4242 credits = ext4_writepage_trans_blocks(inode);
4244 credits = ext4_blocks_for_truncate(inode);
4246 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4252 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4253 ext4_block_truncate_page(handle, mapping, inode->i_size);
4256 * We add the inode to the orphan list, so that if this
4259 * marks the inode dirty, to catch the new size.
4264 err = ext4_orphan_add(handle, inode);
4268 down_write(&EXT4_I(inode)->i_data_sem);
4270 ext4_discard_preallocations(inode, 0);
4272 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4273 err = ext4_ext_truncate(handle, inode);
4275 ext4_ind_truncate(handle, inode);
4281 if (IS_SYNC(inode))
4292 if (inode->i_nlink)
4293 ext4_orphan_del(handle, inode);
4295 inode->i_mtime = inode->i_ctime = current_time(inode);
4296 err2 = ext4_mark_inode_dirty(handle, inode);
4302 trace_ext4_truncate_exit(inode);
4306 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4308 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4309 return inode_peek_iversion_raw(inode);
4311 return inode_peek_iversion(inode);
4317 struct inode *inode = &(ei->vfs_inode);
4318 u64 i_blocks = READ_ONCE(inode->i_blocks);
4319 struct super_block *sb = inode->i_sb;
4328 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4347 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4349 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4351 i_blocks = i_blocks >> (inode->i_blkbits - 9);
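
The i_blocks handling above relies on a unit trick: i_blocks normally counts 512-byte sectors, but for huge files it can be stored in filesystem-block units, converted with a shift by (i_blkbits - 9); the reader side (ext4_inode_blocks, file line 4713 further down) shifts it back. Plain arithmetic illustration, no kernel structures involved:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int blkbits = 12;                 /* 4096-byte blocks */
        uint64_t sectors = 1ULL << 40;             /* i_blocks in 512-byte units */

        uint64_t fs_blocks = sectors >> (blkbits - 9);   /* store: huge-file form */
        uint64_t back = fs_blocks << (blkbits - 9);      /* load: back to sectors */

        printf("sectors=%llu fs_blocks=%llu roundtrip=%llu\n",
               (unsigned long long)sectors,
               (unsigned long long)fs_blocks,
               (unsigned long long)back);
        return 0;
    }
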
4358 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4360 struct ext4_inode_info *ei = EXT4_I(inode);
4369 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4370 i_uid = i_uid_read(inode);
4371 i_gid = i_gid_read(inode);
4373 if (!(test_opt(inode->i_sb, NO_UID32))) {
4396 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4398 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4399 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4400 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4405 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4411 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4412 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4413 if (old_valid_dev(inode->i_rdev)) {
4415 cpu_to_le32(old_encode_dev(inode->i_rdev));
4420 cpu_to_le32(new_encode_dev(inode->i_rdev));
4423 } else if (!ext4_has_inline_data(inode)) {
4428 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4429 u64 ivers = ext4_inode_peek_iversion(inode);
4442 !ext4_has_feature_project(inode->i_sb))
4445 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4449 ext4_inode_csum_set(inode, raw_inode, ei);
4454 * ext4_get_inode_loc returns with an extra refcount against the inode's
4455 * underlying buffer_head on success. If we pass 'inode' and it does not
4456 * have in-inode xattr, we have all inode data in memory that is needed
4457 * to recreate the on-disk version of this inode.
4460 struct inode *inode, struct ext4_iloc *iloc,
4480 * Figure out the offset within the block group inode table
4490 ext4_error(sb, "Invalid inode table block %llu in "
4509 * If we have all information of the inode in memory and this
4510 * is the only valid inode in the block, we need not read the
4513 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4519 /* Is the inode bitmap in cache? */
4525 * If the inode bitmap isn't in cache then the
4546 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4547 ext4_fill_raw_inode(inode, raw_inode);
4557 * blocks from the inode table.
4582 * There are other valid inodes in the buffer, this inode
4583 * has in-inode xattrs, or we don't have this inode in memory.
4603 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4609 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4613 ext4_error_inode_block(inode, err_blk, EIO,
4619 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4624 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4628 ext4_error_inode_block(inode, err_blk, EIO,
4641 static bool ext4_should_enable_dax(struct inode *inode)
4643 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4645 if (test_opt2(inode->i_sb, DAX_NEVER))
4647 if (!S_ISREG(inode->i_mode))
4649 if (ext4_should_journal_data(inode))
4651 if (ext4_has_inline_data(inode))
4653 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4655 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4659 if (test_opt(inode->i_sb, DAX_ALWAYS))
4662 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4665 void ext4_set_inode_flags(struct inode *inode, bool init)
4667 unsigned int flags = EXT4_I(inode)->i_flags;
4670 WARN_ON_ONCE(IS_DAX(inode) && init);
4685 new_fl |= (inode->i_flags & S_DAX);
4686 if (init && ext4_should_enable_dax(inode))
4695 inode_set_flags(inode, new_fl,
4704 struct inode *inode = &(ei->vfs_inode);
4705 struct super_block *sb = inode->i_sb;
4711 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4713 return i_blocks << (inode->i_blkbits - 9);
4722 static inline int ext4_iget_extra_inode(struct inode *inode,
4729 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4733 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4734 err = ext4_find_inline_data_nolock(inode);
4735 if (!err && ext4_has_inline_data(inode))
4736 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4739 EXT4_I(inode)->i_inline_off = 0;
4743 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4745 if (!ext4_has_feature_project(inode->i_sb))
4747 *projid = EXT4_I(inode)->i_projid;
4753 * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag
4756 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4758 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4759 inode_set_iversion_raw(inode, val);
4761 inode_set_iversion_queried(inode, val);
4764 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4768 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4770 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4771 EXT4_I(inode)->i_file_acl)
4774 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4777 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4778 return "unexpected bad inode w/o EXT4_IGET_BAD";
4782 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4789 struct inode *inode;
4806 "inode #%lu: comm %s: iget: illegal inode #",
4811 inode = iget_locked(sb, ino);
4812 if (!inode)
4814 if (!(inode->i_state & I_NEW)) {
4815 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4816 ext4_error_inode(inode, function, line, 0, err_str);
4817 iput(inode);
4820 return inode;
4823 ei = EXT4_I(inode);
4826 ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4837 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4840 EXT4_INODE_SIZE(inode->i_sb) ||
4842 ext4_error_inode(inode, function, line, 0,
4844 "(inode size %u)",
4846 EXT4_INODE_SIZE(inode->i_sb));
4853 /* Precompute checksum seed for inode metadata */
4855 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4857 __le32 inum = cpu_to_le32(inode->i_ino);
4865 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4868 ext4_error_inode_err(inode, function, line, 0,
4874 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4884 if (!(test_opt(inode->i_sb, NO_UID32))) {
4888 i_uid_write(inode, i_uid);
4889 i_gid_write(inode, i_gid);
4891 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4897 /* We now have enough fields to check if the inode was active or not.
4902 if (inode->i_nlink == 0) {
4903 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4904 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4906 /* this inode is deleted or unallocated */
4908 ext4_error_inode(inode, function, line, 0,
4909 "iget: special inode unallocated");
4923 ext4_set_inode_flags(inode, true);
4924 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4929 inode->i_size = ext4_isize(sb, raw_inode);
4930 if ((size = i_size_read(inode)) < 0) {
4931 ext4_error_inode(inode, function, line, 0,
4942 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4943 ext4_error_inode(inode, function, line, 0,
4948 ei->i_disksize = inode->i_size;
4952 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4956 * NOTE! The in-memory inode i_data array is in little-endian order
4967 * as we cannot be sure that the inode or some of its metadata isn't
4968 * part of the transaction - the inode could have been reclaimed and
4989 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4996 ret = ext4_iget_extra_inode(inode, raw_inode, ei);
5002 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
5003 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
5004 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
5007 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5010 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5015 ext4_inode_set_iversion_queried(inode, ivers);
5020 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
5021 ext4_error_inode(inode, function, line, 0,
5026 } else if (!ext4_has_inline_data(inode)) {
5027 /* validate the block references in the inode */
5029 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5030 (S_ISLNK(inode->i_mode) &&
5031 !ext4_inode_is_fast_symlink(inode)))) {
5032 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5033 ret = ext4_ext_check_inode(inode);
5035 ret = ext4_ind_check_inode(inode);
5041 if (S_ISREG(inode->i_mode)) {
5042 inode->i_op = &ext4_file_inode_operations;
5043 inode->i_fop = &ext4_file_operations;
5044 ext4_set_aops(inode);
5045 } else if (S_ISDIR(inode->i_mode)) {
5046 inode->i_op = &ext4_dir_inode_operations;
5047 inode->i_fop = &ext4_dir_operations;
5048 } else if (S_ISLNK(inode->i_mode)) {
5050 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
5051 ext4_error_inode(inode, function, line, 0,
5057 if (IS_ENCRYPTED(inode)) {
5058 inode->i_op = &ext4_encrypted_symlink_inode_operations;
5059 ext4_set_aops(inode);
5060 } else if (ext4_inode_is_fast_symlink(inode)) {
5061 inode->i_link = (char *)ei->i_data;
5062 inode->i_op = &ext4_fast_symlink_inode_operations;
5063 nd_terminate_link(ei->i_data, inode->i_size,
5066 inode->i_op = &ext4_symlink_inode_operations;
5067 ext4_set_aops(inode);
5069 inode_nohighmem(inode);
5070 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5071 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5072 inode->i_op = &ext4_special_inode_operations;
5074 init_special_inode(inode, inode->i_mode,
5077 init_special_inode(inode, inode->i_mode,
5080 make_bad_inode(inode);
5083 ext4_error_inode(inode, function, line, 0,
5084 "iget: bogus i_mode (%o)", inode->i_mode);
5087 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
5088 ext4_error_inode(inode, function, line, 0,
5090 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
5091 ext4_error_inode(inode, function, line, 0, err_str);
5097 unlock_new_inode(inode);
5098 return inode;
5102 iget_failed(inode);
5111 struct inode *inode;
5113 inode = find_inode_by_ino_rcu(sb, ino);
5114 if (!inode)
5117 if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5119 ((inode->i_state & I_DIRTY_TIME) == 0))
5122 spin_lock(&inode->i_lock);
5123 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5125 (inode->i_state & I_DIRTY_TIME)) {
5126 struct ext4_inode_info *ei = EXT4_I(inode);
5128 inode->i_state &= ~I_DIRTY_TIME;
5129 spin_unlock(&inode->i_lock);
5132 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5133 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5134 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5135 ext4_inode_csum_set(inode, raw_inode, ei);
5137 trace_ext4_other_inode_update_time(inode, orig_ino);
5140 spin_unlock(&inode->i_lock);
5145 * the same inode table block.
5155 * Calculate the first inode in the inode table block. Inode
5156 * numbers are one-based. That is, the first inode in a block
5171 * Post the struct inode info into an on-disk inode location in the
5173 * buffer_head in the inode location struct.
5178 struct inode *inode,
5182 struct ext4_inode_info *ei = EXT4_I(inode);
5184 struct super_block *sb = inode->i_sb;
5191 * For fields not tracked in the in-memory inode, initialise them
5194 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5195 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5197 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5205 err = ext4_fill_raw_inode(inode, raw_inode);
5208 EXT4_ERROR_INODE(inode, "corrupted inode contents");
5212 if (inode->i_sb->s_flags & SB_LAZYTIME)
5213 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5220 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5234 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5236 ext4_std_error(inode->i_sb, err);
5258 * because the inode has been copied into a raw inode buffer in
5262 * Note that we are absolutely dependent upon all inode dirtiers doing the
5268 * mark_inode_dirty(inode)
5270 * inode->i_size = expr;
5273 * and the new i_size will be lost. Plus the inode will no longer be on the
5274 * superblock's dirty inode list.
5276 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5281 sb_rdonly(inode->i_sb))
5284 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5287 if (EXT4_SB(inode->i_sb)->s_journal) {
5302 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5303 EXT4_I(inode)->i_sync_tid);
5307 err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5312 * it here separately for each inode.
5317 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5318 "IO error syncing inode");
5331 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5335 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5339 offset = inode->i_size & (PAGE_SIZE - 1);
5349 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5352 page = find_lock_page(inode->i_mapping,
5353 inode->i_size >> PAGE_SHIFT);
5379 * shrinks i_size, we put the inode on the orphan list and modify
5383 * disk. (On recovery, the inode will get truncated and the blocks will
5388 * and inode is still attached to the committing transaction, we must
5394 * Called with inode->i_mutex down.
5398 struct inode *inode = d_inode(dentry);
5403 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5406 if (unlikely(IS_IMMUTABLE(inode)))
5409 if (unlikely(IS_APPEND(inode) &&
5426 if (is_quota_modification(inode, attr)) {
5427 error = dquot_initialize(inode);
5432 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
5433 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
5436 /* (user+group)*(old+new) structure, inode write (sb,
5437 * inode block, ? - but truncate inode update has it) */
5438 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5439 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5440 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5447 * counts xattr inode references.
5449 down_read(&EXT4_I(inode)->xattr_sem);
5450 error = dquot_transfer(inode, attr);
5451 up_read(&EXT4_I(inode)->xattr_sem);
5457 /* Update corresponding info in inode so that everything is in
5460 inode->i_uid = attr->ia_uid;
5462 inode->i_gid = attr->ia_gid;
5463 error = ext4_mark_inode_dirty(handle, inode);
5472 loff_t oldsize = inode->i_size;
5474 int shrink = (attr->ia_size < inode->i_size);
5476 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5477 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5483 if (!S_ISREG(inode->i_mode)) {
5487 if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
5488 inode_inc_iversion(inode);
5491 if (ext4_should_order_data(inode)) {
5492 error = ext4_begin_ordered_truncate(inode,
5498 * Blocks are going to be removed from the inode. Wait
5501 inode_dio_wait(inode);
5504 down_write(&EXT4_I(inode)->i_mmap_sem);
5506 rc = ext4_break_layouts(inode);
5508 up_write(&EXT4_I(inode)->i_mmap_sem);
5512 if (attr->ia_size != inode->i_size) {
5513 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5519 error = ext4_orphan_add(handle, inode);
5527 inode->i_mtime = current_time(inode);
5528 inode->i_ctime = inode->i_mtime;
5532 ext4_fc_track_range(handle, inode,
5534 inode->i_sb->s_blocksize_bits,
5538 handle, inode,
5540 inode->i_sb->s_blocksize_bits,
5542 inode->i_sb->s_blocksize_bits);
5544 down_write(&EXT4_I(inode)->i_data_sem);
5545 old_disksize = EXT4_I(inode)->i_disksize;
5546 EXT4_I(inode)->i_disksize = attr->ia_size;
5547 rc = ext4_mark_inode_dirty(handle, inode);
5556 i_size_write(inode, attr->ia_size);
5558 EXT4_I(inode)->i_disksize = old_disksize;
5559 up_write(&EXT4_I(inode)->i_data_sem);
5564 pagecache_isize_extended(inode, oldsize,
5565 inode->i_size);
5566 } else if (ext4_should_journal_data(inode)) {
5567 ext4_wait_for_tail_page_commit(inode);
5575 truncate_pagecache(inode, inode->i_size);
5581 rc = ext4_truncate(inode);
5586 up_write(&EXT4_I(inode)->i_mmap_sem);
5590 setattr_copy(inode, attr);
5591 mark_inode_dirty(inode);
5598 if (orphan && inode->i_nlink)
5599 ext4_orphan_del(NULL, inode);
5602 rc = posix_acl_chmod(inode, inode->i_mode);
5606 ext4_std_error(inode->i_sb, error);
5615 struct inode *inode = d_inode(path->dentry);
5617 struct ext4_inode_info *ei = EXT4_I(inode);
5648 generic_fillattr(inode, stat);
5655 struct inode *inode = d_inode(path->dentry);
5661 * If there is inline data in the inode, the inode will normally not
5666 if (unlikely(ext4_has_inline_data(inode)))
5679 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5680 EXT4_I(inode)->i_reserved_data_blocks);
5681 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5685 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5688 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5689 return ext4_ind_trans_blocks(inode, lblocks);
5690 return ext4_ext_index_trans_blocks(inode, pextents);
5702 * Also account for superblock, inode, quota and xattr blocks
5704 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5707 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5716 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5728 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5729 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5734 /* Blocks for super block, inode, quota and xattr blocks */
5735 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5750 int ext4_writepage_trans_blocks(struct inode *inode)
5752 int bpp = ext4_journal_blocks_per_page(inode);
5755 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5758 if (ext4_should_journal_data(inode))
5772 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5774 return ext4_meta_trans_blocks(inode, nrblocks, 1);
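
ext4_meta_trans_blocks() and its wrappers estimate journal credits: index blocks for the mapped extents, plus block-bitmap and group-descriptor blocks (capped by the number of group-descriptor blocks), plus a fixed overhead for superblock, inode, quota and xattr blocks. A back-of-the-envelope model with made-up constants, not the real EXT4_META_TRANS_BLOCKS() values:

    #include <stdio.h>

    #define FAKE_META_TRANS_BLOCKS 24   /* sb + inode + quota + xattr, made up */
    #define FAKE_GDB_COUNT 16

    static int meta_trans_blocks(int lblocks, int pextents, int idxblocks)
    {
        int groups = pextents + idxblocks;   /* worst case: each in its own group */
        int gdpblocks = groups;

        if (groups > FAKE_GDB_COUNT)
            gdpblocks = FAKE_GDB_COUNT;

        /* index blocks + bitmap blocks + group-descriptor blocks + fixed cost */
        return idxblocks + groups + gdpblocks + FAKE_META_TRANS_BLOCKS;
    }

    int main(void)
    {
        int bpp = 1;   /* blocks per page with 4K blocks and 4K pages */

        printf("writepage credits ~ %d\n", meta_trans_blocks(bpp, bpp, 1));
        return 0;
    }
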
5782 struct inode *inode, struct ext4_iloc *iloc)
5786 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5790 ext4_fc_track_inode(handle, inode);
5796 if (IS_I_VERSION(inode) &&
5797 !(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
5798 inode_inc_iversion(inode);
5804 err = ext4_do_update_inode(handle, inode, iloc);
5815 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5820 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5823 err = ext4_get_inode_loc(inode, iloc);
5832 ext4_std_error(inode->i_sb, err);
5836 static int __ext4_expand_extra_isize(struct inode *inode,
5843 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5844 struct ext4_inode_info *ei = EXT4_I(inode);
5850 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5852 EXT4_INODE_SIZE(inode->i_sb));
5862 header = IHDR(inode, raw_inode);
5865 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5868 EXT4_I(inode)->i_extra_isize, 0,
5869 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5870 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5879 if (dquot_initialize_needed(inode))
5883 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
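
__ext4_expand_extra_isize() grows the in-inode area by zeroing the bytes between the old and the new end of i_extra_isize (when no xattr entries are in the way) and recording the new size; if xattrs do occupy that space, ext4_expand_extra_isize_ea() has to shift them first. A simplified sketch of the no-xattr case on a flat buffer; the layout is illustrative, not the raw on-disk inode:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define GOOD_OLD_INODE_SIZE 128
    #define INODE_SIZE 256

    int main(void)
    {
        unsigned char raw[INODE_SIZE];
        uint16_t extra_isize = 32, new_extra_isize = 64;

        memset(raw, 0xAA, sizeof(raw));   /* pretend there is stale data */

        /* zero the gap between the old and the new end of the extra area */
        memset(raw + GOOD_OLD_INODE_SIZE + extra_isize, 0,
               new_extra_isize - extra_isize);
        extra_isize = new_extra_isize;

        printf("extra_isize is now %u\n", extra_isize);
        return 0;
    }
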
5896 * Expand an inode by new_extra_isize bytes.
5899 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5907 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5912 * the inode. When journaled, we first need to obtain extra
5915 * only result in a minor loss of functionality for that inode.
5920 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5923 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5926 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5928 ext4_write_unlock_xattr(inode, &no_expand);
5933 int ext4_expand_extra_isize(struct inode *inode,
5941 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5946 handle = ext4_journal_start(inode, EXT4_HT_INODE,
5947 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5954 ext4_write_lock_xattr(inode, &no_expand);
5963 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5966 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5971 ext4_write_unlock_xattr(inode, &no_expand);
5977 * What we do here is to mark the in-core inode as clean with respect to inode
5979 * This means that the in-core inode may be reaped by prune_icache
5985 * inode out, but prune_icache isn't a user-visible syncing function.
5989 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5993 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5997 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5998 err = ext4_reserve_inode_write(handle, inode, &iloc);
6002 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
6003 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
6006 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
6009 ext4_error_inode_err(inode, func, line, 0, err,
6019 * to include the updated inode in the current transaction.
6021 * Also, dquot_alloc_block() will always dirty the inode when blocks
6024 * If the inode is marked synchronous, we don't honour that here - doing
6029 * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
6030 * to copy into the on-disk inode structure are the timestamp fields.
6032 void ext4_dirty_inode(struct inode *inode, int flags)
6038 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6042 ext4_mark_inode_dirty(handle, inode);
6049 int ext4_change_inode_journal_flag(struct inode *inode, int val)
6054 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6066 journal = EXT4_JOURNAL(inode);
6073 inode_dio_wait(inode);
6076 * Before flushing the journal and switching inode's aops, we have
6077 * to flush all dirty data the inode has. There can be outstanding
6084 down_write(&EXT4_I(inode)->i_mmap_sem);
6085 err = filemap_write_and_wait(inode->i_mapping);
6087 up_write(&EXT4_I(inode)->i_mmap_sem);
6100 * the inode's in-core data-journaling state flag now.
6104 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6112 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6114 ext4_set_aops(inode);
6120 up_write(&EXT4_I(inode)->i_mmap_sem);
6122 /* Finally we can mark the inode as dirty. */
6124 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6128 ext4_fc_mark_ineligible(inode->i_sb,
6130 err = ext4_mark_inode_dirty(handle, inode);
6133 ext4_std_error(inode->i_sb, err);
6152 struct inode *inode = file_inode(file);
6153 struct address_space *mapping = inode->i_mapping;
6158 if (unlikely(IS_IMMUTABLE(inode)))
6161 sb_start_pagefault(inode->i_sb);
6164 down_read(&EXT4_I(inode)->i_mmap_sem);
6166 err = ext4_convert_inline_data(inode);
6176 if (ext4_should_journal_data(inode))
6180 if (test_opt(inode->i_sb, DELALLOC) &&
6181 !ext4_nonda_switch(inode->i_sb)) {
6186 ext4_should_retry_alloc(inode->i_sb, &retries));
6191 size = i_size_read(inode);
6208 * inode to the transaction's list to writeprotect pages on commit.
6222 if (ext4_should_dioread_nolock(inode))
6227 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6228 ext4_writepage_trans_blocks(inode));
6238 if (!ext4_should_journal_data(inode)) {
6242 size = i_size_read(inode);
6263 if (ext4_jbd2_inode_add_write(handle, inode,
6266 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
6272 if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6277 up_read(&EXT4_I(inode)->i_mmap_sem);
6278 sb_end_pagefault(inode->i_sb);
6288 struct inode *inode = file_inode(vmf->vma->vm_file);
6291 down_read(&EXT4_I(inode)->i_mmap_sem);
6293 up_read(&EXT4_I(inode)->i_mmap_sem);