Lines matching refs: inode

41 	struct inode *inode = file_inode(vmf->vma->vm_file);
46 f2fs_update_iostat(F2FS_I_SB(inode), inode,
49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
57 struct inode *inode = file_inode(vmf->vma->vm_file);
58 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
63 if (unlikely(IS_IMMUTABLE(inode)))
66 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
79 err = f2fs_convert_inline_inode(inode);
84 if (f2fs_compressed_file(inode)) {
85 int ret = f2fs_is_compressed_cluster(inode, page->index);
99 sb_start_pagefault(inode->i_sb);
101 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
104 filemap_invalidate_lock_shared(inode->i_mapping);
106 if (unlikely(page->mapping != inode->i_mapping ||
107 page_offset(page) > i_size_read(inode) ||
116 set_new_dnode(&dn, inode, NULL, NULL, 0);
122 set_new_dnode(&dn, inode, NULL, NULL, 0);
135 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
145 i_size_read(inode)) {
148 offset = i_size_read(inode) & ~PAGE_MASK;
153 f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
158 filemap_invalidate_unlock_shared(inode->i_mapping);
160 sb_end_pagefault(inode->i_sb);
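
Lines 57–160 above trace the shape of f2fs's page-mkwrite handler: pagefault accounting, a shared invalidate lock, a staleness re-check under the page lock, block reservation, and tail zeroing past i_size. A condensed, non-verbatim sketch of how those fragments fit together (block reservation and error paths elided; the function name is a placeholder):

    static vm_fault_t sketch_page_mkwrite(struct vm_fault *vmf)
    {
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct dnode_of_data dn;
        vm_fault_t ret = VM_FAULT_LOCKED;

        sb_start_pagefault(inode->i_sb);
        filemap_invalidate_lock_shared(inode->i_mapping);

        lock_page(page);
        /* the page may have been truncated while we waited for locks */
        if (unlikely(page->mapping != inode->i_mapping ||
                     page_offset(page) > i_size_read(inode))) {
            unlock_page(page);
            ret = VM_FAULT_NOPAGE;
            goto out;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        /* f2fs_get_block(&dn, page->index) elided here: it reserves the
         * block and fills dn.data_blkaddr */
        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        /* zero the tail when the page straddles i_size (lines 145-148) */
        if (page_offset(page) + PAGE_SIZE > i_size_read(inode))
            zero_user_segment(page, i_size_read(inode) & ~PAGE_MASK,
                              PAGE_SIZE);
        set_page_dirty(page);
    out:
        filemap_invalidate_unlock_shared(inode->i_mapping);
        sb_end_pagefault(inode->i_sb);
        return ret;
    }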
171 static int get_parent_ino(struct inode *inode, nid_t *pino)
179 dentry = d_find_alias(inode);
188 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
190 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
193 if (!S_ISREG(inode->i_mode))
195 else if (f2fs_compressed_file(inode))
197 else if (inode->i_nlink != 1)
201 else if (file_wrong_pino(inode))
205 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
212 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
213 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
224 /* But we need to avoid that there are some inode updates */
231 static void try_to_fix_pino(struct inode *inode)
233 struct f2fs_inode_info *fi = F2FS_I(inode);
237 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
238 get_parent_ino(inode, &pino)) {
239 f2fs_i_pino_write(inode, pino);
240 file_got_pino(inode);
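
The fragments at 231–240 nearly spell out the whole of try_to_fix_pino(): when a file whose parent ino went stale has exactly one link, the parent is rediscovered via get_parent_ino() (which uses d_find_alias(), line 179) and written back. Reconstructed from those fragments, with the i_sem bracket assumed:

    static void sketch_fix_pino(struct inode *inode)
    {
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t pino;

        f2fs_down_write(&fi->i_sem);
        if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
                get_parent_ino(inode, &pino)) {
            f2fs_i_pino_write(inode, pino);
            file_got_pino(inode);
        }
        f2fs_up_write(&fi->i_sem);
    }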
248 struct inode *inode = file->f_mapping->host;
249 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
250 nid_t ino = inode->i_ino;
260 if (unlikely(f2fs_readonly(inode->i_sb)))
263 trace_f2fs_sync_file_enter(inode);
265 if (S_ISDIR(inode->i_mode))
269 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
270 set_inode_flag(inode, FI_NEED_IPU);
272 clear_inode_flag(inode, FI_NEED_IPU);
275 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
279 /* if the inode is dirty, let's recover all the time */
280 if (!f2fs_skip_inode_update(inode, datasync)) {
281 f2fs_write_inode(inode, NULL);
288 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
295 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
317 f2fs_down_read(&F2FS_I(inode)->i_sem);
318 cp_reason = need_do_checkpoint(inode);
319 f2fs_up_read(&F2FS_I(inode)->i_sem);
323 ret = f2fs_sync_fs(inode->i_sb, 1);
329 try_to_fix_pino(inode);
330 clear_inode_flag(inode, FI_APPEND_WRITE);
331 clear_inode_flag(inode, FI_UPDATE_WRITE);
336 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
348 f2fs_mark_inode_dirty_sync(inode, true);
349 f2fs_write_inode(inode, NULL);
369 clear_inode_flag(inode, FI_APPEND_WRITE);
373 ret = f2fs_issue_flush(sbi, inode->i_ino);
376 clear_inode_flag(inode, FI_UPDATE_WRITE);
381 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
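
Read together, lines 248–381 outline the two exits of f2fs_do_sync_file(): a full checkpoint when need_do_checkpoint() reports a reason, or a per-inode node-page fsync followed by a device cache flush when it does not. Schematically (variables come from the surrounding function; many steps elided):

    /* decide under i_sem, act outside it */
    f2fs_down_read(&F2FS_I(inode)->i_sem);
    cp_reason = need_do_checkpoint(inode);
    f2fs_up_read(&F2FS_I(inode)->i_sem);

    if (cp_reason) {
        /* a checkpoint makes all metadata durable in one shot */
        ret = f2fs_sync_fs(inode->i_sb, 1);
        try_to_fix_pino(inode);
        clear_inode_flag(inode, FI_APPEND_WRITE);
        clear_inode_flag(inode, FI_UPDATE_WRITE);
    } else {
        /* cheaper path: only this inode's dirty node pages ... */
        ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
        /* ... then a flush so the data actually hits media */
        if (!ret)
            ret = f2fs_issue_flush(sbi, inode->i_ino);
    }

The FI_NEED_IPU toggle at lines 269–272 biases writeback toward in-place updates for small syncs before this decision is made.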
413 struct inode *inode = file->f_mapping->host;
414 loff_t maxbytes = inode->i_sb->s_maxbytes;
421 inode_lock(inode);
423 isize = i_size_read(inode);
428 if (f2fs_has_inline_data(inode)) {
441 set_new_dnode(&dn, inode, NULL, NULL, 0);
455 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
466 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
486 inode_unlock(inode);
489 inode_unlock(inode);
495 struct inode *inode = file->f_mapping->host;
496 loff_t maxbytes = inode->i_sb->s_maxbytes;
498 if (f2fs_compressed_file(inode))
499 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
506 maxbytes, i_size_read(inode));
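
Lines 495–506 show the one f2fs-specific wrinkle in llseek: a compressed file can address more blocks than s_maxbytes implies, so the bound is recomputed before the generic helper runs. A minimal sketch assuming the stock generic_file_llseek_size() signature (SEEK_DATA/SEEK_HOLE, handled by the dnode walk at lines 413–489, are stubbed here):

    static loff_t sketch_llseek(struct file *file, loff_t offset, int whence)
    {
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;

        if (f2fs_compressed_file(inode))
            maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
            return generic_file_llseek_size(file, offset, whence,
                                            maxbytes, i_size_read(inode));
        case SEEK_DATA:
        case SEEK_HOLE:
            return -ENXIO;    /* placeholder; see lines 413-489 */
        }
        return -EINVAL;
    }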
519 struct inode *inode = file_inode(file);
521 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
524 if (!f2fs_is_compress_backend_ready(inode))
530 f2fs_down_read(&F2FS_I(inode)->i_sem);
531 set_inode_flag(inode, FI_MMAP_FILE);
532 f2fs_up_read(&F2FS_I(inode)->i_sem);
537 static int f2fs_file_open(struct inode *inode, struct file *filp)
539 int err = fscrypt_file_open(inode, filp);
544 if (!f2fs_is_compress_backend_ready(inode))
547 err = fsverity_file_open(inode, filp);
554 return dquot_file_open(inode, filp);
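
The open path (537–554) layers its gates in a fixed order: fscrypt key check first, then compression backend readiness, then fs-verity, and only then quota. Condensed (the f_mode flag setup between the verity and quota steps is elided):

    static int sketch_file_open(struct inode *inode, struct file *filp)
    {
        int err = fscrypt_file_open(inode, filp);

        if (err)
            return err;
        if (!f2fs_is_compress_backend_ready(inode))
            return -EOPNOTSUPP;
        err = fsverity_file_open(inode, filp);
        if (err)
            return err;
        /* quota accounting starts only once every access check passed */
        return dquot_file_open(inode, filp);
    }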
559 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
564 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
565 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
567 addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
573 if (f2fs_compressed_file(dn->inode) &&
576 f2fs_i_compr_blocks_update(dn->inode,
602 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
611 dn->inode) + ofs;
614 dec_valid_block_count(sbi, dn->inode, nr_free);
619 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
623 static int truncate_partial_data_page(struct inode *inode, u64 from,
628 struct address_space *mapping = inode->i_mapping;
642 page = f2fs_get_lock_data_page(inode, index, true);
649 /* An encrypted inode should have a key and truncate the last page. */
650 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
657 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
659 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
666 trace_f2fs_truncate_blocks_enter(inode, from);
670 if (free_from >= max_file_blocks(inode))
676 ipage = f2fs_get_node_page(sbi, inode->i_ino);
682 if (f2fs_has_inline_data(inode)) {
683 f2fs_truncate_inline_inode(inode, ipage, from);
689 set_new_dnode(&dn, inode, ipage, NULL, 0);
697 count = ADDRS_PER_PAGE(dn.node_page, inode);
709 err = f2fs_truncate_inode_blocks(inode, free_from);
716 err = truncate_partial_data_page(inode, from, truncate_page);
718 trace_f2fs_truncate_blocks_exit(inode, err);
722 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
732 if (f2fs_compressed_file(inode))
734 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
737 err = f2fs_do_truncate_blocks(inode, free_from, lock);
746 if (f2fs_compressed_file(inode) && !free_from
747 && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
748 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
751 err = f2fs_truncate_partial_cluster(inode, from, lock);
760 int f2fs_truncate(struct inode *inode)
764 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
767 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
768 S_ISLNK(inode->i_mode)))
771 trace_f2fs_truncate(inode);
773 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))
776 err = f2fs_dquot_initialize(inode);
781 if (!f2fs_may_inline_data(inode)) {
782 err = f2fs_convert_inline_inode(inode);
787 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
791 inode->i_mtime = inode_set_ctime_current(inode);
792 f2fs_mark_inode_dirty_sync(inode, false);
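
f2fs_truncate() (760–792) is the common tail for setattr shrinks, the write-path cleanup at 4838–4846, and several ioctls. Reassembled from the fragments above (tracepoint and fault-injection lines elided; the name is a placeholder):

    int sketch_truncate(struct inode *inode)
    {
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
            return -EIO;
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
              S_ISLNK(inode->i_mode)))
            return 0;

        err = f2fs_dquot_initialize(inode);
        if (err)
            return err;

        /* inline data that can no longer stay inline must move out first */
        if (!f2fs_may_inline_data(inode)) {
            err = f2fs_convert_inline_inode(inode);
            if (err)
                return err;
        }

        err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
        if (err)
            return err;

        inode->i_mtime = inode_set_ctime_current(inode);
        f2fs_mark_inode_dirty_sync(inode, false);
        return 0;
    }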
796 static bool f2fs_force_buffered_io(struct inode *inode, int rw)
798 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
800 if (!fscrypt_dio_supported(inode))
802 if (fsverity_active(inode))
804 if (f2fs_compressed_file(inode))
827 struct inode *inode = d_inode(path->dentry);
828 struct f2fs_inode_info *fi = F2FS_I(inode);
832 if (f2fs_has_extra_attr(inode) &&
833 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
848 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
849 unsigned int bsize = i_blocksize(inode);
852 if (!f2fs_force_buffered_io(inode, WRITE)) {
863 if (IS_ENCRYPTED(inode))
869 if (IS_VERITY(inode))
879 generic_fillattr(idmap, request_mask, inode, stat);
882 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
883 f2fs_has_inline_dentry(inode))
891 struct inode *inode, const struct iattr *attr)
895 i_uid_update(idmap, attr, inode);
896 i_gid_update(idmap, attr, inode);
898 inode->i_atime = attr->ia_atime;
900 inode->i_mtime = attr->ia_mtime;
902 inode_set_ctime_to_ts(inode, attr->ia_ctime);
905 vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
908 !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
910 set_acl_inode(inode, mode);
920 struct inode *inode = d_inode(dentry);
923 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
926 if (unlikely(IS_IMMUTABLE(inode)))
929 if (unlikely(IS_APPEND(inode) &&
935 !f2fs_is_compress_backend_ready(inode))
950 if (is_quota_modification(idmap, inode, attr)) {
951 err = f2fs_dquot_initialize(inode);
955 if (i_uid_needs_update(idmap, attr, inode) ||
956 i_gid_needs_update(idmap, attr, inode)) {
957 f2fs_lock_op(F2FS_I_SB(inode));
958 err = dquot_transfer(idmap, inode, attr);
960 set_sbi_flag(F2FS_I_SB(inode),
962 f2fs_unlock_op(F2FS_I_SB(inode));
966 * update uid/gid under lock_op(), so that dquot and inode can
969 i_uid_update(idmap, attr, inode);
970 i_gid_update(idmap, attr, inode);
971 f2fs_mark_inode_dirty_sync(inode, true);
972 f2fs_unlock_op(F2FS_I_SB(inode));
976 loff_t old_size = i_size_read(inode);
978 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
980 * should convert inline inode before i_size_write to
983 err = f2fs_convert_inline_inode(inode);
988 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
989 filemap_invalidate_lock(inode->i_mapping);
991 truncate_setsize(inode, attr->ia_size);
994 err = f2fs_truncate(inode);
999 filemap_invalidate_unlock(inode->i_mapping);
1000 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1004 spin_lock(&F2FS_I(inode)->i_size_lock);
1005 inode->i_mtime = inode_set_ctime_current(inode);
1006 F2FS_I(inode)->last_disk_size = i_size_read(inode);
1007 spin_unlock(&F2FS_I(inode)->i_size_lock);
1010 __setattr_copy(idmap, inode, attr);
1013 err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));
1015 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
1017 inode->i_mode = F2FS_I(inode)->i_acl_mode;
1018 clear_inode_flag(inode, FI_ACL_MODE);
1023 f2fs_mark_inode_dirty_sync(inode, true);
1025 /* inode change will produce dirty node pages flushed by checkpoint */
1026 f2fs_balance_fs(F2FS_I_SB(inode), true);
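
In the ATTR_SIZE branch of setattr (976–1007) the ordering is the point: convert inline data before i_size changes, shrink under both the GC rwsem and the mapping's invalidate lock, then record last_disk_size under i_size_lock. The window, condensed from the fragments (error paths shortened):

    /* inside the ATTR_SIZE branch of setattr */
    loff_t old_size = i_size_read(inode);

    if (attr->ia_size > MAX_INLINE_DATA(inode)) {
        /* the new size no longer fits inline; convert first */
        err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
    filemap_invalidate_lock(inode->i_mapping);

    truncate_setsize(inode, attr->ia_size);
    if (attr->ia_size <= old_size)
        err = f2fs_truncate(inode);

    filemap_invalidate_unlock(inode->i_mapping);
    f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

    spin_lock(&F2FS_I(inode)->i_size_lock);
    inode->i_mtime = inode_set_ctime_current(inode);
    F2FS_I(inode)->last_disk_size = i_size_read(inode);
    spin_unlock(&F2FS_I(inode)->i_size_lock);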
1042 static int fill_zero(struct inode *inode, pgoff_t index,
1045 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1054 page = f2fs_get_new_data_page(inode, NULL, index, false);
1067 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1075 set_new_dnode(&dn, inode, NULL, NULL, 0);
1086 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1089 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1099 static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1105 ret = f2fs_convert_inline_inode(inode);
1116 ret = fill_zero(inode, pg_start, off_start,
1122 ret = fill_zero(inode, pg_start++, off_start,
1128 ret = fill_zero(inode, pg_end, 0, off_end);
1135 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1142 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1143 filemap_invalidate_lock(inode->i_mapping);
1145 truncate_pagecache_range(inode, blk_start, blk_end - 1);
1148 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1151 filemap_invalidate_unlock(inode->i_mapping);
1152 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1159 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1162 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1167 set_new_dnode(&dn, inode, NULL, NULL, 0);
1174 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1181 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1215 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1218 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1226 set_new_dnode(&dn, inode, NULL, NULL, 0);
1229 dec_valid_block_count(sbi, inode, 1);
1239 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1324 static int __exchange_data_block(struct inode *src_inode,
1325 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1376 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1378 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1379 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1387 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1388 filemap_invalidate_lock(inode->i_mapping);
1391 f2fs_drop_extent_tree(inode);
1392 truncate_pagecache(inode, offset);
1393 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1396 filemap_invalidate_unlock(inode->i_mapping);
1397 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1401 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1406 if (offset + len >= i_size_read(inode))
1413 ret = f2fs_convert_inline_inode(inode);
1418 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1422 ret = f2fs_do_collapse(inode, offset, len);
1427 filemap_invalidate_lock(inode->i_mapping);
1428 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1429 truncate_pagecache(inode, offset);
1431 new_size = i_size_read(inode) - len;
1432 ret = f2fs_truncate_blocks(inode, new_size, true);
1433 filemap_invalidate_unlock(inode->i_mapping);
1435 f2fs_i_size_write(inode, new_size);
1442 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1490 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1493 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1494 struct address_space *mapping = inode->i_mapping;
1496 loff_t new_size = i_size_read(inode);
1500 ret = inode_newsize_ok(inode, (len + offset));
1504 ret = f2fs_convert_inline_inode(inode);
1519 ret = fill_zero(inode, pg_start, off_start,
1527 ret = fill_zero(inode, pg_start++, off_start,
1541 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1544 truncate_pagecache_range(inode,
1550 set_new_dnode(&dn, inode, NULL, NULL, 0);
1555 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1559 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1567 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1580 ret = fill_zero(inode, pg_end, 0, off_end);
1589 if (new_size > i_size_read(inode)) {
1591 file_set_keep_isize(inode);
1593 f2fs_i_size_write(inode, new_size);
1598 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1600 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1601 struct address_space *mapping = inode->i_mapping;
1606 new_size = i_size_read(inode) + len;
1607 ret = inode_newsize_ok(inode, new_size);
1611 if (offset >= i_size_read(inode))
1618 ret = f2fs_convert_inline_inode(inode);
1625 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1638 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1641 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1643 truncate_pagecache(inode, offset);
1652 f2fs_drop_extent_tree(inode);
1654 ret = __exchange_data_block(inode, inode, idx,
1659 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1664 truncate_pagecache(inode, offset);
1668 f2fs_i_size_write(inode, new_size);
1672 static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
1675 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1690 err = inode_newsize_ok(inode, (len + offset));
1694 err = f2fs_convert_inline_inode(inode);
1712 if (f2fs_is_pinned_file(inode)) {
1734 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
1735 file_dont_truncate(inode);
1747 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
1766 if (new_size > i_size_read(inode)) {
1768 file_set_keep_isize(inode);
1770 f2fs_i_size_write(inode, new_size);
1779 struct inode *inode = file_inode(file);
1782 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1784 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1786 if (!f2fs_is_compress_backend_ready(inode))
1790 if (!S_ISREG(inode->i_mode))
1793 if (IS_ENCRYPTED(inode) &&
1801 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1811 inode_lock(inode);
1818 if (offset >= inode->i_size)
1821 ret = f2fs_punch_hole(inode, offset, len);
1823 ret = f2fs_collapse_range(inode, offset, len);
1825 ret = f2fs_zero_range(inode, offset, len, mode);
1827 ret = f2fs_insert_range(inode, offset, len);
1829 ret = f2fs_expand_inode_data(inode, offset, len, mode);
1833 inode->i_mtime = inode_set_ctime_current(inode);
1834 f2fs_mark_inode_dirty_sync(inode, false);
1835 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1839 inode_unlock(inode);
1841 trace_f2fs_fallocate(inode, mode, offset, len, ret);
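
f2fs_fallocate() (1779–1841) is a thin dispatcher over the helpers defined earlier in this listing; the mode-to-helper mapping at 1821–1829, plus the shared timestamp/dirty epilogue:

    if (mode & FALLOC_FL_PUNCH_HOLE)
        ret = f2fs_punch_hole(inode, offset, len);   /* no-op past i_size, line 1818 */
    else if (mode & FALLOC_FL_COLLAPSE_RANGE)
        ret = f2fs_collapse_range(inode, offset, len);
    else if (mode & FALLOC_FL_ZERO_RANGE)
        ret = f2fs_zero_range(inode, offset, len, mode);
    else if (mode & FALLOC_FL_INSERT_RANGE)
        ret = f2fs_insert_range(inode, offset, len);
    else
        ret = f2fs_expand_inode_data(inode, offset, len, mode);

    if (!ret) {
        inode->i_mtime = inode_set_ctime_current(inode);
        f2fs_mark_inode_dirty_sync(inode, false);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
    }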
1845 static int f2fs_release_file(struct inode *inode, struct file *filp)
1852 atomic_read(&inode->i_writecount) != 1)
1855 inode_lock(inode);
1856 f2fs_abort_atomic_write(inode, true);
1857 inode_unlock(inode);
1864 struct inode *inode = file_inode(file);
1872 if (F2FS_I(inode)->atomic_write_task == current &&
1874 inode_lock(inode);
1875 f2fs_abort_atomic_write(inode, true);
1876 inode_unlock(inode);
1882 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1884 struct f2fs_inode_info *fi = F2FS_I(inode);
1891 if (IS_NOQUOTA(inode))
1895 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1897 if (!f2fs_empty_dir(inode))
1902 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1910 if (!f2fs_disable_compressed_file(inode))
1914 int err = f2fs_convert_inline_inode(inode);
1918 f2fs_down_write(&F2FS_I(inode)->i_sem);
1919 if (!f2fs_may_compress(inode) ||
1920 (S_ISREG(inode->i_mode) &&
1921 F2FS_HAS_BLOCKS(inode))) {
1922 f2fs_up_write(&F2FS_I(inode)->i_sem);
1925 err = set_compress_context(inode);
1926 f2fs_up_write(&F2FS_I(inode)->i_sem);
1934 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1938 set_inode_flag(inode, FI_PROJ_INHERIT);
1940 clear_inode_flag(inode, FI_PROJ_INHERIT);
1942 inode_set_ctime_current(inode);
1943 f2fs_set_inode_flags(inode);
1944 f2fs_mark_inode_dirty_sync(inode, true);
2034 struct inode *inode = file_inode(filp);
2036 return put_user(inode->i_generation, (int __user *)arg);
2041 struct inode *inode = file_inode(filp);
2043 struct f2fs_inode_info *fi = F2FS_I(inode);
2044 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2045 struct inode *pinode;
2049 if (!inode_owner_or_capable(idmap, inode))
2052 if (!S_ISREG(inode->i_mode))
2062 inode_lock(inode);
2064 if (!f2fs_disable_compressed_file(inode)) {
2069 if (f2fs_is_atomic_file(inode))
2072 ret = f2fs_convert_inline_inode(inode);
2082 if (get_dirty_pages(inode))
2084 inode->i_ino, get_dirty_pages(inode));
2085 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2091 /* Check if the inode already has a COW inode */
2093 /* Create a COW inode for atomic write */
2094 pinode = f2fs_iget(inode->i_sb, fi->i_pino);
2111 /* Reuse the already created COW inode */
2119 f2fs_write_inode(inode, NULL);
2121 stat_inc_atomic_inode(inode);
2123 set_inode_flag(inode, FI_ATOMIC_FILE);
2125 isize = i_size_read(inode);
2128 set_inode_flag(inode, FI_ATOMIC_REPLACE);
2129 truncate_inode_pages_final(inode->i_mapping);
2130 f2fs_i_size_write(inode, 0);
2139 stat_update_max_atomic_write(inode);
2142 inode_unlock(inode);
2149 struct inode *inode = file_inode(filp);
2153 if (!inode_owner_or_capable(idmap, inode))
2160 f2fs_balance_fs(F2FS_I_SB(inode), true);
2162 inode_lock(inode);
2164 if (f2fs_is_atomic_file(inode)) {
2165 ret = f2fs_commit_atomic_write(inode);
2169 f2fs_abort_atomic_write(inode, ret);
2174 inode_unlock(inode);
2181 struct inode *inode = file_inode(filp);
2185 if (!inode_owner_or_capable(idmap, inode))
2192 inode_lock(inode);
2194 f2fs_abort_atomic_write(inode, true);
2196 inode_unlock(inode);
2199 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2205 struct inode *inode = file_inode(filp);
2206 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2282 struct inode *inode = file_inode(filp);
2283 struct super_block *sb = inode->i_sb;
2311 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2327 struct inode *inode = file_inode(filp);
2329 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2332 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2346 struct inode *inode = file_inode(filp);
2347 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2437 struct inode *inode = file_inode(filp);
2438 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2541 struct inode *inode = file_inode(filp);
2542 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2570 struct inode *inode = file_inode(filp);
2587 inode_lock(inode);
2589 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
2595 set_inode_flag(inode, FI_OPU_WRITE);
2596 if (f2fs_should_update_inplace(inode, NULL)) {
2602 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2611 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2626 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2673 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2682 set_inode_flag(inode, FI_SKIP_WRITES);
2688 page = f2fs_get_lock_data_page(inode, idx, true);
2708 clear_inode_flag(inode, FI_SKIP_WRITES);
2710 err = filemap_fdatawrite(inode->i_mapping);
2715 clear_inode_flag(inode, FI_SKIP_WRITES);
2717 clear_inode_flag(inode, FI_OPU_WRITE);
2719 inode_unlock(inode);
2727 struct inode *inode = file_inode(filp);
2728 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2735 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2750 max_file_blocks(inode)))
2774 struct inode *src = file_inode(file_in);
2775 struct inode *dst = file_inode(file_out);
2946 struct inode *inode = file_inode(filp);
2947 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3017 struct inode *inode = file_inode(filp);
3018 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3027 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3030 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3038 err = __dquot_transfer(inode, transfer_to);
3045 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3047 struct f2fs_inode_info *fi = F2FS_I(inode);
3048 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3060 if (!f2fs_has_extra_attr(inode))
3070 if (IS_NOQUOTA(inode))
3076 err = f2fs_dquot_initialize(inode);
3081 err = f2fs_transfer_project_quota(inode, kprojid);
3086 inode_set_ctime_current(inode);
3087 f2fs_mark_inode_dirty_sync(inode, true);
3093 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3098 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3108 struct inode *inode = d_inode(dentry);
3109 struct f2fs_inode_info *fi = F2FS_I(inode);
3112 if (IS_ENCRYPTED(inode))
3114 if (IS_VERITY(inode))
3116 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3118 if (is_inode_flag_set(inode, FI_PIN_FILE))
3123 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3132 struct inode *inode = d_inode(dentry);
3137 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3139 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3148 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3151 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3153 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3158 int f2fs_pin_file_control(struct inode *inode, bool inc)
3160 struct f2fs_inode_info *fi = F2FS_I(inode);
3161 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3165 f2fs_i_gc_failures_write(inode,
3170 __func__, inode->i_ino,
3172 clear_inode_flag(inode, FI_PIN_FILE);
3180 struct inode *inode = file_inode(filp);
3187 if (!S_ISREG(inode->i_mode))
3190 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3197 inode_lock(inode);
3200 clear_inode_flag(inode, FI_PIN_FILE);
3201 f2fs_i_gc_failures_write(inode, 0);
3205 if (f2fs_should_update_outplace(inode, NULL)) {
3210 if (f2fs_pin_file_control(inode, false)) {
3215 ret = f2fs_convert_inline_inode(inode);
3219 if (!f2fs_disable_compressed_file(inode)) {
3224 set_inode_flag(inode, FI_PIN_FILE);
3225 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3227 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3229 inode_unlock(inode);
3236 struct inode *inode = file_inode(filp);
3239 if (is_inode_flag_set(inode, FI_PIN_FILE))
3240 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3244 int f2fs_precache_extents(struct inode *inode)
3246 struct f2fs_inode_info *fi = F2FS_I(inode);
3252 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3261 end = max_file_blocks(inode);
3267 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
3303 struct inode *inode = file_inode(filp);
3305 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3307 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3308 f2fs_warn(F2FS_I_SB(inode),
3309 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3310 inode->i_ino);
3354 struct inode *inode = file_inode(filp);
3355 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3380 struct inode *inode = file_inode(filp);
3381 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3414 static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
3416 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3419 if (!f2fs_compressed_file(inode))
3422 *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3429 struct inode *inode = file_inode(filp);
3433 ret = f2fs_get_compress_blocks(inode, &blocks);
3442 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3444 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3449 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3483 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3484 dec_valid_block_count(sbi, dn->inode,
3497 struct inode *inode = file_inode(filp);
3498 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3507 if (!f2fs_compressed_file(inode))
3519 inode_lock(inode);
3521 writecount = atomic_read(&inode->i_writecount);
3528 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3533 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3537 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3542 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3543 inode_set_ctime_current(inode);
3544 f2fs_mark_inode_dirty_sync(inode, true);
3546 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3547 filemap_invalidate_lock(inode->i_mapping);
3549 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3555 set_new_dnode(&dn, inode, NULL, NULL, 0);
3567 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3569 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3582 filemap_invalidate_unlock(inode->i_mapping);
3583 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3585 inode_unlock(inode);
3592 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3597 __func__, inode->i_ino, inode->i_blocks,
3599 atomic_read(&F2FS_I(inode)->i_compr_blocks));
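
The release path (3497–3599) walks the file one dnode page at a time, rounding each step up to a compression-cluster boundary because clusters are released whole; the reserve ioctl below (3682–3770) repeats the same shape. A loop skeleton assuming the standard f2fs dnode helpers, where the per-cluster worker is the helper whose interior appears at 3442–3484:

    /* walk [page_idx, last_idx); context supplies inode, ret, released_blocks */
    while (page_idx < last_idx) {
        struct dnode_of_data dn;
        pgoff_t end_offset, count;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
        if (ret) {
            if (ret == -ENOENT) {    /* a hole: jump to the next dnode */
                page_idx = f2fs_get_next_page_offset(&dn, page_idx);
                ret = 0;
                continue;
            }
            break;
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
        count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
        /* clusters are all-or-nothing */
        count = round_up(count, F2FS_I(inode)->i_cluster_size);

        ret = release_compress_blocks(&dn, count);
        f2fs_put_dnode(&dn);
        if (ret < 0)
            break;

        page_idx += count;
        released_blocks += ret;
    }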
3608 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3609 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3614 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3632 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3661 ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
3670 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3682 struct inode *inode = file_inode(filp);
3683 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3691 if (!f2fs_compressed_file(inode))
3703 inode_lock(inode);
3705 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3710 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3713 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3714 filemap_invalidate_lock(inode->i_mapping);
3716 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3722 set_new_dnode(&dn, inode, NULL, NULL, 0);
3734 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3736 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3748 filemap_invalidate_unlock(inode->i_mapping);
3749 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3752 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3753 inode_set_ctime_current(inode);
3754 f2fs_mark_inode_dirty_sync(inode, true);
3757 inode_unlock(inode);
3763 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3768 __func__, inode->i_ino, inode->i_blocks,
3770 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3776 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3793 if (IS_ENCRYPTED(inode))
3794 ret = fscrypt_zeroout_range(inode, off, block, len);
3805 struct inode *inode = file_inode(filp);
3806 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3807 struct address_space *mapping = inode->i_mapping;
3824 !S_ISREG(inode->i_mode))
3830 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3834 inode_lock(inode);
3836 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3837 range.start >= inode->i_size) {
3845 if (inode->i_size - range.start > range.len) {
3849 sbi->sb->s_maxbytes : inode->i_size;
3862 ret = f2fs_convert_inline_inode(inode);
3866 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3882 set_new_dnode(&dn, inode, NULL, NULL, 0);
3892 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3924 inode, prev_index, prev_block,
3953 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3957 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3959 inode_unlock(inode);
3967 struct inode *inode = file_inode(filp);
3970 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3973 inode_lock_shared(inode);
3975 if (!f2fs_compressed_file(inode)) {
3976 inode_unlock_shared(inode);
3980 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3981 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3983 inode_unlock_shared(inode);
3994 struct inode *inode = file_inode(filp);
3995 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4015 inode_lock(inode);
4017 f2fs_down_write(&F2FS_I(inode)->i_sem);
4018 if (!f2fs_compressed_file(inode)) {
4023 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4028 if (F2FS_HAS_BLOCKS(inode)) {
4033 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4034 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4035 F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
4037 if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
4038 F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
4040 F2FS_I(inode)->i_compress_level = 0;
4044 F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
4045 f2fs_mark_inode_dirty_sync(inode, true);
4047 if (!f2fs_is_compress_backend_ready(inode))
4051 f2fs_up_write(&F2FS_I(inode)->i_sem);
4052 inode_unlock(inode);
4058 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4060 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
4061 struct address_space *mapping = inode->i_mapping;
4081 f2fs_bug_on(F2FS_I_SB(inode), !page);
4094 struct inode *inode = file_inode(filp);
4095 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4096 struct f2fs_inode_info *fi = F2FS_I(inode);
4109 if (!f2fs_compressed_file(inode))
4115 inode_lock(inode);
4117 if (!f2fs_is_compress_backend_ready(inode)) {
4122 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4127 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4134 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4138 ret = redirty_blocks(inode, page_idx, cluster_size);
4142 if (get_dirty_pages(inode) >= blk_per_seg) {
4143 ret = filemap_fdatawrite(inode->i_mapping);
4159 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4166 inode_unlock(inode);
4174 struct inode *inode = file_inode(filp);
4175 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4178 int cluster_size = F2FS_I(inode)->i_cluster_size;
4188 if (!f2fs_compressed_file(inode))
4194 inode_lock(inode);
4196 if (!f2fs_is_compress_backend_ready(inode)) {
4201 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4206 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4210 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4212 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4216 ret = redirty_blocks(inode, page_idx, cluster_size);
4220 if (get_dirty_pages(inode) >= blk_per_seg) {
4221 ret = filemap_fdatawrite(inode->i_mapping);
4237 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4240 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4246 inode_unlock(inode);
4359 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4367 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4382 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4383 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4408 struct inode *inode = file_inode(file);
4409 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4410 struct f2fs_inode_info *fi = F2FS_I(inode);
4419 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4450 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4457 struct inode *inode = file_inode(file);
4460 buf = f2fs_getname(F2FS_I_SB(inode));
4467 trace_f2fs_datawrite_start(inode, pos, count,
4470 trace_f2fs_dataread_start(inode, pos, count,
4478 struct inode *inode = file_inode(iocb->ki_filp);
4482 if (!f2fs_is_compress_backend_ready(inode))
4489 if (f2fs_should_use_dio(inode, iocb, to)) {
4494 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4498 trace_f2fs_dataread_end(inode, pos, ret);
4506 struct inode *inode = file_inode(in);
4510 if (!f2fs_is_compress_backend_ready(inode))
4518 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4522 trace_f2fs_dataread_end(inode, pos, ret);
4529 struct inode *inode = file_inode(file);
4533 if (IS_IMMUTABLE(inode))
4536 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4553 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4559 struct inode *inode = file_inode(iocb->ki_filp);
4560 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4574 if (dio && i_size_read(inode) &&
4575 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4586 if (f2fs_has_inline_data(inode)) {
4588 if (pos + count <= MAX_INLINE_DATA(inode))
4590 ret = f2fs_convert_inline_inode(inode);
4604 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4611 ret = f2fs_map_blocks(inode, &map, flag);
4616 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4624 struct inode *inode = file_inode(file);
4633 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4673 struct inode *inode = file_inode(file);
4674 struct f2fs_inode_info *fi = F2FS_I(inode);
4675 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4683 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4687 if (f2fs_has_inline_data(inode) ||
4688 !f2fs_overwrite_io(inode, pos, count)) {
4703 ret = f2fs_convert_inline_inode(inode);
4719 if (pos + count > inode->i_size)
4739 if (pos + ret > inode->i_size)
4740 f2fs_i_size_write(inode, pos + ret);
4742 set_inode_flag(inode, FI_UPDATE_WRITE);
4755 f2fs_write_failed(inode, iocb->ki_pos);
4777 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4783 struct inode *inode = file_inode(iocb->ki_filp);
4792 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4797 if (!f2fs_is_compress_backend_ready(inode)) {
4803 if (!inode_trylock(inode)) {
4808 inode_lock(inode);
4816 dio = f2fs_should_use_dio(inode, iocb, from);
4834 trace_f2fs_datawrite_end(inode, orig_pos, ret);
4838 if (preallocated && i_size_read(inode) < target_size) {
4839 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4840 filemap_invalidate_lock(inode->i_mapping);
4841 if (!f2fs_truncate(inode))
4842 file_dont_truncate(inode);
4843 filemap_invalidate_unlock(inode->i_mapping);
4844 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4846 file_dont_truncate(inode);
4849 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4851 inode_unlock(inode);
4853 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
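
The write epilogue (4838–4853) is where FI_PREALLOCATED_ALL pays off: if the write stopped short of the preallocated target size, the surplus blocks are trimmed by f2fs_truncate() under the same two locks the truncate paths take; otherwise truncation is suppressed. Directly from the fragments:

    if (preallocated && i_size_read(inode) < target_size) {
        f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        filemap_invalidate_lock(inode->i_mapping);
        if (!f2fs_truncate(inode))
            file_dont_truncate(inode);
        filemap_invalidate_unlock(inode->i_mapping);
        f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
    } else {
        file_dont_truncate(inode);
    }

    clear_inode_flag(inode, FI_PREALLOCATED_ALL);
    inode_unlock(inode);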
4874 struct inode *inode = file_inode(filp);
4878 if (S_ISFIFO(inode->i_mode))
4887 F2FS_I_SB(inode)->seq_file_ra_mul;
4896 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4897 f2fs_compressed_file(inode))
4898 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);