Lines matching defs:inode (symbol search over f2fs's file operations, fs/f2fs/file.c; each match is prefixed with its line number in the source file)
38 struct inode *inode = file_inode(vmf->vma->vm_file);
41 down_read(&F2FS_I(inode)->i_mmap_sem);
43 up_read(&F2FS_I(inode)->i_mmap_sem);
46 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
57 struct inode *inode = file_inode(vmf->vma->vm_file);
58 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
63 if (unlikely(IS_IMMUTABLE(inode)))
77 if (f2fs_compressed_file(inode)) {
78 int ret = f2fs_is_compressed_cluster(inode, page->index);
84 if (ret < F2FS_I(inode)->i_cluster_size) {
96 sb_start_pagefault(inode->i_sb);
98 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
101 down_read(&F2FS_I(inode)->i_mmap_sem);
103 if (unlikely(page->mapping != inode->i_mapping ||
104 page_offset(page) > i_size_read(inode) ||
114 set_new_dnode(&dn, inode, NULL, NULL, 0);
122 set_new_dnode(&dn, inode, NULL, NULL, 0);
135 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
145 i_size_read(inode)) {
148 offset = i_size_read(inode) & ~PAGE_MASK;
160 up_read(&F2FS_I(inode)->i_mmap_sem);
162 sb_end_pagefault(inode->i_sb);
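
The matches above cover f2fs's fault handlers: the read path wraps filemap_fault in i_mmap_sem, and f2fs_vm_page_mkwrite re-validates the faulted page against the mapping and i_size under the same lock, bracketed by sb_start_pagefault()/sb_end_pagefault(). A minimal userspace sketch that drives the write-fault path (the file path is hypothetical; any shared-writable mapping on an f2fs mount works):

    /* Dirtying a MAP_SHARED page forces the filesystem's ->page_mkwrite
     * handler (f2fs_vm_page_mkwrite on f2fs) to run on the first store. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <file on an f2fs mount>\n", argv[0]);
            return 1;
        }
        int fd = open(argv[1], O_RDWR | O_CREAT, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (ftruncate(fd, 4096) < 0) { perror("ftruncate"); return 1; }

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        p[0] = 'x';               /* write fault: kernel calls ->page_mkwrite */
        msync(p, 4096, MS_SYNC);  /* write the dirtied page back */
        munmap(p, 4096);
        close(fd);
        return 0;
    }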
173 static int get_parent_ino(struct inode *inode, nid_t *pino)
181 dentry = d_find_alias(inode);
190 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
195 if (!S_ISREG(inode->i_mode))
197 else if (f2fs_compressed_file(inode))
199 else if (inode->i_nlink != 1)
203 else if (file_wrong_pino(inode))
207 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
214 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
215 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
226 /* But we need to avoid that there are some inode updates */
233 static void try_to_fix_pino(struct inode *inode)
235 struct f2fs_inode_info *fi = F2FS_I(inode);
239 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
240 get_parent_ino(inode, &pino)) {
241 f2fs_i_pino_write(inode, pino);
242 file_got_pino(inode);
250 struct inode *inode = file->f_mapping->host;
251 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
252 nid_t ino = inode->i_ino;
262 if (unlikely(f2fs_readonly(inode->i_sb)))
265 trace_f2fs_sync_file_enter(inode);
267 if (S_ISDIR(inode->i_mode))
271 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
272 set_inode_flag(inode, FI_NEED_IPU);
274 clear_inode_flag(inode, FI_NEED_IPU);
277 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
281 /* if the inode is dirty, let's recover all the time */
282 if (!f2fs_skip_inode_update(inode, datasync)) {
283 f2fs_write_inode(inode, NULL);
290 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
297 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
307 down_read(&F2FS_I(inode)->i_sem);
308 cp_reason = need_do_checkpoint(inode);
309 up_read(&F2FS_I(inode)->i_sem);
313 ret = f2fs_sync_fs(inode->i_sb, 1);
319 try_to_fix_pino(inode);
320 clear_inode_flag(inode, FI_APPEND_WRITE);
321 clear_inode_flag(inode, FI_UPDATE_WRITE);
326 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
338 f2fs_mark_inode_dirty_sync(inode, true);
339 f2fs_write_inode(inode, NULL);
359 clear_inode_flag(inode, FI_APPEND_WRITE);
362 ret = f2fs_issue_flush(sbi, inode->i_ino);
365 clear_inode_flag(inode, FI_UPDATE_WRITE);
370 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
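
The f2fs_sync_file matches show a two-tier design: if need_do_checkpoint() reports a reason (non-regular file, hard link, wrong parent ino, and so on), the sync falls back to a full checkpoint through f2fs_sync_fs(); otherwise it writes the inode's node pages and issues a device flush, which is enough for roll-forward recovery. From userspace, the datasync argument maps onto fdatasync() versus fsync(); a small sketch, with a hypothetical file name:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (write(fd, "data", 4) != 4) { perror("write"); return 1; }
        if (fdatasync(fd) < 0)  /* datasync=1: data plus minimal metadata */
            perror("fdatasync");
        if (fsync(fd) < 0)      /* datasync=0: may require a checkpoint */
            perror("fsync");
        close(fd);
        return 0;
    }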
403 struct inode *inode = file->f_mapping->host;
404 loff_t maxbytes = inode->i_sb->s_maxbytes;
411 inode_lock(inode);
413 isize = i_size_read(inode);
418 if (f2fs_has_inline_data(inode)) {
431 set_new_dnode(&dn, inode, NULL, NULL, 0);
445 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
456 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
476 inode_unlock(inode);
479 inode_unlock(inode);
485 struct inode *inode = file->f_mapping->host;
486 loff_t maxbytes = inode->i_sb->s_maxbytes;
493 maxbytes, i_size_read(inode));
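
The llseek matches walk the inode's dnode pages and probe each block address with f2fs_is_valid_blkaddr(), clamping the result to s_maxbytes; this is what serves SEEK_DATA and SEEK_HOLE. A quick way to exercise it from userspace (pass any sparse file; _GNU_SOURCE is needed for the SEEK_DATA/SEEK_HOLE constants):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) { fprintf(stderr, "usage: %s <file>\n", argv[0]); return 1; }
        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }
        off_t data = lseek(fd, 0, SEEK_DATA);  /* first block holding data */
        off_t hole = lseek(fd, 0, SEEK_HOLE);  /* first hole (EOF if none) */
        printf("first data at %lld, first hole at %lld\n",
               (long long)data, (long long)hole);
        close(fd);
        return 0;
    }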
506 struct inode *inode = file_inode(file);
509 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
512 if (!f2fs_is_compress_backend_ready(inode))
516 err = f2fs_convert_inline_inode(inode);
522 set_inode_flag(inode, FI_MMAP_FILE);
526 static int f2fs_file_open(struct inode *inode, struct file *filp)
528 int err = fscrypt_file_open(inode, filp);
533 if (!f2fs_is_compress_backend_ready(inode))
536 err = fsverity_file_open(inode, filp);
542 return dquot_file_open(inode, filp);
547 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
554 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
555 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
557 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
558 base = get_extra_isize(dn->inode);
567 if (f2fs_compressed_file(dn->inode) &&
570 f2fs_i_compr_blocks_update(dn->inode,
591 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
600 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
609 dn->inode) + ofs;
611 dec_valid_block_count(sbi, dn->inode, nr_free);
616 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
622 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
625 static int truncate_partial_data_page(struct inode *inode, u64 from,
630 struct address_space *mapping = inode->i_mapping;
644 page = f2fs_get_lock_data_page(inode, index, true);
651 /* An encrypted inode should have a key and truncate the last page. */
652 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
659 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
661 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
668 trace_f2fs_truncate_blocks_enter(inode, from);
678 ipage = f2fs_get_node_page(sbi, inode->i_ino);
684 if (f2fs_has_inline_data(inode)) {
685 f2fs_truncate_inline_inode(inode, ipage, from);
691 set_new_dnode(&dn, inode, ipage, NULL, 0);
699 count = ADDRS_PER_PAGE(dn.node_page, inode);
711 err = f2fs_truncate_inode_blocks(inode, free_from);
718 err = truncate_partial_data_page(inode, from, truncate_page);
720 trace_f2fs_truncate_blocks_exit(inode, err);
724 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
734 if (f2fs_compressed_file(inode))
736 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
739 err = f2fs_do_truncate_blocks(inode, free_from, lock);
745 err = f2fs_truncate_partial_cluster(inode, from, lock);
754 int f2fs_truncate(struct inode *inode)
758 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
761 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
762 S_ISLNK(inode->i_mode)))
765 trace_f2fs_truncate(inode);
767 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
768 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
772 err = dquot_initialize(inode);
777 if (!f2fs_may_inline_data(inode)) {
778 err = f2fs_convert_inline_inode(inode);
783 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
787 inode->i_mtime = inode->i_ctime = current_time(inode);
788 f2fs_mark_inode_dirty_sync(inode, false);
795 struct inode *inode = d_inode(path->dentry);
796 struct f2fs_inode_info *fi = F2FS_I(inode);
800 if (f2fs_has_extra_attr(inode) &&
801 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
813 if (IS_ENCRYPTED(inode))
819 if (IS_VERITY(inode))
829 generic_fillattr(inode, stat);
832 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
833 f2fs_has_inline_dentry(inode))
840 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
845 inode->i_uid = attr->ia_uid;
847 inode->i_gid = attr->ia_gid;
849 inode->i_atime = attr->ia_atime;
851 inode->i_mtime = attr->ia_mtime;
853 inode->i_ctime = attr->ia_ctime;
857 if (!in_group_p(inode->i_gid) &&
858 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
860 set_acl_inode(inode, mode);
869 struct inode *inode = d_inode(dentry);
872 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
875 if (unlikely(IS_IMMUTABLE(inode)))
878 if (unlikely(IS_APPEND(inode) &&
884 !f2fs_is_compress_backend_ready(inode))
899 if (is_quota_modification(inode, attr)) {
900 err = dquot_initialize(inode);
905 !uid_eq(attr->ia_uid, inode->i_uid)) ||
907 !gid_eq(attr->ia_gid, inode->i_gid))) {
908 f2fs_lock_op(F2FS_I_SB(inode));
909 err = dquot_transfer(inode, attr);
911 set_sbi_flag(F2FS_I_SB(inode),
913 f2fs_unlock_op(F2FS_I_SB(inode));
917 * update uid/gid under lock_op(), so that dquot and inode can
921 inode->i_uid = attr->ia_uid;
923 inode->i_gid = attr->ia_gid;
924 f2fs_mark_inode_dirty_sync(inode, true);
925 f2fs_unlock_op(F2FS_I_SB(inode));
929 loff_t old_size = i_size_read(inode);
931 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
933 * should convert inline inode before i_size_write to
936 err = f2fs_convert_inline_inode(inode);
941 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
942 down_write(&F2FS_I(inode)->i_mmap_sem);
944 truncate_setsize(inode, attr->ia_size);
947 err = f2fs_truncate(inode);
952 up_write(&F2FS_I(inode)->i_mmap_sem);
953 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
957 spin_lock(&F2FS_I(inode)->i_size_lock);
958 inode->i_mtime = inode->i_ctime = current_time(inode);
959 F2FS_I(inode)->last_disk_size = i_size_read(inode);
960 spin_unlock(&F2FS_I(inode)->i_size_lock);
963 __setattr_copy(inode, attr);
966 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
967 if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
968 inode->i_mode = F2FS_I(inode)->i_acl_mode;
969 clear_inode_flag(inode, FI_ACL_MODE);
974 f2fs_mark_inode_dirty_sync(inode, true);
976 /* inode change will produce dirty node pages flushed by checkpoint */
977 f2fs_balance_fs(F2FS_I_SB(inode), true);
991 static int fill_zero(struct inode *inode, pgoff_t index,
994 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1003 page = f2fs_get_new_data_page(inode, NULL, index, false);
1016 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1024 set_new_dnode(&dn, inode, NULL, NULL, 0);
1035 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1038 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1048 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1054 ret = f2fs_convert_inline_inode(inode);
1065 ret = fill_zero(inode, pg_start, off_start,
1071 ret = fill_zero(inode, pg_start++, off_start,
1077 ret = fill_zero(inode, pg_end, 0, off_end);
1084 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1091 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1092 down_write(&F2FS_I(inode)->i_mmap_sem);
1094 truncate_pagecache_range(inode, blk_start, blk_end - 1);
1097 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1100 up_write(&F2FS_I(inode)->i_mmap_sem);
1101 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1108 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1111 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1116 set_new_dnode(&dn, inode, NULL, NULL, 0);
1123 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1130 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1163 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1166 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1174 set_new_dnode(&dn, inode, NULL, NULL, 0);
1177 dec_valid_block_count(sbi, inode, 1);
1187 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1271 static int __exchange_data_block(struct inode *src_inode,
1272 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1323 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1325 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1326 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1334 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1335 down_write(&F2FS_I(inode)->i_mmap_sem);
1338 f2fs_drop_extent_tree(inode);
1339 truncate_pagecache(inode, offset);
1340 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1343 up_write(&F2FS_I(inode)->i_mmap_sem);
1344 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1348 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1353 if (offset + len >= i_size_read(inode))
1360 ret = f2fs_convert_inline_inode(inode);
1365 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1369 ret = f2fs_do_collapse(inode, offset, len);
1374 down_write(&F2FS_I(inode)->i_mmap_sem);
1375 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1376 truncate_pagecache(inode, offset);
1378 new_size = i_size_read(inode) - len;
1379 ret = f2fs_truncate_blocks(inode, new_size, true);
1380 up_write(&F2FS_I(inode)->i_mmap_sem);
1382 f2fs_i_size_write(inode, new_size);
1389 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1436 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1439 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1440 struct address_space *mapping = inode->i_mapping;
1442 loff_t new_size = i_size_read(inode);
1446 ret = inode_newsize_ok(inode, (len + offset));
1450 ret = f2fs_convert_inline_inode(inode);
1465 ret = fill_zero(inode, pg_start, off_start,
1473 ret = fill_zero(inode, pg_start++, off_start,
1487 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1488 down_write(&F2FS_I(inode)->i_mmap_sem);
1490 truncate_pagecache_range(inode,
1496 set_new_dnode(&dn, inode, NULL, NULL, 0);
1500 up_write(&F2FS_I(inode)->i_mmap_sem);
1501 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1505 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1512 up_write(&F2FS_I(inode)->i_mmap_sem);
1513 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1526 ret = fill_zero(inode, pg_end, 0, off_end);
1535 if (new_size > i_size_read(inode)) {
1537 file_set_keep_isize(inode);
1539 f2fs_i_size_write(inode, new_size);
1544 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1546 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1551 new_size = i_size_read(inode) + len;
1552 ret = inode_newsize_ok(inode, new_size);
1556 if (offset >= i_size_read(inode))
1563 ret = f2fs_convert_inline_inode(inode);
1569 down_write(&F2FS_I(inode)->i_mmap_sem);
1570 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1571 up_write(&F2FS_I(inode)->i_mmap_sem);
1576 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1583 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1586 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1587 down_write(&F2FS_I(inode)->i_mmap_sem);
1588 truncate_pagecache(inode, offset);
1597 f2fs_drop_extent_tree(inode);
1599 ret = __exchange_data_block(inode, inode, idx,
1603 up_write(&F2FS_I(inode)->i_mmap_sem);
1604 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1607 down_write(&F2FS_I(inode)->i_mmap_sem);
1608 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1609 truncate_pagecache(inode, offset);
1610 up_write(&F2FS_I(inode)->i_mmap_sem);
1613 f2fs_i_size_write(inode, new_size);
1617 static int expand_inode_data(struct inode *inode, loff_t offset,
1620 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1625 loff_t new_size = i_size_read(inode);
1630 err = inode_newsize_ok(inode, (len + offset));
1634 err = f2fs_convert_inline_inode(inode);
1652 if (f2fs_is_pinned_file(inode)) {
1673 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1685 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1704 if (new_size > i_size_read(inode)) {
1706 file_set_keep_isize(inode);
1708 f2fs_i_size_write(inode, new_size);
1717 struct inode *inode = file_inode(file);
1720 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1722 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1724 if (!f2fs_is_compress_backend_ready(inode))
1728 if (!S_ISREG(inode->i_mode))
1731 if (IS_ENCRYPTED(inode) &&
1735 if (f2fs_compressed_file(inode) &&
1745 inode_lock(inode);
1752 if (offset >= inode->i_size)
1755 ret = punch_hole(inode, offset, len);
1757 ret = f2fs_collapse_range(inode, offset, len);
1759 ret = f2fs_zero_range(inode, offset, len, mode);
1761 ret = f2fs_insert_range(inode, offset, len);
1763 ret = expand_inode_data(inode, offset, len, mode);
1767 inode->i_mtime = inode->i_ctime = current_time(inode);
1768 f2fs_mark_inode_dirty_sync(inode, false);
1769 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1773 inode_unlock(inode);
1775 trace_f2fs_fallocate(inode, mode, offset, len, ret);
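
f2fs_fallocate() dispatches on mode to the helpers matched above: punch_hole(), f2fs_collapse_range(), f2fs_zero_range(), f2fs_insert_range(), and expand_inode_data(). A hedged userspace sketch touching each path (hypothetical file name; 64 KiB offsets chosen because collapse and insert require block-aligned ranges, assuming 4 KiB blocks):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_CREAT | O_RDWR | O_TRUNC, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (ftruncate(fd, 1 << 20) < 0) { perror("ftruncate"); return 1; }

        /* punch_hole(): deallocate 64 KiB without changing i_size */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 65536) < 0)
            perror("punch hole");
        /* f2fs_zero_range(): zero the range, allocating blocks as needed */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 65536, 65536) < 0)
            perror("zero range");
        /* f2fs_collapse_range(): remove the range and shift the tail down */
        if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 65536) < 0)
            perror("collapse range");
        /* f2fs_insert_range(): shift the tail up, opening a hole */
        if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 0, 65536) < 0)
            perror("insert range");
        /* expand_inode_data(): preallocate past EOF, keeping i_size */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 65536) < 0)
            perror("expand");
        close(fd);
        return 0;
    }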
1779 static int f2fs_release_file(struct inode *inode, struct file *filp)
1786 atomic_read(&inode->i_writecount) != 1)
1790 if (f2fs_is_atomic_file(inode))
1791 f2fs_drop_inmem_pages(inode);
1792 if (f2fs_is_volatile_file(inode)) {
1793 set_inode_flag(inode, FI_DROP_CACHE);
1794 filemap_fdatawrite(inode->i_mapping);
1795 clear_inode_flag(inode, FI_DROP_CACHE);
1796 clear_inode_flag(inode, FI_VOLATILE_FILE);
1797 stat_dec_volatile_write(inode);
1804 struct inode *inode = file_inode(file);
1812 if (f2fs_is_atomic_file(inode) &&
1813 F2FS_I(inode)->inmem_task == current)
1814 f2fs_drop_inmem_pages(inode);
1818 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1820 struct f2fs_inode_info *fi = F2FS_I(inode);
1823 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1826 if (IS_NOQUOTA(inode))
1830 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1832 if (!f2fs_empty_dir(inode))
1837 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1845 if (!f2fs_disable_compressed_file(inode))
1848 if (!f2fs_may_compress(inode))
1850 if (S_ISREG(inode->i_mode) && inode->i_size)
1853 set_compress_context(inode);
1858 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1862 set_inode_flag(inode, FI_PROJ_INHERIT);
1864 clear_inode_flag(inode, FI_PROJ_INHERIT);
1866 inode->i_ctime = current_time(inode);
1867 f2fs_set_inode_flags(inode);
1868 f2fs_mark_inode_dirty_sync(inode, true);
1955 struct inode *inode = file_inode(filp);
1956 struct f2fs_inode_info *fi = F2FS_I(inode);
1959 if (IS_ENCRYPTED(inode))
1961 if (IS_VERITY(inode))
1963 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1965 if (is_inode_flag_set(inode, FI_PIN_FILE))
1975 struct inode *inode = file_inode(filp);
1976 struct f2fs_inode_info *fi = F2FS_I(inode);
1981 if (!inode_owner_or_capable(inode))
1992 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1999 inode_lock(inode);
2002 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2006 ret = f2fs_setflags_common(inode, iflags,
2009 inode_unlock(inode);
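
Both the FS_IOC_SETFLAGS path above and the fssetxattr path further down funnel into f2fs_setflags_common(), which validates the request (casefold needs the feature and an empty directory, compression needs the feature and an empty regular file) before updating i_flags. The generic chattr-style interface from userspace:

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) { fprintf(stderr, "usage: %s <file>\n", argv[0]); return 1; }
        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }
        int flags;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) { perror("GETFLAGS"); return 1; }
        flags |= FS_NOATIME_FL;  /* one of the flags f2fs accepts */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
            perror("SETFLAGS");
        close(fd);
        return 0;
    }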
2016 struct inode *inode = file_inode(filp);
2018 return put_user(inode->i_generation, (int __user *)arg);
2023 struct inode *inode = file_inode(filp);
2024 struct f2fs_inode_info *fi = F2FS_I(inode);
2025 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2028 if (!inode_owner_or_capable(inode))
2031 if (!S_ISREG(inode->i_mode))
2041 inode_lock(inode);
2043 if (!f2fs_disable_compressed_file(inode)) {
2048 if (f2fs_is_atomic_file(inode)) {
2049 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2054 ret = f2fs_convert_inline_inode(inode);
2058 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2064 if (get_dirty_pages(inode))
2065 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2066 inode->i_ino, get_dirty_pages(inode));
2067 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2069 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2079 /* add inode in inmem_list first and set atomic_file */
2080 set_inode_flag(inode, FI_ATOMIC_FILE);
2081 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2082 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2084 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2085 F2FS_I(inode)->inmem_task = current;
2086 stat_update_max_atomic_write(inode);
2088 inode_unlock(inode);
2095 struct inode *inode = file_inode(filp);
2098 if (!inode_owner_or_capable(inode))
2105 f2fs_balance_fs(F2FS_I_SB(inode), true);
2107 inode_lock(inode);
2109 if (f2fs_is_volatile_file(inode)) {
2114 if (f2fs_is_atomic_file(inode)) {
2115 ret = f2fs_commit_inmem_pages(inode);
2121 f2fs_drop_inmem_pages(inode);
2126 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2127 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2130 inode_unlock(inode);
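
The atomic-write ioctls pair up: f2fs_ioc_start_atomic_write() converts any inline data, flushes existing dirty pages under i_gc_rwsem[WRITE], and sets FI_ATOMIC_FILE so subsequent writes are staged in memory; f2fs_ioc_commit_atomic_write() then commits the staged pages as one unit (or drops them on revoke). A sketch of the userspace handshake, with the ioctl numbers hand-defined to mirror f2fs's 0xf5 magic (treat them as assumptions, and prefer <linux/f2fs.h> where available), and a hypothetical file name:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* f2fs private ioctls; values assumed to match the kernel's definitions */
    #define F2FS_IOC_START_ATOMIC_WRITE  _IO(0xf5, 1)
    #define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(0xf5, 2)

    int main(void)
    {
        int fd = open("db-file", O_CREAT | O_RDWR, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0) { perror("start"); return 1; }
        if (write(fd, "all-or-nothing", 14) != 14)
            perror("write");
        if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)  /* commit staged pages */
            perror("commit");
        close(fd);
        return 0;
    }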
2137 struct inode *inode = file_inode(filp);
2140 if (!inode_owner_or_capable(inode))
2143 if (!S_ISREG(inode->i_mode))
2150 inode_lock(inode);
2152 if (f2fs_is_volatile_file(inode))
2155 ret = f2fs_convert_inline_inode(inode);
2159 stat_inc_volatile_write(inode);
2160 stat_update_max_volatile_write(inode);
2162 set_inode_flag(inode, FI_VOLATILE_FILE);
2163 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2165 inode_unlock(inode);
2172 struct inode *inode = file_inode(filp);
2175 if (!inode_owner_or_capable(inode))
2182 inode_lock(inode);
2184 if (!f2fs_is_volatile_file(inode))
2187 if (!f2fs_is_first_block_written(inode)) {
2188 ret = truncate_partial_data_page(inode, 0, true);
2192 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2194 inode_unlock(inode);
2201 struct inode *inode = file_inode(filp);
2204 if (!inode_owner_or_capable(inode))
2211 inode_lock(inode);
2213 if (f2fs_is_atomic_file(inode))
2214 f2fs_drop_inmem_pages(inode);
2215 if (f2fs_is_volatile_file(inode)) {
2216 clear_inode_flag(inode, FI_VOLATILE_FILE);
2217 stat_dec_volatile_write(inode);
2221 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2223 inode_unlock(inode);
2226 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2232 struct inode *inode = file_inode(filp);
2233 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2317 struct inode *inode = file_inode(filp);
2318 struct super_block *sb = inode->i_sb;
2347 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2363 struct inode *inode = file_inode(filp);
2365 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2368 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2382 struct inode *inode = file_inode(filp);
2383 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2470 struct inode *inode = file_inode(filp);
2471 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2560 struct inode *inode = file_inode(filp);
2561 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2589 struct inode *inode = file_inode(filp);
2602 if (f2fs_should_update_inplace(inode, NULL))
2610 inode_lock(inode);
2613 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2622 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2637 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2684 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2693 set_inode_flag(inode, FI_DO_DEFRAG);
2699 page = f2fs_get_lock_data_page(inode, idx, true);
2718 clear_inode_flag(inode, FI_DO_DEFRAG);
2720 err = filemap_fdatawrite(inode->i_mapping);
2725 clear_inode_flag(inode, FI_DO_DEFRAG);
2727 inode_unlock(inode);
2735 struct inode *inode = file_inode(filp);
2736 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2743 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2782 struct inode *src = file_inode(file_in);
2783 struct inode *dst = file_inode(file_out);
2943 struct inode *inode = file_inode(filp);
2944 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3006 struct inode *inode = file_inode(filp);
3007 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3016 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3019 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3027 err = __dquot_transfer(inode, transfer_to);
3036 struct inode *inode = file_inode(filp);
3037 struct f2fs_inode_info *fi = F2FS_I(inode);
3038 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3050 if (!f2fs_has_extra_attr(inode))
3055 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3060 if (IS_NOQUOTA(inode))
3063 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3075 err = dquot_initialize(inode);
3080 err = f2fs_transfer_project_quota(inode, kprojid);
3084 F2FS_I(inode)->i_projid = kprojid;
3085 inode->i_ctime = current_time(inode);
3086 f2fs_mark_inode_dirty_sync(inode, true);
3092 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3159 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3161 struct f2fs_inode_info *fi = F2FS_I(inode);
3165 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3171 struct inode *inode = file_inode(filp);
3174 f2fs_fill_fsxattr(inode, &fa);
3183 struct inode *inode = file_inode(filp);
3192 if (!inode_owner_or_capable(inode))
3199 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3206 inode_lock(inode);
3208 f2fs_fill_fsxattr(inode, &old_fa);
3209 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3213 err = f2fs_setflags_common(inode, iflags,
3220 inode_unlock(inode);
3225 int f2fs_pin_file_control(struct inode *inode, bool inc)
3227 struct f2fs_inode_info *fi = F2FS_I(inode);
3228 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3232 f2fs_i_gc_failures_write(inode,
3237 __func__, inode->i_ino,
3239 clear_inode_flag(inode, FI_PIN_FILE);
3247 struct inode *inode = file_inode(filp);
3254 if (!S_ISREG(inode->i_mode))
3257 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3264 inode_lock(inode);
3266 if (f2fs_should_update_outplace(inode, NULL)) {
3272 clear_inode_flag(inode, FI_PIN_FILE);
3273 f2fs_i_gc_failures_write(inode, 0);
3277 if (f2fs_pin_file_control(inode, false)) {
3282 ret = f2fs_convert_inline_inode(inode);
3286 if (!f2fs_disable_compressed_file(inode)) {
3291 set_inode_flag(inode, FI_PIN_FILE);
3292 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3294 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3296 inode_unlock(inode);
3303 struct inode *inode = file_inode(filp);
3306 if (is_inode_flag_set(inode, FI_PIN_FILE))
3307 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3311 int f2fs_precache_extents(struct inode *inode)
3313 struct f2fs_inode_info *fi = F2FS_I(inode);
3319 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3328 end = F2FS_I_SB(inode)->max_file_blocks;
3334 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3370 struct inode *inode = file_inode(filp);
3372 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3374 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3375 f2fs_warn(F2FS_I_SB(inode),
3376 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3377 inode->i_ino);
3413 struct inode *inode = file_inode(filp);
3414 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3439 struct inode *inode = file_inode(filp);
3440 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3475 struct inode *inode = file_inode(filp);
3478 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3481 if (!f2fs_compressed_file(inode))
3484 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3490 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3492 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3497 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3530 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3531 dec_valid_block_count(sbi, dn->inode,
3544 struct inode *inode = file_inode(filp);
3545 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3551 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3554 if (!f2fs_compressed_file(inode))
3564 f2fs_balance_fs(F2FS_I_SB(inode), true);
3566 inode_lock(inode);
3568 writecount = atomic_read(&inode->i_writecount);
3575 if (IS_IMMUTABLE(inode)) {
3580 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3584 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3585 f2fs_set_inode_flags(inode);
3586 inode->i_ctime = current_time(inode);
3587 f2fs_mark_inode_dirty_sync(inode, true);
3589 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3592 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3593 down_write(&F2FS_I(inode)->i_mmap_sem);
3595 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3603 set_new_dnode(&dn, inode, NULL, NULL, 0);
3616 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3618 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3633 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3634 up_write(&F2FS_I(inode)->i_mmap_sem);
3636 inode_unlock(inode);
3643 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3648 __func__, inode->i_ino, inode->i_blocks,
3650 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3659 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3660 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3665 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3700 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3707 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3719 struct inode *inode = file_inode(filp);
3720 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3725 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3728 if (!f2fs_compressed_file(inode))
3738 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3741 f2fs_balance_fs(F2FS_I_SB(inode), true);
3743 inode_lock(inode);
3745 if (!IS_IMMUTABLE(inode)) {
3750 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3751 down_write(&F2FS_I(inode)->i_mmap_sem);
3753 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3761 set_new_dnode(&dn, inode, NULL, NULL, 0);
3774 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3776 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3790 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3791 up_write(&F2FS_I(inode)->i_mmap_sem);
3794 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3795 f2fs_set_inode_flags(inode);
3796 inode->i_ctime = current_time(inode);
3797 f2fs_mark_inode_dirty_sync(inode, true);
3800 inode_unlock(inode);
3807 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3812 __func__, inode->i_ino, inode->i_blocks,
3814 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3820 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3837 if (IS_ENCRYPTED(inode))
3838 ret = fscrypt_zeroout_range(inode, off, block, len);
3849 struct inode *inode = file_inode(filp);
3850 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3851 struct address_space *mapping = inode->i_mapping;
3868 !S_ISREG(inode->i_mode))
3874 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3878 inode_lock(inode);
3880 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3881 range.start >= inode->i_size) {
3889 if (inode->i_size - range.start > range.len) {
3893 sbi->sb->s_maxbytes : inode->i_size;
3906 ret = f2fs_convert_inline_inode(inode);
3910 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3911 down_write(&F2FS_I(inode)->i_mmap_sem);
3926 set_new_dnode(&dn, inode, NULL, NULL, 0);
3936 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3966 inode, prev_index, prev_block,
3995 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3998 up_write(&F2FS_I(inode)->i_mmap_sem);
3999 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4001 inode_unlock(inode);
4110 struct inode *inode = file_inode(file);
4113 if (!f2fs_is_compress_backend_ready(inode))
4119 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4127 struct inode *inode = file_inode(file);
4130 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4135 if (!f2fs_is_compress_backend_ready(inode)) {
4141 if (!inode_trylock(inode)) {
4146 inode_lock(inode);
4149 if (unlikely(IS_IMMUTABLE(inode))) {
4161 set_inode_flag(inode, FI_NO_PREALLOC);
4164 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4166 f2fs_has_inline_data(inode) ||
4167 f2fs_force_buffered_io(inode, iocb, from)) {
4168 clear_inode_flag(inode, FI_NO_PREALLOC);
4169 inode_unlock(inode);
4176 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4184 err = f2fs_convert_inline_inode(inode);
4192 if (!f2fs_force_buffered_io(inode, iocb, from) &&
4193 allow_outplace_dio(inode, iocb, from))
4202 clear_inode_flag(inode, FI_NO_PREALLOC);
4203 inode_unlock(inode);
4209 clear_inode_flag(inode, FI_NO_PREALLOC);
4212 if (preallocated && i_size_read(inode) < target_size)
4213 f2fs_truncate(inode);
4216 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4219 inode_unlock(inode);
4221 trace_f2fs_file_write_iter(inode, iocb->ki_pos,