Lines matching defs:inode (fs/f2fs/data.c)
54 struct inode *inode;
60 inode = mapping->host;
61 sbi = F2FS_I_SB(inode);
63 if (inode->i_ino == F2FS_META_INO(sbi) ||
64 inode->i_ino == F2FS_NODE_INO(sbi) ||
65 S_ISDIR(inode->i_mode))
68 if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
79 struct inode *inode = mapping->host;
80 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
82 if (inode->i_ino == F2FS_META_INO(sbi))
85 if (inode->i_ino == F2FS_NODE_INO(sbi))
488 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
498 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
501 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
512 return fscrypt_mergeable_bio(bio, inode, next_idx);
592 static bool __has_merged_page(struct bio *bio, struct inode *inode,
601 if (!inode && !page && !ino)
618 if (inode && inode == target->mapping->host)
686 struct inode *inode, struct page *page,
698 ret = __has_merged_page(io->bio, inode, page, ino);
716 struct inode *inode, struct page *page,
719 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
1105 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1109 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1122 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1125 if (fscrypt_inode_uses_fs_layer_crypto(inode))
1128 if (f2fs_need_verity(inode, first_idx))
1138 if (post_read_steps || f2fs_compressed_file(inode)) {
1154 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1158 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1161 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1167 f2fs_wait_on_block_writeback(inode, blkaddr);
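The f2fs_grab_read_bio()/f2fs_submit_page_read() matches above (lines 1105-1167) show the read path deciding which post-read processing a bio needs: a decrypt step only when the inode uses fs-layer (not inline/hardware) crypto, a verity step when the first index needs verification, and a private context whenever any step is set or the file is compressed. A hedged model of that decision; the flag names are illustrative, not the kernel's enum:

#include <stdbool.h>

#define STEP_DECRYPT (1u << 0)
#define STEP_VERITY  (1u << 1)

/* Mirrors the checks visible at data.c lines 1125-1138. */
static unsigned int post_read_steps(bool fs_layer_crypto, bool need_verity)
{
    unsigned int steps = 0;

    if (fs_layer_crypto)   /* fscrypt_inode_uses_fs_layer_crypto(inode) */
        steps |= STEP_DECRYPT;
    if (need_verity)       /* f2fs_need_verity(inode, first_idx) */
        steps |= STEP_VERITY;
    return steps;
}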
1184 __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
1213 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1219 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1221 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1225 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1271 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1275 struct address_space *mapping = inode->i_mapping;
1284 if (f2fs_lookup_read_extent_cache_block(inode, index,
1286 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1289 f2fs_handle_error(F2FS_I_SB(inode),
1296 set_new_dnode(&dn, inode, NULL, NULL, 0);
1312 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1316 f2fs_handle_error(F2FS_I_SB(inode),
1328 * new inode page couldn't be allocated due to -ENOSPC.
1341 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1352 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
1355 struct address_space *mapping = inode->i_mapping;
1363 page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
1383 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1386 struct address_space *mapping = inode->i_mapping;
1389 page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
1411 struct page *f2fs_get_new_data_page(struct inode *inode,
1414 struct address_space *mapping = inode->i_mapping;
1429 set_new_dnode(&dn, inode, ipage, NULL, 0);
1449 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1450 page = f2fs_get_lock_data_page(inode, index, true);
1455 if (new_i_size && i_size_read(inode) <
1457 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1463 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1470 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1479 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1513 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1517 if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
1525 static int f2fs_map_no_dnode(struct inode *inode,
1529 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1547 static bool f2fs_map_blocks_cached(struct inode *inode,
1550 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1555 if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
1566 f2fs_wait_on_block_writeback_range(inode,
1577 map->m_bdev = inode->i_sb->s_bdev;
1587 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
1591 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1605 if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
1608 map->m_bdev = inode->i_sb->s_bdev;
1610 f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1624 set_new_dnode(&dn, inode, NULL, NULL, 0);
1630 err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
1637 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1670 file_need_truncate(inode);
1671 set_inode_flag(inode, FI_APPEND_WRITE);
1683 if (f2fs_compressed_file(inode) &&
1794 f2fs_wait_on_block_writeback_range(inode,
1806 f2fs_update_device_state(sbi, inode->i_ino,
1832 trace_f2fs_map_blocks(inode, map, flag, err);
1836 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1842 if (pos + len > i_size_read(inode))
1854 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1862 static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1864 return (bytes >> inode->i_blkbits);
1867 static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1869 return (blks << inode->i_blkbits);
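bytes_to_blks() and blks_to_bytes() are plain shifts by i_blkbits, so the byte-to-block direction truncates; callers that need a rounded-up count add blocksize - 1 first, as f2fs_read_single_page() does at line 2137. A self-contained demonstration, with struct inode reduced to the one field these helpers touch:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's struct inode; 12 => 4 KiB blocks. */
struct inode { unsigned int i_blkbits; };

static inline uint64_t bytes_to_blks(struct inode *inode, uint64_t bytes)
{
    return bytes >> inode->i_blkbits;  /* truncating division by block size */
}

static inline uint64_t blks_to_bytes(struct inode *inode, uint64_t blks)
{
    return blks << inode->i_blkbits;
}

int main(void)
{
    struct inode ino = { .i_blkbits = 12 };

    /* 8193 bytes still maps to block 2 because the shift truncates;
     * adding (blocksize - 1) first would round the count up instead. */
    printf("%llu\n", (unsigned long long)bytes_to_blks(&ino, 8193)); /* 2 */
    printf("%llu\n", (unsigned long long)blks_to_bytes(&ino, 3));    /* 12288 */
    return 0;
}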
1872 static int f2fs_xattr_fiemap(struct inode *inode,
1875 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1880 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1883 if (f2fs_has_inline_xattr(inode)) {
1887 inode->i_ino, false);
1891 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1897 phys = blks_to_bytes(inode, ni.blk_addr);
1900 get_inline_xattr_addrs(inode));
1903 len = inline_xattr_size(inode);
1913 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1929 phys = blks_to_bytes(inode, ni.blk_addr);
1930 len = inode->i_sb->s_blocksize;
1939 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1945 static loff_t max_inode_blocks(struct inode *inode)
1947 loff_t result = ADDRS_PER_INODE(inode);
1948 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
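max_inode_blocks() starts from ADDRS_PER_INODE() and ADDRS_PER_BLOCK(); the remainder of the function (not matched by this query) accumulates the two direct, two indirect and one double-indirect node blocks an f2fs inode can reference. A sketch of that geometry using typical 4 KiB-block constants, which are assumptions here since the real values depend on the inline-xattr reservation:

#include <stdio.h>

#define ADDRS_PER_INODE 923U   /* data addrs held directly in the inode (assumed) */
#define ADDRS_PER_BLOCK 1018U  /* data addrs per direct node block (assumed) */
#define NIDS_PER_BLOCK  1018U  /* node ids per indirect node block (assumed) */

int main(void)
{
    unsigned long long result = ADDRS_PER_INODE;
    unsigned long long leaf = ADDRS_PER_BLOCK;

    result += leaf * 2;        /* two direct node blocks */
    leaf *= NIDS_PER_BLOCK;
    result += leaf * 2;        /* two indirect node blocks */
    leaf *= NIDS_PER_BLOCK;
    result += leaf;            /* one double-indirect node block */

    /* ~1.06e9 blocks: just under 4 TiB at 4 KiB per block. */
    printf("max blocks per inode: %llu\n", result);
    return 0;
}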
1964 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1974 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1979 ret = f2fs_precache_extents(inode);
1984 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1988 inode_lock(inode);
1990 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
2000 ret = f2fs_xattr_fiemap(inode, fieinfo);
2004 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
2005 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
2010 if (bytes_to_blks(inode, len) == 0)
2011 len = blks_to_bytes(inode, 1);
2013 start_blk = bytes_to_blks(inode, start);
2014 last_blk = bytes_to_blks(inode, start + len - 1);
2019 map.m_len = bytes_to_blks(inode, len);
2028 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
2036 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
2037 max_inode_blocks(inode)))
2053 if (IS_ENCRYPTED(inode))
2058 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2074 size += blks_to_bytes(inode, appended_blks);
2078 logical = blks_to_bytes(inode, start_blk);
2080 blks_to_bytes(inode, map.m_pblk) : 0;
2081 size = blks_to_bytes(inode, map.m_len);
2089 size += blks_to_bytes(inode, 1);
2095 start_blk += bytes_to_blks(inode, size);
2108 inode_unlock(inode);
2112 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2114 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2115 return inode->i_sb->s_maxbytes;
2117 return i_size_read(inode);
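f2fs_readpage_limit() lets reads run past i_size only while fs-verity is enabled, because the verity metadata (the Merkle tree) is stored beyond EOF and must remain readable for verification; everything else is capped at the current i_size. A one-function model of the check visible at lines 2114-2117:

#include <stdbool.h>

static long long readpage_limit(bool is_verity, long long i_size,
                                long long s_maxbytes)
{
    /* Verity inodes: read up to s_maxbytes so the Merkle tree past EOF
     * is reachable; otherwise stop at i_size. */
    return is_verity ? s_maxbytes : i_size;
}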
2120 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2128 const unsigned blocksize = blks_to_bytes(inode, 1);
2137 last_block_in_file = bytes_to_blks(inode,
2138 f2fs_readpage_limit(inode) + blocksize - 1);
2160 ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
2168 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2171 f2fs_handle_error(F2FS_I_SB(inode),
2178 if (f2fs_need_verity(inode, page->index) &&
2193 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2195 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2197 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2201 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2215 f2fs_wait_on_block_writeback(inode, block_nr);
2220 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2221 f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2235 struct inode *inode = cc->inode;
2236 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2240 const unsigned blocksize = blks_to_bytes(inode, 1);
2249 last_block_in_file = bytes_to_blks(inode,
2250 f2fs_readpage_limit(inode) + blocksize - 1);
2276 if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
2282 set_new_dnode(&dn, inode, NULL, NULL, 0);
2297 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2331 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2335 f2fs_wait_on_block_writeback(inode, blkaddr);
2347 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2354 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2374 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
2403 static int f2fs_mpage_readpages(struct inode *inode,
2411 .inode = inode,
2412 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2413 .cluster_size = F2FS_I(inode)->i_cluster_size,
2442 if (f2fs_compressed_file(inode)) {
2459 ret = f2fs_is_compressed_cluster(inode, page->index);
2481 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2497 if (f2fs_compressed_file(inode)) {
2510 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2517 struct inode *inode = page_file_mapping(page)->host;
2522 if (!f2fs_is_compress_backend_ready(inode)) {
2528 if (f2fs_has_inline_data(inode))
2529 ret = f2fs_read_inline_data(inode, page);
2531 ret = f2fs_mpage_readpages(inode, NULL, page);
2537 struct inode *inode = rac->mapping->host;
2539 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2541 if (!f2fs_is_compress_backend_ready(inode))
2545 if (f2fs_has_inline_data(inode))
2548 f2fs_mpage_readpages(inode, rac, NULL);
2553 struct inode *inode = fio->page->mapping->host;
2557 if (!f2fs_encrypted_file(inode))
2562 if (fscrypt_inode_uses_inline_crypto(inode))
2589 static inline bool check_inplace_update_policy(struct inode *inode,
2592 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2595 is_inode_flag_set(inode, FI_OPU_WRITE))
2611 !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2615 if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2625 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2628 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2631 if (f2fs_is_pinned_file(inode))
2635 if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2638 return check_inplace_update_policy(inode, fio);
2641 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2643 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2646 if (f2fs_is_pinned_file(inode))
2652 if (S_ISDIR(inode->i_mode))
2654 if (IS_NOQUOTA(inode))
2656 if (f2fs_is_atomic_file(inode))
2660 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2663 if (is_inode_flag_set(inode, FI_OPU_WRITE))
2680 struct inode *inode = fio->page->mapping->host;
2682 if (f2fs_should_update_outplace(inode, fio))
2685 return f2fs_should_update_inplace(inode, fio);
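Together, f2fs_should_update_inplace() and f2fs_should_update_outplace() (lines 2589-2685) implement the IPU-versus-OPU choice: aligned writes force out-of-place, pinned files keep their blocks, directories, quota and atomic files always go out-of-place, and check_inplace_update_policy() consults the mount's IPU policy bits. A condensed decision model; the three-valued result and parameter names are illustrative, and the real code also weighs fsync state, utilization and request flags:

#include <stdbool.h>

enum update_mode { UPDATE_IPU, UPDATE_OPU, UPDATE_POLICY };

static enum update_mode choose_update(bool aligned_write, bool pinned,
                                      bool is_dir, bool is_quota,
                                      bool atomic_file, bool opu_flag)
{
    if (aligned_write || opu_flag)
        return UPDATE_OPU;      /* FI_ALIGNED_WRITE / FI_OPU_WRITE */
    if (pinned)
        return UPDATE_IPU;      /* pinned files must not move blocks */
    if (is_dir || is_quota || atomic_file)
        return UPDATE_OPU;
    return UPDATE_POLICY;       /* fall through to check_inplace_update_policy() */
}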
2691 struct inode *inode = page->mapping->host;
2697 /* Use COW inode to make dnode_of_data for atomic write */
2698 if (f2fs_is_atomic_file(inode))
2699 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2701 set_new_dnode(&dn, inode, NULL, NULL, 0);
2704 f2fs_lookup_read_extent_cache_block(inode, page->index,
2745 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2764 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2769 set_inode_flag(inode, FI_UPDATE_WRITE);
2796 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2801 set_inode_flag(inode, FI_APPEND_WRITE);
2818 struct inode *inode = page->mapping->host;
2819 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2820 loff_t i_size = i_size_read(inode);
2826 bool quota_inode = IS_NOQUOTA(inode);
2830 .ino = inode->i_ino,
2840 .post_read = f2fs_post_read_required(inode) ? 1 : 0,
2856 if (S_ISDIR(inode->i_mode) &&
2870 f2fs_verity_in_progress(inode) ||
2885 if (S_ISDIR(inode->i_mode) || quota_inode) {
2908 set_inode_flag(inode, FI_HOT_DATA);
2911 if (f2fs_has_inline_data(inode)) {
2912 err = f2fs_write_inline_data(inode, page);
2927 file_set_keep_isize(inode);
2929 spin_lock(&F2FS_I(inode)->i_size_lock);
2930 if (F2FS_I(inode)->last_disk_size < psize)
2931 F2FS_I(inode)->last_disk_size = psize;
2932 spin_unlock(&F2FS_I(inode)->i_size_lock);
2940 inode_dec_dirty_pages(inode);
2948 clear_inode_flag(inode, FI_HOT_DATA);
2949 f2fs_remove_dirty_inode(inode);
2953 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2954 !F2FS_I(inode)->wb_task && allow_balance)
2987 struct inode *inode = page->mapping->host;
2989 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2992 if (f2fs_compressed_file(inode)) {
2993 if (f2fs_is_compressed_cluster(inode, page->index)) {
3023 struct inode *inode = mapping->host;
3025 .inode = inode,
3026 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
3027 .cluster_size = F2FS_I(inode)->i_cluster_size,
3035 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
3052 if (f2fs_compressed_file(inode) &&
3123 if (f2fs_compressed_file(inode)) {
3154 inode, &pagep,
3161 (!f2fs_compress_write_end(inode,
3205 if (f2fs_compressed_file(inode)) {
3258 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3267 if (f2fs_compressed_file(inode))
3295 static inline bool __should_serialize_io(struct inode *inode,
3299 if (F2FS_I(inode)->wb_task)
3302 if (!S_ISREG(inode->i_mode))
3304 if (IS_NOQUOTA(inode))
3307 if (f2fs_need_compress_data(inode))
3311 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3320 struct inode *inode = mapping->host;
3321 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3330 /* skip writing if there is no dirty page in this inode */
3331 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3338 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3340 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3345 if (is_inode_flag_set(inode, FI_SKIP_WRITES))
3360 if (__should_serialize_io(inode, wbc)) {
3379 f2fs_remove_dirty_inode(inode);
3383 wbc->pages_skipped += get_dirty_pages(inode);
3391 struct inode *inode = mapping->host;
3394 F2FS_I(inode)->cp_task == current ?
3398 void f2fs_write_failed(struct inode *inode, loff_t to)
3400 loff_t i_size = i_size_read(inode);
3402 if (IS_NOQUOTA(inode))
3406 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3407 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3408 filemap_invalidate_lock(inode->i_mapping);
3410 truncate_pagecache(inode, i_size);
3411 f2fs_truncate_blocks(inode, i_size, true);
3413 filemap_invalidate_unlock(inode->i_mapping);
3414 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3422 struct inode *inode = page->mapping->host;
3434 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
3438 if (f2fs_has_inline_data(inode)) {
3439 if (pos + len > MAX_INLINE_DATA(inode))
3443 } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
3450 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3456 set_new_dnode(&dn, inode, ipage, ipage, 0);
3458 if (f2fs_has_inline_data(inode)) {
3459 if (pos + len <= MAX_INLINE_DATA(inode)) {
3461 set_inode_flag(inode, FI_DATA_EXIST);
3462 if (inode->i_nlink)
3471 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3501 static int __find_data_block(struct inode *inode, pgoff_t index,
3508 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
3512 set_new_dnode(&dn, inode, ipage, ipage, 0);
3514 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3528 static int __reserve_data_block(struct inode *inode, pgoff_t index,
3531 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3538 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3543 set_new_dnode(&dn, inode, ipage, ipage, 0);
3545 if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
3562 struct inode *inode = page->mapping->host;
3563 struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3568 /* If pos is beyond the end of file, reserve a new block in COW inode */
3569 if ((pos & PAGE_MASK) >= i_size_read(inode))
3572 /* Look for the block in COW inode first */
3581 if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
3584 /* Look for the block in the original inode */
3585 err = __find_data_block(inode, index, &ori_blk_addr);
3590 /* Finally, we should reserve a new block in COW inode for the update */
3594 inc_atomic_write_cnt(inode);
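The __find_data_block()/__reserve_data_block() matches above (lines 3501-3594) resolve where an atomic write's block comes from: positions past EOF get a fresh block in the COW inode, blocks already staged in the COW inode are reused, and otherwise the original inode is consulted unless FI_ATOMIC_REPLACE discards the old data entirely. A compact model of that flow, with illustrative names:

#include <stdbool.h>

enum atomic_src { SRC_NEW_COW_BLOCK, SRC_EXISTING_COW_BLOCK, SRC_COPY_FROM_ORIG };

/* Follows the decision order visible at data.c lines 3562-3594. */
static enum atomic_src atomic_block_source(bool pos_beyond_eof,
                                           bool block_in_cow_inode,
                                           bool atomic_replace,
                                           bool block_in_original)
{
    if (pos_beyond_eof)
        return SRC_NEW_COW_BLOCK;       /* reserve in the COW inode */
    if (block_in_cow_inode)
        return SRC_EXISTING_COW_BLOCK;  /* already staged, reuse */
    if (atomic_replace || !block_in_original)
        return SRC_NEW_COW_BLOCK;       /* nothing to carry over */
    return SRC_COPY_FROM_ORIG;          /* reserve, remembering ori_blk_addr */
}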
3604 struct inode *inode = mapping->host;
3605 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3613 trace_f2fs_write_begin(inode, pos, len);
3621 * We should check this at this moment to avoid deadlock on inode page
3626 err = f2fs_convert_inline_inode(inode);
3632 if (f2fs_compressed_file(inode)) {
3637 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3640 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3667 if (f2fs_is_atomic_file(inode))
3676 if (need_balance && !IS_NOQUOTA(inode) &&
3693 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3694 !f2fs_verity_in_progress(inode)) {
3710 F2FS_I(inode)->cow_inode : inode, page,
3729 f2fs_write_failed(inode, pos + len);
3738 struct inode *inode = page->mapping->host;
3740 trace_f2fs_write_end(inode, pos, len, copied);
3756 if (f2fs_compressed_file(inode) && fsdata) {
3757 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3758 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3760 if (pos + copied > i_size_read(inode) &&
3761 !f2fs_verity_in_progress(inode))
3762 f2fs_i_size_write(inode, pos + copied);
3772 if (pos + copied > i_size_read(inode) &&
3773 !f2fs_verity_in_progress(inode)) {
3774 f2fs_i_size_write(inode, pos + copied);
3775 if (f2fs_is_atomic_file(inode))
3776 f2fs_i_size_write(F2FS_I(inode)->cow_inode,
3781 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3787 struct inode *inode = folio->mapping->host;
3788 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3790 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3795 if (inode->i_ino == F2FS_META_INO(sbi)) {
3797 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3800 inode_dec_dirty_pages(inode);
3801 f2fs_remove_dirty_inode(inode);
3820 struct inode *inode = mapping->host;
3829 f2fs_update_dirty_folio(inode, folio);
3836 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3843 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3845 set_new_dnode(&dn, inode, NULL, NULL, 0);
3867 struct inode *inode = mapping->host;
3870 if (f2fs_has_inline_data(inode))
3878 if (unlikely(block >= max_file_blocks(inode)))
3881 if (f2fs_compressed_file(inode)) {
3882 blknr = f2fs_bmap_compress(inode, block);
3892 if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
3896 trace_f2fs_bmap(inode, block, blknr);
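f2fs_bmap() (lines 3860-3896) is the address-space ->bmap operation, which is what the long-standing FIBMAP ioctl ends up calling. A small userspace program showing the same logical-to-physical lookup from the other side of the boundary; FIBMAP typically requires root:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
    int blk = 0;  /* in: logical block 0; out: physical block number */
    int fd;

    if (argc < 2)
        return 1;
    fd = open(argv[1], O_RDONLY);
    if (fd < 0)
        return 1;
    if (ioctl(fd, FIBMAP, &blk) == 0)
        printf("physical block: %d\n", blk);  /* 0 => hole or unmapped */
    close(fd);
    return 0;
}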
3901 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3904 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3911 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3912 filemap_invalidate_lock(inode->i_mapping);
3914 set_inode_flag(inode, FI_ALIGNED_WRITE);
3915 set_inode_flag(inode, FI_OPU_WRITE);
3924 set_inode_flag(inode, FI_SKIP_WRITES);
3930 page = f2fs_get_lock_data_page(inode, blkidx, true);
3941 clear_inode_flag(inode, FI_SKIP_WRITES);
3943 ret = filemap_fdatawrite(inode->i_mapping);
3952 clear_inode_flag(inode, FI_SKIP_WRITES);
3953 clear_inode_flag(inode, FI_OPU_WRITE);
3954 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3956 filemap_invalidate_unlock(inode->i_mapping);
3957 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3966 struct inode *inode = mapping->host;
3967 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3985 last_lblock = bytes_to_blks(inode, i_size_read(inode));
4000 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
4029 ret = f2fs_migrate_blocks(inode, cur_lblock,
4072 struct inode *inode = file_inode(file);
4075 if (!S_ISREG(inode->i_mode))
4078 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4081 if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
4082 f2fs_err(F2FS_I_SB(inode),
4087 ret = f2fs_convert_inline_inode(inode);
4091 if (!f2fs_disable_compressed_file(inode))
4094 f2fs_precache_extents(inode);
4100 stat_inc_swapfile_inode(inode);
4101 set_inode_flag(inode, FI_PIN_FILE);
4102 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4108 struct inode *inode = file_inode(file);
4110 stat_dec_swapfile_inode(inode);
4111 clear_inode_flag(inode, FI_PIN_FILE);
4209 static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4217 map.m_lblk = bytes_to_blks(inode, offset);
4218 map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
4220 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4224 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
4228 iomap->offset = blks_to_bytes(inode, map.m_lblk);
4235 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4247 iomap->length = blks_to_bytes(inode, map.m_len);
4251 iomap->addr = blks_to_bytes(inode, map.m_pblk);
4255 iomap->length = blks_to_bytes(inode, next_pgofs) -
4263 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4264 offset + length > i_size_read(inode))
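f2fs_iomap_begin() (lines 4209-4264) rounds the requested byte range outward to block granularity before calling f2fs_map_blocks(), so the iomap it reports can start before and end after the caller's range. A worked example of that arithmetic with 4 KiB blocks, pure userspace computation with no kernel APIs:

#include <stdio.h>

int main(void)
{
    unsigned int blkbits = 12;          /* 4 KiB blocks */
    long long offset = 5000, length = 10000;

    long long m_lblk = offset >> blkbits;                              /* 1 */
    long long m_len = ((offset + length - 1) >> blkbits) - m_lblk + 1; /* 3 */

    /* Reports offset 4096, length 12288: blocks 1..3 cover bytes
     * 4096..16383, a superset of the requested 5000..14999. */
    printf("iomap offset %lld, length %lld\n",
           m_lblk << blkbits, m_len << blkbits);
    return 0;
}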