/**
 * segment.c
 *
 * Many parts of the code are copied from the Linux kernel fs/f2fs.
 *
 * Copyright (C) 2015 Huawei Ltd.
 * Written by:
 *   Hou Pengyang <houpengyang@huawei.com>
 *   Liu Shuoran <liushuoran@huawei.com>
 *   Jaegeuk Kim <jaegeuk@kernel.org>
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add sload compression support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"
#include "node.h"
#include "quotaio.h"

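/*
 * Reserve one block for the given summary entry: pick the next free
 * block of a suitable segment type, mark it valid in the seg-entry
 * bitmaps, bump the global (and, under fsck, the checked) block/node/
 * inode counters when a brand-new block is allocated, and record the
 * summary.  On success, *to holds the newly reserved block address.
 */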
int reserve_new_block(struct f2fs_sb_info *sbi, block_t *to,
			struct f2fs_summary *sum, int type, bool is_inode)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct seg_entry *se;
	u64 blkaddr, offset;
	u64 old_blkaddr = *to;
	bool is_node = IS_NODESEG(type);
	int left = 0;

	if (old_blkaddr == NULL_ADDR) {
		if (c.func == FSCK) {
			if (fsck->chk.valid_blk_cnt >= sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && fsck->chk.valid_node_cnt >=
					sbi->total_valid_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		} else {
			if (sbi->total_valid_block_count >=
						sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && sbi->total_valid_node_count >=
						sbi->total_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		}
	}

	blkaddr = SM_I(sbi)->main_blkaddr;

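	/*
	 * For a read-only image, pack node blocks from the end of the
	 * main area (searching leftward) and data blocks from the front
	 * (searching rightward).
	 */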
	if (sbi->raw_super->feature & cpu_to_le32(F2FS_FEATURE_RO)) {
		if (IS_NODESEG(type)) {
			type = CURSEG_HOT_NODE;
			blkaddr = __end_block_addr(sbi);
			left = 1;
		} else if (IS_DATASEG(type)) {
			type = CURSEG_HOT_DATA;
			blkaddr = SM_I(sbi)->main_blkaddr;
			left = 0;
		}
	}

	if (find_next_free_block(sbi, &blkaddr, left, type, false)) {
		ERR_MSG("Can't find free block");
		ASSERT(0);
	}

	se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
	offset = OFFSET_IN_SEG(sbi, blkaddr);
	se->type = type;
	se->valid_blocks++;
	f2fs_set_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi)) {
		se->ckpt_type = type;
		se->ckpt_valid_blocks++;
		f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
	}
	if (c.func == FSCK) {
		f2fs_set_main_bitmap(sbi, blkaddr, type);
		f2fs_set_sit_bitmap(sbi, blkaddr);
	}

	if (old_blkaddr == NULL_ADDR) {
		sbi->total_valid_block_count++;
		if (is_node) {
			sbi->total_valid_node_count++;
			if (is_inode)
				sbi->total_valid_inode_count++;
		}
		if (c.func == FSCK) {
			fsck->chk.valid_blk_cnt++;
			if (is_node) {
				fsck->chk.valid_node_cnt++;
				if (is_inode)
					fsck->chk.valid_inode_cnt++;
			}
		}
	}
	se->dirty = 1;

	/* read/write SSA */
	*to = (block_t)blkaddr;
	update_sum_entry(sbi, *to, sum);

	return 0;
}

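/*
 * Allocate (or re-reserve) the data block referenced by dn->ofs_in_node:
 * build a summary entry for the owning node, reserve a block address,
 * and update the inode's block count or dirty flag depending on whether
 * the old address was NULL_ADDR or NEW_ADDR.
 */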
int new_data_block(struct f2fs_sb_info *sbi, void *block,
				struct dnode_of_data *dn, int type)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_summary sum;
	struct node_info ni;
	unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
	int ret;

	if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
					type != CURSEG_HOT_DATA)
		type = CURSEG_HOT_DATA;

	ASSERT(dn->node_blk);
	memset(block, 0, BLOCK_SZ);

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	dn->data_blkaddr = blkaddr;
	ret = reserve_new_block(sbi, &dn->data_blkaddr, &sum, type, 0);
	if (ret) {
		c.alloc_failed = 1;
		return ret;
	}

	if (blkaddr == NULL_ADDR)
		inc_inode_blocks(dn);
	else if (blkaddr == NEW_ADDR)
		dn->idirty = 1;
	set_data_blkaddr(dn);
	return 0;
}

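/* Return the on-disk i_size of the quota file backing @qf. */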
u64 f2fs_quota_size(struct quota_file *qf)
{
	struct node_info ni;
	struct f2fs_node *inode;
	u64 filesize;

	inode = (struct f2fs_node *) calloc(BLOCK_SZ, 1);
	ASSERT(inode);

	/* Read inode */
	get_node_info(qf->sbi, qf->ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(S_ISREG(le16_to_cpu(inode->i.i_mode)));

	filesize = le64_to_cpu(inode->i.i_size);
	free(inode);
	return filesize;
}

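/*
 * Read up to @count bytes at byte @offset of inode @ino into @buffer.
 * The request is clamped to i_size; reading stops early at a hole
 * (NULL_ADDR) or an unwritten block (NEW_ADDR).  Returns the number of
 * bytes actually copied.
 */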
u64 f2fs_read(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	u64 filesize;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 read_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(BLOCK_SZ, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node*)(blk_buffer + BLOCK_SZ);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Adjust count with file length. */
	filesize = le64_to_cpu(inode->i.i_size);
	if (offset > filesize)
		count = 0;
	else if (count + offset > filesize)
		count = filesize - offset;

	/* Main loop for file blocks */
	read_count = remained_blkentries = 0;
	while (count > 0) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, F2FS_BYTES_TO_BLK(offset),
					LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
							NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
						dn.node_blk, dn.inode_blk) -
						dn.ofs_in_node;
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
			break;

		off_in_blk = offset % BLOCK_SZ;
		len_in_blk = BLOCK_SZ - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Read data from single block. */
		if (len_in_blk < BLOCK_SZ) {
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(buffer, blk_buffer + off_in_blk, len_in_blk);
		} else {
			/* Direct read */
			ASSERT(dev_read_block(buffer, blkaddr) >= 0);
		}

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		read_count += len_in_blk;

		dn.ofs_in_node++;
		remained_blkentries--;
	}
	if (index_node)
		free(index_node);
	free(blk_buffer);

	return read_count;
}

/*
 * Do not call this function directly.  Instead, call one of the following:
 *     u64 f2fs_write();
 *     u64 f2fs_write_compress_data();
 *     u64 f2fs_write_addrtag();
 */
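/*
 * Write @count bytes at byte @offset of inode @ino, allocating node and
 * data blocks as needed (ALLOC_NODE).  For WR_NORMAL and
 * WR_COMPRESS_DATA the payload in @buffer is written; for the address
 * tags (NULL_ADDR, NEW_ADDR, COMPRESS_ADDR) only the block address in
 * the dnode is stamped and no data is copied.  i_size is extended for
 * WR_NORMAL writes, and the inode is written back if it became dirty.
 */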
static u64 f2fs_write_ex(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
		u64 count, pgoff_t offset, enum wr_addr_type addr_type)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 written_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;
	int idirty = 0;
	int err;
	bool has_data = (addr_type == WR_NORMAL
			|| addr_type == WR_COMPRESS_DATA);

	if (count == 0)
		return 0;

	/*
	 * Enforce calling from f2fs_write(), f2fs_write_compress_data(),
	 * or f2fs_write_addrtag().  Besides, check that the buffer and
	 * the address type are consistent.
	 */
	ASSERT((!has_data && buffer == NULL) || (has_data && buffer != NULL));
	if (addr_type != WR_NORMAL)
		ASSERT(offset % F2FS_BLKSIZE == 0); /* block boundary only */

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(BLOCK_SZ, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node*)(blk_buffer + BLOCK_SZ);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Main loop for file blocks */
	written_count = remained_blkentries = 0;
	while (count > 0) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			err = get_dnode_of_data(sbi, &dn,
					F2FS_BYTES_TO_BLK(offset), ALLOC_NODE);
			if (err)
				break;
			idirty |= dn.idirty;
			free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk) -
					dn.ofs_in_node;
		}
		ASSERT(remained_blkentries > 0);

		if (!has_data) {
			dn.data_blkaddr = addr_type;
			set_data_blkaddr(&dn);
			idirty |= dn.idirty;
			if (dn.ndirty)
				ASSERT(dev_write_block(dn.node_blk,
						dn.node_blkaddr) >= 0);
			written_count = 0;
			break;
		}

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
			err = new_data_block(sbi, blk_buffer,
						&dn, CURSEG_WARM_DATA);
			if (err)
				break;
			blkaddr = dn.data_blkaddr;
			idirty |= dn.idirty;
		}

		off_in_blk = offset % BLOCK_SZ;
		len_in_blk = BLOCK_SZ - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Write data to single block. */
		if (len_in_blk < BLOCK_SZ) {
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(blk_buffer + off_in_blk, buffer, len_in_blk);
			ASSERT(dev_write_block(blk_buffer, blkaddr) >= 0);
		} else {
			/* Direct write */
			ASSERT(dev_write_block(buffer, blkaddr) >= 0);
		}

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		written_count += len_in_blk;

		dn.ofs_in_node++;
		if ((--remained_blkentries == 0 || count == 0) && (dn.ndirty))
			ASSERT(dev_write_block(dn.node_blk, dn.node_blkaddr)
					>= 0);
	}
	if (addr_type == WR_NORMAL && offset > le64_to_cpu(inode->i.i_size)) {
		inode->i.i_size = cpu_to_le64(offset);
		idirty = 1;
	}
	if (idirty) {
		ASSERT(inode == dn.inode_blk);
		ASSERT(write_inode(inode, ni.blk_addr) >= 0);
	}

	free(index_node);
	free(blk_buffer);

	return written_count;
}

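/*
 * Thin wrappers around f2fs_write_ex(): f2fs_write() writes ordinary
 * file data, f2fs_write_compress_data() writes an already-compressed
 * cluster payload, and f2fs_write_addrtag() stamps one block address
 * tag (COMPRESS_ADDR, NEW_ADDR, or NULL_ADDR) without writing data.
 */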
u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_NORMAL);
}

u64 f2fs_write_compress_data(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_COMPRESS_DATA);
}

u64 f2fs_write_addrtag(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
		unsigned int addrtag)
{
	ASSERT(addrtag == COMPRESS_ADDR || addrtag == NEW_ADDR
			|| addrtag == NULL_ADDR);
	return f2fs_write_ex(sbi, ino, NULL, F2FS_BLKSIZE, offset, addrtag);
}

/* This function updates only inode->i.i_size */
void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
{
	struct node_info ni;
	struct f2fs_node *inode;

	inode = calloc(BLOCK_SZ, 1);
	ASSERT(inode);
	get_node_info(sbi, ino, &ni);

	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	inode->i.i_size = cpu_to_le64(filesize);

	ASSERT(write_inode(inode, ni.blk_addr) >= 0);
	free(inode);
}

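/*
 * Read exactly @rsize bytes from @fd unless EOF is hit first, retrying
 * up to MAX_BULKR_RETRY consecutive EINTRs.  Returns the number of
 * bytes read, or -1 on error; *eof (if non-NULL) is set when read()
 * returned 0.
 */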
#define MAX_BULKR_RETRY 5
int bulkread(int fd, void *rbuf, size_t rsize, bool *eof)
{
	int n = 0;
	int retry = MAX_BULKR_RETRY;
	int cur;

	if (!rsize)
		return 0;

	if (eof != NULL)
		*eof = false;
	while (rsize && (cur = read(fd, rbuf, rsize)) != 0) {
		if (cur == -1) {
			if (errno == EINTR && retry--)
				continue;
			return -1;
		}
		retry = MAX_BULKR_RETRY;

		rbuf = (char *)rbuf + cur;	/* advance past the bytes read */
		rsize -= cur;
		n += cur;
	}
	if (eof != NULL)
		*eof = (cur == 0);
	return n;
}

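/*
 * Tag (@compressed - 1) block slots starting at @offset with NEW_ADDR,
 * reserving the space saved by compression so the cluster stays
 * mutable.  Skipped for read-only compression.  Returns 0 on success;
 * a nonzero write count indicates failure.
 */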
u64 f2fs_fix_mutable(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
		unsigned int compressed)
{
	unsigned int i;
	u64 wlen;

	if (c.compress.readonly)
		return 0;

	for (i = 0; i < compressed - 1; i++) {
		wlen = f2fs_write_addrtag(sbi, ino,
				offset + (i << F2FS_BLKSIZE_BITS), NEW_ADDR);
		if (wlen)
			return wlen;
	}
	return 0;
}

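/* Helpers for tracking the largest extent of consecutive blocks. */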
static inline int is_consecutive(u32 prev_addr, u32 cur_addr)
{
	if (is_valid_data_blkaddr(cur_addr) && (cur_addr == prev_addr + 1))
		return 1;
	return 0;
}

static inline void copy_extent_info(struct extent_info *t_ext,
				struct extent_info *s_ext)
{
	t_ext->fofs = s_ext->fofs;
	t_ext->blk = s_ext->blk;
	t_ext->len = s_ext->len;
}

static inline void update_extent_info(struct f2fs_node *inode,
				struct extent_info *ext)
{
	inode->i.i_ext.fofs = cpu_to_le32(ext->fofs);
	inode->i.i_ext.blk_addr = cpu_to_le32(ext->blk);
	inode->i.i_ext.len = cpu_to_le32(ext->len);
}

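/*
 * Scan every data block address of inode @ino, track the longest run of
 * consecutive block addresses, and store it in the inode's extent field
 * (i_ext).  Compressed clusters are skipped a cluster at a time, and
 * inline-data inodes are left untouched.
 */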
static void update_largest_extent(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	u32 blkaddr, prev_blkaddr, cur_blk = 0, end_blk;
	struct extent_info largest_ext, cur_ext;
	u64 remained_blkentries = 0;
	u32 cluster_size;
	int count;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));
	largest_ext.len = cur_ext.len = 0;

	inode = (struct f2fs_node *) calloc(BLOCK_SZ, 1);
	ASSERT(inode);

	/* Read inode info */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	cluster_size = 1 << inode->i.i_log_cluster_size;

	if (inode->i.i_inline & F2FS_INLINE_DATA)
		goto exit;

	end_blk = f2fs_max_file_offset(&inode->i) >> F2FS_BLKSIZE_BITS;

	while (cur_blk <= end_blk) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, cur_blk, LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
				NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (cur_ext.len > 0) {
			if (is_consecutive(prev_blkaddr, blkaddr))
				cur_ext.len++;
			else {
				if (cur_ext.len > largest_ext.len)
					copy_extent_info(&largest_ext,
							&cur_ext);
				cur_ext.len = 0;
			}
		}

		if (cur_ext.len == 0 && is_valid_data_blkaddr(blkaddr)) {
			cur_ext.fofs = cur_blk;
			cur_ext.len = 1;
			cur_ext.blk = blkaddr;
		}

		prev_blkaddr = blkaddr;
		count = blkaddr == COMPRESS_ADDR ? cluster_size : 1;
		cur_blk += count;
		dn.ofs_in_node += count;
		remained_blkentries -= count;
	}

exit:
	if (cur_ext.len > largest_ext.len)
		copy_extent_info(&largest_ext, &cur_ext);
	if (largest_ext.len > 0) {
		update_extent_info(inode, &largest_ext);
		ASSERT(write_inode(inode, ni.blk_addr) >= 0);
	}

	if (index_node)
		free(index_node);
	free(inode);
}

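/*
 * Load the file behind dentry @de into the image: reuse already-built
 * hardlinked inodes, inline small files (<= DEF_MAX_INLINE_DATA),
 * compress eligible files when sload compression is enabled, and fall
 * back to plain block-by-block writes otherwise.  Finally refresh the
 * largest extent (where applicable) and the free segment accounting.
 */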
int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
{
	int fd, n = -1;
	pgoff_t off = 0;
	u8 buffer[BLOCK_SZ];
	struct node_info ni;
	struct f2fs_node *node_blk;

	if (de->ino == 0)
		return -1;

	if (de->from_devino) {
		struct hardlink_cache_entry *found_hardlink;

		found_hardlink = f2fs_search_hardlink(sbi, de);
		if (found_hardlink && found_hardlink->to_ino &&
				found_hardlink->nbuild)
			return 0;

		found_hardlink->nbuild++;
	}

	fd = open(de->full_path, O_RDONLY);
	if (fd < 0) {
		MSG(0, "Skip: Fail to open %s\n", de->full_path);
		return -1;
	}

	/* inline_data support */
	if (de->size <= DEF_MAX_INLINE_DATA) {
		int ret;

		get_node_info(sbi, de->ino, &ni);

		node_blk = calloc(BLOCK_SZ, 1);
		ASSERT(node_blk);

		ret = dev_read_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);

		node_blk->i.i_inline |= F2FS_INLINE_DATA;
		node_blk->i.i_inline |= F2FS_DATA_EXIST;

		if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
			node_blk->i.i_inline |= F2FS_EXTRA_ATTR;
			node_blk->i.i_extra_isize =
					cpu_to_le16(calc_extra_isize());
		}
		n = read(fd, buffer, BLOCK_SZ);
		ASSERT((unsigned long)n == de->size);
		memcpy(inline_data_addr(node_blk), buffer, de->size);
		node_blk->i.i_size = cpu_to_le64(de->size);
		ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
		free(node_blk);
#ifdef WITH_SLOAD
	} else if (c.func == SLOAD && c.compress.enabled &&
			c.compress.filter_ops->filter(de->full_path)) {
		bool eof = false;
		u8 *rbuf = c.compress.cc.rbuf;
		unsigned int cblocks = 0;

		node_blk = calloc(BLOCK_SZ, 1);
		ASSERT(node_blk);

		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_compress_algrithm = c.compress.alg;
		node_blk->i.i_log_cluster_size =
				c.compress.cc.log_cluster_size;
		node_blk->i.i_flags = cpu_to_le32(F2FS_COMPR_FL);
		if (c.compress.readonly)
			node_blk->i.i_inline |= F2FS_COMPRESS_RELEASED;
		ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);

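		/*
		 * Compress cluster by cluster: if compression fails, the
		 * chunk read is short, or the saving is below min_blocks,
		 * write the chunk as plain data; otherwise stamp
		 * COMPRESS_ADDR, write the compressed payload, and tag the
		 * saved block slots via f2fs_fix_mutable().
		 */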
		while (!eof && (n = bulkread(fd, rbuf, c.compress.cc.rlen,
				&eof)) > 0) {
			int ret = c.compress.ops->compress(&c.compress.cc);
			u64 wlen;
			u32 csize = ALIGN_UP(c.compress.cc.clen +
					COMPRESS_HEADER_SIZE, BLOCK_SZ);
			unsigned int cur_cblk;

			if (ret || n < c.compress.cc.rlen ||
				n < (int)(csize + BLOCK_SZ *
						c.compress.min_blocks)) {
				wlen = f2fs_write(sbi, de->ino, rbuf, n, off);
				ASSERT((int)wlen == n);
			} else {
				wlen = f2fs_write_addrtag(sbi, de->ino, off,
						WR_COMPRESS_ADDR);
				ASSERT(!wlen);
				wlen = f2fs_write_compress_data(sbi, de->ino,
						(u8 *)c.compress.cc.cbuf,
						csize, off + BLOCK_SZ);
				ASSERT(wlen == csize);
				c.compress.ops->reset(&c.compress.cc);
				cur_cblk = (c.compress.cc.rlen - csize) /
								BLOCK_SZ;
				cblocks += cur_cblk;
				wlen = f2fs_fix_mutable(sbi, de->ino,
						off + BLOCK_SZ + csize,
						cur_cblk);
				ASSERT(!wlen);
			}
			off += n;
		}
		if (n == -1) {
			fprintf(stderr, "Load file '%s' failed: ",
					de->full_path);
			perror(NULL);
		}
		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_size = cpu_to_le64(off);
		if (!c.compress.readonly) {
			node_blk->i.i_compr_blocks = cpu_to_le64(cblocks);
			node_blk->i.i_blocks += cpu_to_le64(cblocks);
		}
		ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
		free(node_blk);

		if (!c.compress.readonly) {
			sbi->total_valid_block_count += cblocks;
			if (sbi->total_valid_block_count >=
					sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				ASSERT(0);
			}
		}
#endif
	} else {
		while ((n = read(fd, buffer, BLOCK_SZ)) > 0) {
			f2fs_write(sbi, de->ino, buffer, n, off);
			off += n;
		}
	}

	close(fd);
	if (n < 0)
		return -1;

	if (!c.compress.enabled || (c.feature & cpu_to_le32(F2FS_FEATURE_RO)))
		update_largest_extent(sbi, de->ino);
	update_free_segments(sbi);

	MSG(1, "Info: Create %s -> %s\n"
		"  -- ino=%x, type=%x, mode=%x, uid=%x, "
		"gid=%x, cap=%"PRIx64", size=%lu, pino=%x\n",
		de->full_path, de->path,
		de->ino, de->file_type, de->mode,
		de->uid, de->gid, de->capabilities, de->size, de->pino);
	return 0;
}