Lines Matching defs:node_blk

226 struct f2fs_node *node_blk = NULL;
231 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
232 ASSERT(node_blk != NULL);
243 ret = dev_read_block(node_blk, ni.blk_addr);
246 if (le32_to_cpu(node_blk->footer.nid) != nid)
250 if (node_blk->footer.nid == node_blk->footer.ino) {
251 int ofs = get_extra_isize(node_blk);
255 target_blk_addr = node_blk->i.i_addr[ofs + ofs_in_node];
259 target_blk_addr = node_blk->dn.addr[ofs_in_node];
265 free(node_blk);
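
The block above (lines 226-265) is the usual fsck pattern for mapping a (nid, ofs_in_node) pair back to the block address recorded in the owning node: allocate a scratch node block, read it, verify the footer, then index either the inode's i_addr[] or the direct node's addr[] array. A condensed sketch of that pattern, assuming the f2fs-tools helpers visible in the listing (get_node_info, dev_read_block, get_extra_isize) and the fsck.h types; the function name is illustrative only:

/* Sketch only: resolve the data block address stored at ofs_in_node of node
 * nid.  Assumes f2fs-tools' fsck.h definitions (struct f2fs_node,
 * struct node_info, BLOCK_SZ, ASSERT); the name is hypothetical. */
static block_t lookup_recorded_blkaddr(struct f2fs_sb_info *sbi, u32 nid,
                                       u16 ofs_in_node)
{
    struct f2fs_node *node_blk;
    struct node_info ni;
    __le32 addr = 0;
    int ret;

    node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
    ASSERT(node_blk != NULL);

    get_node_info(sbi, nid, &ni);

    ret = dev_read_block(node_blk, ni.blk_addr);
    ASSERT(ret >= 0);

    /* the block on disk must actually belong to nid */
    if (le32_to_cpu(node_blk->footer.nid) != nid)
        goto out;

    if (node_blk->footer.nid == node_blk->footer.ino) {
        /* inode block: data pointers start after the extra attribute area */
        int ofs = get_extra_isize(node_blk);

        addr = node_blk->i.i_addr[ofs + ofs_in_node];
    } else {
        /* direct node block: plain address array */
        addr = node_blk->dn.addr[ofs_in_node];
    }
out:
    free(node_blk);
    return le32_to_cpu(addr);
}
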
390 struct f2fs_node *node_blk,
418 ret = dev_read_block(node_blk, ni->blk_addr);
422 node_blk->footer.nid != node_blk->footer.ino) {
424 nid, le32_to_cpu(node_blk->footer.nid),
425 le32_to_cpu(node_blk->footer.ino));
428 if (ni->ino != le32_to_cpu(node_blk->footer.ino)) {
430 nid, ni->ino, le32_to_cpu(node_blk->footer.ino));
434 node_blk->footer.nid == node_blk->footer.ino) {
436 nid, le32_to_cpu(node_blk->footer.nid),
437 le32_to_cpu(node_blk->footer.ino));
441 if (le32_to_cpu(node_blk->footer.nid) != nid) {
444 le32_to_cpu(node_blk->footer.nid));
449 u32 flag = le32_to_cpu(node_blk->footer.flag);
473 __check_inode_mode(nid, ftype, le16_to_cpu(node_blk->i.i_mode)))
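
Lines 390-473 are the heart of sanity_check_nid(): after the block at ni->blk_addr is read, the footer must be self-consistent with the requested nid and node type, and for inodes the recorded i_mode must agree with the file type the caller expects. The footer checks reduce to the sketch below (the additional NAT-ino and footer.flag checks at 428-449 follow the same pattern); ASSERT_MSG and the enum values are the ones used throughout fsck.c, the helper name is illustrative:

/* Sketch of the footer invariants enforced by sanity_check_nid().
 * Assumes fsck.h (ASSERT_MSG, enum NODE_TYPE) and <errno.h>. */
static int check_node_footer(u32 nid, enum NODE_TYPE ntype,
                             struct f2fs_node *node_blk)
{
    u32 f_nid = le32_to_cpu(node_blk->footer.nid);
    u32 f_ino = le32_to_cpu(node_blk->footer.ino);

    /* an inode's footer must point at itself ... */
    if (ntype == TYPE_INODE && f_nid != f_ino) {
        ASSERT_MSG("nid[0x%x] footer.nid[0x%x] != footer.ino[0x%x]",
                nid, f_nid, f_ino);
        return -EINVAL;
    }
    /* ... and a non-inode node must not */
    if (ntype != TYPE_INODE && f_nid == f_ino) {
        ASSERT_MSG("nid[0x%x] footer.nid[0x%x] == footer.ino[0x%x]",
                nid, f_nid, f_ino);
        return -EINVAL;
    }
    /* the block read via the NAT entry must belong to nid */
    if (f_nid != nid) {
        ASSERT_MSG("nid[0x%x] != footer.nid[0x%x]", nid, f_nid);
        return -EINVAL;
    }
    return 0;
}
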
518 struct f2fs_node *node_blk,
522 return sanity_check_nid(sbi, nid, node_blk, ftype, ntype, ni);
528 struct f2fs_node *node_blk = NULL;
535 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
536 ASSERT(node_blk != NULL);
539 if (sanity_check_nid(sbi, x_nid, node_blk,
549 free(node_blk);
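
Lines 528-549 wrap the same machinery for xattr nodes: allocate a scratch block, let sanity_check_nid() validate x_nid as TYPE_XATTR, and on success account the block. A hedged sketch, assuming the helpers shown in the listing plus f2fs_set_main_bitmap()/CURSEG_COLD_NODE and DBG() as used elsewhere in fsck.c:

/* Sketch of the xattr-node check; the bitmap/DBG details are assumptions
 * based on the surrounding fsck.c code.  Assumes fsck.h and <errno.h>. */
int chk_xattr_blk_sketch(struct f2fs_sb_info *sbi, u32 ino, u32 x_nid,
                         u32 *blk_cnt)
{
    struct f2fs_node *node_blk;
    struct node_info ni;
    int ret = 0;

    if (x_nid == 0x0)
        return 0;    /* no xattr node attached to this inode */

    node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
    ASSERT(node_blk != NULL);

    /* footer/NAT consistency; node type must be TYPE_XATTR */
    if (sanity_check_nid(sbi, x_nid, node_blk,
                F2FS_FT_XATTR, TYPE_XATTR, &ni)) {
        ret = -EINVAL;
        goto out;
    }

    *blk_cnt = *blk_cnt + 1;
    f2fs_set_main_bitmap(sbi, ni.blk_addr, CURSEG_COLD_NODE);
    DBG(2, "ino[0x%x] x_nid[0x%x]\n", ino, x_nid);
out:
    free(node_blk);
    return ret;
}
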
559 struct f2fs_node *node_blk = NULL;
561 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
562 ASSERT(node_blk != NULL);
564 if (sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni))
570 fsck_chk_inode_blk(sbi, nid, ftype, node_blk, blk_cnt, cbc,
572 quota_add_inode_usage(fsck->qctx, nid, &node_blk->i);
578 fsck_chk_dnode_blk(sbi, inode, nid, ftype, node_blk,
584 fsck_chk_idnode_blk(sbi, inode, ftype, node_blk,
590 fsck_chk_didnode_blk(sbi, inode, ftype, node_blk,
597 free(node_blk);
600 free(node_blk);
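
Lines 559-600 are fsck_chk_node_blk() itself: one scratch buffer, one sanity_check_nid() call, then a dispatch on the node type. The shape of that dispatch, with bitmap and error bookkeeping trimmed; the argument lists follow the continuation lines not shown in the listing and should be treated as approximate:

/* Trimmed sketch of the fsck_chk_node_blk() dispatch. */
node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
ASSERT(node_blk != NULL);

if (sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni))
    goto err;

if (ntype == TYPE_INODE) {
    fsck_chk_inode_blk(sbi, nid, ftype, node_blk, blk_cnt, cbc,
            &ni, child);
    quota_add_inode_usage(F2FS_FSCK(sbi)->qctx, nid, &node_blk->i);
} else {
    switch (ntype) {
    case TYPE_DIRECT_NODE:
        fsck_chk_dnode_blk(sbi, inode, nid, ftype, node_blk,
                blk_cnt, cbc, child, &ni);
        break;
    case TYPE_INDIRECT_NODE:
        fsck_chk_idnode_blk(sbi, inode, ftype, node_blk,
                blk_cnt, cbc, child);
        break;
    case TYPE_DOUBLE_INDIRECT_NODE:
        fsck_chk_didnode_blk(sbi, inode, ftype, node_blk,
                blk_cnt, cbc, child);
        break;
    default:
        ASSERT(0);
    }
}
free(node_blk);
return 0;
err:
free(node_blk);
return -EINVAL;
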
677 struct f2fs_node *node_blk)
682 u32 nid = le32_to_cpu(node_blk->in.nid[i]);
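
Lines 677-682 belong to fsck_reada_all_direct_node_blocks(), which queues readahead for every child nid of an indirect node before the per-child checks start. Roughly:

/* Sketch: prefetch all children of an indirect node.  Assumes
 * fsck_reada_node_block() and NIDS_PER_BLOCK from f2fs-tools;
 * fsck_reada_node_block() is assumed to ignore zero/invalid nids. */
static void reada_all_direct_nodes(struct f2fs_sb_info *sbi,
                                   struct f2fs_node *node_blk)
{
    int i;

    for (i = 0; i < NIDS_PER_BLOCK; i++) {
        u32 nid = le32_to_cpu(node_blk->in.nid[i]);

        fsck_reada_node_block(sbi, nid);
    }
}
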
690 enum FILE_TYPE ftype, struct f2fs_node *node_blk,
697 u32 i_links = le32_to_cpu(node_blk->i.i_links);
698 u64 i_size = le64_to_cpu(node_blk->i.i_size);
699 u64 i_blocks = le64_to_cpu(node_blk->i.i_blocks);
701 u32 i_flags = le32_to_cpu(node_blk->i.i_flags);
703 bool compr_rel = node_blk->i.i_inline & F2FS_COMPRESS_RELEASED;
704 u64 i_compr_blocks = le64_to_cpu(node_blk->i.i_compr_blocks);
705 nid_t i_xattr_nid = le32_to_cpu(node_blk->i.i_xattr_nid);
713 u32 cluster_size = 1 << node_blk->i.i_log_cluster_size;
718 if (!compr_supported || (node_blk->i.i_inline & F2FS_INLINE_DATA)) {
722 * in the memory (node_blk).
724 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_COMPR_FL);
729 nid, i_flags, node_blk->i.i_flags);
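
Lines 703-729 gather the inode fields that the rest of fsck_chk_inode_blk() validates, and immediately neutralize an impossible combination: F2FS_COMPR_FL set on an image without compression support, or together with inline data. A hedged sketch of that fix; the compr_supported test and FIX_MSG wording are assumptions inferred from the listing:

/* Sketch: clear a bogus compression flag in the in-memory copy; the block
 * is written back later together with the other fixes. */
if (i_flags & F2FS_COMPR_FL) {
    if (!compr_supported ||
        (node_blk->i.i_inline & F2FS_INLINE_DATA)) {
        node_blk->i.i_flags &= ~cpu_to_le32(F2FS_COMPR_FL);
        FIX_MSG("ino[0x%x] clear F2FS_COMPR_FL, i_flags 0x%x -> 0x%x",
                nid, i_flags, le32_to_cpu(node_blk->i.i_flags));
    }
}
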
737 child.pp_ino = le32_to_cpu(node_blk->i.i_pino);
738 child.dir_level = node_blk->i.i_dir_level;
745 namelen = le32_to_cpu(node_blk->i.i_namelen);
748 memcpy(child.p_name, node_blk->i.i_name, namelen);
765 node_blk->i.i_links =
784 node_blk->i.i_xattr_nid = 0;
796 get_extent_info(&child.ei, &node_blk->i.i_ext);
799 if (f2fs_has_extra_isize(&node_blk->i)) {
802 le16_to_cpu(node_blk->i.i_extra_isize);
811 node_blk->i.i_extra_isize =
821 nid, node_blk->i.i_inline);
823 node_blk->i.i_inline &= ~F2FS_EXTRA_ATTR;
830 (node_blk->i.i_inline & F2FS_INLINE_XATTR)) {
832 le16_to_cpu(node_blk->i.i_inline_xattr_size);
835 inline_size > MAX_INLINE_XATTR_SIZE(&node_blk->i)) {
843 node_blk->i.i_inline_xattr_size =
850 ofs = get_extra_isize(node_blk);
852 if ((node_blk->i.i_flags & cpu_to_le32(F2FS_CASEFOLD_FL)) &&
858 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_CASEFOLD_FL);
863 if ((node_blk->i.i_inline & F2FS_INLINE_DATA)) {
864 unsigned int inline_size = MAX_INLINE_DATA(node_blk);
867 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
875 node_blk->i.i_addr[ofs] = 0;
876 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
884 node_blk->i.i_size = cpu_to_le64(inline_size);
890 if (!(node_blk->i.i_inline & F2FS_DATA_EXIST)) {
891 char buf[MAX_INLINE_DATA(node_blk)];
892 memset(buf, 0, MAX_INLINE_DATA(node_blk));
894 if (memcmp(buf, inline_data_addr(node_blk),
895 MAX_INLINE_DATA(node_blk))) {
899 node_blk->i.i_inline |= F2FS_DATA_EXIST;
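
Lines 863-899 handle F2FS_INLINE_DATA: an inline inode must not also carry a block address in i_addr[ofs], its i_size must fit the inline area, and the F2FS_DATA_EXIST bit must agree with whether the inline area actually holds data. The last of those checks, sketched with the MAX_INLINE_DATA()/inline_data_addr() helpers visible in the listing:

/* Sketch: re-set F2FS_DATA_EXIST when inline payload is clearly present
 * (excerpt; need_fix is the function's local fix marker). */
if ((node_blk->i.i_inline & F2FS_INLINE_DATA) &&
    !(node_blk->i.i_inline & F2FS_DATA_EXIST)) {
    char zeroes[MAX_INLINE_DATA(node_blk)];

    memset(zeroes, 0, MAX_INLINE_DATA(node_blk));
    if (memcmp(zeroes, inline_data_addr(node_blk),
                MAX_INLINE_DATA(node_blk))) {
        /* the flag says "no data", but the inline area is not empty */
        node_blk->i.i_inline |= F2FS_DATA_EXIST;
        need_fix = 1;
    }
}
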
909 if ((node_blk->i.i_inline & F2FS_INLINE_DENTRY)) {
910 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
919 node_blk->i.i_addr[ofs] = 0;
920 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
925 ret = fsck_chk_inline_dentries(sbi, node_blk, &child);
935 addrs = ADDRS_PER_INODE(&node_blk->i);
937 u64 addrs_per_blk = (u64)ADDRS_PER_BLOCK(&node_blk->i);
945 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs + idx]);
956 node_blk->i.i_addr[ofs + idx] =
976 IS_CASEFOLDED(&node_blk->i),
980 file_is_encrypt(&node_blk->i));
986 node_blk->i.i_addr[ofs + idx] = 0;
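
Lines 935-986 walk the in-inode data pointers: ADDRS_PER_INODE() entries starting at the extra-attribute offset, each one either NULL_ADDR, a special address, or a real block handed to fsck_chk_data_blk(); unrecoverable entries are zeroed under fix_on. The core of that loop, sketched as an excerpt (the fsck_chk_data_blk argument list mirrors the listing but is approximate, and special addresses such as NEW_ADDR are omitted):

/* Sketch of the in-inode data pointer walk. */
addrs = ADDRS_PER_INODE(&node_blk->i);
for (idx = 0; idx < addrs; idx++, child.pgofs++) {
    block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs + idx]);

    if (blkaddr == NULL_ADDR)
        continue;    /* hole */

    ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(&node_blk->i),
            blkaddr, &child, (i_blocks == *blk_cnt),
            ftype, nid, idx, ni->version,
            file_is_encrypt(&node_blk->i));
    if (!ret) {
        *blk_cnt = *blk_cnt + 1;
    } else if (c.fix_on) {
        /* drop the broken pointer; the block is rewritten at the end */
        node_blk->i.i_addr[ofs + idx] = 0;
        need_fix = 1;
    }
}
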
994 u32 nid = le32_to_cpu(node_blk->i.i_nid[idx]);
1000 nid_t i_nid = le32_to_cpu(node_blk->i.i_nid[idx]);
1014 ret = fsck_chk_node_blk(sbi, &node_blk->i, i_nid,
1020 node_blk->i.i_nid[idx] = 0;
1026 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1028 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1031 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
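
Lines 994-1031 then walk the five i_nid[] slots: slots 0-1 are direct nodes, 2-3 indirect, 4 double-indirect. Each valid nid is recursed into via fsck_chk_node_blk(), a bad one is zeroed under fix_on, and a skipped or dropped subtree still advances child.pgofs by the amount of file space it would have covered. Sketch (excerpt; ret and need_fix are function locals):

/* Sketch of the i_nid[] walk inside fsck_chk_inode_blk(). */
for (idx = 0; idx < 5; idx++) {
    nid_t i_nid = le32_to_cpu(node_blk->i.i_nid[idx]);
    enum NODE_TYPE ntype;

    if (idx == 0 || idx == 1)
        ntype = TYPE_DIRECT_NODE;
    else if (idx == 2 || idx == 3)
        ntype = TYPE_INDIRECT_NODE;
    else
        ntype = TYPE_DOUBLE_INDIRECT_NODE;

    if (i_nid == 0x0)
        goto skip;

    ret = fsck_chk_node_blk(sbi, &node_blk->i, i_nid,
            ftype, ntype, blk_cnt, cbc, &child);
    if (!ret) {
        *blk_cnt = *blk_cnt + 1;
        continue;
    }
    if (c.fix_on) {
        node_blk->i.i_nid[idx] = 0;    /* detach the broken subtree */
        need_fix = 1;
    }
skip:
    /* account for the file offsets the (skipped) subtree would cover */
    if (ntype == TYPE_DIRECT_NODE)
        child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
    else if (ntype == TYPE_INDIRECT_NODE)
        child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) * NIDS_PER_BLOCK;
    else
        child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
                NIDS_PER_BLOCK * NIDS_PER_BLOCK;
}
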
1053 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1062 node_blk->i.i_compr_blocks = cpu_to_le64(cbc->cnt);
1073 namelen = le32_to_cpu(node_blk->i.i_namelen);
1082 node_blk->i.i_namelen = cpu_to_le32(child_d->i_namelen);
1089 pretty_print_filename(node_blk->i.i_name, namelen, en,
1090 file_enc_name(&node_blk->i));
1093 le32_to_cpu(node_blk->footer.ino),
1098 le32_to_cpu(node_blk->footer.ino),
1103 le32_to_cpu(node_blk->footer.ino), en,
1104 le32_to_cpu(node_blk->i.i_current_depth),
1111 node_blk->i.i_links = cpu_to_le32(child.links);
1118 !(node_blk->i.i_inline & F2FS_INLINE_DOTS)) {
1122 node_blk->i.i_inline |= F2FS_INLINE_DOTS;
1129 i_gc_failures = le16_to_cpu(node_blk->i.i_gc_failures);
1139 le32_to_cpu(node_blk->footer.ino), en,
1143 node_blk->i.i_gc_failures = cpu_to_le16(0);
1154 node_blk->i.i_size = cpu_to_le64(F2FS_BLKSIZE);
1164 node_blk->i.i_links = 0;
1173 node_blk->i.i_ext.len = 0;
1176 f2fs_has_extra_isize(&node_blk->i)) {
1179 provided = le32_to_cpu(node_blk->i.i_inode_checksum);
1180 calculated = f2fs_inode_chksum(node_blk);
1186 node_blk->i.i_inode_checksum =
1196 ret = dev_write_block(node_blk, ni->blk_addr);
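
Lines 1176-1196 close fsck_chk_inode_blk(): the cached extent length is dropped once it can no longer be trusted, the inode checksum is re-verified (and recomputed when the feature is enabled), and only then is the modified block written back once. Sketched, assuming f2fs_inode_chksum(), f2fs_dev_is_writable() and the F2FS_FEATURE_INODE_CHKSUM bit as used in f2fs-tools:

/* Sketch of the checksum fix and the single write-back at the end of the
 * inode check (excerpt). */
if (need_fix)
    node_blk->i.i_ext.len = 0;    /* extent cache may be stale now */

if ((c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) &&
        f2fs_has_extra_isize(&node_blk->i)) {
    __u32 provided = le32_to_cpu(node_blk->i.i_inode_checksum);
    __u32 calculated = f2fs_inode_chksum(node_blk);

    if (provided != calculated && c.fix_on) {
        node_blk->i.i_inode_checksum = cpu_to_le32(calculated);
        need_fix = 1;
    }
}

if (need_fix && f2fs_dev_is_writable()) {
    ret = dev_write_block(node_blk, ni->blk_addr);
    ASSERT(ret >= 0);
}
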
1202 u32 nid, enum FILE_TYPE ftype, struct f2fs_node *node_blk,
1216 block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);
1226 node_blk->dn.addr[idx] = NULL_ADDR;
1254 node_blk->dn.addr[idx] = NULL_ADDR;
1260 ret = dev_write_block(node_blk, ni->blk_addr);
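
Lines 1202-1260 are fsck_chk_dnode_blk(): the same data-pointer walk as in the inode, but over dn.addr[0..ADDRS_PER_BLOCK) with the owning inode passed separately for casefold/encryption context; pointers that fail the data-block check are reset to NULL_ADDR and the node is written back once at the end. Condensed sketch (argument details as in the inode-level loop above, treated as approximate):

/* Sketch of the direct-node walk (excerpt). */
for (idx = 0; idx < ADDRS_PER_BLOCK(inode); idx++, child->pgofs++) {
    block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);

    if (blkaddr == NULL_ADDR)
        continue;

    ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(inode), blkaddr,
            child, le64_to_cpu(inode->i_blocks) == *blk_cnt,
            ftype, nid, idx, ni->version,
            file_is_encrypt(inode));
    if (!ret)
        *blk_cnt = *blk_cnt + 1;
    else if (c.fix_on) {
        node_blk->dn.addr[idx] = NULL_ADDR;
        need_fix = 1;
    }
}

if (need_fix && f2fs_dev_is_writable()) {
    ret = dev_write_block(node_blk, ni->blk_addr);
    ASSERT(ret >= 0);
}
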
1267 enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
1273 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1276 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1279 le32_to_cpu(node_blk->in.nid[i]),
1288 node_blk->in.nid[i] = 0;
1293 child->pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1299 nid_t nid = le32_to_cpu(node_blk->footer.nid);
1302 ret = dev_write_block(node_blk, ni.blk_addr);
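
Lines 1267-1302, fsck_chk_idnode_blk(): prefetch all children, then recurse into each non-zero in.nid[] entry as a TYPE_DIRECT_NODE; a child that fails is cleared under fix_on, skipped entries advance pgofs by one direct node's worth of addresses, and a dirtied indirect node is written back via its own footer nid. Sketch (excerpt):

/* Sketch of the indirect-node walk. */
fsck_reada_all_direct_node_blocks(sbi, node_blk);

for (i = 0; i < NIDS_PER_BLOCK; i++) {
    u32 child_nid = le32_to_cpu(node_blk->in.nid[i]);

    if (child_nid == 0x0)
        goto skip;

    ret = fsck_chk_node_blk(sbi, inode, child_nid, ftype,
            TYPE_DIRECT_NODE, blk_cnt, cbc, child);
    if (!ret) {
        *blk_cnt = *blk_cnt + 1;
        continue;
    }
    if (c.fix_on) {
        node_blk->in.nid[i] = 0;
        need_fix = 1;
    }
skip:
    child->pgofs += ADDRS_PER_BLOCK(&node_blk->i);
}

if (need_fix && f2fs_dev_is_writable()) {
    struct node_info ni;
    nid_t nid = le32_to_cpu(node_blk->footer.nid);

    get_node_info(sbi, nid, &ni);
    ret = dev_write_block(node_blk, ni.blk_addr);
    ASSERT(ret >= 0);
}
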
1310 enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
1316 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1319 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1322 le32_to_cpu(node_blk->in.nid[i]),
1330 node_blk->in.nid[i] = 0;
1335 child->pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1342 nid_t nid = le32_to_cpu(node_blk->footer.nid);
1345 ret = dev_write_block(node_blk, ni.blk_addr);
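
The double-indirect variant at lines 1310-1345 is structurally identical, except each child is checked as an indirect node and a skipped slot advances pgofs by an indirect node's worth of addresses. For orientation, the per-slot coverage implied by lines 1026-1031, 1293 and 1335:

/* Illustrative only: file offsets covered by one nid slot, i.e. how far
 * child->pgofs moves when that subtree is skipped or dropped. */
u64 per_direct    = ADDRS_PER_BLOCK(&node_blk->i);
u64 per_indirect  = per_direct * NIDS_PER_BLOCK;
u64 per_dindirect = per_indirect * NIDS_PER_BLOCK;
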
1758 struct f2fs_node *node_blk, struct child_info *child)
1767 inline_dentry = inline_data_addr(node_blk);
1770 make_dentry_ptr(&d, node_blk, inline_dentry, 2);
1781 dentries = __chk_dentries(sbi, IS_CASEFOLDED(&node_blk->i), child,
1783 file_is_encrypt(&node_blk->i)); // pass through
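
Lines 1758-1783, fsck_chk_inline_dentries(): directories small enough to keep their entries inline are checked with the same __chk_dentries() engine as regular dentry blocks, just pointed at the inline area via make_dentry_ptr(). Sketch (the middle __chk_dentries arguments are the d.* fields, as in fsck.c, and are approximate here):

/* Sketch of the inline-dentry check (excerpt). */
struct f2fs_dentry_ptr d;
void *inline_dentry = inline_data_addr(node_blk);
int dentries;

ASSERT(inline_dentry != NULL);
make_dentry_ptr(&d, node_blk, inline_dentry, 2);    /* 2 selects the inline layout here */

dentries = __chk_dentries(sbi, IS_CASEFOLDED(&node_blk->i), child,
        d.bitmap, d.dentry, d.filename, d.max, 1,
        file_is_encrypt(&node_blk->i));
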
2225 struct f2fs_node *node_blk = NULL;
2232 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
2233 ASSERT(node_blk != NULL);
2238 if (sanity_check_nid(sbi, node->nid, node_blk,
2242 node_blk->i.i_links = cpu_to_le32(node->actual_links);
2247 ret = dev_write_block(node_blk, ni.blk_addr);
2253 free(node_blk);
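
Finally, lines 2225-2253 are the hard-link repair pass: for every inode whose counted links disagreed with i_links, the node is re-read through sanity_check_nid(), i_links is overwritten with the observed count, and the block is written straight back. A hedged sketch of the per-entry fix; the ftype passed to sanity_check_nid() is truncated in the listing and assumed here, and list handling is omitted:

/* Sketch: rewrite i_links for one mis-counted inode (excerpt; 'node' is the
 * pending hard_link_node entry). */
if (sanity_check_nid(sbi, node->nid, node_blk,
            F2FS_FT_MAX, TYPE_INODE, &ni))
    FIX_MSG("Failed to fix, rerun fsck.f2fs");    /* cannot re-validate nid */

node_blk->i.i_links = cpu_to_le32(node->actual_links);

ret = dev_write_block(node_blk, ni.blk_addr);
ASSERT(ret >= 0);
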