Lines Matching defs:leaf

10  * The objective is to do leaf/node validation checks when tree block is read
32 * @type: leaf or node
33 * @identifier: the necessary info to locate the leaf/node.
45 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
64 btrfs_header_level(eb) == 0 ? "leaf" : "node",
91 btrfs_header_level(eb) == 0 ? "leaf" : "node",
101 #define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment) \
103 if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
104 file_extent_err((leaf), (slot), \
106 (#name), btrfs_file_extent_##name((leaf), (fi)), \
108 (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \
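The CHECK_FE_ALIGNED() macro excerpted above (lines 101-108) folds an alignment test plus an error report into a single expression, so several file-extent fields can be validated in one if condition (see lines 293-297 further down). Below is a minimal user-space sketch of that pattern; the struct, the fake_extent_* accessors, and the values are invented stand-ins, not the kernel definitions, and printf stands in for file_extent_err():

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((typeof(x))(a) - 1)) == 0)

struct fake_file_extent {            /* stand-in for btrfs_file_extent_item */
	uint64_t disk_bytenr;
	uint64_t num_bytes;
};

#define fake_extent_disk_bytenr(fi)  ((fi)->disk_bytenr)
#define fake_extent_num_bytes(fi)    ((fi)->num_bytes)

/* Evaluates to true (and reports) when the named field is misaligned. */
#define CHECK_FE_ALIGNED(fi, name, alignment)				\
({									\
	if (!IS_ALIGNED(fake_extent_##name((fi)), (alignment)))	\
		printf("invalid %s, have %llu, should be aligned to %u\n", \
		       #name,						\
		       (unsigned long long)fake_extent_##name((fi)),	\
		       (unsigned)(alignment));				\
	!IS_ALIGNED(fake_extent_##name((fi)), (alignment));		\
})

int main(void)
{
	struct fake_file_extent fi = { .disk_bytenr = 4096, .num_bytes = 5000 };
	uint32_t sectorsize = 4096;

	/* 4096 is aligned, 5000 is not, so num_bytes is reported */
	if (CHECK_FE_ALIGNED(&fi, disk_bytenr, sectorsize) ||
	    CHECK_FE_ALIGNED(&fi, num_bytes, sectorsize))
		return 1;	/* the kernel returns -EUCLEAN at this point */
	return 0;
}

Note that, as in the excerpt, the alignment test is evaluated twice (once for the report, once as the macro's value); that is harmless here because the accessors have no side effects.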
111 static u64 file_extent_end(struct extent_buffer *leaf,
118 if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
119 len = btrfs_file_extent_ram_bytes(leaf, extent);
120 end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
122 len = btrfs_file_extent_num_bytes(leaf, extent);
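file_extent_end() (lines 111-122 above) computes where a file extent stops: inline extents are rounded up to the sector size because their data occupies whole sectors once read, while regular extents simply cover key->offset + num_bytes. A user-space sketch of that computation, with simplified stand-in types and made-up values:

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

enum { FAKE_EXTENT_INLINE, FAKE_EXTENT_REG };

struct fake_extent {
	int type;
	uint64_t ram_bytes;	/* uncompressed size of inline data */
	uint64_t num_bytes;	/* byte range of a regular extent */
};

static uint64_t fake_file_extent_end(uint64_t key_offset,
				     const struct fake_extent *fi,
				     uint32_t sectorsize)
{
	/* inline data is rounded up to whole sectors, regular extents are not */
	if (fi->type == FAKE_EXTENT_INLINE)
		return ALIGN(key_offset + fi->ram_bytes, sectorsize);
	return key_offset + fi->num_bytes;
}

int main(void)
{
	struct fake_extent inl = { FAKE_EXTENT_INLINE, 100, 0 };
	struct fake_extent reg = { FAKE_EXTENT_REG, 0, 8192 };

	/* prints 4096, then 12288 */
	printf("%llu\n", (unsigned long long)fake_file_extent_end(0, &inl, 4096));
	printf("%llu\n", (unsigned long long)fake_file_extent_end(4096, &reg, 4096));
	return 0;
}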
150 btrfs_header_level(eb) == 0 ? "leaf" : "node",
165 static bool check_prev_ino(struct extent_buffer *leaf,
184 if (!is_fstree(btrfs_header_owner(leaf)))
191 dir_item_err(leaf, slot,
196 static int check_extent_data_item(struct extent_buffer *leaf,
200 struct btrfs_fs_info *fs_info = leaf->fs_info;
203 u32 item_size = btrfs_item_size_nr(leaf, slot);
207 file_extent_err(leaf, slot,
219 if (!check_prev_ino(leaf, key, slot, prev_key))
222 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
229 file_extent_err(leaf, slot,
235 if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) {
236 file_extent_err(leaf, slot,
238 btrfs_file_extent_type(leaf, fi),
247 if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) {
248 file_extent_err(leaf, slot,
250 btrfs_file_extent_compression(leaf, fi),
254 if (btrfs_file_extent_encryption(leaf, fi)) {
255 file_extent_err(leaf, slot,
257 btrfs_file_extent_encryption(leaf, fi));
260 if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
263 file_extent_err(leaf, slot,
270 if (btrfs_file_extent_compression(leaf, fi) !=
276 btrfs_file_extent_ram_bytes(leaf, fi)) {
277 file_extent_err(leaf, slot,
280 btrfs_file_extent_ram_bytes(leaf, fi));
288 file_extent_err(leaf, slot,
293 if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
294 CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
295 CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
296 CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
297 CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
301 if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
303 file_extent_err(leaf, slot,
306 btrfs_file_extent_num_bytes(leaf, fi));
311 * Check that no two consecutive file extent items, in the same leaf,
320 prev_fi = btrfs_item_ptr(leaf, slot - 1,
322 prev_end = file_extent_end(leaf, prev_key, prev_fi);
324 file_extent_err(leaf, slot - 1,
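The comment at line 311 and the check that follows it (lines 320-324) enforce that consecutive file extent items of the same inode never overlap: the end of the previous extent, as reported by file_extent_end(), must not go past the key offset of the current one. A tiny standalone sketch of that rule, with invented offsets in which the third extent starts before the second ends:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* (key.offset, end) pairs, sorted by offset as they are in a leaf */
	uint64_t extents[][2] = { {0, 4096}, {4096, 16384}, {12288, 20480} };

	for (int i = 1; i < 3; i++) {
		uint64_t prev_end = extents[i - 1][1];

		if (prev_end > extents[i][0])
			printf("extent %d ends at %llu, beyond start %llu of extent %d\n",
			       i - 1, (unsigned long long)prev_end,
			       (unsigned long long)extents[i][0], i);
	}
	return 0;
}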
334 static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
337 struct btrfs_fs_info *fs_info = leaf->fs_info;
342 generic_err(leaf, slot,
348 generic_err(leaf, slot,
353 if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
354 generic_err(leaf, slot,
356 btrfs_item_size_nr(leaf, slot), csumsize);
363 prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
367 generic_err(leaf, slot - 1,
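A checksum item is a packed array of fixed-size checksums covering consecutive data sectors, which is why line 353 requires the item size to be a multiple of the checksum size. A short sketch of that rule with example sizes (a 4-byte CRC32C per 4 KiB sector; both values are illustrative, not read from a filesystem):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t csumsize = 4;		/* e.g. 4-byte CRC32C */
	uint32_t sectorsize = 4096;
	uint32_t item_size = 4096;	/* checksum bytes stored in this item */

	if (item_size % csumsize != 0) {
		printf("unaligned csum item size %u, should be multiple of %u\n",
		       item_size, csumsize);
		return 1;
	}
	printf("item covers %u data bytes\n",
	       (item_size / csumsize) * sectorsize);	/* 4 MiB of data */
	return 0;
}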
380 static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key,
386 btrfs_item_key_to_cpu(leaf, &item_key, slot);
401 generic_err(leaf, slot,
408 dir_item_err(leaf, slot,
419 inode_item_err(leaf, slot,
423 dir_item_err(leaf, slot,
431 static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
437 btrfs_item_key_to_cpu(leaf, &item_key, slot);
443 generic_err(leaf, slot, "invalid root id 0");
445 dir_item_err(leaf, slot,
452 dir_item_err(leaf, slot,
468 generic_err(leaf, slot, "invalid root id 0 for reloc tree");
474 static int check_dir_item(struct extent_buffer *leaf,
478 struct btrfs_fs_info *fs_info = leaf->fs_info;
480 u32 item_size = btrfs_item_size_nr(leaf, slot);
483 if (!check_prev_ino(leaf, key, slot, prev_key))
485 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
498 dir_item_err(leaf, slot,
505 btrfs_dir_item_key_to_cpu(leaf, di, &location_key);
507 ret = check_root_key(leaf, &location_key, slot);
512 ret = check_inode_key(leaf, &location_key, slot);
516 dir_item_err(leaf, slot,
524 dir_type = btrfs_dir_type(leaf, di);
526 dir_item_err(leaf, slot,
534 dir_item_err(leaf, slot,
541 dir_item_err(leaf, slot,
551 name_len = btrfs_dir_name_len(leaf, di);
552 data_len = btrfs_dir_data_len(leaf, di);
554 dir_item_err(leaf, slot,
560 dir_item_err(leaf, slot,
568 dir_item_err(leaf, slot,
578 dir_item_err(leaf, slot,
592 read_extent_buffer(leaf, namebuf,
596 dir_item_err(leaf, slot,
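check_dir_item() walks each entry packed into the item (lines 551-578 above): the name may not exceed BTRFS_NAME_LEN (255), and the name plus any xattr payload that follow a btrfs_dir_item header must fit inside what remains of the item. A hedged sketch of those two bounds; the header size and item size below are invented, only BTRFS_NAME_LEN matches the on-disk limit:

#include <stdint.h>
#include <stdio.h>

#define BTRFS_NAME_LEN 255	/* on-disk name length limit */

static int check_dir_entry(uint32_t item_size, uint32_t header_size,
			   uint32_t name_len, uint32_t data_len)
{
	if (name_len > BTRFS_NAME_LEN) {
		printf("dir item name too long: %u\n", name_len);
		return -1;
	}
	if (header_size + name_len + data_len > item_size) {
		printf("dir item name and data overflow item: %u + %u + %u > %u\n",
		       header_size, name_len, data_len, item_size);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* header_size stands in for the fixed btrfs_dir_item header */
	check_dir_entry(64, 30, 20, 0);		/* fits                */
	check_dir_entry(64, 30, 300, 0);	/* name over the limit */
	check_dir_entry(64, 30, 20, 40);	/* payload overflows   */
	return 0;
}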
626 btrfs_header_level(eb) == 0 ? "leaf" : "node",
632 static int check_block_group_item(struct extent_buffer *leaf,
636 u32 item_size = btrfs_item_size_nr(leaf, slot);
645 block_group_err(leaf, slot,
651 block_group_err(leaf, slot,
657 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
661 block_group_err(leaf, slot,
669 block_group_err(leaf, slot,
677 block_group_err(leaf, slot,
690 block_group_err(leaf, slot,
703 static void chunk_err(const struct extent_buffer *leaf,
707 const struct btrfs_fs_info *fs_info = leaf->fs_info;
715 is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
722 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
723 if (btrfs_item_ptr_offset(leaf, i) ==
740 "corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
741 BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
752 int btrfs_check_chunk_valid(struct extent_buffer *leaf,
755 struct btrfs_fs_info *fs_info = leaf->fs_info;
768 length = btrfs_chunk_length(leaf, chunk);
769 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
770 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
771 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
772 type = btrfs_chunk_type(leaf, chunk);
778 chunk_err(leaf, chunk, logical,
783 chunk_err(leaf, chunk, logical,
789 chunk_err(leaf, chunk, logical,
795 chunk_err(leaf, chunk, logical,
800 if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
801 chunk_err(leaf, chunk, logical,
803 btrfs_chunk_sector_size(leaf, chunk),
808 chunk_err(leaf, chunk, logical,
813 chunk_err(leaf, chunk, logical,
819 chunk_err(leaf, chunk, logical,
826 chunk_err(leaf, chunk, logical,
830 btrfs_chunk_type(leaf, chunk));
836 chunk_err(leaf, chunk, logical,
842 chunk_err(leaf, chunk, logical,
850 chunk_err(leaf, chunk, logical,
863 chunk_err(leaf, chunk, logical,
875 chunk_err(leaf, chunk, logical,
891 static int check_leaf_chunk_item(struct extent_buffer *leaf,
897 if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) {
898 chunk_err(leaf, chunk, key->offset,
900 btrfs_item_size_nr(leaf, slot),
902 BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
906 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
912 btrfs_item_size_nr(leaf, slot)) {
913 chunk_err(leaf, chunk, key->offset,
915 btrfs_item_size_nr(leaf, slot),
920 return btrfs_check_chunk_valid(leaf, chunk, key->offset);
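check_leaf_chunk_item() first makes sure the item can hold at least one struct btrfs_chunk (line 897), then re-checks the size once num_stripes is known (lines 906-915), since the stripe entries are variable-length data following the chunk header. A user-space sketch of that two-step size check, assuming the usual layout in which one stripe is embedded in the chunk header and the rest follow it; the structs below are simplified stand-ins, not the on-disk format:

#include <stdint.h>
#include <stdio.h>

struct fake_stripe { uint64_t devid, offset; uint8_t uuid[16]; };
struct fake_chunk  {
	uint64_t length, owner, type;
	uint16_t num_stripes;
	struct fake_stripe stripe;	/* first of num_stripes entries */
};

static int check_chunk_item_size(uint32_t item_size, uint16_t num_stripes)
{
	uint32_t want;

	if (item_size < sizeof(struct fake_chunk)) {
		printf("invalid chunk item size: have %u expect at least %zu\n",
		       item_size, sizeof(struct fake_chunk));
		return -1;
	}
	/* one stripe is embedded in the header, the rest follow it */
	want = sizeof(struct fake_chunk) +
	       (num_stripes - 1) * sizeof(struct fake_stripe);
	if (item_size != want) {
		printf("invalid chunk item size: have %u expect %u\n",
		       item_size, want);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_chunk_item_size(sizeof(struct fake_chunk), 1);	/* consistent */
	check_chunk_item_size(sizeof(struct fake_chunk), 3);	/* too small  */
	return 0;
}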
940 btrfs_header_level(eb) == 0 ? "leaf" : "node",
946 static int check_dev_item(struct extent_buffer *leaf,
950 const u32 item_size = btrfs_item_size_nr(leaf, slot);
953 dev_item_err(leaf, slot,
960 dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
965 ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
966 if (btrfs_device_id(leaf, ditem) != key->offset) {
967 dev_item_err(leaf, slot,
969 key->offset, btrfs_device_id(leaf, ditem));
978 if (btrfs_device_bytes_used(leaf, ditem) >
979 btrfs_device_total_bytes(leaf, ditem)) {
980 dev_item_err(leaf, slot,
982 btrfs_device_bytes_used(leaf, ditem),
983 btrfs_device_total_bytes(leaf, ditem));
993 static int check_inode_item(struct extent_buffer *leaf,
996 struct btrfs_fs_info *fs_info = leaf->fs_info;
1000 const u32 item_size = btrfs_item_size_nr(leaf, slot);
1004 ret = check_inode_key(leaf, key, slot);
1009 generic_err(leaf, slot, "invalid item size: has %u expect %zu",
1014 iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
1017 if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
1018 inode_item_err(leaf, slot,
1020 btrfs_inode_generation(leaf, iitem),
1025 if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
1026 inode_item_err(leaf, slot,
1028 btrfs_inode_transid(leaf, iitem), super_gen + 1);
1037 mode = btrfs_inode_mode(leaf, iitem);
1039 inode_item_err(leaf, slot,
1052 inode_item_err(leaf, slot,
1058 if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
1059 inode_item_err(leaf, slot,
1061 btrfs_inode_nlink(leaf, iitem));
1064 if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
1065 inode_item_err(leaf, slot,
1067 btrfs_inode_flags(leaf, iitem) &
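Among the inode item checks above, lines 1017-1028 bound both the inode generation and transid by the superblock generation plus one: the "+ 1" allows for the transaction currently being committed, and anything newer cannot have been written by a valid transaction. A sketch of that bound with made-up values; the message format mirrors the excerpts only loosely:

#include <stdint.h>
#include <stdio.h>

static int check_inode_generations(uint64_t generation, uint64_t transid,
				   uint64_t super_gen)
{
	/* nothing on disk may claim a transaction newer than super_gen + 1 */
	if (generation > super_gen + 1) {
		printf("invalid inode generation: has %llu expect (0, %llu]\n",
		       (unsigned long long)generation,
		       (unsigned long long)(super_gen + 1));
		return -1;
	}
	if (transid > super_gen + 1) {
		printf("invalid inode transid: has %llu expect [0, %llu]\n",
		       (unsigned long long)transid,
		       (unsigned long long)(super_gen + 1));
		return -1;
	}
	return 0;
}

int main(void)
{
	check_inode_generations(100, 101, 100);	/* fine, one in-flight commit */
	check_inode_generations(105, 100, 100);	/* generation from the future */
	return 0;
}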
1074 static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
1077 struct btrfs_fs_info *fs_info = leaf->fs_info;
1083 ret = check_root_key(leaf, key, slot);
1087 if (btrfs_item_size_nr(leaf, slot) != sizeof(ri) &&
1088 btrfs_item_size_nr(leaf, slot) != btrfs_legacy_root_item_size()) {
1089 generic_err(leaf, slot,
1091 btrfs_item_size_nr(leaf, slot), sizeof(ri),
1101 read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
1102 btrfs_item_size_nr(leaf, slot));
1107 generic_err(leaf, slot,
1115 generic_err(leaf, slot,
1123 generic_err(leaf, slot,
1132 generic_err(leaf, slot,
1138 generic_err(leaf, slot,
1144 generic_err(leaf, slot,
1152 generic_err(leaf, slot,
1186 btrfs_header_level(eb) == 0 ? "leaf" : "node",
1191 static int check_extent_item(struct extent_buffer *leaf,
1194 struct btrfs_fs_info *fs_info = leaf->fs_info;
1199 const u32 item_size = btrfs_item_size_nr(leaf, slot);
1207 generic_err(leaf, slot,
1213 generic_err(leaf, slot,
1222 extent_err(leaf, slot,
1248 extent_err(leaf, slot,
1254 end = item_size + btrfs_item_ptr_offset(leaf, slot);
1257 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
1258 flags = btrfs_extent_flags(leaf, ei);
1259 total_refs = btrfs_extent_refs(leaf, ei);
1260 generation = btrfs_extent_generation(leaf, ei);
1262 extent_err(leaf, slot,
1270 extent_err(leaf, slot,
1280 extent_err(leaf, slot,
1287 extent_err(leaf, slot,
1293 extent_err(leaf, slot,
1306 if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
1307 extent_err(leaf, slot,
1309 btrfs_tree_block_level(leaf, info),
1326 extent_err(leaf, slot,
1332 inline_type = btrfs_extent_inline_ref_type(leaf, iref);
1333 inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1335 extent_err(leaf, slot,
1349 extent_err(leaf, slot,
1362 dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
1364 extent_err(leaf, slot,
1369 inline_refs += btrfs_extent_data_ref_count(leaf, dref);
1375 extent_err(leaf, slot,
1380 inline_refs += btrfs_shared_data_ref_count(leaf, sref);
1383 extent_err(leaf, slot, "unknown inline ref type: %u",
1391 extent_err(leaf, slot,
1398 extent_err(leaf, slot,
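The tail of check_extent_item() (lines 1391-1398 above) verifies that the inline references exactly fill the item and that the reference count accumulated from them does not exceed total_refs from the extent item header. Inline refs may account for fewer references than the total, because keyed references stored as separate items make up the difference, but they can never exceed it. A toy sketch of that final accounting, with invented counts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_refs = 3;		/* from the extent item header */
	uint64_t inline_counts[] = { 1, 1 };	/* from each inline ref walked */
	uint64_t inline_refs = 0;

	for (int i = 0; i < 2; i++)
		inline_refs += inline_counts[i];

	if (inline_refs > total_refs)
		printf("invalid extent refs, have %llu, inline refs %llu\n",
		       (unsigned long long)total_refs,
		       (unsigned long long)inline_refs);
	else
		printf("%llu of %llu refs are inline\n",
		       (unsigned long long)inline_refs,
		       (unsigned long long)total_refs);
	return 0;
}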
1406 static int check_simple_keyed_refs(struct extent_buffer *leaf,
1414 if (btrfs_item_size_nr(leaf, slot) != expect_item_size) {
1415 generic_err(leaf, slot,
1417 btrfs_item_size_nr(leaf, slot),
1421 if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
1422 generic_err(leaf, slot,
1424 key->objectid, leaf->fs_info->sectorsize);
1428 !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) {
1429 extent_err(leaf, slot,
1431 key->offset, leaf->fs_info->sectorsize);
1437 static int check_extent_data_ref(struct extent_buffer *leaf,
1441 unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
1442 const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);
1444 if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) {
1445 generic_err(leaf, slot,
1447 btrfs_item_size_nr(leaf, slot),
1451 if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
1452 generic_err(leaf, slot,
1454 key->objectid, leaf->fs_info->sectorsize);
1462 * overflow from the leaf due to hash collisions.
1465 offset = btrfs_extent_data_ref_offset(leaf, dref);
1466 if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
1467 extent_err(leaf, slot,
1469 offset, leaf->fs_info->sectorsize);
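An EXTENT_DATA_REF item is an array of fixed-size data-ref records, so line 1444 requires the item size to be an exact multiple of the record size before the records are walked. A sketch with a stand-in record layout (not the real struct btrfs_extent_data_ref):

#include <stdint.h>
#include <stdio.h>

struct fake_data_ref { uint64_t root, objectid, offset; uint32_t count; };

int main(void)
{
	uint32_t sizes[] = {
		3 * sizeof(struct fake_data_ref),	/* clean array      */
		3 * sizeof(struct fake_data_ref) + 5,	/* trailing garbage */
	};

	for (int i = 0; i < 2; i++) {
		if (sizes[i] % sizeof(struct fake_data_ref) != 0)
			printf("invalid item size, have %u, should be multiple of %zu\n",
			       sizes[i], sizeof(struct fake_data_ref));
		else
			printf("item holds %zu data refs\n",
			       sizes[i] / sizeof(struct fake_data_ref));
	}
	return 0;
}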
1478 static int check_inode_ref(struct extent_buffer *leaf,
1486 if (!check_prev_ino(leaf, key, slot, prev_key))
1489 if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
1490 inode_ref_err(leaf, slot,
1492 btrfs_item_size_nr(leaf, slot),
1493 sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
1497 ptr = btrfs_item_ptr_offset(leaf, slot);
1498 end = ptr + btrfs_item_size_nr(leaf, slot);
1503 inode_ref_err(leaf, slot,
1510 namelen = btrfs_inode_ref_name_len(leaf, iref);
1512 inode_ref_err(leaf, slot,
1531 static int check_leaf_item(struct extent_buffer *leaf,
1540 ret = check_extent_data_item(leaf, key, slot, prev_key);
1543 ret = check_csum_item(leaf, key, slot, prev_key);
1548 ret = check_dir_item(leaf, key, prev_key, slot);
1551 ret = check_inode_ref(leaf, key, prev_key, slot);
1554 ret = check_block_group_item(leaf, key, slot);
1557 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
1558 ret = check_leaf_chunk_item(leaf, chunk, key, slot);
1561 ret = check_dev_item(leaf, key, slot);
1564 ret = check_inode_item(leaf, key, slot);
1567 ret = check_root_item(leaf, key, slot);
1571 ret = check_extent_item(leaf, key, slot);
1576 ret = check_simple_keyed_refs(leaf, key, slot);
1579 ret = check_extent_data_ref(leaf, key, slot);
1585 static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
1587 struct btrfs_fs_info *fs_info = leaf->fs_info;
1591 u32 nritems = btrfs_header_nritems(leaf);
1594 if (btrfs_header_level(leaf) != 0) {
1595 generic_err(leaf, 0,
1596 "invalid level for leaf, have %d expect 0",
1597 btrfs_header_level(leaf));
1609 if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
1610 u64 owner = btrfs_header_owner(leaf);
1619 generic_err(leaf, 0,
1626 generic_err(leaf, 0,
1637 * Check the following things to make sure this is a good leaf, and
1638 * leaf users won't need to bother with similar sanity checks:
1642 * No overlap, no hole, all inside the leaf.
1651 btrfs_item_key_to_cpu(leaf, &key, slot);
1655 generic_err(leaf, slot,
1665 * item data starts at the end of the leaf and grows towards the
1671 item_end_expected = btrfs_item_offset_nr(leaf,
1673 if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
1674 generic_err(leaf, slot,
1676 btrfs_item_end_nr(leaf, slot),
1682 * Check to make sure that we don't point outside of the leaf,
1684 * all point outside of the leaf.
1686 if (btrfs_item_end_nr(leaf, slot) >
1688 generic_err(leaf, slot,
1689 "slot end outside of leaf, have %u expect range [0, %u]",
1690 btrfs_item_end_nr(leaf, slot),
1697 btrfs_item_ptr_offset(leaf, slot)) {
1698 generic_err(leaf, slot,
1702 btrfs_item_ptr_offset(leaf, slot));
1711 ret = check_leaf_item(leaf, &key, slot, &prev_key);
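The per-slot loop in check_leaf() (lines 1651-1711 above) enforces the layout invariant spelled out in the comment at lines 1637-1642: keys are sorted, and item data is packed from the end of the leaf toward the item array with no overlap and no holes, so each item must end exactly where the previous item's data begins and slot 0 must end at the leaf data size. A compact user-space sketch of just the layout part, with an invented leaf size and item table:

#include <stdint.h>
#include <stdio.h>

#define LEAF_DATA_SIZE 16283u	/* example value, not BTRFS_LEAF_DATA_SIZE() */

struct fake_item { uint32_t offset, size; };	/* data range inside the leaf */

static uint32_t item_end(const struct fake_item *it)
{
	return it->offset + it->size;
}

int main(void)
{
	struct fake_item items[] = {
		{ LEAF_DATA_SIZE - 100, 100 },	/* slot 0: ends at leaf end    */
		{ LEAF_DATA_SIZE - 160, 60 },	/* slot 1: ends at slot 0      */
		{ LEAF_DATA_SIZE - 200, 30 },	/* slot 2: leaves a 10B hole   */
	};
	int nritems = 3;

	for (int slot = 0; slot < nritems; slot++) {
		uint32_t expected = slot == 0 ? LEAF_DATA_SIZE
					      : items[slot - 1].offset;

		if (item_end(&items[slot]) != expected)
			printf("slot %d: unexpected item end, have %u expect %u\n",
			       slot, item_end(&items[slot]), expected);
		/* mirrors the "slot end outside of leaf" check at line 1689 */
		if (item_end(&items[slot]) > LEAF_DATA_SIZE)
			printf("slot %d: end outside of leaf\n", slot);
	}
	return 0;
}

The kernel rejects the whole leaf with -EUCLEAN at the first violation rather than attempting a repair; the sketch keeps going and reports every slot purely for illustration.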
1724 int btrfs_check_leaf_full(struct extent_buffer *leaf)
1726 return check_leaf(leaf, true);
1730 int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
1732 return check_leaf(leaf, false);