Lines matching references to path

137 				struct ext4_ext_path *path)
141 if (path->p_bh) {
142 /* path points to block */
143 BUFFER_TRACE(path->p_bh, "get_write_access");
144 err = ext4_journal_get_write_access(handle, path->p_bh);
152 clear_buffer_verified(path->p_bh);
154 /* path points to leaf/index in inode body */
167 struct ext4_ext_path *path)
172 if (path->p_bh) {
173 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
174 /* path points to block */
176 inode, path->p_bh);
179 set_buffer_verified(path->p_bh);
181 /* path points to leaf/index in inode body */
187 #define ext4_ext_dirty(handle, inode, path) \
188 __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
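
The two listing lines above (187-188) show the ext4_ext_dirty() wrapper: it forwards the call site's __func__ and __LINE__ into __ext4_ext_dirty() so journalling failures can be attributed to the caller rather than to the helper. A minimal, self-contained sketch of the same macro pattern, with hypothetical my_dirty()/__my_dirty() names standing in for the real pair:

    #include <stdio.h>

    /* Hypothetical stand-ins for __ext4_ext_dirty()/ext4_ext_dirty(). */
    static int __my_dirty(const char *where, unsigned int line, int handle)
    {
        /* The real helper would mark the extent block dirty in the journal;
         * here we only report which call site asked for it. */
        printf("dirty requested by %s:%u (handle %d)\n", where, line, handle);
        return 0;
    }

    #define my_dirty(handle) \
        __my_dirty(__func__, __LINE__, (handle))

    int main(void)
    {
        return my_dirty(42);    /* expands with main's __func__ and this line number */
    }
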
191 struct ext4_ext_path *path,
194 if (path) {
195 int depth = path->p_depth;
215 ex = path[depth].p_ext;
228 if (path[depth].p_bh)
229 return path[depth].p_bh->b_blocknr;
241 struct ext4_ext_path *path,
246 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
311 struct ext4_ext_path *path = *ppath;
312 int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
594 struct ext4_ext_path *path = NULL;
610 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
612 if (path == NULL) {
617 path[0].p_hdr = ext_inode_hdr(inode);
618 ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
621 path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
628 path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
629 brelse(path[i].p_bh);
630 path[i].p_bh = NULL;
634 bh = read_extent_tree_block(inode, path[i].p_idx++,
642 path[i].p_bh = bh;
643 path[i].p_hdr = ext_block_hdr(bh);
644 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
649 ext4_ext_drop_refs(path);
650 kfree(path);
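
Lines 594-650 show the standard lifecycle of a path array: allocate depth + 1 slots with kcalloc(), fill each level's header/index/buffer as the tree is walked, then release the buffer references and free the array. A user-space sketch of that ownership pattern, with a simplified struct standing in for ext4_ext_path:

    #include <stdlib.h>

    /* One slot per tree level, standing in for struct ext4_ext_path. */
    struct path_slot {
        void *bh;                        /* buffer reference (p_bh) */
    };

    int walk_and_release(int depth)
    {
        /* kcalloc(depth + 1, sizeof(struct ext4_ext_path), ...) analogue */
        struct path_slot *path = calloc(depth + 1, sizeof(*path));
        if (!path)
            return -1;                   /* -ENOMEM in the kernel */

        for (int i = 0; i <= depth; i++)
            path[i].bh = malloc(16);     /* read_extent_tree_block() stand-in */

        /* Teardown mirrors lines 649-650: drop per-level refs, free the array. */
        for (int i = 0; i <= depth; i++) {
            free(path[i].bh);            /* brelse(path[i].p_bh) */
            path[i].bh = NULL;
        }
        free(path);                      /* kfree(path) */
        return 0;
    }

    int main(void)
    {
        return walk_and_release(2);
    }
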
655 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
657 int k, l = path->p_depth;
659 ext_debug(inode, "path:");
660 for (k = 0; k <= l; k++, path++) {
661 if (path->p_idx) {
663 le32_to_cpu(path->p_idx->ei_block),
664 ext4_idx_pblock(path->p_idx));
665 } else if (path->p_ext) {
667 le32_to_cpu(path->p_ext->ee_block),
668 ext4_ext_is_unwritten(path->p_ext),
669 ext4_ext_get_actual_len(path->p_ext),
670 ext4_ext_pblock(path->p_ext));
677 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
684 if (!path)
687 eh = path[depth].p_hdr;
700 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
708 idx = path[level].p_idx;
709 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
719 ex = path[depth].p_ext;
720 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
732 #define ext4_ext_show_path(inode, path)
733 #define ext4_ext_show_leaf(inode, path)
734 #define ext4_ext_show_move(inode, path, newblock, level)
737 void ext4_ext_drop_refs(struct ext4_ext_path *path)
741 if (!path)
743 depth = path->p_depth;
744 for (i = 0; i <= depth; i++, path++) {
745 brelse(path->p_bh);
746 path->p_bh = NULL;
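
The fragments at 737-746 cover nearly the whole of ext4_ext_drop_refs(); a hedged reconstruction of how they fit together (only the declarations and the early return are filled in here):

    void ext4_ext_drop_refs(struct ext4_ext_path *path)
    {
        int depth, i;

        if (!path)
            return;
        depth = path->p_depth;
        for (i = 0; i <= depth; i++, path++) {
            brelse(path->p_bh);          /* release the buffer held for this level */
            path->p_bh = NULL;
        }
    }
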
757 struct ext4_ext_path *path, ext4_lblk_t block)
759 struct ext4_extent_header *eh = path->p_hdr;
778 path->p_idx = l - 1;
779 ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
780 ext4_idx_pblock(path->p_idx));
804 BUG_ON(chix != path->p_idx);
817 struct ext4_ext_path *path, ext4_lblk_t block)
819 struct ext4_extent_header *eh = path->p_hdr;
846 path->p_ext = l - 1;
848 le32_to_cpu(path->p_ext->ee_block),
849 ext4_ext_pblock(path->p_ext),
850 ext4_ext_is_unwritten(path->p_ext),
851 ext4_ext_get_actual_len(path->p_ext));
866 BUG_ON(chex != path->p_ext);
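
Both binary searches above (ext4_ext_binsearch_idx at 757-804 and ext4_ext_binsearch at 817-866) select the rightmost entry whose starting logical block is <= the lookup block, which is why the result is stored as "l - 1" at 778 and 846. A user-space sketch of that search over a sorted array of starting blocks, assuming at least one entry; like the kernel code it never inspects entry 0 directly, so it falls back to it when the block precedes everything else:

    #include <stdio.h>

    /* Index of the last entry with start[i] <= block (entry 0 as the floor). */
    static int binsearch_last_le(const unsigned int *start, int nr,
                                 unsigned int block)
    {
        int l = 1, r = nr - 1;

        while (l <= r) {
            int m = l + (r - l) / 2;

            if (block < start[m])
                r = m - 1;
            else
                l = m + 1;
        }
        return l - 1;                    /* the "l - 1" from lines 778/846 */
    }

    int main(void)
    {
        unsigned int start[] = { 0, 100, 4096, 10000 };

        printf("%d\n", binsearch_last_le(start, 4, 5000));  /* prints 2 */
        return 0;
    }
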
891 struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
908 if (path) {
909 ext4_ext_drop_refs(path);
910 if (depth > path[0].p_maxdepth) {
911 kfree(path);
912 *orig_path = path = NULL;
915 if (!path) {
917 path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
919 if (unlikely(!path))
921 path[0].p_maxdepth = depth + 1;
923 path[0].p_hdr = eh;
924 path[0].p_bh = NULL;
934 ext4_ext_binsearch_idx(inode, path + ppos, block);
935 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
936 path[ppos].p_depth = i;
937 path[ppos].p_ext = NULL;
939 bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
947 path[ppos].p_bh = bh;
948 path[ppos].p_hdr = eh;
951 path[ppos].p_depth = i;
952 path[ppos].p_ext = NULL;
953 path[ppos].p_idx = NULL;
956 ext4_ext_binsearch(inode, path + ppos, block);
958 if (path[ppos].p_ext)
959 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
961 ext4_ext_show_path(inode, path);
963 return path;
966 ext4_ext_drop_refs(path);
967 kfree(path);
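
Note that callers such as the ones at 4109-4112 and 5145-5146 test the path returned by ext4_find_extent() with IS_ERR()/PTR_ERR() rather than against NULL: on failure the errno is encoded into the pointer value itself, and the function has already dropped its references (lines 966-967). A minimal user-space sketch of that error-pointer convention, with a hypothetical find_path() in place of the real call:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified analogues of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    static void *err_ptr(long err)      { return (void *)err; }
    static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
    static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

    /* Hypothetical lookup: returns a path array or an encoded -ENOMEM. */
    static int *find_path(int fail)
    {
        if (fail)
            return err_ptr(-ENOMEM);
        return calloc(4, sizeof(int));
    }

    int main(void)
    {
        int *path = find_path(1);

        if (is_err(path)) {
            printf("lookup failed: %ld\n", ptr_err(path));
            path = NULL;                 /* cf. "path = NULL" at line 4112 */
        }
        free(path);
        return 0;
    }
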
1048 * inserts new subtree into the path, using free index entry
1058 struct ext4_ext_path *path,
1081 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1085 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1086 border = path[depth].p_ext[1].ee_block;
1116 newblock = ext4_ext_new_meta_block(handle, inode, path,
1148 /* move remainder of path[depth] to the new leaf */
1149 if (unlikely(path[depth].p_hdr->eh_entries !=
1150 path[depth].p_hdr->eh_max)) {
1152 path[depth].p_hdr->eh_entries,
1153 path[depth].p_hdr->eh_max);
1158 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1159 ext4_ext_show_move(inode, path, newblock, depth);
1163 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1183 err = ext4_ext_get_access(handle, inode, path + depth);
1186 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1187 err = ext4_ext_dirty(handle, inode, path + depth);
1232 /* move remainder of path[i] to the new index block */
1233 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1234 EXT_LAST_INDEX(path[i].p_hdr))) {
1237 le32_to_cpu(path[i].p_ext->ee_block));
1242 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1243 ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
1244 EXT_MAX_INDEX(path[i].p_hdr));
1245 ext4_ext_show_move(inode, path, newblock, i);
1247 memmove(++fidx, path[i].p_idx,
1268 err = ext4_ext_get_access(handle, inode, path + i);
1271 le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1272 err = ext4_ext_dirty(handle, inode, path + i);
1281 err = ext4_ext_insert_index(handle, inode, path + at,
1404 struct ext4_ext_path *path = *ppath;
1412 curp = path + depth;
1423 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1427 /* refill path */
1428 path = ext4_find_extent(inode,
1431 if (IS_ERR(path))
1432 err = PTR_ERR(path);
1439 /* refill path */
1440 path = ext4_find_extent(inode,
1443 if (IS_ERR(path)) {
1444 err = PTR_ERR(path);
1453 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1471 struct ext4_ext_path *path,
1478 if (unlikely(path == NULL)) {
1479 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1482 depth = path->p_depth;
1485 if (depth == 0 && path->p_ext == NULL)
1488 /* usually the extent in the path covers blocks smaller
1492 ex = path[depth].p_ext;
1495 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1502 ix = path[depth].p_idx;
1503 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1507 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1508 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1536 struct ext4_ext_path *path,
1547 if (unlikely(path == NULL)) {
1548 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1551 depth = path->p_depth;
1554 if (depth == 0 && path->p_ext == NULL)
1557 /* usually the extent in the path covers blocks smaller
1561 ex = path[depth].p_ext;
1564 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1566 "first_extent(path[%d].p_hdr) != ex",
1571 ix = path[depth].p_idx;
1572 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1589 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1597 ix = path[depth].p_idx;
1598 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1610 while (++depth < path->p_depth) {
1612 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1620 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1643 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1647 BUG_ON(path == NULL);
1648 depth = path->p_depth;
1650 if (depth == 0 && path->p_ext == NULL)
1654 struct ext4_ext_path *p = &path[depth];
1656 if (depth == path->p_depth) {
1675 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1679 BUG_ON(path == NULL);
1680 depth = path->p_depth;
1690 if (path[depth].p_idx !=
1691 EXT_LAST_INDEX(path[depth].p_hdr))
1693 le32_to_cpu(path[depth].p_idx[1].ei_block);
1707 struct ext4_ext_path *path)
1715 eh = path[depth].p_hdr;
1716 ex = path[depth].p_ext;
1738 border = path[depth].p_ext->ee_block;
1739 err = ext4_ext_get_access(handle, inode, path + k);
1742 path[k].p_idx->ei_block = border;
1743 err = ext4_ext_dirty(handle, inode, path + k);
1749 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1751 err = ext4_ext_get_access(handle, inode, path + k);
1754 path[k].p_idx->ei_block = border;
1755 err = ext4_ext_dirty(handle, inode, path + k);
1803 struct ext4_ext_path *path,
1811 BUG_ON(path[depth].p_hdr == NULL);
1812 eh = path[depth].p_hdr;
1845 struct ext4_ext_path *path)
1851 if ((path[0].p_depth != 1) ||
1852 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1853 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1868 blk = ext4_idx_pblock(path[0].p_idx);
1869 s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1873 path[1].p_maxdepth = path[0].p_maxdepth;
1874 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1875 path[0].p_depth = 0;
1876 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1877 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1878 path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1880 brelse(path[1].p_bh);
1891 struct ext4_ext_path *path,
1899 BUG_ON(path[depth].p_hdr == NULL);
1900 eh = path[depth].p_hdr;
1903 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1906 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1908 ext4_ext_try_to_merge_up(handle, inode, path);
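
ext4_ext_try_to_merge() (fragments at 1891-1908) first tries to fold the extent into its left neighbour, then merges to the right, and finally calls ext4_ext_try_to_merge_up(), which can pull a small depth-1 tree back into the inode body (lines 1851-1880). A minimal user-space sketch of the contiguity test and compaction only; it deliberately ignores the unwritten-extent and maximum-length checks the real helpers apply:

    #include <stdio.h>

    struct extent { unsigned int start, len; };

    /* Fold arr[i] into arr[i - 1] when the two are logically contiguous,
     * then close the gap, roughly what merging into the previous extent
     * does in the listing. Returns the new entry count. */
    static int try_merge(struct extent *arr, int nr, int i)
    {
        if (i > 0 && arr[i - 1].start + arr[i - 1].len == arr[i].start) {
            arr[i - 1].len += arr[i].len;
            for (int j = i; j < nr - 1; j++)
                arr[j] = arr[j + 1];
            return nr - 1;
        }
        return nr;
    }

    int main(void)
    {
        struct extent e[] = { { 0, 8 }, { 8, 4 }, { 100, 2 } };
        int nr = try_merge(e, 3, 1);

        for (int i = 0; i < nr; i++)
            printf("[%u,+%u] ", e[i].start, e[i].len);   /* [0,+12] [100,+2] */
        printf("\n");
        return 0;
    }
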
1922 struct ext4_ext_path *path)
1931 if (!path[depth].p_ext)
1933 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1936 * get the next allocated block if the extent in the path
1940 b2 = ext4_ext_next_allocated_block(path);
1972 struct ext4_ext_path *path = *ppath;
1988 ex = path[depth].p_ext;
1989 eh = path[depth].p_hdr;
1990 if (unlikely(path[depth].p_hdr == NULL)) {
1991 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2028 path + depth);
2036 eh = path[depth].p_hdr;
2054 path + depth);
2065 eh = path[depth].p_hdr;
2072 eh = path[depth].p_hdr;
2080 next = ext4_ext_next_leaf_block(path);
2087 BUG_ON(npath->p_depth != path->p_depth);
2092 path = npath;
2110 eh = path[depth].p_hdr;
2113 nearex = path[depth].p_ext;
2115 err = ext4_ext_get_access(handle, inode, path + depth);
2165 path[depth].p_ext = nearex;
2173 ext4_ext_try_to_merge(handle, inode, path, nearex);
2177 err = ext4_ext_correct_indexes(handle, inode, path);
2181 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2238 * @path: path in extent tree to @lblk
2242 * block. We don't try too hard to find the beginning of the hole but @path
2249 struct ext4_ext_path *path,
2256 ex = path[depth].p_ext;
2268 next = ext4_ext_next_allocated_block(path);
2306 struct ext4_ext_path *path, int depth)
2313 path = path + depth;
2314 leaf = ext4_idx_pblock(path->p_idx);
2315 if (unlikely(path->p_hdr->eh_entries == 0)) {
2316 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2319 err = ext4_ext_get_access(handle, inode, path);
2323 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2324 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2326 memmove(path->p_idx, path->p_idx + 1, len);
2329 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2330 err = ext4_ext_dirty(handle, inode, path);
2340 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2342 path--;
2343 err = ext4_ext_get_access(handle, inode, path);
2346 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2347 err = ext4_ext_dirty(handle, inode, path);
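
ext4_ext_rm_idx() (body fragments at 2306-2347) deletes an index entry by sliding the tail of the entry array down with memmove() and decrementing eh_entries; the loop at 2340-2347 then propagates the child's new first key upward for as long as the index remains the first entry of its node. A small user-space sketch of the compaction step alone:

    #include <stdio.h>
    #include <string.h>

    /* Drop entry 'victim' from a packed array of 'nr' ints by shifting the
     * tail down one slot, as the memmove() at line 2326 does for index
     * entries. Returns the new count (cf. le16_add_cpu(&eh_entries, -1)). */
    static int remove_entry(int *arr, int nr, int victim)
    {
        if (victim < nr - 1)
            memmove(&arr[victim], &arr[victim + 1],
                    (nr - 1 - victim) * sizeof(arr[0]));
        return nr - 1;
    }

    int main(void)
    {
        int idx[] = { 10, 20, 30, 40 };
        int nr = remove_entry(idx, 4, 1);

        for (int i = 0; i < nr; i++)
            printf("%d ", idx[i]);       /* prints: 10 30 40 */
        printf("\n");
        return 0;
    }
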
2358 * When passing the actual path, the caller should calculate credits
2362 struct ext4_ext_path *path)
2364 if (path) {
2369 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2370 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2585 * @path: The path to the leaf
2595 struct ext4_ext_path *path,
2613 if (!path[depth].p_hdr)
2614 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2615 eh = path[depth].p_hdr;
2616 if (unlikely(path[depth].p_hdr == NULL)) {
2617 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2621 ex = path[depth].p_ext;
2640 path[depth].p_ext = ex;
2711 err = ext4_ext_get_access(handle, inode, path + depth);
2751 err = ext4_ext_dirty(handle, inode, path + depth);
2763 err = ext4_ext_correct_indexes(handle, inode, path);
2790 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2791 err = ext4_ext_rm_idx(handle, inode, path, depth);
2802 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2804 BUG_ON(path->p_idx == NULL);
2806 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2813 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2823 struct ext4_ext_path *path = NULL;
2857 path = ext4_find_extent(inode, end, NULL,
2859 if (IS_ERR(path)) {
2861 return PTR_ERR(path);
2865 ex = path[depth].p_ext;
2869 "path[%d].p_hdr == NULL",
2904 err = ext4_force_split_extent_at(handle, inode, &path,
2922 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2937 if (path) {
2940 path[k].p_block =
2941 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2943 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2945 if (path == NULL) {
2949 path[0].p_maxdepth = path[0].p_depth = depth;
2950 path[0].p_hdr = ext_inode_hdr(inode);
2953 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2963 err = ext4_ext_rm_leaf(handle, inode, path,
2966 brelse(path[i].p_bh);
2967 path[i].p_bh = NULL;
2973 if (!path[i].p_hdr) {
2975 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2978 if (!path[i].p_idx) {
2980 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2981 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2983 path[i].p_hdr,
2984 le16_to_cpu(path[i].p_hdr->eh_entries));
2987 path[i].p_idx--;
2991 i, EXT_FIRST_INDEX(path[i].p_hdr),
2992 path[i].p_idx);
2993 if (ext4_ext_more_to_rm(path + i)) {
2997 i + 1, ext4_idx_pblock(path[i].p_idx));
2998 memset(path + i + 1, 0, sizeof(*path));
2999 bh = read_extent_tree_block(inode, path[i].p_idx,
3014 path[i + 1].p_bh = bh;
3018 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3022 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3026 err = ext4_ext_rm_idx(handle, inode, path, i);
3029 brelse(path[i].p_bh);
3030 path[i].p_bh = NULL;
3037 path->p_hdr->eh_entries);
3057 if (path->p_hdr->eh_entries == 0) {
3062 err = ext4_ext_get_access(handle, inode, path);
3067 err = ext4_ext_dirty(handle, inode, path);
3071 ext4_ext_drop_refs(path);
3072 kfree(path);
3073 path = NULL;
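
The removal loop at 2823-3073 never recurses: the path array is used as an explicit stack, with i incremented when an index level still has entries to descend into (lines 2993-3018) and decremented once a leaf has been processed or a level is exhausted (2963-2967, 3022-3030). A toy user-space sketch of that traversal shape, with a tiny n-ary tree and hypothetical frame/walk names standing in for the real structures:

    #include <stdio.h>

    #define MAX_DEPTH 8

    struct node {
        int          nr;         /* number of children, 0 for a leaf */
        struct node *child[4];
        int          value;
    };

    /* One frame per level, playing the role of a struct ext4_ext_path slot. */
    struct frame {
        struct node *node;
        int          next;       /* next child to visit, cf. path[i].p_idx */
    };

    static void walk(struct node *root)
    {
        struct frame path[MAX_DEPTH] = { { root, 0 } };
        int i = 0;

        while (i >= 0) {
            struct frame *f = &path[i];

            if (f->node->nr == 0) {                 /* leaf: "rm_leaf", go up */
                printf("leaf %d\n", f->node->value);
                i--;
            } else if (f->next < f->node->nr) {     /* more to visit: go down */
                path[i + 1] = (struct frame){ f->node->child[f->next++], 0 };
                i++;
            } else {                                /* level done: "rm_idx", go up */
                i--;
            }
        }
    }

    int main(void)
    {
        struct node l1 = { 0, { 0 }, 1 }, l2 = { 0, { 0 }, 2 };
        struct node mid = { 2, { &l1, &l2 }, 0 };
        struct node root = { 1, { &mid }, 0 };

        walk(&root);                                /* prints: leaf 1, leaf 2 */
        return 0;
    }
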
3166 * @path: the path to the extent
3189 struct ext4_ext_path *path = *ppath;
3202 ext4_ext_show_leaf(inode, path);
3205 ex = path[depth].p_ext;
3216 err = ext4_ext_get_access(handle, inode, path + depth);
3232 ext4_ext_try_to_merge(handle, inode, path, ex);
3234 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3245 * path may lead to new leaf, not to original leaf any more
3248 err = ext4_ext_dirty(handle, inode, path + depth);
3292 ext4_ext_try_to_merge(handle, inode, path, ex);
3293 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3309 * Ignore ext4_ext_dirty return value since we are already in error path
3312 ext4_ext_dirty(handle, inode, path + path->p_depth);
3315 ext4_ext_show_leaf(inode, path);
3337 struct ext4_ext_path *path = *ppath;
3347 ex = path[depth].p_ext;
3368 * Update path is required because previous ext4_split_extent_at() may
3371 path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3372 if (IS_ERR(path))
3373 return PTR_ERR(path);
3375 ex = path[depth].p_ext;
3397 ext4_ext_show_leaf(inode, path);
3413 * - The extent pointed to by 'path' is unwritten.
3414 * - The extent pointed to by 'path' contains a superset
3428 struct ext4_ext_path *path = *ppath;
3450 eh = path[depth].p_hdr;
3451 ex = path[depth].p_ext;
3505 err = ext4_ext_get_access(handle, inode, path + depth);
3551 err = ext4_ext_get_access(handle, inode, path + depth);
3573 err = ext4_ext_dirty(handle, inode, path + depth);
3575 /* Update path to point to the right extent */
3576 path[depth].p_ext = abut_ex;
3687 struct ext4_ext_path *path = *ppath;
3706 ex = path[depth].p_ext;
3728 struct ext4_ext_path *path = *ppath;
3736 ex = path[depth].p_ext;
3760 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3761 if (IS_ERR(path))
3762 return PTR_ERR(path);
3764 ex = path[depth].p_ext;
3767 err = ext4_ext_get_access(handle, inode, path + depth);
3776 ext4_ext_try_to_merge(handle, inode, path, ex);
3779 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3781 ext4_ext_show_leaf(inode, path);
3791 struct ext4_ext_path *path = *ppath;
3806 ex = path[depth].p_ext;
3818 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3819 if (IS_ERR(path))
3820 return PTR_ERR(path);
3822 ex = path[depth].p_ext;
3830 err = ext4_ext_get_access(handle, inode, path + depth);
3839 ext4_ext_try_to_merge(handle, inode, path, ex);
3842 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3845 ext4_ext_show_leaf(inode, path);
3862 struct ext4_ext_path __maybe_unused *path = *ppath;
3869 ext4_ext_show_leaf(inode, path);
3966 ext4_ext_show_leaf(inode, path);
4015 struct ext4_ext_path *path)
4061 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4095 struct ext4_ext_path *path = NULL;
4109 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4110 if (IS_ERR(path)) {
4111 err = PTR_ERR(path);
4112 path = NULL;
4123 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4127 path[depth].p_block);
4132 ex = path[depth].p_ext;
4162 inode, map, &path, &allocated);
4170 ext4_ext_show_leaf(inode, path);
4175 handle, inode, map, &path, flags,
4193 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
4220 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4228 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4232 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4239 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4260 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4268 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4314 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
4384 ext4_ext_show_leaf(inode, path);
4386 ext4_ext_drop_refs(path);
4387 kfree(path);
5029 * Shift the extents of a path structure lying between path[depth].p_ext
5030 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5034 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5042 depth = path->p_depth;
5045 if (depth == path->p_depth) {
5046 ex_start = path[depth].p_ext;
5050 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5053 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5068 err = ext4_ext_get_access(handle, inode, path + depth);
5078 EXT_FIRST_EXTENT(path[depth].p_hdr))
5081 path, ex_start - 1))
5087 ext4_ext_try_to_merge_right(inode, path,
5092 err = ext4_ext_dirty(handle, inode, path + depth);
5101 err = ext4_ext_get_access(handle, inode, path + depth);
5106 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5108 le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5109 err = ext4_ext_dirty(handle, inode, path + depth);
5114 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5136 struct ext4_ext_path *path;
5142 /* Let path point to the last extent */
5143 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5145 if (IS_ERR(path))
5146 return PTR_ERR(path);
5148 depth = path->p_depth;
5149 extent = path[depth].p_ext;
5161 path = ext4_find_extent(inode, start - 1, &path,
5163 if (IS_ERR(path))
5164 return PTR_ERR(path);
5165 depth = path->p_depth;
5166 extent = path[depth].p_ext;
5210 path = ext4_find_extent(inode, *iterator, &path,
5212 if (IS_ERR(path))
5213 return PTR_ERR(path);
5214 depth = path->p_depth;
5215 extent = path[depth].p_ext;
5224 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5225 path[depth].p_ext++;
5227 *iterator = ext4_ext_next_allocated_block(path);
5234 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5238 extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5244 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5248 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
5254 path[depth].p_ext = extent;
5256 ret = ext4_ext_shift_path_extents(path, shift, inode,
5265 ext4_ext_drop_refs(path);
5266 kfree(path);
5431 struct ext4_ext_path *path;
5529 path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5530 if (IS_ERR(path)) {
5536 extent = path[depth].p_ext;
5550 ret = ext4_split_extent_at(handle, inode, &path,
5557 ext4_ext_drop_refs(path);
5558 kfree(path);
5564 ext4_ext_drop_refs(path);
5565 kfree(path);
5715 * path must be revalidated. */
5741 * path must be revalidated. */
5810 struct ext4_ext_path *path;
5825 path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5826 if (IS_ERR(path)) {
5827 err = PTR_ERR(path);
5828 path = NULL;
5839 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5843 depth, path[depth].p_block);
5848 extent = path[depth].p_ext;
5869 first_lblk = ext4_ext_next_allocated_block(path);
5877 ext4_ext_drop_refs(path);
5878 kfree(path);
5888 * replay path. Returns 0 on success and error on failure.
5893 struct ext4_ext_path *path = NULL, *ppath;
5897 path = ext4_find_extent(inode, start, NULL, 0);
5898 if (IS_ERR(path))
5899 return PTR_ERR(path);
5900 ex = path[path->p_depth].p_ext;
5909 ppath = path;
5915 kfree(path);
5916 path = ext4_find_extent(inode, start, NULL, 0);
5917 if (IS_ERR(path))
5919 ppath = path;
5920 ex = path[path->p_depth].p_ext;
5929 kfree(path);
5930 path = ext4_find_extent(inode, start, NULL, 0);
5931 if (IS_ERR(path))
5933 ex = path[path->p_depth].p_ext;
5942 ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5945 ext4_ext_drop_refs(path);
5946 kfree(path);
5954 struct ext4_ext_path *path = NULL;
5959 path = ext4_find_extent(inode, cur, NULL, 0);
5960 if (IS_ERR(path))
5962 ex = path[path->p_depth].p_ext;
5964 ext4_ext_drop_refs(path);
5965 kfree(path);
5973 ext4_ext_try_to_merge(NULL, inode, path, ex);
5975 ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5978 ext4_ext_drop_refs(path);
5979 kfree(path);
6004 struct ext4_ext_path *path = NULL, *path2 = NULL;
6012 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6014 if (IS_ERR(path))
6015 return PTR_ERR(path);
6016 ex = path[path->p_depth].p_ext;
6018 ext4_ext_drop_refs(path);
6019 kfree(path);
6023 ext4_ext_drop_refs(path);
6024 kfree(path);
6042 * their paths. When path is different for 2 successive extents
6043 * we compare the blocks in the path at each level and increment
6050 path = ext4_find_extent(inode, cur, NULL, 0);
6051 if (IS_ERR(path))
6053 numblks += path->p_depth;
6054 ext4_ext_drop_refs(path);
6055 kfree(path);
6057 path = ext4_find_extent(inode, cur, NULL, 0);
6058 if (IS_ERR(path))
6060 ex = path[path->p_depth].p_ext;
6062 ext4_ext_drop_refs(path);
6063 kfree(path);
6070 ext4_ext_drop_refs(path);
6071 kfree(path);
6076 ext4_ext_drop_refs(path);
6077 kfree(path);
6081 for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
6083 if (i <= path->p_depth)
6084 cmp1 = path[i].p_bh ?
6085 path[i].p_bh->b_blocknr : 0;
6092 ext4_ext_drop_refs(path);
6094 kfree(path);
6106 struct ext4_ext_path *path = NULL;
6113 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6115 if (IS_ERR(path))
6116 return PTR_ERR(path);
6117 ex = path[path->p_depth].p_ext;
6119 ext4_ext_drop_refs(path);
6120 kfree(path);
6124 ext4_ext_drop_refs(path);
6125 kfree(path);
6135 path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
6136 if (!IS_ERR_OR_NULL(path)) {
6137 for (j = 0; j < path->p_depth; j++) {
6140 path[j].p_block, 1, 0);
6142 0, path[j].p_block, 1, 1);
6144 ext4_ext_drop_refs(path);
6145 kfree(path);