Lines Matching refs:path

109 static void ext4_ext_drop_refs(struct ext4_ext_path *path)
113 if (!path)
115 depth = path->p_depth;
116 for (i = 0; i <= depth; i++, path++) {
117 brelse(path->p_bh);
118 path->p_bh = NULL;
122 void ext4_free_ext_path(struct ext4_ext_path *path)
124 ext4_ext_drop_refs(path);
125 kfree(path);
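
Together these two helpers define the path's lifetime: drop the buffer_head reference held at each level (clearing it so a second drop is harmless), then free the array itself. A user-space toy of that teardown; toy_path, the explicit depth argument, and free() are my stand-ins for ext4_ext_path, path->p_depth, and brelse()/kfree():

    #include <stdlib.h>

    struct toy_path {
            void *p_bh;             /* stands in for the buffer_head ref */
    };

    static void toy_drop_refs(struct toy_path *path, int depth)
    {
            int i;

            if (!path)
                    return;
            for (i = 0; i <= depth; i++, path++) {
                    free(path->p_bh);       /* the kernel calls brelse() */
                    free(NULL);             /* (kfree/brelse of NULL is a no-op) */
                    path->p_bh = NULL;      /* make a repeat drop harmless */
            }
    }

    static void toy_free_path(struct toy_path *path, int depth)
    {
            if (!path)
                    return;
            toy_drop_refs(path, depth);     /* release every level first */
            free(path);                     /* then the array itself */
    }
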
156 struct ext4_ext_path *path)
160 if (path->p_bh) {
161 /* path points to block */
162 BUFFER_TRACE(path->p_bh, "get_write_access");
164 path->p_bh, EXT4_JTR_NONE);
172 clear_buffer_verified(path->p_bh);
174 /* path points to leaf/index in inode body */
187 struct ext4_ext_path *path)
192 if (path->p_bh) {
193 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
194 /* path points to block */
196 inode, path->p_bh);
199 set_buffer_verified(path->p_bh);
201 /* path points to leaf/index in inode body */
207 #define ext4_ext_dirty(handle, inode, path) \
208 __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
211 struct ext4_ext_path *path,
214 if (path) {
215 int depth = path->p_depth;
235 ex = path[depth].p_ext;
248 if (path[depth].p_bh)
249 return path[depth].p_bh->b_blocknr;
261 struct ext4_ext_path *path,
266 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
331 struct ext4_ext_path *path = *ppath;
332 int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
603 struct ext4_ext_path *path = NULL;
619 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
621 if (path == NULL) {
626 path[0].p_hdr = ext_inode_hdr(inode);
627 ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
630 path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
637 path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
638 brelse(path[i].p_bh);
639 path[i].p_bh = NULL;
643 bh = read_extent_tree_block(inode, path[i].p_idx++,
651 path[i].p_bh = bh;
652 path[i].p_hdr = ext_block_hdr(bh);
653 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
658 ext4_free_ext_path(path);
663 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
665 int k, l = path->p_depth;
667 ext_debug(inode, "path:");
668 for (k = 0; k <= l; k++, path++) {
669 if (path->p_idx) {
671 le32_to_cpu(path->p_idx->ei_block),
672 ext4_idx_pblock(path->p_idx));
673 } else if (path->p_ext) {
675 le32_to_cpu(path->p_ext->ee_block),
676 ext4_ext_is_unwritten(path->p_ext),
677 ext4_ext_get_actual_len(path->p_ext),
678 ext4_ext_pblock(path->p_ext));
685 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
692 if (!path)
695 eh = path[depth].p_hdr;
708 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
716 idx = path[level].p_idx;
717 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
727 ex = path[depth].p_ext;
728 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
740 #define ext4_ext_show_path(inode, path)
741 #define ext4_ext_show_leaf(inode, path)
742 #define ext4_ext_show_move(inode, path, newblock, level)
752 struct ext4_ext_path *path, ext4_lblk_t block)
754 struct ext4_extent_header *eh = path->p_hdr;
774 path->p_idx = l - 1;
775 ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
776 ext4_idx_pblock(path->p_idx));
800 BUG_ON(chix != path->p_idx);
813 struct ext4_ext_path *path, ext4_lblk_t block)
815 struct ext4_extent_header *eh = path->p_hdr;
843 path->p_ext = l - 1;
845 le32_to_cpu(path->p_ext->ee_block),
846 ext4_ext_pblock(path->p_ext),
847 ext4_ext_is_unwritten(path->p_ext),
848 ext4_ext_get_actual_len(path->p_ext));
863 BUG_ON(chex != path->p_ext);
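
Both binsearch helpers perform the same search: find the rightmost entry whose start block is <= the target, scanning from entry 1 because entry 0 is taken as the floor, and leaving p_idx/p_ext pointing at l - 1. A minimal model over a plain array of start blocks (find_le and the toy array are mine; the kernel walks ext4_extent_idx/ext4_extent arrays with pointers):

    #include <stdio.h>

    /* Last entry whose start block is <= block, mirroring the l/r walk
     * that ends with path->p_ext = l - 1 in the kernel code. */
    static int find_le(const unsigned int *start, int nr, unsigned int block)
    {
            int l = 1, r = nr - 1;          /* entry 0 is the floor */

            while (l <= r) {
                    int m = l + (r - l) / 2;

                    if (block < start[m])
                            r = m - 1;
                    else
                            l = m + 1;      /* start[m] <= block: go right */
            }
            return l - 1;
    }

    int main(void)
    {
            unsigned int start[] = { 0, 100, 250, 4096 };

            printf("%d\n", find_le(start, 4, 300));     /* prints 2 */
            return 0;
    }
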
888 struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
905 if (path) {
906 ext4_ext_drop_refs(path);
907 if (depth > path[0].p_maxdepth) {
908 kfree(path);
909 *orig_path = path = NULL;
912 if (!path) {
914 path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
916 if (unlikely(!path))
918 path[0].p_maxdepth = depth + 1;
920 path[0].p_hdr = eh;
921 path[0].p_bh = NULL;
931 ext4_ext_binsearch_idx(inode, path + ppos, block);
932 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
933 path[ppos].p_depth = i;
934 path[ppos].p_ext = NULL;
936 bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
944 path[ppos].p_bh = bh;
945 path[ppos].p_hdr = eh;
948 path[ppos].p_depth = i;
949 path[ppos].p_ext = NULL;
950 path[ppos].p_idx = NULL;
953 ext4_ext_binsearch(inode, path + ppos, block);
955 if (path[ppos].p_ext)
956 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
958 ext4_ext_show_path(inode, path);
960 return path;
963 ext4_free_ext_path(path);
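
Assembled from the calls in this listing (see also 5882-5885 and 5930 below), the usual caller pattern looks like the following. This is a kernel-context sketch, not stand-alone code; lblk is a hypothetical variable. Note that ext4_find_extent() frees the path itself on failure (the ext4_free_ext_path() at 963), so the caller must not free an ERR_PTR:

    struct ext4_ext_path *path;
    struct ext4_extent *ex;

    path = ext4_find_extent(inode, lblk, NULL, 0);
    if (IS_ERR(path))
            return PTR_ERR(path);           /* nothing to free on error */
    ex = path[path->p_depth].p_ext;         /* leaf extent; may be NULL */
    /* ... inspect or modify the leaf ... */
    ext4_free_ext_path(path);               /* drop bh refs, then kfree */
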
1044 * inserts a new subtree into the path, using a free index entry
1054 struct ext4_ext_path *path,
1077 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1081 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1082 border = path[depth].p_ext[1].ee_block;
1112 newblock = ext4_ext_new_meta_block(handle, inode, path,
1145 /* move remainder of path[depth] to the new leaf */
1146 if (unlikely(path[depth].p_hdr->eh_entries !=
1147 path[depth].p_hdr->eh_max)) {
1149 path[depth].p_hdr->eh_entries,
1150 path[depth].p_hdr->eh_max);
1155 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1156 ext4_ext_show_move(inode, path, newblock, depth);
1160 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1180 err = ext4_ext_get_access(handle, inode, path + depth);
1183 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1184 err = ext4_ext_dirty(handle, inode, path + depth);
1230 /* move remainder of path[i] to the new index block */
1231 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1232 EXT_LAST_INDEX(path[i].p_hdr))) {
1235 le32_to_cpu(path[i].p_ext->ee_block));
1240 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1241 ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
1242 EXT_MAX_INDEX(path[i].p_hdr));
1243 ext4_ext_show_move(inode, path, newblock, i);
1245 memmove(++fidx, path[i].p_idx,
1266 err = ext4_ext_get_access(handle, inode, path + i);
1269 le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1270 err = ext4_ext_dirty(handle, inode, path + i);
1279 err = ext4_ext_insert_index(handle, inode, path + at,
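
The split path above computes m, the number of entries to the right of the split point (1155, 1240), copies them into the freshly allocated block with memmove() (1160, 1245), and shrinks the old header by m via le16_add_cpu(..., -m) (1183, 1269). The arithmetic in miniature, with plain ints standing in for the on-disk structures:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned int leaf[6] = { 1, 2, 3, 4, 5, 6 };
            unsigned int newleaf[6] = { 0 };
            int entries = 6, at = 4;        /* split in front of entry 4 */
            int m = entries - at;           /* entries that migrate */

            memmove(newleaf, leaf + at, sizeof(*leaf) * m);
            entries -= m;                   /* le16_add_cpu(&eh_entries, -m) */
            printf("old=%d moved=%d first=%u\n", entries, m, newleaf[0]);
            return 0;                       /* prints old=4 moved=2 first=5 */
    }
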
1404 struct ext4_ext_path *path = *ppath;
1412 curp = path + depth;
1423 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1427 /* refill path */
1428 path = ext4_find_extent(inode,
1431 if (IS_ERR(path))
1432 err = PTR_ERR(path);
1439 /* refill path */
1440 path = ext4_find_extent(inode,
1443 if (IS_ERR(path)) {
1444 err = PTR_ERR(path);
1453 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1471 struct ext4_ext_path *path,
1478 if (unlikely(path == NULL)) {
1479 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1482 depth = path->p_depth;
1485 if (depth == 0 && path->p_ext == NULL)
1488 /* usually the extent in the path covers blocks smaller
1492 ex = path[depth].p_ext;
1495 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1502 ix = path[depth].p_idx;
1503 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1507 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
1535 struct ext4_ext_path *path,
1546 if (unlikely(path == NULL)) {
1547 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1550 depth = path->p_depth;
1553 if (depth == 0 && path->p_ext == NULL)
1556 /* usually the extent in the path covers blocks smaller
1560 ex = path[depth].p_ext;
1563 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1565 "first_extent(path[%d].p_hdr) != ex",
1570 ix = path[depth].p_idx;
1571 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1588 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1596 ix = path[depth].p_idx;
1597 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1609 while (++depth < path->p_depth) {
1611 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1619 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1642 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1646 BUG_ON(path == NULL);
1647 depth = path->p_depth;
1649 if (depth == 0 && path->p_ext == NULL)
1653 struct ext4_ext_path *p = &path[depth];
1655 if (depth == path->p_depth) {
1674 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1678 BUG_ON(path == NULL);
1679 depth = path->p_depth;
1689 if (path[depth].p_idx !=
1690 EXT_LAST_INDEX(path[depth].p_hdr))
1692 le32_to_cpu(path[depth].p_idx[1].ei_block);
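
ext4_ext_next_leaf_block() climbs from the lowest index level toward the root and returns the start of the first index entry to the right of the current position, i.e. p_idx[1].ei_block (1692). A compact model; struct level and the explicit right_start field are mine:

    #include <stdio.h>

    struct level {
            int pos, last;                  /* current and last index slot */
            unsigned int right_start;       /* p_idx[1].ei_block analogue */
    };

    static unsigned int next_leaf_block(const struct level *lv, int depth)
    {
            for (depth--; depth >= 0; depth--)      /* start above the leaf */
                    if (lv[depth].pos != lv[depth].last)
                            return lv[depth].right_start;
            return 0xffffffffu;             /* EXT_MAX_BLOCKS analogue */
    }

    int main(void)
    {
            /* lv[0] = root, lv[1] = lowest index level (exhausted) */
            struct level lv[2] = { { 0, 3, 700 }, { 2, 2, 0 } };

            printf("%u\n", next_leaf_block(lv, 2));     /* prints 700 */
            return 0;
    }
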
1706 struct ext4_ext_path *path)
1714 eh = path[depth].p_hdr;
1715 ex = path[depth].p_ext;
1737 border = path[depth].p_ext->ee_block;
1738 err = ext4_ext_get_access(handle, inode, path + k);
1741 path[k].p_idx->ei_block = border;
1742 err = ext4_ext_dirty(handle, inode, path + k);
1748 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1750 err = ext4_ext_get_access(handle, inode, path + k);
1753 path[k].p_idx->ei_block = border;
1754 err = ext4_ext_dirty(handle, inode, path + k);
1802 struct ext4_ext_path *path,
1810 BUG_ON(path[depth].p_hdr == NULL);
1811 eh = path[depth].p_hdr;
1844 struct ext4_ext_path *path)
1850 if ((path[0].p_depth != 1) ||
1851 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1852 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1867 blk = ext4_idx_pblock(path[0].p_idx);
1868 s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1872 path[1].p_maxdepth = path[0].p_maxdepth;
1873 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1874 path[0].p_depth = 0;
1875 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1876 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1877 path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1879 brelse(path[1].p_bh);
1890 struct ext4_ext_path *path,
1898 BUG_ON(path[depth].p_hdr == NULL);
1899 eh = path[depth].p_hdr;
1902 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1905 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1907 ext4_ext_try_to_merge_up(handle, inode, path);
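
ext4_ext_try_to_merge() first tries to fold ex into its left neighbour (1902), then the possibly merged extent with its right neighbour (1905), and finally attempts to collapse a one-block tree back into the inode body (1907). The core contiguity test, simplified: two extents merge when both the logical and the physical ranges abut. The kernel additionally caps the combined length and requires matching unwritten state; toy_ext and try_merge are mine:

    #include <stdio.h>

    struct toy_ext {
            unsigned int lblk, pblk, len;   /* logical, physical, length */
    };

    /* Fold b into a when both ranges abut; returns 1 on merge. */
    static int try_merge(struct toy_ext *a, const struct toy_ext *b)
    {
            if (a->lblk + a->len != b->lblk || a->pblk + a->len != b->pblk)
                    return 0;
            a->len += b->len;               /* caller then removes b */
            return 1;
    }

    int main(void)
    {
            struct toy_ext a = { 0, 1000, 8 }, b = { 8, 1008, 4 };

            printf("%d len=%u\n", try_merge(&a, &b), a.len); /* 1 len=12 */
            return 0;
    }
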
1921 struct ext4_ext_path *path)
1930 if (!path[depth].p_ext)
1932 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1935 * get the next allocated block if the extent in the path
1939 b2 = ext4_ext_next_allocated_block(path);
1971 struct ext4_ext_path *path = *ppath;
1987 ex = path[depth].p_ext;
1988 eh = path[depth].p_hdr;
1989 if (unlikely(path[depth].p_hdr == NULL)) {
1990 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2027 path + depth);
2052 path + depth);
2069 eh = path[depth].p_hdr;
2077 next = ext4_ext_next_leaf_block(path);
2084 BUG_ON(npath->p_depth != path->p_depth);
2089 path = npath;
2107 eh = path[depth].p_hdr;
2110 nearex = path[depth].p_ext;
2112 err = ext4_ext_get_access(handle, inode, path + depth);
2162 path[depth].p_ext = nearex;
2170 ext4_ext_try_to_merge(handle, inode, path, nearex);
2174 err = ext4_ext_correct_indexes(handle, inode, path);
2178 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2232 * ext4_ext_find_hole - find hole around given block according to the given path
2234 * @path: path in extent tree to @lblk
2238 * block. We don't try too hard to find the beginning of the hole but @path
2245 struct ext4_ext_path *path,
2252 ex = path[depth].p_ext;
2264 next = ext4_ext_next_allocated_block(path);
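
Per the doc comment above, the helper reports a hole around @lblk without trying hard to locate the hole's true beginning, using the found extent plus the next allocated block (2264) as bounds. A simplified model of the three cases (before the found extent, inside it, after it); the return convention here is mine, not the kernel's exact one:

    #include <stdio.h>

    /* Hole length around lblk; *start receives the hole's first block.
     * (s, n) = extent found for lblk, next = next allocated block. */
    static unsigned int hole_around(unsigned int lblk, unsigned int s,
                                    unsigned int n, unsigned int next,
                                    unsigned int *start)
    {
            if (lblk < s) {                 /* hole ends where extent starts */
                    *start = lblk;
                    return s - lblk;
            }
            if (lblk < s + n)               /* lblk is mapped: no hole */
                    return 0;
            *start = s + n;                 /* hole runs up to next mapping */
            return next - *start;
    }

    int main(void)
    {
            unsigned int start = 0;
            unsigned int len = hole_around(5, 10, 4, 50, &start);

            printf("hole at %u, len %u\n", start, len); /* hole at 5, len 5 */
            return 0;
    }
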
2278 struct ext4_ext_path *path, int depth)
2285 path = path + depth;
2286 leaf = ext4_idx_pblock(path->p_idx);
2287 if (unlikely(path->p_hdr->eh_entries == 0)) {
2288 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2291 err = ext4_ext_get_access(handle, inode, path);
2295 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2296 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2298 memmove(path->p_idx, path->p_idx + 1, len);
2301 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2302 err = ext4_ext_dirty(handle, inode, path);
2312 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2314 path--;
2315 err = ext4_ext_get_access(handle, inode, path);
2318 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2319 err = ext4_ext_dirty(handle, inode, path);
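
Removing an index entry compacts the tail of the array over the removed slot (2295-2298) and decrements eh_entries (2301); when the removed entry was the first of its block, the new first block number is propagated to the parent via the path-- walk (2312-2319). The compaction in miniature:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned int idx[5] = { 10, 20, 30, 40, 50 };
            int entries = 5, victim = 1;    /* remove idx[1] */
            int tail = entries - victim - 1;

            memmove(idx + victim, idx + victim + 1, sizeof(*idx) * tail);
            entries--;                      /* le16_add_cpu(&eh_entries, -1) */
            printf("n=%d: %u %u %u %u\n", entries,
                   idx[0], idx[1], idx[2], idx[3]);  /* n=4: 10 30 40 50 */
            return 0;
    }
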
2330 * When passing the actual path, the caller should calculate credits
2334 struct ext4_ext_path *path)
2336 if (path) {
2341 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2342 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2557 * @path: The path to the leaf
2567 struct ext4_ext_path *path,
2585 if (!path[depth].p_hdr)
2586 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2587 eh = path[depth].p_hdr;
2588 if (unlikely(path[depth].p_hdr == NULL)) {
2589 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2593 ex = path[depth].p_ext;
2612 path[depth].p_ext = ex;
2682 err = ext4_ext_get_access(handle, inode, path + depth);
2722 err = ext4_ext_dirty(handle, inode, path + depth);
2734 err = ext4_ext_correct_indexes(handle, inode, path);
2761 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2762 err = ext4_ext_rm_idx(handle, inode, path, depth);
2773 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2775 BUG_ON(path->p_idx == NULL);
2777 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2784 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2794 struct ext4_ext_path *path = NULL;
2828 path = ext4_find_extent(inode, end, NULL,
2830 if (IS_ERR(path)) {
2832 return PTR_ERR(path);
2836 ex = path[depth].p_ext;
2840 "path[%d].p_hdr == NULL",
2875 err = ext4_force_split_extent_at(handle, inode, &path,
2893 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2908 if (path) {
2911 path[k].p_block =
2912 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2914 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2916 if (path == NULL) {
2920 path[0].p_maxdepth = path[0].p_depth = depth;
2921 path[0].p_hdr = ext_inode_hdr(inode);
2924 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2934 err = ext4_ext_rm_leaf(handle, inode, path,
2937 brelse(path[i].p_bh);
2938 path[i].p_bh = NULL;
2944 if (!path[i].p_hdr) {
2946 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2949 if (!path[i].p_idx) {
2951 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2952 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2954 path[i].p_hdr,
2955 le16_to_cpu(path[i].p_hdr->eh_entries));
2958 path[i].p_idx--;
2962 i, EXT_FIRST_INDEX(path[i].p_hdr),
2963 path[i].p_idx);
2964 if (ext4_ext_more_to_rm(path + i)) {
2968 i + 1, ext4_idx_pblock(path[i].p_idx));
2969 memset(path + i + 1, 0, sizeof(*path));
2970 bh = read_extent_tree_block(inode, path[i].p_idx,
2985 path[i + 1].p_bh = bh;
2989 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2993 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2997 err = ext4_ext_rm_idx(handle, inode, path, i);
3000 brelse(path[i].p_bh);
3001 path[i].p_bh = NULL;
3008 path->p_hdr->eh_entries);
3028 if (path->p_hdr->eh_entries == 0) {
3033 err = ext4_ext_get_access(handle, inode, path);
3038 err = ext4_ext_dirty(handle, inode, path);
3042 ext4_free_ext_path(path);
3043 path = NULL;
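
The truncate loop above is an iterative depth-first teardown: p_block caches "entries remaining + 1" per level (2912, 2952), ext4_ext_more_to_rm() consults it to decide whether to descend again (2784, 2964), and a level is popped (brelse, plus rm_idx if it became empty, 2993-3001) once exhausted. A toy of just the control flow; remaining[], todo[] and visit_leaf() are mine:

    #include <stdio.h>

    #define MAXD 4

    static void visit_leaf(int id) { printf("leaf %d\n", id); }

    int main(void)
    {
            /* toy 2-level tree: root has 2 children, each mid has 3 */
            int remaining[MAXD] = { 2, 3 };
            int todo[MAXD], i = 0, depth = 2, leaf = 0;

            todo[0] = remaining[0];
            while (i >= 0) {
                    if (i == depth) {           /* leaf: process, pop */
                            visit_leaf(leaf++);
                            i--;
                    } else if (todo[i] > 0) {   /* more_to_rm(): descend */
                            todo[i]--;
                            i++;
                            if (i < depth)
                                    todo[i] = remaining[i];
                    } else {                    /* level exhausted: pop */
                            i--;
                    }
            }
            return 0;                           /* visits leaves 0..5 */
    }
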
3136 * @path: the path to the extent
3159 struct ext4_ext_path *path = *ppath;
3172 ext4_ext_show_leaf(inode, path);
3175 ex = path[depth].p_ext;
3186 err = ext4_ext_get_access(handle, inode, path + depth);
3202 ext4_ext_try_to_merge(handle, inode, path, ex);
3204 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3215 * the path may lead to a new leaf, not to the original leaf any more
3218 err = ext4_ext_dirty(handle, inode, path + depth);
3262 ext4_ext_try_to_merge(handle, inode, path, ex);
3263 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3279 * Ignore the ext4_ext_dirty return value since we are already in the error path
3282 ext4_ext_dirty(handle, inode, path + path->p_depth);
3285 ext4_ext_show_leaf(inode, path);
3307 struct ext4_ext_path *path = *ppath;
3317 ex = path[depth].p_ext;
3338 * Updating the path is required because the previous ext4_split_extent_at() may
3341 path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3342 if (IS_ERR(path))
3343 return PTR_ERR(path);
3345 ex = path[depth].p_ext;
3366 ext4_ext_show_leaf(inode, path);
3382 * - The extent pointed to by 'path' is unwritten.
3383 * - The extent pointed to by 'path' contains a superset
3397 struct ext4_ext_path *path = *ppath;
3419 eh = path[depth].p_hdr;
3420 ex = path[depth].p_ext;
3474 err = ext4_ext_get_access(handle, inode, path + depth);
3520 err = ext4_ext_get_access(handle, inode, path + depth);
3542 err = ext4_ext_dirty(handle, inode, path + depth);
3544 /* Update path to point to the right extent */
3545 path[depth].p_ext = abut_ex;
3655 struct ext4_ext_path *path = *ppath;
3674 ex = path[depth].p_ext;
3696 struct ext4_ext_path *path = *ppath;
3704 ex = path[depth].p_ext;
3728 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3729 if (IS_ERR(path))
3730 return PTR_ERR(path);
3732 ex = path[depth].p_ext;
3735 err = ext4_ext_get_access(handle, inode, path + depth);
3744 ext4_ext_try_to_merge(handle, inode, path, ex);
3747 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3749 ext4_ext_show_leaf(inode, path);
3759 struct ext4_ext_path *path = *ppath;
3774 ex = path[depth].p_ext;
3786 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3787 if (IS_ERR(path))
3788 return PTR_ERR(path);
3790 ex = path[depth].p_ext;
3798 err = ext4_ext_get_access(handle, inode, path + depth);
3807 ext4_ext_try_to_merge(handle, inode, path, ex);
3810 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3813 ext4_ext_show_leaf(inode, path);
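
Every in-place leaf mutation in this file follows the same journaled sequence visible in the fragments above (3186/3202/3204, 3735/3744/3747, 3798/3807/3810): declare write intent on the block through __ext4_ext_get_access() (156-174), mutate the extent, try to coalesce neighbours, then push the buffer to the journal through __ext4_ext_dirty() (187-208). As a kernel-context sketch, with error handling trimmed and the specific mutation (here ext4_ext_mark_unwritten()) standing in for whatever change the caller makes:

    err = ext4_ext_get_access(handle, inode, path + depth);
    if (err)
            goto out;                           /* journal refused access */
    ext4_ext_mark_unwritten(ex);                /* the mutation varies */
    ext4_ext_try_to_merge(handle, inode, path, ex);
    err = ext4_ext_dirty(handle, inode, path + path->p_depth);
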
3830 struct ext4_ext_path __maybe_unused *path = *ppath;
3837 ext4_ext_show_leaf(inode, path);
3934 ext4_ext_show_leaf(inode, path);
3983 struct ext4_ext_path *path)
4029 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4043 * locate and expand the hole from the given @path, and then adjust it
4049 struct ext4_ext_path *path,
4056 len = ext4_ext_find_hole(inode, path, &hole_start);
4126 struct ext4_ext_path *path = NULL;
4140 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4141 if (IS_ERR(path)) {
4142 err = PTR_ERR(path);
4143 path = NULL;
4154 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4158 path[depth].p_block);
4163 ex = path[depth].p_ext;
4193 inode, map, &path, &allocated);
4201 ext4_ext_show_leaf(inode, path);
4206 handle, inode, map, &path, flags,
4223 len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
4241 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4249 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4253 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4260 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4281 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4289 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4335 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
4405 ext4_ext_show_leaf(inode, path);
4407 ext4_free_ext_path(path);
5050 * Shift the extents of a path structure lying between path[depth].p_ext
5051 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5055 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5063 depth = path->p_depth;
5066 if (depth == path->p_depth) {
5067 ex_start = path[depth].p_ext;
5071 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5074 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5089 err = ext4_ext_get_access(handle, inode, path + depth);
5099 EXT_FIRST_EXTENT(path[depth].p_hdr))
5102 path, ex_start - 1))
5108 ext4_ext_try_to_merge_right(inode, path,
5113 err = ext4_ext_dirty(handle, inode, path + depth);
5122 err = ext4_ext_get_access(handle, inode, path + depth);
5127 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5129 le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5130 err = ext4_ext_dirty(handle, inode, path + depth);
5135 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
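
After the leaf's ee_block fields have been shifted, the loop above walks toward the root adjusting ei_block by the same amount (5127/5129), stopping as soon as the entry it came through is not the first index of its level (5135), since higher keys are then unchanged. In miniature, ignoring the leaf-level details (struct lvl and fixup are mine; this shows the SHIFT_LEFT case):

    #include <stdio.h>

    struct lvl {
            unsigned int key;               /* ei_block analogue */
            int first;                      /* p_idx == EXT_FIRST_INDEX? */
    };

    static void fixup(struct lvl *lv, int depth, unsigned int shift)
    {
            while (--depth >= 0) {          /* from the leaf's parent up */
                    lv[depth].key -= shift; /* le32_add_cpu(..., -shift) */
                    if (!lv[depth].first)   /* not first: parents keep keys */
                            break;
            }
    }

    int main(void)
    {
            struct lvl lv[2] = { { 100, 1 }, { 160, 0 } };

            fixup(lv, 2, 16);
            printf("%u %u\n", lv[0].key, lv[1].key);    /* prints 100 144 */
            return 0;
    }
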
5157 struct ext4_ext_path *path;
5163 /* Let path point to the last extent */
5164 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5166 if (IS_ERR(path))
5167 return PTR_ERR(path);
5169 depth = path->p_depth;
5170 extent = path[depth].p_ext;
5182 path = ext4_find_extent(inode, start - 1, &path,
5184 if (IS_ERR(path))
5185 return PTR_ERR(path);
5186 depth = path->p_depth;
5187 extent = path[depth].p_ext;
5231 path = ext4_find_extent(inode, *iterator, &path,
5233 if (IS_ERR(path))
5234 return PTR_ERR(path);
5235 depth = path->p_depth;
5236 extent = path[depth].p_ext;
5245 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5246 path[depth].p_ext++;
5248 *iterator = ext4_ext_next_allocated_block(path);
5255 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5259 extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5265 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5269 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
5275 path[depth].p_ext = extent;
5277 ret = ext4_ext_shift_path_extents(path, shift, inode,
5286 ext4_free_ext_path(path);
5439 struct ext4_ext_path *path;
5530 path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5531 if (IS_ERR(path)) {
5537 extent = path[depth].p_ext;
5551 ret = ext4_split_extent_at(handle, inode, &path,
5558 ext4_free_ext_path(path);
5564 ext4_free_ext_path(path);
5703 * path must be revalidated. */
5729 * path must be revalidated. */
5796 struct ext4_ext_path *path;
5811 path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5812 if (IS_ERR(path)) {
5813 err = PTR_ERR(path);
5814 path = NULL;
5825 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5829 depth, path[depth].p_block);
5834 extent = path[depth].p_ext;
5855 first_lblk = ext4_ext_next_allocated_block(path);
5863 ext4_free_ext_path(path);
5873 * replay path. Returns 0 on success and error on failure.
5878 struct ext4_ext_path *path = NULL, *ppath;
5882 path = ext4_find_extent(inode, start, NULL, 0);
5883 if (IS_ERR(path))
5884 return PTR_ERR(path);
5885 ex = path[path->p_depth].p_ext;
5894 ppath = path;
5900 kfree(path);
5901 path = ext4_find_extent(inode, start, NULL, 0);
5902 if (IS_ERR(path))
5904 ppath = path;
5905 ex = path[path->p_depth].p_ext;
5914 kfree(path);
5915 path = ext4_find_extent(inode, start, NULL, 0);
5916 if (IS_ERR(path))
5918 ex = path[path->p_depth].p_ext;
5927 ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5930 ext4_free_ext_path(path);
5938 struct ext4_ext_path *path = NULL;
5943 path = ext4_find_extent(inode, cur, NULL, 0);
5944 if (IS_ERR(path))
5946 ex = path[path->p_depth].p_ext;
5948 ext4_free_ext_path(path);
5956 ext4_ext_try_to_merge(NULL, inode, path, ex);
5958 ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5961 ext4_free_ext_path(path);
5986 struct ext4_ext_path *path = NULL, *path2 = NULL;
5994 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5996 if (IS_ERR(path))
5997 return PTR_ERR(path);
5998 ex = path[path->p_depth].p_ext;
6000 ext4_free_ext_path(path);
6004 ext4_free_ext_path(path);
6022 * their paths. When the path differs for 2 successive extents,
6023 * we compare the blocks in the path at each level and increment
6030 path = ext4_find_extent(inode, cur, NULL, 0);
6031 if (IS_ERR(path))
6033 numblks += path->p_depth;
6034 ext4_free_ext_path(path);
6036 path = ext4_find_extent(inode, cur, NULL, 0);
6037 if (IS_ERR(path))
6039 ex = path[path->p_depth].p_ext;
6041 ext4_free_ext_path(path);
6048 ext4_free_ext_path(path);
6053 ext4_free_ext_path(path);
6056 for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
6058 if (i <= path->p_depth)
6059 cmp1 = path[i].p_bh ?
6060 path[i].p_bh->b_blocknr : 0;
6067 ext4_free_ext_path(path);
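
The credit-estimation walk described at 6022-6026 fetches the paths of two successive extents and, level by level, counts a block only where the paths reference different tree blocks, using b_blocknr with 0 standing for the bufferless in-inode root (6056-6060). A toy version over plain block-number arrays (count_new_meta is mine):

    #include <stdio.h>

    /* Count levels where two adjacent extents live in different tree
     * blocks; blk == 0 models the in-inode root (no buffer_head). */
    static int count_new_meta(const unsigned long *a,
                              const unsigned long *b, int depth)
    {
            int i, n = 0;

            for (i = 0; i <= depth; i++)
                    if (a[i] != b[i])
                            n++;
            return n;
    }

    int main(void)
    {
            unsigned long p1[] = { 0, 210, 340 };
            unsigned long p2[] = { 0, 210, 355 };

            printf("%d\n", count_new_meta(p1, p2, 2));  /* prints 1 */
            return 0;
    }
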
6079 struct ext4_ext_path *path = NULL;
6089 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6091 if (IS_ERR(path))
6092 return PTR_ERR(path);
6093 ex = path[path->p_depth].p_ext;
6095 ext4_free_ext_path(path);
6099 ext4_free_ext_path(path);
6109 path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
6110 if (!IS_ERR_OR_NULL(path)) {
6111 for (j = 0; j < path->p_depth; j++) {
6114 path[j].p_block, 1, 0);
6116 0, path[j].p_block, 1, 1);
6118 ext4_free_ext_path(path);