Lines Matching defs:path

46  * A fs_path is a helper to dynamically build path names with unknown size.
48 * It allows fast adding of path elements on the right side (normal path) and
49 * fast adding to the left side (reversed path). A reversed path can also be
64 * Average path length does not exceed 200 bytes, we'll have
500 * the fast path happen most of the time.
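The comment fragments above (file lines 46-64) describe the idea behind fs_path: names discovered while walking backrefs from an inode up toward the tree root arrive child-first, so a path that grows to the left ("reversed") can take each new component as a cheap prepend instead of shifting the whole string. Below is a minimal user-space sketch of that idea, assuming an invented rev_path type; it is not the kernel's fs_path, which additionally keeps a small inline buffer (the comment at line 64 notes average paths stay under 200 bytes) and falls back to a heap allocation for longer names.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: a path assembled right-to-left in a fixed buffer. */
struct rev_path {
        char buf[256];
        char *start;            /* current beginning of the path */
};

static void rev_path_init(struct rev_path *p)
{
        p->start = p->buf + sizeof(p->buf) - 1;
        *p->start = '\0';
}

/* Prepend one component (and a separator) without moving existing bytes. */
static int rev_path_prepend(struct rev_path *p, const char *name)
{
        size_t len = strlen(name);
        int need_sep = (*p->start != '\0');

        if (p->start - p->buf < (ptrdiff_t)(len + need_sep))
                return -1;      /* a real implementation would grow the buffer */
        if (need_sep)
                *--p->start = '/';
        p->start -= len;
        memcpy(p->start, name, len);
        return 0;
}

int main(void)
{
        struct rev_path p;

        rev_path_init(&p);
        /* Components in the order a backref walk finds them: leaf first. */
        rev_path_prepend(&p, "file.txt");
        rev_path_prepend(&p, "subdir");
        rev_path_prepend(&p, "dir");
        printf("%s\n", p.start);        /* prints dir/subdir/file.txt */
        return 0;
}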
631 struct btrfs_path *path;
633 path = btrfs_alloc_path();
634 if (!path)
636 path->search_commit_root = 1;
637 path->skip_locking = 1;
638 path->need_commit_sem = 1;
639 return path;
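Taken together, file lines 631-639 are send's small path allocator; the one line the query does not match, between 634 and 636, presumably just returns NULL when the allocation fails. A reconstruction of how the helper reads, with that single line filled in as an assumption: it hands back a btrfs_path flagged for the read-only, lock-free commit-root searches that the call sites below rely on.

static struct btrfs_path *alloc_path_for_send(void)
{
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return NULL;    /* assumed: not among the matched lines */
        path->search_commit_root = 1;
        path->skip_locking = 1;
        path->need_commit_sem = 1;
        return path;
}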
840 struct fs_path *path, struct fs_path *lnk)
845 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
851 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
864 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
869 btrfs_debug(fs_info, "send_unlink %s", path->start);
875 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
887 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
892 btrfs_debug(fs_info, "send_rmdir %s", path->start);
898 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
925 struct btrfs_path *path;
929 path = alloc_path_for_send();
930 if (!path)
936 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
946 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
948 info->size = btrfs_inode_size(path->nodes[0], ii);
949 info->gen = btrfs_inode_generation(path->nodes[0], ii);
950 info->mode = btrfs_inode_mode(path->nodes[0], ii);
951 info->uid = btrfs_inode_uid(path->nodes[0], ii);
952 info->gid = btrfs_inode_gid(path->nodes[0], ii);
953 info->rdev = btrfs_inode_rdev(path->nodes[0], ii);
954 info->nlink = btrfs_inode_nlink(path->nodes[0], ii);
959 info->fileattr = btrfs_inode_flags(path->nodes[0], ii);
962 btrfs_free_path(path);
988 * path must point to the INODE_REF or INODE_EXTREF when called.
990 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
994 struct extent_buffer *eb = path->nodes[0];
1001 int slot = path->slots[0];
1106 * path must point to the dir item when called.
1108 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1138 eb = path->nodes[0];
1139 slot = path->slots[0];
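iterate_inode_ref() and iterate_dir_item() (file lines 990 and 1108) share one shape: the caller positions the btrfs_path on the item, and the iterator decodes each name out of the extent buffer and hands it to a callback along with an opaque context, ending the walk early when the callback returns non-zero (which is how __copy_first_ref(), used at line 1268 below, appears to keep only the first name). A rough user-space sketch of that callback-iterator pattern, with invented names and a flat array standing in for the extent buffer:

#include <stdio.h>

/* Illustrative stand-in for one decoded reference. */
struct ref_entry {
        const char *name;
        unsigned long long index;
};

typedef int (*ref_iterate_cb)(int num, const struct ref_entry *ref, void *ctx);

/* Visit every entry; a non-zero return from the callback stops the walk. */
static int iterate_refs(const struct ref_entry *refs, int nr,
                        ref_iterate_cb cb, void *ctx)
{
        for (int i = 0; i < nr; i++) {
                int ret = cb(i, &refs[i], ctx);
                if (ret)
                        return ret;
        }
        return 0;
}

/* Example callback: remember the first name, then end the iteration. */
static int copy_first_ref(int num, const struct ref_entry *ref, void *ctx)
{
        *(const char **)ctx = ref->name;
        return 1;
}

int main(void)
{
        struct ref_entry refs[] = { { "foo", 2 }, { "bar", 3 } };
        const char *first = NULL;

        iterate_refs(refs, 2, copy_first_ref, &first);
        printf("first ref: %s\n", first);
        return 0;
}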
1232 * Retrieve the first path of an inode. If an inode has more than one
1236 u64 ino, struct fs_path *path)
1246 fs_path_reset(path);
1268 __copy_first_ref, path);
1488 * backref walking to determine which roots have a path to the leaf.
1567 * path must point to the extent item when called.
1570 struct btrfs_path *path,
1582 struct extent_buffer *eb = path->nodes[0];
1598 fi = btrfs_item_ptr(eb, path->slots[0], struct btrfs_file_extent_item);
1740 struct btrfs_path *path;
1748 path = alloc_path_for_send();
1749 if (!path)
1755 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1774 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1776 type = btrfs_file_extent_type(path->nodes[0], ei);
1784 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1794 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1796 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1799 btrfs_free_path(path);
1812 struct btrfs_path *path;
1818 path = alloc_path_for_send();
1819 if (!path)
1832 path, BTRFS_FIRST_FREE_OBJECTID,
1834 btrfs_release_path(path);
1852 path, BTRFS_FIRST_FREE_OBJECTID,
1854 btrfs_release_path(path);
1871 btrfs_free_path(path);
1988 struct btrfs_path *path;
1991 path = alloc_path_for_send();
1992 if (!path)
1995 di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
2000 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
2008 btrfs_free_path(path);
2022 struct btrfs_path *path;
2026 path = alloc_path_for_send();
2027 if (!path)
2034 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
2038 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2039 path->slots[0]);
2049 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
2051 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
2052 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
2058 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
2060 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
2061 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
2063 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
2067 btrfs_release_path(path);
2078 btrfs_free_path(path);
2251 * if it has to use the path as returned by get_cur_path or the orphan name.
2412 * We walk the path up to the root. For every inode in between, we check if it
2423 * tried to get the path to the dir items, it would get a path inside that
2506 struct btrfs_path *path;
2513 path = btrfs_alloc_path();
2514 if (!path)
2519 btrfs_free_path(path);
2528 &key, path, 1, 0);
2536 leaf = path->nodes[0];
2537 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2543 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2546 btrfs_release_path(path);
2584 btrfs_free_path(path);
2720 struct btrfs_path *path = NULL;
2731 path = alloc_path_for_send();
2732 if (!path) {
2740 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2746 eb = path->nodes[0];
2747 slot = path->slots[0];
2769 btrfs_free_path(path);
2778 * full path - in which case we would generate outdated paths (pre-rename)
2832 * a valid path yet because we did not process the refs yet. So, the inode
2944 struct btrfs_path *path = NULL;
2953 path = alloc_path_for_send();
2954 if (!path)
2961 btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) {
2962 struct extent_buffer *eb = path->nodes[0];
2970 di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item);
2984 btrfs_free_path(path);
3048 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
3050 ref->full_path = path;
3091 struct fs_path *path)
3104 ret = send_rename(sctx, path, orphan);
3194 struct btrfs_path *path;
3213 path = alloc_path_for_send();
3214 if (!path)
3230 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3235 ASSERT(path->slots[0] > 0);
3236 if (WARN_ON(path->slots[0] == 0)) {
3240 path->slots[0]--;
3243 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3250 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3252 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3259 btrfs_release_path(path);
3266 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
3273 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3275 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3302 btrfs_free_path(path);
3760 struct btrfs_path *path;
3772 path = alloc_path_for_send();
3773 if (!path)
3780 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3788 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3802 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3837 btrfs_free_path(path);
3875 * possible path (in case ino2 is not a directory and has multiple hard links).
3887 struct btrfs_path *path = NULL;
3897 path = alloc_path_for_send();
3898 if (!path) {
3907 btrfs_for_each_slot(root, &key, &key, path, iter_ret) {
3908 struct extent_buffer *leaf = path->nodes[0];
3909 int slot = path->slots[0];
3955 btrfs_free_path(path);
3984 * that ancestor is processed to avoid path build infinite loops (done
3998 * the send stream or getting into infinite path build
4069 * we use here a new path.
4096 * inode, we might need to orphanize another inode, but the path we have in the
4120 * collecting all its new references we set a full path of "d1/d2" for its new
4126 * orphanization we use a source path corresponding to the path we stored in the
4128 * receiver fail since the path component "d1/" no longer exists, it was renamed
4130 * must recompute the path in the new reference and use it for the new
4197 * get the path of the first ref as it would like while receiving at
4231 * that the path used for link and rename commands don't use an
4258 * path in "valid_path" already contains the orphanized name for 259.
4307 * the source path when performing its rename
4332 * current path of cur_ino) again because it
4352 * processed, recompute the current path because
4353 * that directory may be part of the path.
4462 * so our reference's full path, which was
4534 * to recompute the full path for deleted names,
4535 * since any such path was computed before we
4656 struct fs_path *path = NULL;
4659 path = fs_path_alloc();
4660 if (!path) {
4671 ret = get_cur_path(sctx, dir, dir_gen, path);
4674 ret = fs_path_add_path(path, name);
4680 set_ref_path(ref, path);
4686 if (path && (!ref || !ref->full_path))
4687 fs_path_free(path);
4810 struct btrfs_path *path;
4816 path = alloc_path_for_send();
4817 if (!path)
4836 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
4842 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4851 btrfs_release_path(path);
4860 btrfs_free_path(path);
4865 struct fs_path *path,
4875 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4887 struct fs_path *path,
4896 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
5015 struct btrfs_path *path,
5029 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
5114 struct btrfs_path *path;
5118 path = alloc_path_for_send();
5119 if (!path)
5127 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
5134 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
5142 btrfs_free_path(path);
5146 static int send_verity(struct send_ctx *sctx, struct fs_path *path,
5155 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
5522 struct btrfs_path *path, u64 offset,
5529 struct extent_buffer *leaf = path->nodes[0];
5554 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5555 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
5557 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
5587 static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
5594 struct extent_buffer *leaf = path->nodes[0];
5621 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5622 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
5692 static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
5696 struct extent_buffer *leaf = path->nodes[0];
5704 ei = btrfs_item_ptr(leaf, path->slots[0],
5720 path->slots[0]) <= len) {
5721 return send_encoded_inline_extent(sctx, path, offset,
5725 return send_encoded_extent(sctx, path, offset, len);
5818 struct btrfs_path *path;
5826 path = alloc_path_for_send();
5827 if (!path)
5830 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5840 leaf = path->nodes[0];
5862 btrfs_free_path(path);
5870 struct btrfs_path *path;
5895 path = alloc_path_for_send();
5896 if (!path)
5904 btrfs_release_path(path);
5934 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5937 if (ret > 0 && path->slots[0] > 0) {
5938 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5941 path->slots[0]--;
5945 struct extent_buffer *leaf = path->nodes[0];
5946 int slot = path->slots[0];
5955 ret = btrfs_next_leaf(clone_root->root, path);
6123 path->slots[0]++;
6131 btrfs_free_path(path);
6136 struct btrfs_path *path,
6145 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
6154 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
6156 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
6157 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
6158 ret = clone_range(sctx, path, clone_root, disk_byte,
6161 ret = send_extent_data(sctx, path, offset, end - offset);
6173 struct btrfs_path *path = NULL;
6190 path = alloc_path_for_send();
6191 if (!path)
6232 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
6243 eb = path->nodes[0];
6244 slot = path->slots[0];
6322 ret = btrfs_next_item(sctx->parent_root, path);
6326 eb = path->nodes[0];
6327 slot = path->slots[0];
6353 btrfs_free_path(path);
6359 struct btrfs_path *path;
6364 path = alloc_path_for_send();
6365 if (!path)
6373 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
6377 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
6381 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
6383 btrfs_free_path(path);
6391 struct btrfs_path *path;
6397 path = alloc_path_for_send();
6398 if (!path)
6404 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6407 if (ret > 0 && path->slots[0] > 0)
6408 path->slots[0]--;
6411 struct extent_buffer *leaf = path->nodes[0];
6412 int slot = path->slots[0];
6417 ret = btrfs_next_leaf(root, path);
6435 extent_end = btrfs_file_extent_end(path);
6445 path->slots[0]++;
6449 btrfs_free_path(path);
6453 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
6467 if (path->slots[0] == 0 &&
6492 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
6497 struct btrfs_path *path,
6507 ret = is_extent_unchanged(sctx, path, key);
6518 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
6520 type = btrfs_file_extent_type(path->nodes[0], ei);
6535 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
6542 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
6547 ret = send_write_or_clone(sctx, path, key, found_clone);
6551 ret = maybe_send_hole(sctx, path, key);
6561 struct btrfs_path *path;
6566 path = alloc_path_for_send();
6567 if (!path)
6573 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
6580 ret = process_extent(sctx, path, &found_key);
6588 btrfs_free_path(path);
6651 * the old path (pre move/rename) of our current inode, and the
7156 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
7176 leaf = path->nodes[0];
7177 item_size = btrfs_item_size(leaf, path->slots[0]);
7178 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
7303 struct btrfs_path *path,
7308 if (!path->need_commit_sem)
7318 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7321 btrfs_print_tree(path->nodes[path->lowest_level], false);
7326 root->root_key.objectid, path->lowest_level,
7327 path->slots[path->lowest_level]);
7340 struct btrfs_path *path;
7342 path = alloc_path_for_send();
7343 if (!path)
7345 path->reada = READA_FORWARD_ALWAYS;
7355 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
7362 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
7364 ret = changed_cb(path, NULL, &key,
7376 * our path (leaf) and restart the search, so that we
7383 btrfs_release_path(path);
7384 ret = search_key_again(sctx, send_root, path, &key);
7391 ret = btrfs_next_item(send_root, path);
7404 btrfs_free_path(path);
7408 static int replace_node_with_clone(struct btrfs_path *path, int level)
7412 clone = btrfs_clone_extent_buffer(path->nodes[level]);
7416 free_extent_buffer(path->nodes[level]);
7417 path->nodes[level] = clone;
7422 static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
7425 struct extent_buffer *parent = path->nodes[*level];
7426 int slot = path->slots[*level];
7453 path->nodes[*level - 1] = eb;
7454 path->slots[*level - 1] = 0;
7458 return replace_node_with_clone(path, 0);
7463 static int tree_move_next_or_upnext(struct btrfs_path *path,
7468 nritems = btrfs_header_nritems(path->nodes[*level]);
7470 path->slots[*level]++;
7472 while (path->slots[*level] >= nritems) {
7474 path->slots[*level] = nritems - 1;
7479 path->slots[*level] = 0;
7480 free_extent_buffer(path->nodes[*level]);
7481 path->nodes[*level] = NULL;
7483 path->slots[*level]++;
7485 nritems = btrfs_header_nritems(path->nodes[*level]);
7495 static int tree_advance(struct btrfs_path *path,
7504 ret = tree_move_next_or_upnext(path, level, root_level);
7506 ret = tree_move_down(path, level, reada_min_gen);
7516 btrfs_item_key_to_cpu(path->nodes[*level], key,
7517 path->slots[*level]);
7519 btrfs_node_key_to_cpu(path->nodes[*level], key,
7520 path->slots[*level]);