Lines Matching defs:path
45 struct btrfs_path *path);
48 struct btrfs_path *path,
65 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
69 btrfs_release_path(path);
73 leaf = path->nodes[0];
74 header = btrfs_item_ptr(leaf, path->slots[0],
78 btrfs_release_path(path);
85 inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
86 btrfs_release_path(path);
99 struct btrfs_path *path)
112 inode = __lookup_free_space_inode(fs_info->tree_root, path,
136 struct btrfs_path *path,
147 ret = btrfs_insert_empty_inode(trans, root, path, ino);
155 leaf = path->nodes[0];
156 inode_item = btrfs_item_ptr(leaf, path->slots[0],
158 btrfs_item_key(leaf, &disk_key, path->slots[0]);
172 btrfs_release_path(path);
177 ret = btrfs_insert_empty_item(trans, root, path, &key,
180 btrfs_release_path(path);
184 leaf = path->nodes[0];
185 header = btrfs_item_ptr(leaf, path->slots[0],
190 btrfs_release_path(path);
197 struct btrfs_path *path)
206 return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
238 struct btrfs_path *path = btrfs_alloc_path();
240 if (!path) {
249 btrfs_wait_cache_io(trans, block_group, path);
260 btrfs_free_path(path);
668 struct btrfs_path *path, u64 offset)
691 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
695 btrfs_release_path(path);
701 leaf = path->nodes[0];
702 header = btrfs_item_ptr(leaf, path->slots[0],
707 btrfs_release_path(path);
850 struct btrfs_path *path;
866 path = btrfs_alloc_path();
867 if (!path)
869 path->search_commit_root = 1;
870 path->skip_locking = 1;
873 * We must pass a path with search_commit_root set to btrfs_iget in
891 inode = lookup_free_space_inode(block_group, path);
893 btrfs_free_path(path);
901 btrfs_free_path(path);
907 path, block_group->start);
908 btrfs_free_path(path);
1022 struct btrfs_path *path, u64 offset,
1034 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1040 leaf = path->nodes[0];
1043 ASSERT(path->slots[0]);
1044 path->slots[0]--;
1045 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1051 btrfs_release_path(path);
1057 header = btrfs_item_ptr(leaf, path->slots[0],
1063 btrfs_release_path(path);
1174 struct btrfs_path *path, u64 offset)
1188 ret = update_cache_item(trans, root, inode, path, offset,
1231 struct btrfs_path *path)
1233 return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
1238 struct btrfs_path *path)
1242 path, block_group->start);
1397 struct btrfs_path *path)
1411 inode = lookup_free_space_inode(block_group, path);
2277 * find_free_extent(). Rule 2 enables coalescing based on the common path
2282 * the reuse path.
3890 struct btrfs_path *path)
3901 inode = __lookup_free_space_inode(root, path, 0);
3915 struct btrfs_path *path)
3917 return __create_free_space_inode(root, trans, path,
3924 struct btrfs_path *path;
3939 path = btrfs_alloc_path();
3940 if (!path)
3943 inode = lookup_free_ino_inode(root, path);
3950 ret = __load_free_space_cache(root, inode, ctl, path, 0);
3959 btrfs_free_path(path);
3965 struct btrfs_path *path,
3987 ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
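
The matches above all come from the free space cache code (functions such as __lookup_free_space_inode, __create_free_space_inode and btrfs_wait_cache_io) and trace the usual btrfs_path lifecycle: btrfs_alloc_path(), a btrfs_search_slot() lookup, reading the item out of path->nodes[0] at path->slots[0], then btrfs_release_path()/btrfs_free_path(). The sketch below is not taken from the file; it is a minimal illustration of that pattern, assuming it sits inside fs/btrfs so the usual headers and accessors (btrfs_item_ptr, btrfs_free_space_generation) are in scope. The function name is hypothetical.

/*
 * Illustrative only: read the free space header item for @offset using a
 * read-only, commit-root search, mirroring the path handling seen in the
 * matches above.
 */
static int example_read_free_space_header(struct btrfs_root *root, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Read against the last committed tree without taking tree locks. */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/* The free space header is keyed (FREE_SPACE_OBJECTID, 0, offset). */
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = offset;

	/* trans == NULL, ins_len == 0, cow == 0: pure read-only lookup. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		/* ret > 0 means the exact key does not exist. */
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	generation = btrfs_free_space_generation(leaf, header);
	pr_debug("free space header generation %llu\n", generation);
out:
	/* btrfs_free_path() releases any held extent buffers before freeing. */
	btrfs_free_path(path);
	return ret;
}

The same shape repeats throughout the listing: paths created for a transaction-backed update pass the transaction handle and cow == 1 to btrfs_search_slot() (as in the lines around 1034), while read-only callers pass NULL and rely on btrfs_release_path()/btrfs_free_path() to drop references on every exit path.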