Lines Matching defs:path

589 struct btrfs_path *path;
599 path = btrfs_alloc_path();
600 if (!path)
606 path->skip_locking = 1;
607 path->search_commit_root = 1;
608 path->reada = READA_FORWARD;
615 btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
631 btrfs_free_path(path);
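The matches at 589-631 trace btrfs's standard read-only iteration pattern: allocate a path, mark it to search the commit root without taking tree locks, enable forward readahead, walk the tree with btrfs_for_each_slot(), and free the path. A minimal in-tree sketch of that shape, assuming btrfs's internal headers (ctree.h); the zeroed search key and the empty loop body are placeholders, not the listed function's actual logic:

static int sample_iterate_commit_root(struct btrfs_root *extent_root)
{
        struct btrfs_path *path;
        struct btrfs_key search_key = { 0 };    /* placeholder start key */
        struct btrfs_key found_key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* Read-only scan: skip locking and search the last committed root. */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = READA_FORWARD;

        btrfs_for_each_slot(extent_root, &search_key, &found_key, path, ret) {
                /* inspect found_key / path->slots[0] here */
        }

        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}

Searching the commit root with locking skipped is safe because committed tree blocks are never modified in place; that is why the three flags travel together at 606-608.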
707 struct btrfs_path *path;
716 path = btrfs_alloc_path();
717 if (!path)
738 path->skip_locking = 1;
739 path->search_commit_root = 1;
740 path->reada = READA_FORWARD;
747 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
751 leaf = path->nodes[0];
760 if (path->slots[0] < nritems) {
761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
763 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
769 btrfs_release_path(path);
778 ret = btrfs_next_leaf(extent_root, path);
783 leaf = path->nodes[0];
792 btrfs_release_path(path);
797 path->slots[0]++;
827 path->slots[0]++;
834 btrfs_free_path(path);
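Lines 707-834 show the manual form of the same walk: a single btrfs_search_slot(), then slot-by-slot advancement with btrfs_next_leaf() to hop across leaves. A condensed sketch of that control flow, with placeholder key and item handling; the real function interleaves extra bookkeeping between 747 and 827, and the btrfs_release_path() calls at 769 and 792 come from it dropping the path before blocking, a wrinkle this sketch omits:

static int sample_walk_leaves(struct btrfs_root *extent_root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key = { 0 };   /* placeholder start key */
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = READA_FORWARD;

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        /* past the last item: move to the next leaf */
                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret)
                                break;  /* > 0 means end of tree, < 0 error */
                        continue;
                }
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                /* process the item at (leaf, path->slots[0]) here */
                path->slots[0]++;
        }
        if (ret > 0)
                ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}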
1026 struct btrfs_path *path,
1039 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1045 ret = btrfs_del_item(trans, root, path);
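The fragment at 1026-1045 (remove_block_group_item) is the canonical delete sequence: a COW search with ins_len == -1 tells the search to prepare the leaf for shrinking, then btrfs_del_item() drops the slot the search landed on. A sketch of just that sequence, with the key supplied by the caller:

static int sample_del_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_path *path,
                           const struct btrfs_key *key)
{
        int ret;

        /* ins_len == -1: the leaf will shrink; cow == 1: writable copy */
        ret = btrfs_search_slot(trans, root, key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;  /* exact key not present */
        if (ret < 0)
                return ret;

        return btrfs_del_item(trans, root, path);
}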
1053 struct btrfs_path *path;
1099 path = btrfs_alloc_path();
1100 if (!path) {
1109 inode = lookup_free_space_inode(block_group, path);
1123 btrfs_wait_cache_io(trans, block_group, path);
1238 ret = remove_block_group_item(trans, path, block_group);
1290 btrfs_free_path(path);
1515 * to the unused_bgs code path. Therefore, if it's not fully
1651 * The normal path here is that an unused block group is passed in,
1652 * then trimming is handled in the transaction commit path.
1654 * before coming down the unused block group path as trimming
1655 * will no longer be done later in the transaction commit path.
1973 struct btrfs_path *path)
1983 slot = path->slots[0];
1984 leaf = path->nodes[0];
2024 struct btrfs_path *path,
2031 btrfs_for_each_slot(root, key, &found_key, path, ret) {
2034 return read_bg_from_eb(fs_info, &found_key, path);
2469 struct btrfs_path *path;
2492 path = btrfs_alloc_path();
2493 if (!path)
2508 ret = find_first_block_group(info, path, &key);
2514 leaf = path->nodes[0];
2515 slot = path->slots[0];
2521 btrfs_release_path(path);
2528 btrfs_release_path(path);
2565 btrfs_free_path(path);
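Lines 2469-2565 combine the helper at 2024-2034 with a search-consume-release-advance loop: each pass re-searches from the current key, copies the found key out of the leaf, releases the path before doing slower per-group work, and bumps the key past the consumed range. A sketch of the loop, reusing the static helper find_first_block_group() visible at 2508, with the error handling simplified:

static int sample_scan_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_path *path;
        struct btrfs_key key = { 0 };
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = find_first_block_group(info, path, &key);
                if (ret > 0) {
                        ret = 0;        /* no more block group items */
                        break;
                }
                if (ret < 0)
                        break;

                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                /* ... consume the block group item here ... */
                btrfs_release_path(path);

                /* block group keys are (start, type, length): skip past it */
                key.objectid += key.offset;
        }

        btrfs_free_path(path);
        return ret;
}

Releasing the path every iteration (2521, 2528) keeps tree blocks unpinned while the loop does per-group setup, at the cost of one fresh search per item.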
2622 struct btrfs_path *path;
2630 path = btrfs_alloc_path();
2631 if (!path)
2637 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
2641 leaf = path->nodes[0];
2642 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
2651 btrfs_free_path(path);
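Lines 2622-2651 show the insert side: btrfs_insert_empty_item() reserves space for a new item at the key's position and leaves the path pointing at it, after which the fields are written through btrfs_item_ptr(). A sketch under those assumptions; only two dev-extent fields are filled here for brevity, and depending on kernel version the real function also marks the buffer dirty explicitly:

static int sample_insert_dev_extent(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    const struct btrfs_key *key,
                                    u64 chunk_offset, u64 num_bytes)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dev_extent *extent;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*extent));
        if (ret)
                goto out;

        /* the path now points at the new, unfilled item: set its fields */
        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
out:
        btrfs_free_path(path);
        return ret;
}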
3025 struct btrfs_path *path,
3059 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3066 leaf = path->nodes[0];
3067 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3075 btrfs_release_path(path);
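Lines 3025-3075 are the in-place update counterpart: a search with cow == 1 and ins_len == 0 yields a writable leaf, and the raw item bytes are overwritten through write_extent_buffer() at the offset returned by btrfs_item_ptr_offset(). A sketch, assuming the caller supplies the new btrfs_block_group_item contents:

static int sample_update_item(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct btrfs_path *path,
                              const struct btrfs_key *key,
                              const struct btrfs_block_group_item *bgi)
{
        struct extent_buffer *leaf;
        unsigned long bi;
        int ret;

        /* cow == 1: writable copy of the leaf; no size change needed */
        ret = btrfs_search_slot(trans, root, key, path, 0, 1);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, bgi, bi, sizeof(*bgi));
out:
        btrfs_release_path(path);
        return ret;
}

Note the btrfs_release_path() rather than btrfs_free_path() at 3075: the caller owns the path and reuses it, a pattern the groups below (3264 onward) rely on.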
3096 struct btrfs_path *path)
3125 inode = lookup_free_space_inode(block_group, path);
3128 btrfs_release_path(path);
3139 ret = create_free_space_inode(trans, block_group, path);
3247 btrfs_release_path(path);
3264 struct btrfs_path *path;
3270 path = btrfs_alloc_path();
3271 if (!path)
3278 cache_save_setup(cache, trans, path);
3281 btrfs_free_path(path);
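That reuse is what lines 3264-3281 are about: the function here (apparently btrfs_setup_space_cache in current trees) allocates one path up front and hands it to every per-group helper instead of letting each helper allocate its own. A sketch of the shape, with the dirty list passed in for simplicity; the real code walks the transaction's dirty_bgs list:

static int sample_setup_space_cache(struct btrfs_trans_handle *trans,
                                    struct list_head *dirty_bgs)
{
        struct btrfs_block_group *cache, *tmp;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* one path, reused for every dirty block group */
        list_for_each_entry_safe(cache, tmp, dirty_bgs, dirty_list)
                cache_save_setup(cache, trans, path);

        btrfs_free_path(path);
        return 0;
}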
3304 struct btrfs_path *path = NULL;
3321 if (!path) {
3322 path = btrfs_alloc_path();
3323 if (!path) {
3347 btrfs_wait_cache_io(trans, cache, path);
3366 cache_save_setup(cache, trans, path);
3370 ret = btrfs_write_out_cache(trans, cache, path);
3389 ret = update_block_group_item(trans, path, cache);
3459 btrfs_free_path(path);
3470 struct btrfs_path *path;
3473 path = btrfs_alloc_path();
3474 if (!path)
3506 btrfs_wait_cache_io(trans, cache, path);
3519 cache_save_setup(cache, trans, path);
3527 ret = btrfs_write_out_cache(trans, cache, path);
3540 ret = update_block_group_item(trans, path, cache);
3557 ret = update_block_group_item(trans, path, cache);
3579 btrfs_wait_cache_io(trans, cache, path);
3583 btrfs_free_path(path);
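Lines 3470-3583 extend the same single-path pass to the full write-out sequence: wait for any cache write still in flight, set up and write the free-space cache, then update the on-disk block group item. A heavily simplified sketch; the plain list walk, the reference handling, and the error handling are assumptions, since the real function iterates the transaction's dirty_bgs under locks and retries the item update:

static int sample_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                           struct list_head *dirty_bgs)
{
        struct btrfs_block_group *cache, *tmp;
        struct btrfs_path *path;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        list_for_each_entry_safe(cache, tmp, dirty_bgs, dirty_list) {
                /* finish a previous async cache write before reusing it */
                if (!list_empty(&cache->io_list)) {
                        list_del_init(&cache->io_list);
                        btrfs_wait_cache_io(trans, cache, path);
                }

                cache_save_setup(cache, trans, path);
                ret = btrfs_write_out_cache(trans, cache, path);
                if (ret)
                        break;
                ret = update_block_group_item(trans, path, cache);
                if (ret)
                        break;
        }

        btrfs_free_path(path);
        return ret;
}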
3706 * @num_bytes except for the compress path.
3942 * in the extent btree right away, we could deadlock because the path for the
4054 * Allocation of system chunks can not happen through this path, as we