Lines matching refs: root
16 static void fail_caching_thread(struct btrfs_root *root)
18 struct btrfs_fs_info *fs_info = root->fs_info;
23 spin_lock(&root->ino_cache_lock);
24 root->ino_cache_state = BTRFS_CACHE_ERROR;
25 spin_unlock(&root->ino_cache_lock);
26 wake_up(&root->ino_cache_wait);
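Taken together, matched lines 16-26 look like a small helper that marks the in-memory inode-number cache as failed. A minimal reconstruction is sketched below; the warning between lines 18 and 23 did not match "root" and is only an assumption here.

static void fail_caching_thread(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        /* Assumed: the unmatched lines emit a warning before giving up. */
        btrfs_warn(fs_info, "failed to start inode caching task");

        /* Flip the cache into the error state under the lock, then wake
         * every waiter sleeping on ino_cache_wait. */
        spin_lock(&root->ino_cache_lock);
        root->ino_cache_state = BTRFS_CACHE_ERROR;
        spin_unlock(&root->ino_cache_lock);
        wake_up(&root->ino_cache_wait);
}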
31 struct btrfs_root *root = data;
32 struct btrfs_fs_info *fs_info = root->fs_info;
33 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
46 fail_caching_thread(root);
50 /* Since the commit root is read-only, we can safely skip locking. */
62 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
73 ret = btrfs_next_leaf(root, path);
92 root->ino_cache_progress = last;
105 if (key.objectid >= root->highest_objectid)
111 wake_up(&root->ino_cache_wait);
119 if (last < root->highest_objectid - 1) {
121 root->highest_objectid - last - 1, 0);
124 spin_lock(&root->ino_cache_lock);
125 root->ino_cache_state = BTRFS_CACHE_FINISHED;
126 spin_unlock(&root->ino_cache_lock);
128 root->ino_cache_progress = (u64)-1;
129 btrfs_unpin_free_ino(root);
131 wake_up(&root->ino_cache_wait);
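Matched lines 31-131 appear to be the body of a kernel thread that scans the commit root for inode items and records the gaps between allocated objectids as free inode numbers. The condensed reconstruction below shows how the fragments likely fit together; the path setup and leaf-walking helpers (btrfs_alloc_path, btrfs_item_key_to_cpu, btrfs_header_nritems, the commit_root_sem handling) are standard btrfs APIs filled in by assumption and may differ from the actual file.

static int caching_kthread(void *data)
{
        struct btrfs_root *root = data;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 last = (u64)-1;
        int ret;

        path = btrfs_alloc_path();
        if (!path) {
                fail_caching_thread(root);
                return -ENOMEM;
        }

        /* Since the commit root is read-only, we can safely skip locking. */
        path->skip_locking = 1;
        path->search_commit_root = 1;

        key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        down_read(&fs_info->commit_root_sem);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                struct extent_buffer *leaf = path->nodes[0];
                int slot = path->slots[0];

                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;
                        if (ret > 0)
                                break;
                        /* The full version also reschedules here, saving
                         * root->ino_cache_progress = last (listing line 92)
                         * before dropping commit_root_sem. */
                        continue;
                }

                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.type != BTRFS_INODE_ITEM_KEY)
                        goto next;
                if (key.objectid >= root->highest_objectid)
                        break;

                /* Every gap between two allocated objectids is free. */
                if (last != (u64)-1 && last + 1 != key.objectid) {
                        __btrfs_add_free_space(fs_info, ctl, last + 1,
                                               key.objectid - last - 1, 0);
                        wake_up(&root->ino_cache_wait);
                }
                last = key.objectid;
next:
                path->slots[0]++;
        }

        /* The tail between the last inode item and highest_objectid. */
        if (last < root->highest_objectid - 1)
                __btrfs_add_free_space(fs_info, ctl, last + 1,
                                       root->highest_objectid - last - 1, 0);

        spin_lock(&root->ino_cache_lock);
        root->ino_cache_state = BTRFS_CACHE_FINISHED;
        spin_unlock(&root->ino_cache_lock);

        root->ino_cache_progress = (u64)-1;
        btrfs_unpin_free_ino(root);
out:
        wake_up(&root->ino_cache_wait);
        up_read(&fs_info->commit_root_sem);
        btrfs_free_path(path);
        return ret;
}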
139 static void start_caching(struct btrfs_root *root)
141 struct btrfs_fs_info *fs_info = root->fs_info;
142 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
150 spin_lock(&root->ino_cache_lock);
151 if (root->ino_cache_state != BTRFS_CACHE_NO) {
152 spin_unlock(&root->ino_cache_lock);
156 root->ino_cache_state = BTRFS_CACHE_STARTED;
157 spin_unlock(&root->ino_cache_lock);
159 ret = load_free_ino_cache(fs_info, root);
161 spin_lock(&root->ino_cache_lock);
162 root->ino_cache_state = BTRFS_CACHE_FINISHED;
163 spin_unlock(&root->ino_cache_lock);
164 wake_up(&root->ino_cache_wait);
175 ret = btrfs_find_free_objectid(root, &objectid);
180 wake_up(&root->ino_cache_wait);
183 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
184 root->root_key.objectid);
186 fail_caching_thread(root);
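Matched lines 139-186 look like the function that moves the cache state machine from BTRFS_CACHE_NO to BTRFS_CACHE_STARTED, first trying the on-disk cache and otherwise spawning the scanning thread. A condensed reconstruction follows; the return-value convention of load_free_ino_cache (treated here as 1 on a successful load) and the seeding of the [highest objectid + 1, BTRFS_LAST_FREE_OBJECTID] range are assumptions based on the surrounding fragments.

static void start_caching(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct task_struct *tsk;
        u64 objectid;
        int ret;

        /* Only one caller gets to move the state machine forward. */
        spin_lock(&root->ino_cache_lock);
        if (root->ino_cache_state != BTRFS_CACHE_NO) {
                spin_unlock(&root->ino_cache_lock);
                return;
        }
        root->ino_cache_state = BTRFS_CACHE_STARTED;
        spin_unlock(&root->ino_cache_lock);

        /* Fast path: a free-ino cache written out by a previous mount. */
        ret = load_free_ino_cache(fs_info, root);
        if (ret == 1) {
                spin_lock(&root->ino_cache_lock);
                root->ino_cache_state = BTRFS_CACHE_FINISHED;
                spin_unlock(&root->ino_cache_lock);
                wake_up(&root->ino_cache_wait);
                return;
        }

        /* Make the range above the current highest objectid usable right
         * away so allocations do not have to wait for the full scan. */
        ret = btrfs_find_free_objectid(root, &objectid);
        if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
                __btrfs_add_free_space(fs_info, ctl, objectid,
                                       BTRFS_LAST_FREE_OBJECTID - objectid + 1,
                                       0);
                wake_up(&root->ino_cache_wait);
        }

        tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
                          root->root_key.objectid);
        if (IS_ERR(tsk))
                fail_caching_thread(root);
}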
189 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
191 if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
192 return btrfs_find_free_objectid(root, objectid);
195 *objectid = btrfs_find_ino_for_alloc(root);
200 start_caching(root);
202 wait_event(root->ino_cache_wait,
203 root->ino_cache_state == BTRFS_CACHE_FINISHED ||
204 root->ino_cache_state == BTRFS_CACHE_ERROR ||
205 root->free_ino_ctl->free_space > 0);
207 if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
208 root->free_ino_ctl->free_space == 0)
210 else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
211 return btrfs_find_free_objectid(root, objectid);
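Matched lines 189-211 are nearly a complete function: the allocation entry point that falls back to btrfs_find_free_objectid() when the cache is disabled or errored, and otherwise pulls a number from the free_ino cache, starting the caching thread and sleeping until the cache finishes or free space appears. Only the -ENOSPC return and the retry loop below are filled in by assumption.

int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
        if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
                return btrfs_find_free_objectid(root, objectid);

again:
        *objectid = btrfs_find_ino_for_alloc(root);
        if (*objectid != 0)
                return 0;

        start_caching(root);

        wait_event(root->ino_cache_wait,
                   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
                   root->ino_cache_state == BTRFS_CACHE_ERROR ||
                   root->free_ino_ctl->free_space > 0);

        if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
            root->free_ino_ctl->free_space == 0)
                return -ENOSPC;
        else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
                return btrfs_find_free_objectid(root, objectid);
        else
                goto again;
}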
216 void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
218 struct btrfs_fs_info *fs_info = root->fs_info;
219 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
224 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
228 spin_lock(&root->ino_cache_lock);
229 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
230 spin_unlock(&root->ino_cache_lock);
234 spin_unlock(&root->ino_cache_lock);
236 start_caching(root);
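Matched lines 216-236 look like the path that hands an unused objectid back to the cache: once the scan has FINISHED the number goes straight into the pinned tree, otherwise the state is rechecked under ino_cache_lock and caching is (re)started. In the sketch below, the commit_root_sem write-locking and the __btrfs_add_free_space() calls into the pinned tree did not match "root" and are assumptions.

void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

        if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
                return;
again:
        if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
                /* Scan is done: park the number in the pinned tree until
                 * the running transaction commits (btrfs_unpin_free_ino). */
                __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);
        } else {
                down_write(&fs_info->commit_root_sem);
                spin_lock(&root->ino_cache_lock);
                if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
                        /* Lost the race with the caching thread; retry the
                         * lock-free branch above. */
                        spin_unlock(&root->ino_cache_lock);
                        up_write(&fs_info->commit_root_sem);
                        goto again;
                }
                spin_unlock(&root->ino_cache_lock);

                start_caching(root);

                __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);

                up_write(&fs_info->commit_root_sem);
        }
}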
246 * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
247 * others will just be dropped, because the commit root we were searching has
248  * changed.
250 * Must be called with root->fs_info->commit_root_sem held
252 void btrfs_unpin_free_ino(struct btrfs_root *root)
254 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
255 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
256 spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
261 if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
275 if (info->offset > root->ino_cache_progress)
278 count = min(root->ino_cache_progress - info->offset + 1,
284 __btrfs_add_free_space(root->fs_info, ctl,
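Reading the comment at lines 246-250 together with the fragments at 252-284, this is the transaction-commit hook that drains the pinned tree: entries at or below ino_cache_progress are re-added to free_ino_ctl, anything beyond it is dropped. In the reconstruction below, the rb-tree walk (rb_first/rb_entry over struct btrfs_free_space) and the kmem_cache_free() on btrfs_free_space_cachep are assumptions; only the progress comparison and the __btrfs_add_free_space() call appear in the matched lines.

void btrfs_unpin_free_ino(struct btrfs_root *root)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
        spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 count;

        if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
                return;

        while (1) {
                spin_lock(rbroot_lock);
                n = rb_first(rbroot);
                if (!n) {
                        spin_unlock(rbroot_lock);
                        break;
                }

                info = rb_entry(n, struct btrfs_free_space, offset_index);

                /* Only the part the caching scan has already passed is safe
                 * to reuse; anything past ino_cache_progress is dropped. */
                if (info->offset > root->ino_cache_progress)
                        count = 0;
                else
                        count = min(root->ino_cache_progress - info->offset + 1,
                                    info->bytes);

                rb_erase(&info->offset_index, rbroot);
                spin_unlock(rbroot_lock);

                if (count)
                        __btrfs_add_free_space(root->fs_info, ctl,
                                               info->offset, count, 0);
                kmem_cache_free(btrfs_free_space_cachep, info);
        }
}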
369 void btrfs_init_free_ino_ctl(struct btrfs_root *root)
371 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
372 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
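Lines 369-372 only show the function header and the two control pointers. The body below is an assumption based on how a btrfs_free_space_ctl is typically initialized for this cache (one "byte" per inode number, separate op tables for the live and pinned trees); the names free_ino_op and pinned_free_ino_op are not in the matched lines and are hypothetical here.

void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

        /* Assumed setup: each unit is a single objectid, starting at 0. */
        spin_lock_init(&ctl->tree_lock);
        ctl->unit = 1;
        ctl->start = 0;
        ctl->op = &free_ino_op;

        spin_lock_init(&pinned->tree_lock);
        pinned->unit = 1;
        pinned->start = 0;
        pinned->op = &pinned_free_ino_op;
}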
397 int btrfs_save_ino_cache(struct btrfs_root *root,
400 struct btrfs_fs_info *fs_info = root->fs_info;
401 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
413 if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
414 (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
415 root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
418 /* Don't save inode cache if we are deleting this root */
419 if (btrfs_root_refs(&root->root_item) == 0)
441 ret = btrfs_block_rsv_add(root, trans->block_rsv,
449 inode = lookup_free_ino_inode(root, path);
459 ret = create_free_ino_inode(root, trans, path);
466 ret = btrfs_update_inode(trans, root, inode);
481 spin_lock(&root->ino_cache_lock);
482 if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
484 spin_unlock(&root->ino_cache_lock);
487 spin_unlock(&root->ino_cache_lock);
511 ret = btrfs_write_out_ino_cache(root, trans, path, inode);
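Matched lines 397-511 look like the transaction-commit writer that persists free_ino_ctl into a hidden cache inode: bail out for roots other than the fs tree or subvolumes/snapshots and for roots being deleted, reserve metadata space, look up or create the cache inode, and write the cache out only once the scan has FINISHED. The sketch below is heavily condensed; the reservation size (btrfs_calc_insert_metadata_size), the NO_FLUSH reservation mode, and the iput/cleanup paths are assumptions, and releasing the block reservation on error is omitted.

int btrfs_save_ino_cache(struct btrfs_root *root,
                         struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_path *path;
        struct inode *inode;
        int ret = 0;

        /* Only the fs tree and subvolume/snapshot roots carry an ino cache. */
        if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
            (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
             root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
                return 0;

        /* Don't save inode cache if we are deleting this root */
        if (btrfs_root_refs(&root->root_item) == 0)
                return 0;

        if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* Reserve metadata space for the updates below (listing line 441);
         * the item count is an assumption. */
        ret = btrfs_block_rsv_add(root, trans->block_rsv,
                                  btrfs_calc_insert_metadata_size(fs_info, 10),
                                  BTRFS_RESERVE_NO_FLUSH);
        if (ret)
                goto out;

        /* Find the hidden cache inode, creating it on first use. */
        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode) && PTR_ERR(inode) == -ENOENT) {
                ret = create_free_ino_inode(root, trans, path);
                if (ret)
                        goto out;
                inode = lookup_free_ino_inode(root, path);
        }
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto out;
        }

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                goto out_put;

        /* Only a fully built cache is worth persisting. */
        spin_lock(&root->ino_cache_lock);
        if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
                spin_unlock(&root->ino_cache_lock);
                goto out_put;
        }
        spin_unlock(&root->ino_cache_lock);

        ret = btrfs_write_out_ino_cache(root, trans, path, inode);

out_put:
        iput(inode);
out:
        btrfs_free_path(path);
        return ret;
}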