Lines Matching defs:delayed_root
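All of the matches below come from the btrfs delayed-inode code (by the function and constant names, fs/btrfs/delayed-inode.c and its header in the Linux kernel). For orientation, here is a sketch of the structure every match dereferences, reconstructed from the fields the listed lines touch; the exact layout and comments vary between kernel versions:

        /* Sketch of struct btrfs_delayed_root (see fs/btrfs/delayed-inode.h). */
        struct btrfs_delayed_root {
                spinlock_t lock;                /* protects node_list and prepare_list */
                struct list_head node_list;     /* all delayed nodes with pending work */
                struct list_head prepare_list;  /* nodes handed to the async worker */
                atomic_t items;                 /* number of pending delayed items */
                atomic_t items_seq;             /* bumped whenever an item completes */
                int nodes;                      /* number of delayed nodes */
                wait_queue_head_t wait;         /* throttled tasks sleep here */
        };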

200 			struct btrfs_delayed_root *delayed_root)
205 spin_lock(&delayed_root->lock);
206 if (list_empty(&delayed_root->node_list))
209 p = delayed_root->node_list.next;
213 spin_unlock(&delayed_root->lock);
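Lines 200-213 are btrfs_first_delayed_node(): under delayed_root->lock it peeks at the head of node_list and takes a reference on that node. A sketch of the likely shape of the whole function, reconstructed around the fragments above (the refcounting primitive differs between kernel versions):

        static struct btrfs_delayed_node *btrfs_first_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
        {
                struct list_head *p;
                struct btrfs_delayed_node *node = NULL;

                spin_lock(&delayed_root->lock);
                if (list_empty(&delayed_root->node_list))
                        goto out;

                p = delayed_root->node_list.next;
                node = list_entry(p, struct btrfs_delayed_node, n_list);
                refcount_inc(&node->refs);      /* the caller drops this reference */
        out:
                spin_unlock(&delayed_root->lock);
                return node;
        }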
221 struct btrfs_delayed_root *delayed_root;
225 delayed_root = node->root->fs_info->delayed_root;
226 spin_lock(&delayed_root->lock);
229 if (list_empty(&delayed_root->node_list))
231 p = delayed_root->node_list.next;
232 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
240 spin_unlock(&delayed_root->lock);
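Lines 221-240 look like btrfs_next_delayed_node(): given a node, find its successor on node_list, again under the lock, with special cases for a node that has meanwhile been dequeued and for the tail of the list. A hedged reconstruction (the IN_LIST flag test is taken from upstream, not from the fragments above):

        static struct btrfs_delayed_node *btrfs_next_delayed_node(
                                        struct btrfs_delayed_node *node)
        {
                struct btrfs_delayed_root *delayed_root;
                struct list_head *p;
                struct btrfs_delayed_node *next = NULL;

                delayed_root = node->root->fs_info->delayed_root;
                spin_lock(&delayed_root->lock);
                if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                        /* the node was dequeued meanwhile: restart from the head */
                        if (list_empty(&delayed_root->node_list))
                                goto out;
                        p = delayed_root->node_list.next;
                } else if (list_is_last(&node->n_list, &delayed_root->node_list)) {
                        goto out;
                } else {
                        p = node->n_list.next;
                }

                next = list_entry(p, struct btrfs_delayed_node, n_list);
                refcount_inc(&next->refs);
        out:
                spin_unlock(&delayed_root->lock);
                return next;
        }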
249 struct btrfs_delayed_root *delayed_root;
254 delayed_root = delayed_node->root->fs_info->delayed_root;
258 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
260 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
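Lines 249-260 appear to be the node release path (__btrfs_release_delayed_node() upstream): depending on whether the node still has pending items, it is either (re)queued on node_list or dequeued from it. Roughly, assuming the upstream structure around the two calls at 258 and 260:

        /* Sketch of the decision around lines 258-260; the surrounding function
         * also drops the node's reference once it is off the list. */
        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)        /* still has delayed items attached */
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);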
285 struct btrfs_delayed_root *delayed_root)
290 spin_lock(&delayed_root->lock);
291 if (list_empty(&delayed_root->prepare_list))
294 p = delayed_root->prepare_list.next;
299 spin_unlock(&delayed_root->lock);
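Lines 285-299 mirror btrfs_first_delayed_node() but operate on prepare_list, the queue of nodes handed to the background worker; here the node is also unlinked as it is claimed, so each prepared node is processed once. Sketch:

        static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
        {
                struct list_head *p;
                struct btrfs_delayed_node *node = NULL;

                spin_lock(&delayed_root->lock);
                if (list_empty(&delayed_root->prepare_list))
                        goto out;

                p = delayed_root->prepare_list.next;
                list_del_init(p);       /* claim it: only one worker handles this node */
                node = list_entry(p, struct btrfs_delayed_node, p_list);
                refcount_inc(&node->refs);
        out:
                spin_unlock(&delayed_root->lock);
                return node;
        }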
399 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
403 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
405 int seq = atomic_inc_return(&delayed_root->items_seq);
408 if ((atomic_dec_return(&delayed_root->items) <
410 cond_wake_up_nomb(&delayed_root->wait);
417 struct btrfs_delayed_root *delayed_root;
426 delayed_root = delayed_node->root->fs_info->delayed_root;
428 BUG_ON(!delayed_root);
439 finish_one_item(delayed_root);
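Line 399 bumps the global items counter when a delayed item is attached to a node; finish_one_item() at 403-410 is the counterpart run each time an item is flushed or dropped: it increments items_seq, decrements items, and wakes throttled tasks once the backlog falls below BTRFS_DELAYED_BACKGROUND or a batch boundary is crossed. The item release helper at 417-439 calls it after unlinking an item from its node. Reconstructed around the visible lines:

        static void finish_one_item(struct btrfs_delayed_root *delayed_root)
        {
                int seq = atomic_inc_return(&delayed_root->items_seq);

                /* atomic_dec_return() implies a full memory barrier */
                if ((atomic_dec_return(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
                        cond_wake_up_nomb(&delayed_root->wait);
        }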
974 struct btrfs_delayed_root *delayed_root;
982 delayed_root = delayed_node->root->fs_info->delayed_root;
983 finish_one_item(delayed_root);
991 struct btrfs_delayed_root *delayed_root;
996 delayed_root = delayed_node->root->fs_info->delayed_root;
997 finish_one_item(delayed_root);
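Lines 974-997 are two more finish_one_item() callers, by the look of it the paths that drop a node's pending inode update and pending inode-ref deletion; each clears the corresponding state, decrements the node's item count, and then rebalances the global accounting. The shared pattern, with the flag name taken from upstream rather than from the fragments above:

        /* Pattern shared by the callers around lines 983 and 997. */
        if (test_and_clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }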
1136 struct btrfs_delayed_root *delayed_root;
1153 delayed_root = fs_info->delayed_root;
1155 curr_node = btrfs_first_delayed_node(delayed_root);
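Lines 1136-1155 are from the synchronous flush path (__btrfs_run_delayed_items() upstream), which walks node_list starting at btrfs_first_delayed_node() and commits each node's pending work into the btree. The iteration pattern, with the nr-based limit and error handling elided:

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node) {
                /* flush this node's inserted/deleted items and its inode update
                 * under the running transaction */
                ret = __btrfs_commit_inode_delayed_items(trans, path, curr_node);
                if (ret)
                        break;

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }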
1306 struct btrfs_delayed_root *delayed_root;
1314 struct btrfs_delayed_root *delayed_root;
1323 delayed_root = async_work->delayed_root;
1330 if (atomic_read(&delayed_root->items) <
1334 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1366 wake_up(&delayed_root->wait);
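Lines 1306-1366 are the background side: a work item carrying a delayed_root pointer (1306/1314/1323) whose handler keeps pulling nodes off prepare_list while the backlog stays above roughly half of BTRFS_DELAYED_BACKGROUND, then wakes anyone throttled on delayed_root->wait. A heavily condensed skeleton; the transaction and path handling in the loop body is only summarized in a comment, and the nr field (how many nodes a bounded run may process) is assumed from upstream:

        static void btrfs_async_run_delayed_root(struct btrfs_work *work)
        {
                struct btrfs_async_delayed_work *async_work;
                struct btrfs_delayed_root *delayed_root;
                struct btrfs_delayed_node *delayed_node;
                int total_done = 0;

                async_work = container_of(work, struct btrfs_async_delayed_work, work);
                delayed_root = async_work->delayed_root;

                do {
                        if (atomic_read(&delayed_root->items) <
                            BTRFS_DELAYED_BACKGROUND / 2)
                                break;

                        delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
                        if (!delayed_node)
                                break;

                        /* join a transaction, flush this node's items into the
                         * btree, then release the node */
                        total_done++;
                } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
                         || total_done < async_work->nr);

                wake_up(&delayed_root->wait);
                kfree(async_work);
        }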
1371 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1380 async_work->delayed_root = delayed_root;
1391 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
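Lines 1371-1380 allocate such a work item, point it at delayed_root, and queue it on the delayed-items workqueue; line 1391 is the shutdown-time assertion that node_list has been fully drained. A sketch (the work-initialization helpers have changed signature across kernel versions, so that step is only described in a comment):

        static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                             struct btrfs_fs_info *fs_info, int nr)
        {
                struct btrfs_async_delayed_work *async_work;

                async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
                if (!async_work)
                        return -ENOMEM;

                async_work->delayed_root = delayed_root;
                async_work->nr = nr;
                /* initialize async_work->work with btrfs_async_run_delayed_root and
                 * queue it on fs_info->delayed_workers (helper names and signatures
                 * vary by kernel version) */
                return 0;
        }

        void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
        {
                WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
        }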
1394 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1396 int val = atomic_read(&delayed_root->items_seq);
1401 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
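could_end_wait() at 1394-1401 is the wait condition used by the throttling path below: a sleeper may stop waiting either once roughly a whole BTRFS_DELAYED_BATCH of items has completed since it sampled items_seq, or once the backlog has dropped below BTRFS_DELAYED_BACKGROUND. Reconstructed:

        static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
        {
                int val = atomic_read(&delayed_root->items_seq);

                /* about a whole batch has completed since we started waiting
                 * (val < seq covers sequence-counter wraparound) */
                if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                        return 1;

                /* or the backlog has shrunk below the background threshold */
                if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                        return 1;

                return 0;
        }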
1409 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1411 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1415 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1419 seq = atomic_read(&delayed_root->items_seq);
1421 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1425 wait_event_interruptible(delayed_root->wait,
1426 could_end_wait(delayed_root, seq));
1430 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
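Lines 1409-1430 are btrfs_balance_delayed_items(), the throttle applied to front-end callers: below BTRFS_DELAYED_BACKGROUND items nothing happens; at or above BTRFS_DELAYED_WRITEBACK the caller kicks an unbounded background run and sleeps until could_end_wait() is satisfied; in between it just kicks a BTRFS_DELAYED_BATCH-sized run. Reconstructed around the visible lines (the workqueue-congestion early return follows upstream):

        void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
        {
                struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

                if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
                    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
                        return;

                if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                        int seq;
                        int ret;

                        seq = atomic_read(&delayed_root->items_seq);

                        ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
                        if (ret)
                                return;

                        wait_event_interruptible(delayed_root->wait,
                                                 could_end_wait(delayed_root, seq));
                        return;
                }

                btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
        }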
1941 atomic_inc(&root->fs_info->delayed_root->items);
1985 atomic_inc(&fs_info->delayed_root->items);
2081 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
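Lines 1941 and 1985 are the remaining increments of delayed_root->items, apparently where a delayed inode update and a delayed inode-ref deletion are queued on a node (the completion-side counterparts are the finish_one_item() callers at 974-997). Line 2081 is the unmount/error-cleanup walk over node_list, which likely looks roughly like this (__btrfs_kill_delayed_node is the upstream helper name, not visible in the fragments above):

        void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
        {
                struct btrfs_delayed_node *curr_node, *prev_node;

                curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
                while (curr_node) {
                        /* drop every pending item on this node without writing it */
                        __btrfs_kill_delayed_node(curr_node);

                        prev_node = curr_node;
                        curr_node = btrfs_next_delayed_node(curr_node);
                        btrfs_release_delayed_node(prev_node);
                }
        }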