Lines Matching refs:node

157 				 struct btrfs_backref_node *node)
161 if (node->level == 0 ||
162 in_range(node->bytenr, rc->block_group->start,
165 set_extent_bits(&rc->processed_blocks, node->bytenr,
166 node->bytenr + blocksize - 1, EXTENT_DIRTY);
168 node->processed = 1;
179 * walk up backref nodes until we reach the node that represents the tree root
182 struct btrfs_backref_node *node,
188 while (!list_empty(&node->upper)) {
189 edge = list_entry(node->upper.next,
192 node = edge->node[UPPER];
194 BUG_ON(node->detached);
196 return node;
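
The walk at lines 179-196 follows upper edges until no parent remains. A minimal kernel-context sketch of the same loop (the helper name walk_up is mine, and walk_up_backref()'s edge-index bookkeeping for the caller is omitted):

static struct btrfs_backref_node *walk_up(struct btrfs_backref_node *node)
{
        struct btrfs_backref_edge *edge;

        /* Follow the first upper edge until the node has no parent. */
        while (!list_empty(&node->upper)) {
                edge = list_first_entry(&node->upper,
                                        struct btrfs_backref_edge,
                                        list[LOWER]);
                node = edge->node[UPPER];
                /* A detached node must never be reachable from below. */
                BUG_ON(node->detached);
        }
        /* No upper edge left: this node represents the tree root. */
        return node;
}
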
211 lower = edge->node[LOWER];
220 return edge->node[UPPER];
227 struct btrfs_backref_node *node, u64 bytenr)
230 rb_erase(&node->rb_node, &cache->rb_root);
231 node->bytenr = bytenr;
232 rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
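
Lines 227-232 show the standard rb-tree re-key pattern: the key field may only change while the node is outside the tree, because rb-trees order entries at insert time only. A kernel-context sketch (the wrapper name rekey_backref_node is hypothetical; the caller's error handling is elided):

static int rekey_backref_node(struct btrfs_backref_cache *cache,
                              struct btrfs_backref_node *node, u64 bytenr)
{
        struct rb_node *rb_node;

        /* Detach under the old key. */
        rb_erase(&node->rb_node, &cache->rb_root);
        /* Safe to mutate the key: the node is outside the tree now. */
        node->bytenr = bytenr;
        /* Re-attach under the new key. */
        rb_node = rb_simple_insert(&cache->rb_root, node->bytenr,
                                   &node->rb_node);
        /* A collision here means the cache is corrupted. */
        return rb_node ? -EEXIST : 0;
}
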
243 struct btrfs_backref_node *node;
260 node = list_entry(cache->detached.next,
262 btrfs_backref_cleanup_node(cache, node);
266 node = list_entry(cache->changed.next,
268 list_del_init(&node->list);
269 BUG_ON(node->pending);
270 update_backref_node(cache, node, node->new_bytenr);
278 list_for_each_entry(node, &cache->pending[level], list) {
279 BUG_ON(!node->pending);
280 if (node->bytenr == node->new_bytenr)
282 update_backref_node(cache, node, node->new_bytenr);
341 * so the backref node for the fs tree root is useless for
354 struct mapping_node *node;
361 node = rb_entry(rb_node, struct mapping_node, rb_node);
362 root = (struct btrfs_root *)node->data;
372 * If a child node is also an orphan (no parent) during cleanup, then the
373 * child node will be cleaned up as well.
376 * For non-leaf nodes, the node is still cached as "detached"
378 * Return false if @node is not in the @useless_nodes list.
379 * Return true if @node is in the @useless_nodes list.
382 struct btrfs_backref_node *node)
398 if (cur == node)
401 /* The node is the lowest node */
416 lower = edge->node[LOWER];
419 /* Child node is also an orphan, queue it for cleanup */
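
The orphan propagation described at lines 372-373 is a plain work-list algorithm: dropping a useless node's lower edges may leave a child with no parent, and that child then joins the same list. A simplified kernel-context sketch (the helper name drop_useless is mine; handle_useless_nodes()'s leaf/non-leaf "detached" distinction and error paths are omitted):

static void drop_useless(struct btrfs_backref_cache *cache,
                         struct list_head *useless)
{
        struct btrfs_backref_node *cur, *lower;
        struct btrfs_backref_edge *edge;

        while (!list_empty(useless)) {
                cur = list_first_entry(useless, struct btrfs_backref_node,
                                       list);
                list_del_init(&cur->list);

                /* Drop every edge to the children. */
                while (!list_empty(&cur->lower)) {
                        edge = list_first_entry(&cur->lower,
                                                struct btrfs_backref_edge,
                                                list[UPPER]);
                        lower = edge->node[LOWER];
                        list_del(&edge->list[UPPER]);
                        list_del(&edge->list[LOWER]);
                        btrfs_backref_free_edge(cache, edge);

                        /* Child lost its last parent: it is an orphan too. */
                        if (list_empty(&lower->upper))
                                list_add(&lower->list, useless);
                }
                btrfs_backref_cleanup_node(cache, cur);
        }
}
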
465 struct btrfs_backref_node *node = NULL;
479 node = btrfs_backref_alloc_node(cache, bytenr, level);
480 if (!node) {
485 node->lowest = 1;
486 cur = node;
504 cur = edge->node[UPPER];
509 ret = btrfs_backref_finish_upper_links(cache, node);
515 if (handle_useless_nodes(rc, node))
516 node = NULL;
521 btrfs_backref_error_cleanup(cache, node);
524 ASSERT(!node || !node->detached);
527 return node;
531 * helper to add a backref node for the newly created snapshot.
532 * the backref node is created by cloning the backref node that
542 struct btrfs_backref_node *node = NULL;
553 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
554 if (node->detached)
555 node = NULL;
557 BUG_ON(node->new_bytenr != reloc_root->node->start);
560 if (!node) {
564 node = rb_entry(rb_node, struct btrfs_backref_node,
566 BUG_ON(node->detached);
570 if (!node)
573 new_node = btrfs_backref_alloc_node(cache, dest->node->start,
574 node->level);
578 new_node->lowest = node->lowest;
583 if (!node->lowest) {
584 list_for_each_entry(edge, &node->lower, list[UPPER]) {
589 btrfs_backref_link_edge(new_edge, edge->node[LOWER],
604 &new_edge->node[LOWER]->upper);
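
The loop at lines 584-589 gives the clone one new lower edge per lower edge of the source node, pointing at the same children. A kernel-context sketch of that loop (the function name clone_lower_edges is mine; unwinding of earlier allocations on failure is omitted):

static int clone_lower_edges(struct btrfs_backref_cache *cache,
                             struct btrfs_backref_node *node,
                             struct btrfs_backref_node *new_node)
{
        struct btrfs_backref_edge *edge;
        struct btrfs_backref_edge *new_edge;

        list_for_each_entry(edge, &node->lower, list[UPPER]) {
                new_edge = btrfs_backref_alloc_edge(cache);
                if (!new_edge)
                        return -ENOMEM;
                /* Link only the UPPER side inside the loop. */
                btrfs_backref_link_edge(new_edge, edge->node[LOWER],
                                        new_node, LINK_UPPER);
        }
        return 0;
}

Note the two-phase linking: the LOWER-side list_add (line 604) is deferred until every allocation has succeeded, so a mid-loop failure can unwind without ever having touched the children's upper lists.
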
626 struct mapping_node *node;
629 node = kmalloc(sizeof(*node), GFP_NOFS);
630 if (!node)
633 node->bytenr = root->commit_root->start;
634 node->data = root;
638 node->bytenr, &node->rb_node);
643 node->bytenr);
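
Lines 626-643 register a reloc root by bytenr so find_reloc_root() (lines 354-362) can map a tree-block bytenr back to its btrfs_root. A kernel-context sketch (the name add_mapping is mine; the reloc_root_tree lock and the btrfs_backref_panic() path are elided):

static int add_mapping(struct reloc_control *rc, struct btrfs_root *root)
{
        struct mapping_node *node;
        struct rb_node *rb_node;

        node = kmalloc(sizeof(*node), GFP_NOFS);
        if (!node)
                return -ENOMEM;

        /* Key the mapping on the commit root's block address. */
        node->bytenr = root->commit_root->start;
        node->data = root;
        rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
                                   node->bytenr, &node->rb_node);
        if (rb_node) {
                /* Duplicate bytenr: this root is already registered. */
                kfree(node);
                return -EEXIST;
        }
        return 0;
}
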
658 struct mapping_node *node = NULL;
662 if (rc && root->node) {
667 node = rb_entry(rb_node, struct mapping_node, rb_node);
668 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
669 RB_CLEAR_NODE(&node->rb_node);
672 ASSERT(!node || (struct btrfs_root *)node->data == root);
691 kfree(node);
702 struct mapping_node *node = NULL;
709 node = rb_entry(rb_node, struct mapping_node, rb_node);
710 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
714 if (!node)
716 BUG_ON((struct btrfs_root *)node->data != root);
719 node->bytenr = root->node->start;
721 node->bytenr, &node->rb_node);
724 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
774 ret = btrfs_copy_root(trans, root, root->node, &eb,
924 if (reloc_root->commit_root != reloc_root->node) {
926 btrfs_set_root_node(root_item, reloc_root->node);
943 struct rb_node *node;
950 node = root->inode_tree.rb_node;
952 while (node) {
953 prev = node;
954 entry = rb_entry(node, struct btrfs_inode, rb_node);
957 node = node->rb_left;
959 node = node->rb_right;
963 if (!node) {
967 node = prev;
973 while (node) {
974 entry = rb_entry(node, struct btrfs_inode, rb_node);
985 node = rb_next(node);
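
Lines 943-985 are the classic "first entry with key >= target" rb-tree lookup: descend while remembering the last node on the path, then, if no exact match was found, walk forward with rb_next() from that node. A self-contained kernel-context sketch over a hypothetical struct item (the real code walks struct btrfs_inode entries keyed by inode number):

struct item {
        u64 key;
        struct rb_node rb;
};

static struct item *first_at_or_after(struct rb_root *root, u64 objectid)
{
        struct rb_node *node = root->rb_node;
        struct rb_node *prev = NULL;
        struct item *entry;

        while (node) {
                prev = node;    /* remember the last node on the path */
                entry = rb_entry(node, struct item, rb);
                if (objectid < entry->key)
                        node = node->rb_left;
                else if (objectid > entry->key)
                        node = node->rb_right;
                else
                        return entry;   /* exact match */
        }
        /* No exact match: walk forward from the last visited node. */
        node = prev;
        while (node) {
                entry = rb_entry(node, struct item, rb);
                if (entry->key >= objectid)
                        return entry;
                node = rb_next(node);
        }
        return NULL;
}
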
1337 * CoW on the subtree root node before transaction commit.
1687 atomic_inc(&reloc_root->node->refs);
1688 path->nodes[level] = reloc_root->node;
2037 struct btrfs_backref_node *node,
2044 next = node;
2060 if (next->new_bytenr != root->node->start) {
2063 next->new_bytenr = root->node->start;
2076 if (!next || next->level <= node->level)
2082 next = node;
2083 /* set up the backref node path for btrfs_reloc_cow_block */
2088 next = edges[index]->node[UPPER];
2103 struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2111 next = node;
2125 if (next != node)
2129 if (!next || next->level <= node->level)
2140 struct btrfs_backref_node *node, int reserve)
2143 struct btrfs_backref_node *next = node;
2149 BUG_ON(reserve && node->processed);
2154 if (next->processed && (reserve || next != node))
2165 next = edge->node[UPPER];
2174 struct btrfs_backref_node *node)
2182 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
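
calcu_metadata_size() (lines 2140-2165) sizes the reservation by charging one tree block per level on the walk from the node toward the root, and line 2182 then reserves twice that amount. A stripped-down kernel-context sketch (the name metadata_bytes_for_path is mine; the ->processed early-exit and the reserve flag handling are dropped):

static u64 metadata_bytes_for_path(struct btrfs_fs_info *fs_info,
                                   struct btrfs_backref_node *node)
{
        struct btrfs_backref_node *next = node;
        u64 num_bytes = 0;

        while (next) {
                /* One tree block is charged per level on the path. */
                num_bytes += fs_info->nodesize;
                if (list_empty(&next->upper))
                        break;  /* reached the tree root */
                next = list_first_entry(&next->upper,
                                        struct btrfs_backref_edge,
                                        list[LOWER])->node[UPPER];
        }
        return num_bytes;
}
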
2222 struct btrfs_backref_node *node,
2239 BUG_ON(lowest && node->eb);
2241 path->lowest_level = node->level + 1;
2242 rc->backref_cache.path[node->level] = node;
2243 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2249 upper = edge->node[UPPER];
2262 if (node->eb->start == bytenr)
2303 if (bytenr != node->bytenr) {
2305 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2306 bytenr, node->bytenr, slot,
2312 if (node->eb->start == bytenr)
2332 if (!node->eb) {
2341 BUG_ON(node->eb != eb);
2344 node->eb->start);
2350 node->eb->start, blocksize,
2353 btrfs_init_tree_ref(&ref, node->level,
2370 if (!err && node->pending) {
2371 btrfs_backref_drop_node_buffer(node);
2372 list_move_tail(&node->list, &rc->backref_cache.changed);
2373 node->pending = 0;
2383 struct btrfs_backref_node *node,
2388 btrfs_node_key_to_cpu(node->eb, &key, 0);
2389 return do_relocation(trans, rc, node, &key, path, 0);
2398 struct btrfs_backref_node *node;
2404 node = list_entry(cache->pending[level].next,
2406 list_move_tail(&node->list, &list);
2407 BUG_ON(!node->pending);
2410 ret = link_to_upper(trans, rc, node, path);
2425 struct btrfs_backref_node *node)
2427 struct btrfs_backref_node *next = node;
2446 next = edge->node[UPPER];
2489 struct btrfs_backref_node *node,
2496 if (!node)
2503 ret = reserve_metadata_space(trans, rc, node);
2507 BUG_ON(node->processed);
2508 root = select_one_root(node);
2510 update_processed_blocks(rc, node);
2516 BUG_ON(node->new_bytenr);
2517 BUG_ON(!list_empty(&node->list));
2520 node->new_bytenr = root->node->start;
2521 btrfs_put_root(node->root);
2522 node->root = btrfs_grab_root(root);
2523 ASSERT(node->root);
2524 list_add_tail(&node->list, &rc->backref_cache.changed);
2526 path->lowest_level = node->level;
2533 update_processed_blocks(rc, node);
2535 ret = do_relocation(trans, rc, node, key, path, 1);
2538 if (ret || node->level == 0 || node->cowonly)
2539 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2551 struct btrfs_backref_node *node;
2581 node = build_backref_tree(rc, &block->key,
2583 if (IS_ERR(node)) {
2584 err = PTR_ERR(node);
2588 ret = relocate_tree_block(trans, rc, node, &block->key,
3556 struct mapping_node *node, *tmp;
3559 rbtree_postorder_for_each_entry_safe(node, tmp,
3561 kfree(node);
3961 struct btrfs_backref_node *node;
3982 node = rc->backref_cache.path[level];
3983 BUG_ON(node->bytenr != buf->start &&
3984 node->new_bytenr != buf->start);
3986 btrfs_backref_drop_node_buffer(node);
3988 node->eb = cow;
3989 node->new_bytenr = cow->start;
3991 if (!node->pending) {
3992 list_move_tail(&node->list,
3994 node->pending = 1;
3998 mark_block_processed(rc, node);
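
The CoW hook at lines 3961-3998 keeps a cached backref node in sync with a just-copied block: drop the stale extent buffer, take a reference on the copy, record the new bytenr, and queue the node on the per-level pending list for the later merge pass. A kernel-context sketch (the name track_cowed_block is mine; the first_cow and level-0 details are omitted):

static void track_cowed_block(struct reloc_control *rc,
                              struct btrfs_backref_node *node,
                              struct extent_buffer *cow)
{
        /* Release the stale extent buffer held by the node. */
        btrfs_backref_drop_node_buffer(node);
        /* The node now holds a reference on the copy. */
        atomic_inc(&cow->refs);
        node->eb = cow;
        node->new_bytenr = cow->start;

        if (!node->pending) {
                /* Queue for the merge pass at this node's level. */
                list_move_tail(&node->list,
                               &rc->backref_cache.pending[node->level]);
                node->pending = 1;
        }
        mark_block_processed(rc, node);
}
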