Lines matching refs:node in drivers/iommu/iova.c (the IOVA allocator's rbtree):
15 /* The anchor node sits above the top of the usable address space */
42 iovad->cached_node = &iovad->anchor.node;
43 iovad->cached32_node = &iovad->anchor.node;
51 rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
52 rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
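
The matches at 15, 42-43 and 51-52 all come from domain setup: a sentinel "anchor" entry is linked into the empty rbtree, and both allocation caches start out pointing at it. Below is a minimal kernel-style sketch of that pattern, with the struct layouts abbreviated to just the fields these matches touch.

#include <linux/rbtree.h>

/* Abbreviated layouts: only the fields the matches above touch. */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi;
	unsigned long	pfn_lo;
};

struct iova_domain {
	struct rb_root	rbroot;
	struct rb_node	*cached_node;
	struct rb_node	*cached32_node;
	struct iova	anchor;
};

static void domain_init_sketch(struct iova_domain *iovad)
{
	iovad->rbroot = RB_ROOT;
	/* Both caches start at the sentinel above the usable space. */
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->anchor.pfn_lo = ~0UL;	/* IOVA_ANCHOR in the real code */
	iovad->anchor.pfn_hi = ~0UL;
	/* The tree is empty, so the anchor hangs directly off the root. */
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
}
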
128 iovad->cached32_node = &new->node;
130 iovad->cached_node = &new->node;
138 cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
142 iovad->cached32_node = rb_next(&free->node);
147 cached_iova = rb_entry(iovad->cached_node, struct iova, node);
149 iovad->cached_node = rb_next(&free->node);
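
Matches 128-149 maintain those caches across the tree's lifetime: a newly inserted range becomes the cached starting point for its size class, and when the cached entry itself is freed, the pointer slides to its in-order successor so it never dangles. A simplified sketch, assuming the layouts above and leaving out the extra pfn comparisons the real code performs:

/* Simplified: the real code also compares pfn_lo/pfn_hi bounds
 * before deciding whether a cache pointer needs updating. */
static void cached_insert_update(struct iova_domain *iovad, struct iova *new,
				 unsigned long limit32_pfn)
{
	if (new->pfn_hi < limit32_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void cached_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free == cached_iova)
		iovad->cached32_node = rb_next(&free->node);

	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free == cached_iova)
		iovad->cached_node = rb_next(&free->node);
}
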
160 /* Figure out where to put new node */
162 struct iova *this = rb_entry(*new, struct iova, node);
175 /* Add new node and rebalance tree. */
176 rb_link_node(&iova->node, parent, new);
177 rb_insert_color(&iova->node, root);
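
Lines 160-177 are the canonical Linux rbtree insertion idiom: walk the child links down from the root to find the empty slot, remember the parent, then rb_link_node() and rb_insert_color() to rebalance. Filled out as a complete function keyed on pfn_lo, close to what the surrounding code does:

static void
insert_rbtree_sketch(struct rb_root *root, struct iova *iova,
		     struct rb_node *start)
{
	/* Optionally start the descent below a known node. */
	struct rb_node **new = start ? &start : &(root->rb_node);
	struct rb_node *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;
		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			return;	/* two ranges must not share a pfn_lo */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
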
200 curr_iova = rb_entry(curr, struct iova, node);
206 curr_iova = rb_entry(curr, struct iova, node);
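
The two rb_entry() calls at 200 and 206 sit inside the allocation loop, which starts from a cached node and walks toward lower addresses until a large-enough gap opens between neighbours. A rough sketch of that walk, with the alignment mask and the retry-without-cache logic of the real allocator omitted:

/* Rough sketch; assumes limit_pfn >= size and ignores alignment. */
static unsigned long
alloc_walk_sketch(struct iova_domain *iovad, unsigned long size,
		  unsigned long limit_pfn)
{
	struct rb_node *curr = iovad->cached_node;
	struct iova *curr_iova = rb_entry(curr, struct iova, node);
	unsigned long new_pfn;

	do {
		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = limit_pfn - size;	/* candidate gap below curr */
		curr = rb_prev(curr);
		if (!curr)
			break;
		curr_iova = rb_entry(curr, struct iova, node);
	} while (new_pfn <= curr_iova->pfn_hi);

	return new_pfn;	/* caller inserts [new_pfn, new_pfn + size - 1] */
}
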
320 struct rb_node *node = iovad->rbroot.rb_node;
324 while (node) {
325 struct iova *iova = rb_entry(node, struct iova, node);
328 node = node->rb_left;
330 node = node->rb_right;
342 rb_erase(&iova->node, &iovad->rbroot);
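
Matches 320-342 form the point lookup plus removal: a standard binary-search descent keyed on the [pfn_lo, pfn_hi] interval, with rb_erase() at 342 unlinking the entry once found. The descent, reconstructed:

static struct iova *find_iova_sketch(struct iova_domain *iovad,
				     unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls inside this range */
	}

	return NULL;
}
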
599 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
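
Match 599 is the teardown path. Postorder iteration visits children before their parent, so every entry can be freed without the tree ever rebalancing under the walker. A sketch (kfree() is from <linux/slab.h>; the real code frees through a helper that also skips the anchor):

static void put_domain_sketch(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		if (iova != &iovad->anchor)	/* anchor is embedded, not allocated */
			kfree(iova);
}
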
605 __is_range_overlap(struct rb_node *node,
608 struct iova *iova = rb_entry(node, struct iova, node);
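
The helper matched at 605-608 is a plain interval-overlap test, which holds exactly when neither range ends before the other begins:

static bool is_range_overlap_sketch(struct rb_node *node,
				    unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	/* Overlap iff neither range ends before the other begins. */
	return pfn_lo <= iova->pfn_hi && pfn_hi >= iova->pfn_lo;
}
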
664 struct rb_node *node;
674 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
675 if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
676 iova = rb_entry(node, struct iova, node);
687 /* We are here either because this is the first reserver node
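
Matches 664-687 use that predicate in the reservation path: a linear in-order walk (rb_first()/rb_next()) over the whole tree, and the truncated comment at 687 marks the point where no further overlap was found and a fresh reservation gets inserted. A simplified sketch that stops at the first overlapping entry, whereas the real function keeps adjusting the requested bounds as it goes:

static struct iova *
find_overlap_sketch(struct iova_domain *iovad,
		    unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;

	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (is_range_overlap_sketch(node, pfn_lo, pfn_hi))
			return rb_entry(node, struct iova, node);
	}

	return NULL;	/* no overlap: caller inserts a fresh reservation */
}
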
709 struct rb_node *node;
712 for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
713 struct iova *iova = rb_entry(node, struct iova, node);
748 rb_erase(&iova->node, &iovad->rbroot);
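
The final matches cover the remaining life-cycle pieces: 709-713 copy every reserved range from one domain into another with a full in-order walk, while 748 is the converse of insertion, rb_erase() unlinking an entry when it is freed. A sketch of the copy, reusing the insertion helper above; locking and error reporting are omitted, and kzalloc()/GFP_KERNEL come from <linux/slab.h>:

static void copy_reserved_sketch(struct iova_domain *from,
				 struct iova_domain *to)
{
	struct rb_node *node;

	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *copy;

		if (iova == &from->anchor)	/* skip the sentinel */
			continue;
		copy = kzalloc(sizeof(*copy), GFP_KERNEL);
		if (!copy)
			return;
		copy->pfn_lo = iova->pfn_lo;
		copy->pfn_hi = iova->pfn_hi;
		insert_rbtree_sketch(&to->rbroot, copy, NULL);
	}
}
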