Lines matching refs:node
31 struct rb_node node;
37 #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
52 static inline struct llist_node *barrier_to_ll(struct active_node *node)
54 GEM_BUG_ON(!is_barrier(&node->base));
55 return (struct llist_node *)&node->base.cb.node;
59 __barrier_to_engine(struct active_node *node)
61 return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
65 barrier_to_engine(struct active_node *node)
67 GEM_BUG_ON(!is_barrier(&node->base));
68 return __barrier_to_engine(node);
74 struct active_node, base.cb.node);
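
The helpers above (fetch_node(), barrier_to_ll(), __barrier_to_engine(), barrier_from_ll()) all depend on the rb_node/llist_node being embedded inside struct active_node, so a pointer to the member can be turned back into a pointer to its container. Below is a minimal userspace sketch of that container_of()/offsetof() pattern, assuming nothing beyond standard C; sketch_node and sketch_entry are illustrative names, not driver API.

#include <stddef.h>
#include <stdio.h>

struct sketch_member {
        int dummy;
};

struct sketch_node {
        int timeline;
        struct sketch_member member;    /* embedded, like active_node.node */
};

/* Same arithmetic as rb_entry()/container_of(): step back by the member offset. */
#define sketch_entry(ptr, type, field) \
        ((type *)((char *)(ptr) - offsetof(type, field)))

int main(void)
{
        struct sketch_node n = { .timeline = 42 };
        struct sketch_member *m = &n.member;
        struct sketch_node *owner = sketch_entry(m, struct sketch_node, member);

        printf("timeline=%d, recovered container: %s\n",
               owner->timeline, owner == &n ? "yes" : "no");
        return 0;
}
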
149 /* Keep the MRU cached node for reuse */
152 rb_erase(&ref->cache->node, &ref->tree);
155 /* Rebuild the tree with only the cached node */
156 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
157 rb_insert_color(&ref->cache->node, &ref->tree);
158 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
160 /* Make the cached node available for reuse with any timeline */
175 rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
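
Lines 149-175 are from the retire path: once everything is idle the tree is torn down, but the most-recently-used node is kept behind as ref->cache (with its timeline cleared, per the comment at line 160) so the next acquire can reuse it without allocating. A rough sketch of that keep-one-node idea follows, with a singly linked list standing in for the rbtree; tracker and cache_node are invented names.

#include <stdlib.h>

struct cache_node {
        unsigned long timeline;         /* key; 0 means "unclaimed" */
        struct cache_node *next;
};

struct tracker {
        struct cache_node *nodes;       /* stand-in for ref->tree */
        struct cache_node *cache;       /* stand-in for ref->cache */
};

static void tracker_retire(struct tracker *t)
{
        struct cache_node *it = t->nodes;

        t->nodes = NULL;
        while (it) {
                struct cache_node *next = it->next;

                if (it == t->cache) {
                        /* Keep the MRU node; clear its key so any timeline may claim it. */
                        it->timeline = 0;
                        it->next = NULL;
                        t->nodes = it;
                } else {
                        free(it);
                }
                it = next;
        }
}
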
274 BUILD_BUG_ON(offsetof(typeof(*it), node));
282 it = fetch_node(it->node.rb_right);
284 it = fetch_node(it->node.rb_left);
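
Lines 274-284 come from the lockless lookup: each child pointer is loaded exactly once via fetch_node(), the READ_ONCE() wrapper defined at line 37, so a racing insert can change the tree without the walker ever re-reading a pointer it already tested. A hedged userspace approximation of that single-load descent over a tree keyed by timeline follows; read_once_ptr() and lookup_node are stand-ins, not kernel macros.

#include <stdint.h>
#include <stddef.h>

struct lookup_node {
        uint64_t timeline;
        struct lookup_node *left, *right;
};

/* Crude stand-in for READ_ONCE(): force a single load through a volatile lvalue. */
#define read_once_ptr(p) (*(struct lookup_node * volatile *)&(p))

static struct lookup_node *lookup(struct lookup_node **rootp, uint64_t idx)
{
        struct lookup_node *it = read_once_ptr(*rootp);

        while (it) {
                if (it->timeline == idx)
                        return it;
                /* Read each child pointer exactly once per step. */
                if (it->timeline < idx)
                        it = read_once_ptr(it->right);
                else
                        it = read_once_ptr(it->left);
        }
        return NULL;
}
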
298 struct active_node *node, *prealloc;
301 node = __active_lookup(ref, idx);
302 if (likely(node))
303 return &node->base;
318 node = rb_entry(parent, struct active_node, node);
319 if (node->timeline == idx) {
324 if (node->timeline < idx)
330 node = prealloc;
331 __i915_active_fence_init(&node->base, NULL, node_retire);
332 node->ref = ref;
333 node->timeline = idx;
335 rb_link_node(&node->node, parent, p);
336 rb_insert_color(&node->node, &ref->tree);
339 WRITE_ONCE(ref->cache, node);
342 return &node->base;
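
Lines 298-342 are the lookup-or-insert slow path: a replacement node is preallocated before the tree lock is taken, the tree is re-searched under the lock, and only on a miss is the preallocated node linked in, after which ref->cache is updated to point at whatever node was used. A compact sketch of that preallocate-then-insert-under-lock shape, using a sorted list and a pthread mutex purely for illustration; registry, slot and registry_get() are made-up names.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct slot {
        uint64_t timeline;
        struct slot *next;
};

struct registry {
        pthread_mutex_t lock;
        struct slot *slots;             /* kept sorted by timeline */
        struct slot *cache;             /* most recently returned slot */
};

/* Return the slot tracking @idx, creating it if needed; NULL only on ENOMEM. */
static struct slot *registry_get(struct registry *r, uint64_t idx)
{
        /* Preallocate outside the lock so the critical section stays short. */
        struct slot *prealloc = malloc(sizeof(*prealloc));
        struct slot **p, *it;

        if (!prealloc)
                return NULL;

        pthread_mutex_lock(&r->lock);
        for (p = &r->slots; (it = *p) && it->timeline < idx; p = &it->next)
                ;
        if (it && it->timeline == idx) {
                free(prealloc);                 /* raced: the slot already exists */
        } else {
                prealloc->timeline = idx;
                prealloc->next = it;
                *p = prealloc;
                it = prealloc;
        }
        r->cache = it;                          /* like WRITE_ONCE(ref->cache, node) */
        pthread_mutex_unlock(&r->lock);
        return it;
}

Allocating before taking the lock keeps the critical section down to the search and the pointer swap, at the cost of sometimes discarding an unused preallocation.
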
376 struct active_node *node,
383 GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
386 * Rebuild the llist excluding our node. We may perform this
393 * claim ownership of its node.
401 if (node == barrier_from_ll(pos)) {
402 node = NULL;
414 return !node;
418 __active_del_barrier(struct i915_active *ref, struct active_node *node)
420 return ____active_del_barrier(ref, node, barrier_to_engine(node));
426 if (!is_barrier(active)) /* proto-node used by our idle barrier? */
621 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
742 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
800 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
802 return node->timeline == idx && !i915_active_fence_isset(&node->base);
817 * node kept alive (as we reuse before parking). We prefer to reuse
822 p = &ref->cache->node;
829 struct active_node *node =
830 rb_entry(p, struct active_node, node);
832 if (is_idle_barrier(node, idx))
836 if (node->timeline < idx)
849 struct active_node *node =
850 rb_entry(p, struct active_node, node);
853 if (node->timeline > idx)
856 if (node->timeline < idx)
859 if (is_idle_barrier(node, idx))
869 engine = __barrier_to_engine(node);
871 if (is_barrier(&node->base) &&
872 ____active_del_barrier(ref, node, engine))
881 if (p == &ref->cache->node)
885 return rb_entry(p, struct active_node, node);
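
Lines 800-885 try to recycle an existing idle barrier: per is_idle_barrier() a node qualifies when its timeline equals idx and its fence slot is unset, and the search starts from the cached node before walking the tree (twice in the full code, with ownership proven via ____active_del_barrier()). A much-simplified sketch of that selection using a flat list; idle_node and find_idle() are invented names.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct idle_node {
        uint64_t timeline;
        void *fence;                    /* NULL means the node is idle */
        struct idle_node *next;
};

static bool node_is_idle(const struct idle_node *n, uint64_t idx)
{
        return n->timeline == idx && !n->fence;
}

/* Try the cached node first, then fall back to scanning the rest. */
static struct idle_node *find_idle(struct idle_node *cache,
                                   struct idle_node *nodes, uint64_t idx)
{
        if (cache && node_is_idle(cache, idx))
                return cache;

        for (struct idle_node *it = nodes; it; it = it->next)
                if (node_is_idle(it, idx))
                        return it;

        return NULL;
}
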
902 * Preallocate a node for each physical engine supporting the target
911 struct active_node *node;
914 node = reuse_idle_barrier(ref, idx);
916 if (!node) {
917 node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
918 if (!node)
921 RCU_INIT_POINTER(node->base.fence, NULL);
922 node->base.cb.func = node_retire;
923 node->timeline = idx;
924 node->ref = ref;
927 if (!i915_active_fence_isset(&node->base)) {
929 * Mark this as being *our* unconnected proto-node.
931 * Since this node is not in any list, and we have
933 * request to indicate this is an idle-barrier node
937 RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
938 node->base.cb.node.prev = (void *)engine;
941 GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
943 GEM_BUG_ON(barrier_to_engine(node) != engine);
944 first = barrier_to_ll(node);
958 struct active_node *node = barrier_from_ll(first);
963 intel_engine_pm_put(barrier_to_engine(node));
965 kmem_cache_free(global.slab_cache, node);
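
Lines 902-965 preallocate one barrier per engine. While a freshly allocated node is not yet linked anywhere, lines 937-938 tag it as a proto-node by storing an ERR_PTR(-EAGAIN) sentinel in its fence slot and by borrowing the otherwise unused cb.node.prev field to remember the owning engine, which is what __barrier_to_engine() at line 61 reads back. A toy sketch of that pointer-tagging idea, assuming nothing kernel-specific; proto, proto_mark() and friends are invented names.

#include <stdbool.h>
#include <stdint.h>

/* ERR_PTR-style sentinel: a small negative errno is never a valid pointer. */
#define SENTINEL_PTR(err)       ((void *)(intptr_t)(err))
#define PROTO_SENTINEL          SENTINEL_PTR(-11)       /* stands in for ERR_PTR(-EAGAIN) */

struct engine;                          /* opaque stand-in for intel_engine_cs */

struct proto {
        void *fence;                    /* normally a fence; sentinel while unconnected */
        void *prev;                     /* borrowed to stash the owning engine */
};

static void proto_mark(struct proto *p, struct engine *e)
{
        p->fence = PROTO_SENTINEL;      /* "idle barrier, not attached yet" */
        p->prev = e;                    /* read back as in __barrier_to_engine() */
}

static bool proto_is_barrier(const struct proto *p)
{
        return p->fence == PROTO_SENTINEL;
}

static struct engine *proto_engine(const struct proto *p)
{
        return (struct engine *)p->prev;
}
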
984 struct active_node *node = barrier_from_ll(pos);
985 struct intel_engine_cs *engine = barrier_to_engine(node);
997 it = rb_entry(parent, struct active_node, node);
998 if (it->timeline < node->timeline)
1003 rb_link_node(&node->node, parent, p);
1004 rb_insert_color(&node->node, &ref->tree);
1008 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
1013 static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
1015 return __active_fence_slot(&barrier_from_ll(node)->base);
1021 struct llist_node *node, *next;
1028 node = llist_del_all(&engine->barrier_tasks);
1029 if (!node)
1037 llist_for_each_safe(node, next, node) {
1039 smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
1040 list_add_tail((struct list_head *)node, &rq->fence.cb_list);
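
Lines 1013-1040 drain engine->barrier_tasks in one atomic step (llist_del_all()), point each barrier's fence slot at the new request, and then reuse the node's storage as a list_head on the request's cb_list (line 1040 is the cast back; barrier_to_ll() at line 55 casts in the other direction). A small C11 sketch of the push / detach-all pattern behind such a lock-free list; lstack and lnode are illustrative, not kernel API.

#include <stdatomic.h>
#include <stddef.h>

struct lnode {
        struct lnode *next;
};

struct lstack {
        _Atomic(struct lnode *) head;
};

/* Lock-free push, analogous to llist_add(). */
static void lstack_add(struct lstack *s, struct lnode *n)
{
        struct lnode *old = atomic_load(&s->head);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&s->head, &old, n));
}

/* Atomically detach the whole chain, analogous to llist_del_all(). */
static struct lnode *lstack_del_all(struct lstack *s)
{
        return atomic_exchange(&s->head, NULL);
}

/* The consumer then walks a private list with no further racing. */
static size_t lstack_drain(struct lstack *s)
{
        size_t count = 0;

        for (struct lnode *n = lstack_del_all(s); n; n = n->next)
                count++;
        return count;
}

Detaching the whole chain at once means the consumer walks a private list, so no per-node locking is needed while producers keep pushing new barriers.
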
1140 __list_del_entry(&active->cb.node);
1143 list_add_tail(&active->cb.node, &fence->cb_list);