Lines Matching refs:node

27 	struct rb_node node;
33 #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
48 static inline struct llist_node *barrier_to_ll(struct active_node *node)
50 GEM_BUG_ON(!is_barrier(&node->base));
51 return (struct llist_node *)&node->base.cb.node;
55 __barrier_to_engine(struct active_node *node)
57 return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
61 barrier_to_engine(struct active_node *node)
63 GEM_BUG_ON(!is_barrier(&node->base));
64 return __barrier_to_engine(node);
70 struct active_node, base.cb.node);
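The matches above appear to come from the i915 active-tracking code (i915_active.c). The barrier helpers type-pun the callback's embedded list_head: while a barrier node is parked on an engine's lock-free list, the .next pointer doubles as the llist link and the otherwise unused .prev pointer hides the owning engine. A minimal kernel-style sketch of the same trick, using hypothetical demo_cb/demo_engine names rather than the driver's types:

#include <linux/list.h>
#include <linux/llist.h>
#include <linux/compiler.h>

struct demo_engine;

struct demo_cb {
	struct list_head node;	/* normally links into a callback list */
};

static inline struct llist_node *demo_cb_to_ll(struct demo_cb *cb)
{
	/* list_head.next doubles as llist_node.next while off-list */
	return (struct llist_node *)&cb->node;
}

static inline void demo_cb_set_engine(struct demo_cb *cb,
				      struct demo_engine *engine)
{
	/* .prev is unused while on the llist, so stash the engine there */
	cb->node.prev = (void *)engine;
}

static inline struct demo_engine *demo_cb_to_engine(struct demo_cb *cb)
{
	return (struct demo_engine *)READ_ONCE(cb->node.prev);
}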
145 /* Keep the MRU cached node for reuse */
148 rb_erase(&ref->cache->node, &ref->tree);
151 /* Rebuild the tree with only the cached node */
152 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
153 rb_insert_color(&ref->cache->node, &ref->tree);
154 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
156 /* Make the cached node available for reuse with any timeline */
170 rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
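The retire path above keeps one most-recently-used node, steals the rest of the tree into a local root, rebuilds a single-node rbtree, and frees the leftovers in postorder. A sketch of that shape with a hypothetical demo_cache type (not the driver's code):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cache {
	struct rb_node node;
	u64 key;
};

static void demo_retire(struct rb_root *tree, struct demo_cache *keep)
{
	struct demo_cache *it, *n;
	struct rb_root old;

	rb_erase(&keep->node, tree);	/* detach the keeper first */
	old = *tree;			/* everything else now lives in 'old' */

	/* Rebuild *tree so it contains only the keeper */
	rb_link_node(&keep->node, NULL, &tree->rb_node);
	rb_insert_color(&keep->node, tree);

	/* Free the rest; postorder visits children before their parents */
	rbtree_postorder_for_each_entry_safe(it, n, &old, node)
		kfree(it);
}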
267 BUILD_BUG_ON(offsetof(typeof(*it), node));
275 it = fetch_node(it->node.rb_right);
277 it = fetch_node(it->node.rb_left);
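The lookup above descends the rbtree locklessly: each child pointer is loaded once with READ_ONCE() and converted back to its container, and the BUILD_BUG_ON() guarantees the rb_node sits at offset 0 so a NULL child maps to a NULL container. A self-contained sketch of the same walk, with hypothetical demo_node/demo_fetch/demo_lookup names:

#include <linux/rbtree.h>
#include <linux/compiler.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node node;	/* must stay the first member */
	u64 key;
};

/* Load a child pointer exactly once, then map it back to its container */
#define demo_fetch(x) rb_entry(READ_ONCE(x), struct demo_node, node)

static struct demo_node *demo_lookup(struct rb_root *tree, u64 key)
{
	struct demo_node *it;

	/* rb_entry(NULL) == NULL only because 'node' is at offset 0 */
	BUILD_BUG_ON(offsetof(struct demo_node, node));

	it = demo_fetch(tree->rb_node);
	while (it) {
		if (it->key < key)
			it = demo_fetch(it->node.rb_right);
		else if (it->key > key)
			it = demo_fetch(it->node.rb_left);
		else
			break;
	}

	return it;
}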
291 struct active_node *node;
294 node = __active_lookup(ref, idx);
295 if (likely(node))
296 return &node->base;
306 node = rb_entry(parent, struct active_node, node);
307 if (node->timeline == idx)
310 if (node->timeline < idx)
320 node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
321 if (!node)
324 __i915_active_fence_init(&node->base, NULL, node_retire);
325 node->ref = ref;
326 node->timeline = idx;
328 rb_link_node(&node->node, parent, p);
329 rb_insert_color(&node->node, &ref->tree);
332 WRITE_ONCE(ref->cache, node);
335 return &node->base;
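active_instance() falls back to the classic rbtree insert idiom: walk to the leaf while remembering the parent and the link slot, then allocate and splice in a new node if the key was missing. A generic sketch (hypothetical demo_instance; the caller is assumed to hold the tree lock, hence GFP_ATOMIC as in the original):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node node;
	u64 key;
};

static struct demo_node *demo_instance(struct rb_root *tree, u64 key)
{
	struct rb_node **p = &tree->rb_node, *parent = NULL;
	struct demo_node *it;

	while (*p) {
		parent = *p;
		it = rb_entry(parent, struct demo_node, node);
		if (it->key == key)
			return it;	/* already present */

		if (it->key < key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/* atomic allocation: we cannot sleep while holding the tree lock */
	it = kmalloc(sizeof(*it), GFP_ATOMIC);
	if (!it)
		return NULL;

	it->key = key;
	rb_link_node(&it->node, parent, p);
	rb_insert_color(&it->node, tree);
	return it;
}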
366 struct active_node *node,
373 GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
376 * Rebuild the llist excluding our node. We may perform this
383 * claim ownership of its node.
391 if (node == barrier_from_ll(pos)) {
392 node = NULL;
404 return !node;
408 __active_del_barrier(struct i915_active *ref, struct active_node *node)
410 return ____active_del_barrier(ref, node, barrier_to_engine(node));
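____active_del_barrier() rebuilds the engine's lock-free list without its node: take the whole llist atomically, filter out the victim while chaining the survivors, and push the survivors back in one batch. A hedged sketch of that pattern with hypothetical demo names:

#include <linux/llist.h>
#include <linux/types.h>

static bool demo_del_from_llist(struct llist_head *list,
				struct llist_node *victim)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;
	bool found = false;

	llist_for_each_safe(pos, next, llist_del_all(list)) {
		if (pos == victim) {
			found = true;
			continue;	/* drop the victim */
		}

		/* chain the survivors into a new list */
		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}

	if (head)
		llist_add_batch(head, tail, list);	/* put the rest back */

	return found;
}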
416 if (!is_barrier(active)) /* proto-node used by our idle barrier? */
590 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
711 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
769 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
771 return node->timeline == idx && !i915_active_fence_isset(&node->base);
786 * node kept alive (as we reuse before parking). We prefer to reuse
791 p = &ref->cache->node;
798 struct active_node *node =
799 rb_entry(p, struct active_node, node);
801 if (is_idle_barrier(node, idx))
805 if (node->timeline < idx)
818 struct active_node *node =
819 rb_entry(p, struct active_node, node);
822 if (node->timeline > idx)
825 if (node->timeline < idx)
828 if (is_idle_barrier(node, idx))
838 engine = __barrier_to_engine(node);
840 if (is_barrier(&node->base) &&
841 ____active_del_barrier(ref, node, engine))
850 if (p == &ref->cache->node)
854 return rb_entry(p, struct active_node, node);
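reuse_idle_barrier() hunts the tree for a node whose fence slot is unset so a fresh allocation can be skipped. The sketch below is deliberately simpler (an in-order scan, hypothetical demo_* names); the real code descends by key and can also steal idle barriers keyed to other timelines, but the predicate has the same shape as is_idle_barrier() above:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node node;
	u64 key;
	bool busy;
};

static inline bool demo_is_idle(const struct demo_node *it, u64 key)
{
	return it->key == key && !it->busy;
}

static struct demo_node *demo_reuse_idle(struct rb_root *tree, u64 key)
{
	struct rb_node *p;

	for (p = rb_first(tree); p; p = rb_next(p)) {
		struct demo_node *it = rb_entry(p, struct demo_node, node);

		if (demo_is_idle(it, key)) {
			rb_erase(&it->node, tree);	/* claim it for reuse */
			return it;
		}
	}

	return NULL;
}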
871 * Preallocate a node for each physical engine supporting the target
880 struct active_node *node;
883 node = reuse_idle_barrier(ref, idx);
885 if (!node) {
886 node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
887 if (!node)
890 RCU_INIT_POINTER(node->base.fence, NULL);
891 node->base.cb.func = node_retire;
892 node->timeline = idx;
893 node->ref = ref;
896 if (!i915_active_fence_isset(&node->base)) {
898 * Mark this as being *our* unconnected proto-node.
900 * Since this node is not in any list, and we have
902 * request to indicate this is an idle-barrier node
906 RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
907 node->base.cb.node.prev = (void *)engine;
910 GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
912 GEM_BUG_ON(barrier_to_engine(node) != engine);
913 first = barrier_to_ll(node);
927 struct active_node *node = barrier_from_ll(first);
932 intel_engine_pm_put(barrier_to_engine(node));
934 kmem_cache_free(slab_cache, node);
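A freshly allocated barrier is marked as a proto-node by planting ERR_PTR(-EAGAIN) in its fence slot, which later checks can distinguish from both NULL (unused) and a real fence (in use). A sketch of that sentinel pattern with hypothetical demo_slot/demo_fence names:

#include <linux/err.h>
#include <linux/rcupdate.h>

struct demo_fence;

struct demo_slot {
	struct demo_fence __rcu *fence;
};

static void demo_mark_proto(struct demo_slot *slot)
{
	/* no real fence yet; flag the slot as a reserved proto-node */
	RCU_INIT_POINTER(slot->fence, ERR_PTR(-EAGAIN));
}

static bool demo_is_proto(struct demo_slot *slot)
{
	return rcu_access_pointer(slot->fence) == ERR_PTR(-EAGAIN);
}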
953 struct active_node *node = barrier_from_ll(pos);
954 struct intel_engine_cs *engine = barrier_to_engine(node);
966 it = rb_entry(parent, struct active_node, node);
967 if (it->timeline < node->timeline)
972 rb_link_node(&node->node, parent, p);
973 rb_insert_color(&node->node, &ref->tree);
977 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
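i915_active_acquire_barrier() gives each barrier two homes at once: the rbtree for keyed lookup and the engine's lock-free barrier_tasks list for later consumption. A sketch of that dual bookkeeping using a hypothetical demo_barrier that embeds separate link fields (the driver instead reuses the callback list_head, as shown earlier):

#include <linux/rbtree.h>
#include <linux/llist.h>
#include <linux/types.h>

struct demo_barrier {
	struct rb_node node;	/* keyed lookup */
	struct llist_node ll;	/* pending-work queue */
	u64 key;
};

static void demo_publish(struct rb_root *tree, struct llist_head *pending,
			 struct demo_barrier *b)
{
	struct rb_node **p = &tree->rb_node, *parent = NULL;

	while (*p) {
		struct demo_barrier *it;

		parent = *p;
		it = rb_entry(parent, struct demo_barrier, node);
		if (it->key < b->key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&b->node, parent, p);
	rb_insert_color(&b->node, tree);

	/* hand the barrier to the consumer; llist_add is lock-free */
	llist_add(&b->ll, pending);
}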
982 static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
984 return __active_fence_slot(&barrier_from_ll(node)->base);
990 struct llist_node *node, *next;
997 node = llist_del_all(&engine->barrier_tasks);
998 if (!node)
1006 llist_for_each_safe(node, next, node) {
1008 smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
1009 list_add_tail((struct list_head *)node, &rq->fence.cb_list);
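The drain side takes the entire llist in one atomic llist_del_all(), then walks it and appends each entry to an ordinary doubly-linked list under the consumer's own locking (here the request's cb_list). A sketch with hypothetical demo_* names, again embedding separate link fields instead of punning one into the other:

#include <linux/llist.h>
#include <linux/list.h>

struct demo_cb {
	struct llist_node ll;	/* while waiting on the lock-free list */
	struct list_head entry;	/* once adopted by the consumer */
};

static void demo_adopt_all(struct llist_head *pending,
			   struct list_head *cb_list)
{
	struct llist_node *node, *next;

	node = llist_del_all(pending);	/* grab the whole chain at once */
	if (!node)
		return;

	llist_for_each_safe(node, next, node) {
		struct demo_cb *cb = llist_entry(node, struct demo_cb, ll);

		list_add_tail(&cb->entry, cb_list);
	}
}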
1109 __list_del_entry(&active->cb.node);
1112 list_add_tail(&active->cb.node, &fence->cb_list);
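The final two matches unlink a callback from the previous fence's cb_list and append it to the new fence's. Taken together, __list_del_entry() plus list_add_tail() is what list_move_tail() does in one call; the driver open-codes it because each half runs under a different fence's lock. A sketch with a hypothetical helper name:

#include <linux/list.h>

static void demo_move_cb(struct list_head *cb, struct list_head *new_list)
{
	__list_del_entry(cb);		/* unlink from the old cb_list */
	list_add_tail(cb, new_list);	/* append to the new cb_list */
}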