Lines Matching refs:it
133 struct active_node *it, *n;
171 /* ... except if you wait on it, you must manage your own references! */
175 rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
176 GEM_BUG_ON(i915_active_fence_isset(&it->base));
177 kmem_cache_free(global.slab_cache, it);
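
The matches above come from the retire path, where the detached tree is torn down node by node. A minimal sketch of the postorder-free idiom it relies on, assuming a hypothetical my_node type and my_cache slab rather than the real i915 structures:

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical node type for illustration; not from i915_active.c. */
struct my_node {
	struct rb_node node;
	u64 timeline;
};

static void free_tree_sketch(struct rb_root *root, struct kmem_cache *my_cache)
{
	struct my_node *it, *n;

	/*
	 * Post-order visits children before their parent, so each node can
	 * be freed without touching the (now stale) tree links again.
	 */
	rbtree_postorder_for_each_entry_safe(it, n, root, node)
		kmem_cache_free(my_cache, it);

	*root = RB_ROOT;
}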
239 struct active_node *it;
246 * at all. We can reuse the last slot if it is empty, that is
247 * after the previous activity has been retired, or if it matches the
250 it = READ_ONCE(ref->cache);
251 if (it) {
252 u64 cached = READ_ONCE(it->timeline);
256 return it;
264 * idx. If, and only if, the timeline is currently zero is it
265 * worth competing to claim it atomically for ourselves (for
269 if (!cached && !cmpxchg(&it->timeline, 0, idx))
270 return it;
274 BUILD_BUG_ON(offsetof(typeof(*it), node));
279 it = fetch_node(ref->tree.rb_node);
280 while (it) {
281 if (it->timeline < idx) {
282 it = fetch_node(it->node.rb_right);
283 } else if (it->timeline > idx) {
284 it = fetch_node(it->node.rb_left);
286 WRITE_ONCE(ref->cache, it);
292 return it;
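
Taken together, the __active_lookup() matches above describe a two-step lookup: try the single cached slot first, reusing it when it already carries the wanted timeline or claiming it with cmpxchg() while its timeline is still zero, then fall back to a plain binary search of the rbtree keyed by timeline. A condensed restatement of that flow, using the names from the matches (fetch_node() is a helper from the same file); this is a sketch, not the exact function:

static struct active_node *lookup_sketch(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	/* Fast path: a single cached slot shared by all lookups. */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		if (cached == idx)
			return it;

		/* An empty slot (timeline == 0) may be claimed atomically. */
		if (!cached && !cmpxchg(&it->timeline, 0, idx))
			return it;
	}

	/* Slow path: ordinary binary search, keyed by timeline. */
	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it); /* remember the hit */
			break;
		}
	}

	return it; /* NULL if no node tracks this timeline yet */
}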
431 * we can use it to substitute for the pending idle-barrier
493 struct active_node *it;
495 it = __active_lookup(ref, idx);
496 if (unlikely(!it)) { /* Contention with parallel tree builders! */
498 it = __active_lookup(ref, idx);
501 GEM_BUG_ON(!it); /* slot must be preallocated */
503 return &it->base;
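
These matches show the retry-on-contention shape used when handing a fence slot back to the caller: if the lockless lookup races with concurrent tree builders, the lookup is simply repeated once those builders have been serialised against (the non-matching lines, not shown by the search, most likely take the tree lock for that). A sketch of the shape, with a hypothetical helper standing in for whatever the omitted lines actually do:

static struct i915_active_fence *fence_slot_sketch(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	it = __active_lookup(ref, idx);
	if (unlikely(!it)) { /* contention with parallel tree builders */
		/* hypothetical stand-in for the omitted serialisation */
		serialise_with_tree_builders(ref);
		it = __active_lookup(ref, idx);
	}

	GEM_BUG_ON(!it); /* the slot must have been preallocated earlier */
	return &it->base;
}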
600 static int flush_barrier(struct active_node *it)
604 if (likely(!is_barrier(&it->base)))
607 engine = __barrier_to_engine(it);
609 if (!is_barrier(&it->base))
617 struct active_node *it, *n;
621 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
622 err = flush_barrier(it); /* unconnected idle barrier? */
626 enable_signaling(&it->base);
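
This group is the lazy-signalling flush: every node in the tree is visited, any still-unconnected idle barrier is flushed first, and signalling is then enabled on the node's fence, bailing out on the first error. Restated as a whole loop (a sketch; the surrounding function may do more than this):

static int flush_lazy_signals_sketch(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}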
740 struct active_node *it, *n;
742 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
743 err = __await_active(&it->base, fn, arg);
866 * the barrier before we claim it, so we have to check
932 * decoupled it from the rbtree, we can reuse the
993 struct active_node *it;
997 it = rb_entry(parent, struct active_node, node);
998 if (it->timeline < node->timeline)
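
The insertion matches compare timelines on the way down the tree to pick the new node's slot. The standard kernel idiom that completes that descent is sketched below (simplified; in the real code this runs with the tree suitably locked):

static void insert_sketch(struct rb_root *root, struct active_node *node)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	struct active_node *it;

	/* Binary descent: ordered by timeline, ties placed to the left. */
	while (*p) {
		parent = *p;
		it = rb_entry(parent, struct active_node, node);
		if (it->timeline < node->timeline)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
}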
1054 * that it is executed before the new fence. To ensure that the order of
1055 * fences within the timeline of the i915_active_fence is understood, it
1067 * SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
1076 * first, so if we succeed and pass it back to our user then it is not
1078 * user has a chance to set up an await dependency on it.
1092 * then it has locked C first (before B).
1103 * A does the cmpxchg first, and so it sees C or NULL, as before, or
1109 * active->fence, locks it as soon as A completes, and possibly
1129 * and we know that we are first on the timeline. If it is still
1131 * serialise with the interrupt handler, in the process of removing it
1135 * As B is second, it sees A as the previous fence and so waits for
1136 * it to complete its transition and takes over the occupancy for
1137 * itself -- remembering that it needs to wait on A before executing.
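
The last group is from the long ordering comment in __i915_active_fence_set(): when two threads race to install their fences into the same tracking slot, whoever swaps in later must still order itself after the fence it displaced, and the interrupt handler removing a completed fence has to be serialised against via the fence locks. The locking dance is what the comment spells out; the core "swap in the new fence, then depend on the previous occupant" idea can be illustrated in isolation like this (deliberately simplified, with a hypothetical slot type and none of the RCU or fence-lock handling the real code needs):

#include <linux/atomic.h>
#include <linux/dma-fence.h>

/* Hypothetical slot for illustration; the real slot is the RCU-protected active->fence. */
struct slot_sketch {
	struct dma_fence *fence;
};

static struct dma_fence *swap_in_sketch(struct slot_sketch *slot,
					struct dma_fence *fence)
{
	struct dma_fence *prev;

	/* Publish the new fence; whichever fence we displaced is handed back. */
	prev = xchg(&slot->fence, fence);

	/*
	 * Per the comment above: the caller (thread B) must not execute before
	 * prev (thread A's fence, or C) has completed, so it records a wait on
	 * prev before dropping its reference to it.
	 */
	return prev;
}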