Lines Matching refs:it

129 	struct active_node *it, *n;
166 /* ... except if you wait on it, you must manage your own references! */
170 rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
171 GEM_BUG_ON(i915_active_fence_isset(&it->base));
172 kmem_cache_free(slab_cache, it);
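
The matches at lines 129 and 170-172 are the release path: the tree is walked in postorder so every child is visited before its parent, letting each entry be returned to its slab as it is reached. A minimal sketch of that pattern, with hypothetical names (struct example_node, example_slab) standing in for the i915 types:

	#include <linux/rbtree.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_node {
		struct rb_node node;	/* the 'field' passed to the iterator */
		u64 timeline;
	};

	static void example_free_all(struct rb_root *root,
				     struct kmem_cache *example_slab)
	{
		struct example_node *it, *n;

		/* postorder: children are freed before their parent is visited */
		rbtree_postorder_for_each_entry_safe(it, n, root, node)
			kmem_cache_free(example_slab, it);

		*root = RB_ROOT;	/* every node is gone; reset the root */
	}
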
234 struct active_node *it;
241 * at all. We can reuse the last slot if it is empty, that is
242 * after the previous activity has been retired, or if it matches the
245 it = READ_ONCE(ref->cache);
246 if (it) {
247 u64 cached = READ_ONCE(it->timeline);
251 return it;
258 * idx. If, and only if, the timeline is currently zero is it
259 * worth competing to claim it atomically for ourselves (for
263 if (!cached && !cmpxchg64(&it->timeline, 0, idx))
264 return it;
267 BUILD_BUG_ON(offsetof(typeof(*it), node));
272 it = fetch_node(ref->tree.rb_node);
273 while (it) {
274 if (it->timeline < idx) {
275 it = fetch_node(it->node.rb_right);
276 } else if (it->timeline > idx) {
277 it = fetch_node(it->node.rb_left);
279 WRITE_ONCE(ref->cache, it);
285 return it;
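
Lines 234-285 are the lookup fast path: consult a lockless cache pointer first, try to claim an idle cached slot (timeline == 0) atomically with cmpxchg64(), and only then fall back to a binary search of the rbtree keyed by timeline, caching any hit for the next caller. A simplified sketch under those assumptions (hypothetical struct example_ref/example_node, not the exact i915 function):

	#include <linux/rbtree.h>
	#include <linux/atomic.h>
	#include <linux/compiler.h>

	struct example_node {
		struct rb_node node;
		u64 timeline;		/* 0 means the slot is idle and reusable */
	};

	struct example_ref {
		struct example_node *cache;	/* last node returned */
		struct rb_root tree;
	};

	static struct example_node *
	example_lookup(struct example_ref *ref, u64 idx)
	{
		struct example_node *it;
		struct rb_node *rb;

		/* Fast path: the previously used slot. */
		it = READ_ONCE(ref->cache);
		if (it) {
			u64 cached = READ_ONCE(it->timeline);

			if (cached == idx)
				return it;

			/*
			 * An idle slot reads back 0; claim it atomically so
			 * that concurrent lookups cannot also take it.
			 */
			if (!cached && !cmpxchg64(&it->timeline, 0, idx))
				return it;
		}

		/* Slow path: binary search, the tree is ordered by timeline. */
		rb = READ_ONCE(ref->tree.rb_node);
		while (rb) {
			it = rb_entry(rb, struct example_node, node);
			if (it->timeline < idx) {
				rb = READ_ONCE(rb->rb_right);
			} else if (it->timeline > idx) {
				rb = READ_ONCE(rb->rb_left);
			} else {
				WRITE_ONCE(ref->cache, it); /* remember the hit */
				return it;
			}
		}

		return NULL;
	}
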
421 * we can use it to substitute for the pending idle-barrier
569 static int flush_barrier(struct active_node *it)
573 if (likely(!is_barrier(&it->base)))
576 engine = __barrier_to_engine(it);
578 if (!is_barrier(&it->base))
586 struct active_node *it, *n;
590 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
591 err = flush_barrier(it); /* unconnected idle barrier? */
595 enable_signaling(&it->base);
709 struct active_node *it, *n;
711 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
712 err = __await_active(&it->base, fn, arg);
835 * the barrier before we claim it, so we have to check
901 * decoupled it from the rbtree, we can reuse the
962 struct active_node *it;
966 it = rb_entry(parent, struct active_node, node);
967 if (it->timeline < node->timeline)
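
Lines 962-967 are the matching insertion walk: descend from the root comparing timelines to find the link point. Completed with the standard rb_link_node()/rb_insert_color() pair, the pattern looks roughly like this (same hypothetical example_node as above, not the i915 function itself):

	static void example_insert(struct rb_root *root,
				   struct example_node *new)
	{
		struct rb_node **p = &root->rb_node, *parent = NULL;
		struct example_node *it;

		while (*p) {
			parent = *p;
			it = rb_entry(parent, struct example_node, node);
			if (it->timeline < new->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}

		rb_link_node(&new->node, parent, p);
		rb_insert_color(&new->node, root);
	}
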
1023 * that it is executed before the new fence. To ensure that the order of
1024 * fences within the timeline of the i915_active_fence is understood, it
1036 * SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
1045 * first, so if we succeed and pass it back to our user then it is not
1047 * user has a chance to set up an await dependency on it.
1061 * then it has locked C first (before B).
1072 * A does the cmpxchg first, and so it sees C or NULL, as before, or
1078 * active->fence, locks it as soon as A completes, and possibly
1098 * and we know that we are first on the timeline. If it is still
1100 * serialise with the interrupt handler, in the process of removing it
1104 * As B is second, it sees A as the previous fence and so waits for
1105 * it to complete its transition and takes over the occupancy for
1106 * itself -- remembering that it needs to wait on A before executing.
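
The comments at lines 1023-1106 describe the lock-free handoff between fences on the same timeline: whichever thread installs its fence first sees the previous occupant (or NULL), and the later arrival must make its own work wait on that previous fence before executing. A very rough, hypothetical sketch of that rule (example_fence_set() and example_await() are made-up names, not the i915 API):

	#include <linux/dma-fence.h>

	static struct dma_fence *
	example_fence_set(struct dma_fence **slot, struct dma_fence *fence)
	{
		/*
		 * Atomically take over the slot; the previous occupant (a
		 * still-active fence, or NULL if the slot was idle) is
		 * returned so the caller can order itself after it.
		 */
		return xchg(slot, fence);
	}

	/*
	 * Usage, following the A/B/C description above: B exchanges second,
	 * sees A as the previous fence, and must await A before executing,
	 * which preserves the order of fences within the timeline.
	 *
	 *	prev = example_fence_set(&slot, b);
	 *	if (prev)
	 *		example_await(b, prev);	(hypothetical helper)
	 */
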