Lines matching refs: ref  (drivers/gpu/drm/i915/i915_active.c)
33 struct i915_active *ref;
81 struct i915_active *ref = addr;
83 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
91 static void debug_active_init(struct i915_active *ref)
93 debug_object_init(ref, &active_debug_desc);
96 static void debug_active_activate(struct i915_active *ref)
98 lockdep_assert_held(&ref->tree_lock);
99 debug_object_activate(ref, &active_debug_desc);
102 static void debug_active_deactivate(struct i915_active *ref)
104 lockdep_assert_held(&ref->tree_lock);
105 if (!atomic_read(&ref->count)) /* after the last dec */
106 debug_object_deactivate(ref, &active_debug_desc);
109 static void debug_active_fini(struct i915_active *ref)
111 debug_object_free(ref, &active_debug_desc);
114 static void debug_active_assert(struct i915_active *ref)
116 debug_object_assert_init(ref, &active_debug_desc);
121 static inline void debug_active_init(struct i915_active *ref) { }
122 static inline void debug_active_activate(struct i915_active *ref) { }
123 static inline void debug_active_deactivate(struct i915_active *ref) { }
124 static inline void debug_active_fini(struct i915_active *ref) { }
125 static inline void debug_active_assert(struct i915_active *ref) { }
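The hooks above (lines 91-125) are the debug-objects instrumentation, with empty stubs when the option is disabled. A minimal sketch of how such hooks are registered with the debug-objects core, assuming the generic struct debug_obj_descr API; the descriptor body is not part of the matches and the name string is illustrative:

static const struct debug_obj_descr active_debug_desc = {
        .name = "i915_active",                  /* illustrative label */
        .debug_hint = active_debug_hint,        /* the line-81/83 helper above */
};
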
130 __active_retire(struct i915_active *ref)
136 GEM_BUG_ON(i915_active_is_idle(ref));
139 if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
142 GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
143 debug_active_deactivate(ref);
146 if (!ref->cache)
147 ref->cache = fetch_node(ref->tree.rb_node);
150 if (ref->cache) {
152 rb_erase(&ref->cache->node, &ref->tree);
153 root = ref->tree;
156 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
157 rb_insert_color(&ref->cache->node, &ref->tree);
158 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
162 ref->cache->timeline = 0; /* needs cmpxchg(u64) */
165 spin_unlock_irqrestore(&ref->tree_lock, flags);
168 if (ref->retire)
169 ref->retire(ref);
172 wake_up_var(ref);
184 struct i915_active *ref = container_of(wrk, typeof(*ref), work);
186 GEM_BUG_ON(!atomic_read(&ref->count));
187 if (atomic_add_unless(&ref->count, -1, 1))
190 __active_retire(ref);
194 active_retire(struct i915_active *ref)
196 GEM_BUG_ON(!atomic_read(&ref->count));
197 if (atomic_add_unless(&ref->count, -1, 1))
200 if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
201 queue_work(system_unbound_wq, &ref->work);
205 __active_retire(ref);
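Lines 130-205 form the retire path: only the thread that drops the final count takes ref->tree_lock, and a ->retire() callback that may sleep is deferred to active_work() on system_unbound_wq. A condensed sketch of that flow, with the tree-pruning details elided and the struct layout assumed from the matches:

static void retire_sketch(struct i915_active *ref)
{
        unsigned long flags;

        /* Not the last reference: just decrement and leave. */
        if (atomic_add_unless(&ref->count, -1, 1))
                return;

        /* Sleeping retire callbacks run from the worker instead. */
        if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
                queue_work(system_unbound_wq, &ref->work); /* -> active_work() */
                return;
        }

        /* Only the final decrement acquires tree_lock. */
        if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                return;

        /* ... prune ref->tree, keeping ref->cache as a reusable idle node ... */
        spin_unlock_irqrestore(&ref->tree_lock, flags);

        if (ref->retire)
                ref->retire(ref);       /* caller-supplied callback */
        wake_up_var(ref);               /* wake __i915_active_wait() sleepers */
}
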
227 active_retire(container_of(cb, struct active_node, base.cb)->ref);
237 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
250 it = READ_ONCE(ref->cache);
277 GEM_BUG_ON(i915_active_is_idle(ref));
279 it = fetch_node(ref->tree.rb_node);
286 WRITE_ONCE(ref->cache, it);
296 active_instance(struct i915_active *ref, u64 idx)
301 node = __active_lookup(ref, idx);
310 spin_lock_irq(&ref->tree_lock);
311 GEM_BUG_ON(i915_active_is_idle(ref));
314 p = &ref->tree.rb_node;
332 node->ref = ref;
336 rb_insert_color(&node->node, &ref->tree);
339 WRITE_ONCE(ref->cache, node);
340 spin_unlock_irq(&ref->tree_lock);
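__active_lookup() (line 237) and active_instance() (line 296) implement a per-timeline rbtree with a single-entry ref->cache fast path; allocation of a missing node happens under tree_lock. A simplified sketch of the lockless lookup implied by the matches, with fetch_node() and the active_node fields taken from context and the cache-reset corner case (timeline forced to 0 on idle, line 162) left out:

static struct active_node *lookup_sketch(struct i915_active *ref, u64 idx)
{
        struct active_node *it;

        /* Hot path: callers tend to hit the same timeline repeatedly. */
        it = READ_ONCE(ref->cache);
        if (it && it->timeline == idx)
                return it;

        /* Slow path: walk the rbtree keyed by timeline index. */
        it = fetch_node(ref->tree.rb_node);
        while (it && it->timeline != idx)
                it = fetch_node(it->timeline < idx ?
                                it->node.rb_right : it->node.rb_left);
        if (it)
                WRITE_ONCE(ref->cache, it);     /* remember the hit */

        /* NULL here means active_instance() must allocate under tree_lock. */
        return it;
}
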
345 void __i915_active_init(struct i915_active *ref,
346 int (*active)(struct i915_active *ref),
347 void (*retire)(struct i915_active *ref),
353 debug_active_init(ref);
355 ref->flags = 0;
356 ref->active = active;
357 ref->retire = ptr_unpack_bits(retire, &bits, 2);
359 ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
361 spin_lock_init(&ref->tree_lock);
362 ref->tree = RB_ROOT;
363 ref->cache = NULL;
365 init_llist_head(&ref->preallocated_barriers);
366 atomic_set(&ref->count, 0);
367 __mutex_init(&ref->mutex, "i915_active", mkey);
368 __i915_active_fence_init(&ref->excl, NULL, excl_retire);
369 INIT_WORK(&ref->work, active_work);
371 lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
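__i915_active_init() (line 345) wires in two optional callbacks: ->active() runs when the first reference is taken and ->retire() after the last one is dropped; the bits unpacked from the retire pointer at line 357 are what set I915_ACTIVE_RETIRE_SLEEPS at line 359. A hedged sketch of what a user supplies, with my_obj and its callbacks purely hypothetical and the i915_active_init() wrapper shown only in a comment because its argument count differs between kernel versions:

struct my_obj {
        struct i915_active active;      /* hypothetical embedding */
        /* ... backing resources ... */
};

static int my_obj_active(struct i915_active *ref)
{
        /* first reference: pin backing resources, may fail */
        return 0;
}

static void my_obj_retire(struct i915_active *ref)
{
        struct my_obj *obj = container_of(ref, struct my_obj, active);

        /* last reference dropped: release whatever ->active() pinned */
        (void)obj;
}

/*
 * Hook-up, assuming the three-argument form implied by "ref->flags = 0" at
 * line 355 (later kernels add a flags argument):
 *
 *      i915_active_init(&obj->active, my_obj_active, my_obj_retire);
 */
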
375 static bool ____active_del_barrier(struct i915_active *ref,
418 __active_del_barrier(struct i915_active *ref, struct active_node *node)
420 return ____active_del_barrier(ref, node, barrier_to_engine(node));
424 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
434 return __active_del_barrier(ref, node_from_active(active));
437 int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
443 err = i915_active_acquire(ref);
448 active = active_instance(ref, idx);
454 if (replace_barrier(ref, active)) {
456 atomic_dec(&ref->count);
462 __i915_active_acquire(ref);
467 i915_active_release(ref);
472 __i915_active_set_fence(struct i915_active *ref,
478 if (replace_barrier(ref, active)) {
485 __i915_active_acquire(ref);
491 __active_fence(struct i915_active *ref, u64 idx)
495 it = __active_lookup(ref, idx);
497 spin_lock_irq(&ref->tree_lock);
498 it = __active_lookup(ref, idx);
499 spin_unlock_irq(&ref->tree_lock);
507 __i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
510 return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
514 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
517 return __i915_active_set_fence(ref, &ref->excl, f);
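i915_active_set_exclusive() (line 514) stores a single write fence in ref->excl, while i915_active_ref()/__i915_active_ref() track one fence per timeline index. A usage sketch with illustrative names only, assuming the usual vma->active, rq->fence and tl->fence_context fields:

static int track_access_sketch(struct i915_vma *vma,
                               struct intel_timeline *tl,
                               struct i915_request *rq)
{
        int err;

        /* per-timeline (read) tracking: one node keyed by tl->fence_context */
        err = i915_active_ref(&vma->active, tl->fence_context, &rq->fence);
        if (err)
                return err;

        /* single exclusive slot for the most recent write */
        i915_active_set_exclusive(&vma->active, &rq->fence);
        return 0;
}
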
520 bool i915_active_acquire_if_busy(struct i915_active *ref)
522 debug_active_assert(ref);
523 return atomic_add_unless(&ref->count, 1, 0);
526 static void __i915_active_activate(struct i915_active *ref)
528 spin_lock_irq(&ref->tree_lock); /* __active_retire() */
529 if (!atomic_fetch_inc(&ref->count))
530 debug_active_activate(ref);
531 spin_unlock_irq(&ref->tree_lock);
534 int i915_active_acquire(struct i915_active *ref)
538 if (i915_active_acquire_if_busy(ref))
541 if (!ref->active) {
542 __i915_active_activate(ref);
546 err = mutex_lock_interruptible(&ref->mutex);
550 if (likely(!i915_active_acquire_if_busy(ref))) {
551 err = ref->active(ref);
553 __i915_active_activate(ref);
556 mutex_unlock(&ref->mutex);
561 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
566 err = i915_active_acquire(ref);
570 active = active_instance(ref, idx);
572 i915_active_release(ref);
576 return 0; /* return with active ref */
579 void i915_active_release(struct i915_active *ref)
581 debug_active_assert(ref);
582 active_retire(ref);
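i915_active_acquire() (line 534) and i915_active_release() (line 579) bracket a period of use: the 0 -> 1 transition runs ref->active() under ref->mutex, and the matching release feeds back into the retire path sketched earlier; i915_active_ref() itself follows the same pattern internally (lines 443-467). A caller-side sketch:

static int use_tracker_sketch(struct i915_active *ref,
                              u64 idx, struct dma_fence *fence)
{
        int err;

        err = i915_active_acquire(ref);         /* may call ref->active() */
        if (err)
                return err;

        /* keep the tracker busy until @fence signals on timeline @idx */
        err = i915_active_ref(ref, idx, fence);

        i915_active_release(ref);               /* may kick active_retire() */
        return err;
}
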
615 static int flush_lazy_signals(struct i915_active *ref)
620 enable_signaling(&ref->excl);
621 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
632 int __i915_active_wait(struct i915_active *ref, int state)
637 if (i915_active_acquire_if_busy(ref)) {
640 err = flush_lazy_signals(ref);
641 i915_active_release(ref);
645 if (___wait_var_event(ref, i915_active_is_idle(ref),
654 flush_work(&ref->work);
682 struct i915_active *ref;
690 if (i915_active_is_idle(wb->ref)) {
699 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
707 GEM_BUG_ON(i915_active_is_idle(ref));
716 wb->ref = ref;
718 add_wait_queue(__var_waitqueue(ref), &wb->base);
722 static int await_active(struct i915_active *ref,
729 if (!i915_active_acquire_if_busy(ref))
733 rcu_access_pointer(ref->excl.fence)) {
734 err = __await_active(&ref->excl, fn, arg);
742 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
750 err = flush_lazy_signals(ref);
754 err = __await_barrier(ref, barrier);
760 i915_active_release(ref);
770 struct i915_active *ref,
773 return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
783 struct i915_active *ref,
786 return await_active(ref, flags, sw_await_fence, fence, fence);
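Both wrappers reuse await_active() (line 722) with a different callback: one makes an i915_request wait, the other an i915_sw_fence. A one-line usage sketch; the flag name is recalled from i915_active.h rather than taken from these matches, so treat it as an assumption:

/* make @rq wait on the tracker's exclusive (write) fence, if any is set */
err = i915_request_await_active(rq, &vma->active, I915_ACTIVE_AWAIT_EXCL);
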
789 void i915_active_fini(struct i915_active *ref)
791 debug_active_fini(ref);
792 GEM_BUG_ON(atomic_read(&ref->count));
793 GEM_BUG_ON(work_pending(&ref->work));
794 mutex_destroy(&ref->mutex);
796 if (ref->cache)
797 kmem_cache_free(global.slab_cache, ref->cache);
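__i915_active_wait() (line 632) flushes lazy signaling, sleeps on the wake_up_var(ref) wait queue until the count reaches zero, then flushes the retire worker; i915_active_fini() (line 789) insists the tracker is already idle. An end-of-life sketch, reusing the hypothetical my_obj from the init sketch above and the interruptible i915_active_wait() wrapper from i915_active.h:

static void teardown_sketch(struct my_obj *obj)
{
        /* wait for every tracked fence; may return an error if interrupted */
        if (i915_active_wait(&obj->active))
                return;                         /* caller decides how to retry */

        i915_active_fini(&obj->active);         /* count must now be zero */
}
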
805 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
809 if (RB_EMPTY_ROOT(&ref->tree))
812 GEM_BUG_ON(i915_active_is_idle(ref));
821 if (ref->cache && is_idle_barrier(ref->cache, idx)) {
822 p = &ref->cache->node;
827 p = ref->tree.rb_node;
872 ____active_del_barrier(ref, node, engine))
879 spin_lock_irq(&ref->tree_lock);
880 rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
881 if (p == &ref->cache->node)
882 WRITE_ONCE(ref->cache, NULL);
883 spin_unlock_irq(&ref->tree_lock);
888 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
895 GEM_BUG_ON(i915_active_is_idle(ref));
898 while (!llist_empty(&ref->preallocated_barriers))
914 node = reuse_idle_barrier(ref, idx);
924 node->ref = ref;
939 __i915_active_acquire(ref);
951 GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
952 llist_add_batch(first, last, &ref->preallocated_barriers);
962 atomic_dec(&ref->count);
970 void i915_active_acquire_barrier(struct i915_active *ref)
975 GEM_BUG_ON(i915_active_is_idle(ref));
983 llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
988 spin_lock_irqsave_nested(&ref->tree_lock, flags,
991 p = &ref->tree.rb_node;
1004 rb_insert_color(&node->node, &ref->tree);
1005 spin_unlock_irqrestore(&ref->tree_lock, flags);
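The barrier machinery (lines 805-1005) keeps the tracker busy for requests that do not exist yet: i915_active_acquire_preallocate_barrier() reserves, or recycles via reuse_idle_barrier(), idle nodes and parks them on ref->preallocated_barriers and the engine's barrier list, and i915_active_acquire_barrier() later publishes those nodes into ref->tree. A hedged sketch of the caller sequence, with the engine argument and error handling kept minimal:

static int barrier_sketch(struct i915_active *ref,
                          struct intel_engine_cs *engine)
{
        int err;

        /* reserve idle barrier nodes up front, while we may still fail */
        err = i915_active_acquire_preallocate_barrier(ref, engine);
        if (err)
                return err;

        /* ... point of no return for the caller ... */

        /* cannot fail: move the preallocated nodes into ref->tree */
        i915_active_acquire_barrier(ref);
        return 0;
}
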
1172 struct kref ref;
1175 struct i915_active *i915_active_get(struct i915_active *ref)
1177 struct auto_active *aa = container_of(ref, typeof(*aa), base);
1179 kref_get(&aa->ref);
1183 static void auto_release(struct kref *ref)
1185 struct auto_active *aa = container_of(ref, typeof(*aa), ref);
1191 void i915_active_put(struct i915_active *ref)
1193 struct auto_active *aa = container_of(ref, typeof(*aa), base);
1195 kref_put(&aa->ref, auto_release);
1198 static int auto_active(struct i915_active *ref)
1200 i915_active_get(ref);
1205 auto_retire(struct i915_active *ref)
1207 i915_active_put(ref);
1218 kref_init(&aa->ref);
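
The final group (lines 1172-1218) wraps an i915_active in a kref so it can be allocated dynamically: i915_active_create() pairs the line-1218 kref_init() with auto_active()/auto_retire() callbacks that take and drop that kref, exposed through i915_active_get()/i915_active_put(). A usage sketch, with the fence argument and the nonzero timeline index purely illustrative:

static int auto_tracker_sketch(struct dma_fence *fence)
{
        struct i915_active *ref;
        int err;

        ref = i915_active_create();     /* allocates and kref_init()s the tracker */
        if (!ref)
                return -ENOMEM;

        /* auto_active() takes an extra kref while the tracker is busy */
        err = i915_active_ref(ref, 1 /* illustrative timeline index */, fence);

        i915_active_put(ref);           /* drop the allocation reference */
        return err;
}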