Lines matching refs: rq
48 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
112 struct i915_request *rq = to_request(fence);
121 i915_sw_fence_fini(&rq->submit);
122 i915_sw_fence_fini(&rq->semaphore);
128 * very careful in what rq->engine we poke. The virtual engine is
129 * referenced via the rq->context and we released that ref during
137 * not be unsubmitted again, so rq->engine and rq->execution_mask
138 * at this point is stable. rq->execution_mask will be a single
142 * power-of-two we assume that rq->engine may still be a virtual
152 * know that if the rq->execution_mask is a single bit, rq->engine
155 if (is_power_of_2(rq->execution_mask) &&
156 !cmpxchg(&rq->engine->request_pool, NULL, rq))
159 kmem_cache_free(global.slab_requests, rq);
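The i915_fence_release() fragment above parks a freed request in engine->request_pool, but only when rq->execution_mask is a single bit, i.e. the request is known to have finished on a physical engine whose pointer is stable, and it does so with a lock-free cmpxchg so each engine caches at most one spare request. Below is a minimal userspace sketch of that single-slot cache pattern, offered only as an illustration: struct slot_cache, cache_alloc() and cache_free() are invented names, with malloc()/free() standing in for the slab.

/* Single-slot object cache, modelled on the engine->request_pool trick. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj { int payload; };

struct slot_cache {
	_Atomic(struct obj *) slot;	/* holds at most one spare object */
};

static void cache_free(struct slot_cache *c, struct obj *o)
{
	struct obj *expected = NULL;

	/* Park the object only if the slot is empty, otherwise really free it;
	 * this mirrors !cmpxchg(&rq->engine->request_pool, NULL, rq) above. */
	if (!atomic_compare_exchange_strong(&c->slot, &expected, o))
		free(o);
}

static struct obj *cache_alloc(struct slot_cache *c)
{
	/* Claim whatever is parked (possibly NULL) and fall back to malloc();
	 * the driver's allocation slow path consumes its reserve the same way. */
	struct obj *o = atomic_exchange(&c->slot, NULL);

	return o ? o : malloc(sizeof(*o));
}
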
191 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
195 if (llist_empty(&rq->execute_cb))
199 llist_del_all(&rq->execute_cb),
204 static void __notify_execute_cb_irq(struct i915_request *rq)
206 __notify_execute_cb(rq, irq_work_queue);
215 static void __notify_execute_cb_imm(struct i915_request *rq)
217 __notify_execute_cb(rq, irq_work_imm);
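__notify_execute_cb() above detaches every pending callback in one llist_del_all() and hands each node to an irq_work queueing helper (deferred when called from the breadcrumb IRQ, run immediately otherwise). A rough userspace sketch of that detach-then-walk idiom on a lock-free list follows; struct cb_list, cb_add() and cb_notify_all() are invented names, and the callbacks run inline here rather than through irq_work.

/* Lock-free callback list: producers push, the notifier steals the lot. */
#include <stdatomic.h>
#include <stddef.h>

struct cb_node {
	struct cb_node *next;
	void (*fn)(struct cb_node *);
};

struct cb_list {
	_Atomic(struct cb_node *) head;
};

static void cb_add(struct cb_list *l, struct cb_node *n)
{
	n->next = atomic_load(&l->head);
	while (!atomic_compare_exchange_weak(&l->head, &n->next, n))
		;	/* on failure n->next was refreshed with the new head */
}

static void cb_notify_all(struct cb_list *l)
{
	/* Equivalent of llist_del_all(): one atomic exchange steals every
	 * pending callback, so producers never block the notifier. */
	struct cb_node *n = atomic_exchange(&l->head, NULL);

	while (n) {
		struct cb_node *next = n->next;

		n->fn(n);	/* the driver queues an irq_work here instead */
		n = next;
	}
}
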
233 static void __i915_request_fill(struct i915_request *rq, u8 val)
235 void *vaddr = rq->ring->vaddr;
238 head = rq->infix;
239 if (rq->postfix < head) {
240 memset(vaddr + head, val, rq->ring->size - head);
243 memset(vaddr + head, val, rq->postfix - head);
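__i915_request_fill() poisons the ring space owned by one request, from rq->infix up to rq->postfix, and splits the memset in two when that span wraps past the end of the ring. A small stand-alone sketch of the same wrap-aware fill; struct ring and ring_fill() are illustrative stand-ins.

#include <stdint.h>
#include <string.h>

struct ring {
	void *vaddr;
	uint32_t size;		/* size of the circular backing store */
};

static void ring_fill(struct ring *r, uint32_t head, uint32_t tail, uint8_t val)
{
	if (tail < head) {
		/* The span wraps: fill to the end, then restart at offset 0. */
		memset((uint8_t *)r->vaddr + head, val, r->size - head);
		head = 0;
	}
	memset((uint8_t *)r->vaddr + head, val, tail - head);
}
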
246 static void remove_from_engine(struct i915_request *rq)
252 * as their rq->engine pointer is not stable until under that
254 * check that the rq still belongs to the newly locked engine.
256 locked = READ_ONCE(rq->engine);
258 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
263 list_del_init(&rq->sched.link);
265 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
266 clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
269 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
273 __notify_execute_cb_imm(rq);
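remove_from_engine() has to cope with rq->engine being rewritten behind its back (virtual-engine migration) until the matching engine lock is held, so it locks whichever engine the request currently names, re-reads the pointer, and retries if it moved. A userspace sketch of that lock/re-check/retry idiom using pthread mutexes; struct engine, struct request and lock_request_engine() are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>

struct engine {
	pthread_mutex_t lock;
};

struct request {
	_Atomic(struct engine *) engine;	/* may change while unlocked */
};

static struct engine *lock_request_engine(struct request *rq)
{
	struct engine *locked, *now;

	/* The engine backing rq is only stable once we hold that engine's
	 * lock, so lock, re-read, and retry until the two agree. */
	locked = atomic_load(&rq->engine);
	pthread_mutex_lock(&locked->lock);
	while ((now = atomic_load(&rq->engine)) != locked) {
		pthread_mutex_unlock(&locked->lock);
		locked = now;
		pthread_mutex_lock(&locked->lock);
	}

	return locked;	/* caller drops locked->lock when finished */
}
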
276 bool i915_request_retire(struct i915_request *rq)
278 if (!i915_request_completed(rq))
281 RQ_TRACE(rq, "\n");
283 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
284 trace_i915_request_retire(rq);
285 i915_request_mark_complete(rq);
296 GEM_BUG_ON(!list_is_first(&rq->link,
297 &i915_request_timeline(rq)->requests));
300 __i915_request_fill(rq, POISON_FREE);
301 rq->ring->head = rq->postfix;
303 if (!i915_request_signaled(rq)) {
304 spin_lock_irq(&rq->lock);
305 dma_fence_signal_locked(&rq->fence);
306 spin_unlock_irq(&rq->lock);
309 if (i915_request_has_waitboost(rq)) {
310 GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
311 atomic_dec(&rq->engine->gt->rps.num_waiters);
324 remove_from_engine(rq);
325 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
327 __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
329 intel_context_exit(rq->context);
330 intel_context_unpin(rq->context);
332 free_capture_list(rq);
333 i915_sched_node_fini(&rq->sched);
334 i915_request_put(rq);
339 void i915_request_retire_upto(struct i915_request *rq)
341 struct intel_timeline * const tl = i915_request_timeline(rq);
344 RQ_TRACE(rq, "\n");
346 GEM_BUG_ON(!i915_request_completed(rq));
350 } while (i915_request_retire(tmp) && tmp != rq);
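i915_request_retire_upto() drains the timeline strictly in order: it keeps retiring the oldest request until either one refuses to retire (it has not completed) or the target has been processed. The sketch below mirrors that FIFO drain with invented types (struct req, struct timeline, req_retire()); like the driver, it assumes the target is still on the list.

#include <stdbool.h>

struct req {
	struct req *next;	/* singly linked FIFO, oldest first */
	bool completed;
};

struct timeline {
	struct req *oldest;
};

static bool req_retire(struct timeline *tl, struct req *r)
{
	if (!r->completed)
		return false;	/* retirement is strictly in completion order */

	tl->oldest = r->next;	/* unlink; the driver also releases resources */
	return true;
}

static void retire_upto(struct timeline *tl, struct req *target)
{
	struct req *tmp;

	do {
		tmp = tl->oldest;
	} while (req_retire(tl, tmp) && tmp != target);
}
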
361 struct i915_request * const *port, *rq;
408 (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
410 if (rq->context == signal->context) {
411 inflight = i915_seqno_passed(rq->fence.seqno,
422 __await_execution(struct i915_request *rq,
424 void (*hook)(struct i915_request *rq,
432 hook(rq, &signal->fence);
440 cb->fence = &rq->submit;
484 void __i915_request_skip(struct i915_request *rq)
486 GEM_BUG_ON(!fatal_error(rq->fence.error));
488 if (rq->infix == rq->postfix)
496 __i915_request_fill(rq, 0);
497 rq->infix = rq->postfix;
500 void i915_request_set_error_once(struct i915_request *rq, int error)
506 if (i915_request_signaled(rq))
509 old = READ_ONCE(rq->fence.error);
513 } while (!try_cmpxchg(&rq->fence.error, &old, error));
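i915_request_set_error_once() records an error on the fence only if no fatal error has been stored yet, using a try_cmpxchg() loop instead of a lock so racing setters cannot clobber the first fatal value. A compact C11 sketch of the same set-once idiom; set_error_once() and is_fatal() are stand-ins, and is_fatal() is simplified (the driver also whitelists a couple of transient errno values such as -EAGAIN from a reset).

#include <stdatomic.h>
#include <stdbool.h>

static bool is_fatal(int err)
{
	return err != 0;	/* simplified; see the note above */
}

static void set_error_once(_Atomic int *slot, int error)
{
	int old = atomic_load(slot);

	do {
		if (is_fatal(old))
			return;	/* first fatal error wins, keep it */
	} while (!atomic_compare_exchange_weak(slot, &old, error));
}
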
708 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
715 i915_request_put(rq);
724 struct i915_request *rq, *rn;
726 list_for_each_entry_safe(rq, rn, &tl->requests, link)
727 if (!i915_request_retire(rq))
736 struct i915_request *rq;
740 rq = xchg(rsvd, NULL);
741 if (!rq) /* Use the normal failure path for one final WARN */
744 return rq;
751 rq = list_first_entry(&tl->requests, typeof(*rq), link);
752 i915_request_retire(rq);
754 rq = kmem_cache_alloc(global.slab_requests,
756 if (rq)
757 return rq;
760 rq = list_last_entry(&tl->requests, typeof(*rq), link);
761 cond_synchronize_rcu(rq->rcustate);
772 struct i915_request *rq = arg;
774 spin_lock_init(&rq->lock);
775 i915_sched_node_init(&rq->sched);
776 i915_sw_fence_init(&rq->submit, submit_notify);
777 i915_sw_fence_init(&rq->semaphore, semaphore_notify);
779 rq->capture_list = NULL;
781 init_llist_head(&rq->execute_cb);
788 struct i915_request *rq;
826 rq = kmem_cache_alloc(global.slab_requests,
828 if (unlikely(!rq)) {
829 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
830 if (!rq) {
836 rq->context = ce;
837 rq->engine = ce->engine;
838 rq->ring = ce->ring;
839 rq->execution_mask = ce->engine->mask;
841 ret = intel_timeline_get_seqno(tl, rq, &seqno);
845 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
848 RCU_INIT_POINTER(rq->timeline, tl);
849 RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
850 rq->hwsp_seqno = tl->hwsp_seqno;
851 GEM_BUG_ON(i915_request_completed(rq));
853 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
856 i915_sw_fence_reinit(&i915_request_get(rq)->submit);
857 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
859 i915_sched_node_reinit(&rq->sched);
862 rq->batch = NULL;
863 GEM_BUG_ON(rq->capture_list);
864 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
878 rq->reserved_space =
879 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
887 rq->head = rq->ring->emit;
889 ret = rq->engine->request_alloc(rq);
893 rq->infix = rq->ring->emit; /* end of header; start of user payload */
896 list_add_tail_rcu(&rq->link, &tl->requests);
898 return rq;
901 ce->ring->emit = rq->head;
904 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
905 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
908 kmem_cache_free(global.slab_requests, rq);
917 struct i915_request *rq;
925 rq = list_first_entry(&tl->requests, typeof(*rq), link);
926 if (!list_is_last(&rq->link, &tl->requests))
927 i915_request_retire(rq);
930 rq = __i915_request_create(ce, GFP_KERNEL);
932 if (IS_ERR(rq))
936 rq->cookie = lockdep_pin_lock(&tl->mutex);
938 return rq;
942 return rq;
946 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
951 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
996 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
997 err = i915_sw_fence_await_dma_fence(&rq->submit,
1006 already_busywaiting(struct i915_request *rq)
1020 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1140 void (*hook)(struct i915_request *rq,
1210 static void mark_external(struct i915_request *rq)
1220 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1224 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1226 mark_external(rq);
1227 return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1228 i915_fence_context_timeout(rq->engine->i915,
1234 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1240 return __i915_request_await_external(rq, fence);
1246 err = __i915_request_await_external(rq, iter);
1250 err = i915_request_await_dma_fence(rq, chain->fence);
1260 i915_request_await_execution(struct i915_request *rq,
1262 void (*hook)(struct i915_request *rq,
1284 if (fence->context == rq->fence.context)
1293 ret = __i915_request_await_execution(rq,
1297 ret = i915_request_await_external(rq, fence);
1356 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1388 if (fence->context == rq->fence.context)
1393 intel_timeline_sync_is_later(i915_request_timeline(rq),
1398 ret = i915_request_await_request(rq, to_request(fence));
1400 ret = i915_request_await_external(rq, fence);
1406 intel_timeline_sync_set(i915_request_timeline(rq),
1476 __i915_request_add_to_timeline(struct i915_request *rq)
1478 struct intel_timeline *timeline = i915_request_timeline(rq);
1502 &rq->fence));
1510 GEM_BUG_ON(prev->context == rq->context &&
1512 rq->fence.seqno));
1514 if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
1515 i915_sw_fence_await_sw_fence(&rq->submit,
1517 &rq->submitq);
1519 __i915_sw_fence_await_dma_fence(&rq->submit,
1521 &rq->dmaq);
1522 if (rq->engine->schedule)
1523 __i915_sched_node_add_dependency(&rq->sched,
1525 &rq->dep,
1536 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1546 struct i915_request *__i915_request_commit(struct i915_request *rq)
1548 struct intel_engine_cs *engine = rq->engine;
1549 struct intel_ring *ring = rq->ring;
1552 RQ_TRACE(rq, "\n");
1559 GEM_BUG_ON(rq->reserved_space > ring->space);
1560 rq->reserved_space = 0;
1561 rq->emitted_jiffies = jiffies;
1569 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1571 rq->postfix = intel_ring_offset(rq, cs);
1573 return __i915_request_add_to_timeline(rq);
1576 void __i915_request_queue(struct i915_request *rq,
1590 if (attr && rq->engine->schedule)
1591 rq->engine->schedule(rq, attr);
1592 i915_sw_fence_commit(&rq->semaphore);
1593 i915_sw_fence_commit(&rq->submit);
1596 void i915_request_add(struct i915_request *rq)
1598 struct intel_timeline * const tl = i915_request_timeline(rq);
1603 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1605 trace_i915_request_add(rq);
1606 __i915_request_commit(rq);
1610 ctx = rcu_dereference(rq->context->gem_context);
1615 __i915_request_queue(rq, &attr);
1653 static bool __i915_spin_request(struct i915_request * const rq, int state)
1669 if (!i915_request_is_running(rq))
1683 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1686 if (dma_fence_is_signaled(&rq->fence))
1715 * @rq: the request to wait upon
1728 long i915_request_wait(struct i915_request *rq,
1739 if (dma_fence_is_signaled(&rq->fence))
1745 trace_i915_request_wait_begin(rq, flags);
1753 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1779 __i915_spin_request(rq, state))
1794 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
1795 intel_rps_boost(rq);
1798 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1816 if (i915_request_is_ready(rq))
1817 intel_engine_flush_submission(rq->engine);
1822 if (dma_fence_is_signaled(&rq->fence))
1840 dma_fence_remove_callback(&rq->fence, &wait.cb);
1844 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
1845 trace_i915_request_wait_end(rq);
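i915_request_wait() combines the two strategies visible above: a short bounded busy-spin (__i915_spin_request(), capped by engine->props.max_busywait_duration_ns) in case completion is imminent, then a sleep after installing a dma_fence callback (request_wait_wake) that wakes the task. The sketch below is a userspace analogue of that spin-then-sleep wait built on C11 atomics and pthreads; struct fence, fence_signal() and fence_wait() are invented for illustration.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct fence {
	_Atomic bool signaled;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

/* Signaler side: plays the role of the dma_fence callback waking the waiter. */
static void fence_signal(struct fence *f)
{
	atomic_store(&f->signaled, true);
	pthread_mutex_lock(&f->lock);
	pthread_cond_broadcast(&f->cond);
	pthread_mutex_unlock(&f->lock);
}

static int fence_wait(struct fence *f, const struct timespec *deadline,
		      long spin_iterations)
{
	/* Optimistic spin, bounded like max_busywait_duration_ns, to avoid
	 * the cost of sleeping when the fence is about to signal anyway. */
	for (long i = 0; i < spin_iterations; i++)
		if (atomic_load(&f->signaled))
			return 0;

	/* Slow path: sleep until signaled or the absolute deadline passes. */
	pthread_mutex_lock(&f->lock);
	while (!atomic_load(&f->signaled)) {
		if (pthread_cond_timedwait(&f->cond, &f->lock, deadline) == ETIMEDOUT)
			break;
	}
	pthread_mutex_unlock(&f->lock);

	return atomic_load(&f->signaled) ? 0 : -ETIMEDOUT;
}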