Lines matching refs:rq (drivers/gpu/drm/i915/i915_request.c)

115 struct i915_request *rq = to_request(fence);
117 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
118 rq->guc_prio != GUC_PRIO_FINI);
120 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
121 if (rq->batch_res) {
122 i915_vma_resource_put(rq->batch_res);
123 rq->batch_res = NULL;
133 i915_sw_fence_fini(&rq->submit);
134 i915_sw_fence_fini(&rq->semaphore);
140 * very careful in what rq->engine we poke. The virtual engine is
141 * referenced via the rq->context and we released that ref during
149 * not be unsubmitted again, so rq->engine and rq->execution_mask
150 * at this point is stable. rq->execution_mask will be a single
154 * power-of-two we assume that rq->engine may still be a virtual
164 * know that if the rq->execution_mask is a single bit, rq->engine
167 if (is_power_of_2(rq->execution_mask) &&
168 !cmpxchg(&rq->engine->request_pool, NULL, rq))
171 kmem_cache_free(slab_requests, rq);
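
The comment and the cmpxchg() above (lines 164-171) describe a single-slot per-engine request cache: a freed request is parked in rq->engine->request_pool only when that slot is currently empty (and only when the is_power_of_2() check says rq->engine is a stable, physical engine), otherwise it goes back to the slab. A minimal userspace sketch of the same pattern, with C11 atomics standing in for the kernel's cmpxchg()/xchg() and malloc/free for the slab cache; the engine and request types here are invented purely for illustration.

#include <stdatomic.h>
#include <stdlib.h>

struct request { int payload; };

struct engine {
	/* single-slot cache of a spare request, NULL when empty */
	_Atomic(struct request *) request_pool;
};

static void request_free(struct engine *e, struct request *rq)
{
	struct request *expected = NULL;

	/* park the request only if the slot is free; otherwise really free it */
	if (!atomic_compare_exchange_strong(&e->request_pool, &expected, rq))
		free(rq);
}

static struct request *request_alloc(struct engine *e)
{
	/* take whatever is parked (possibly NULL), else fall back to the allocator */
	struct request *rq = atomic_exchange(&e->request_pool, NULL);

	return rq ? rq : malloc(sizeof(*rq));
}

The point of keeping exactly one slot is that it needs no locking and no list: one compare-exchange on free, one exchange on allocate.
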
192 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
196 if (llist_empty(&rq->execute_cb))
200 llist_del_all(&rq->execute_cb),
205 static void __notify_execute_cb_irq(struct i915_request *rq)
207 __notify_execute_cb(rq, irq_work_queue);
216 void i915_request_notify_execute_cb_imm(struct i915_request *rq)
218 __notify_execute_cb(rq, irq_work_imm);
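
__notify_execute_cb() detaches the whole rq->execute_cb list in one atomic llist_del_all() and then walks the detached chain, so producers can keep adding callbacks lock-free while a consumer drains them. A small C11 analogue of that detach-and-walk, using a Treiber-style lock-free stack in place of the kernel's llist; the node and callback types are made up for the example.

#include <stdatomic.h>

struct cb_node {
	struct cb_node *next;
	void (*fn)(struct cb_node *);
};

struct cb_list { _Atomic(struct cb_node *) head; };

/* lock-free push, safe from many threads concurrently */
static void cb_add(struct cb_list *list, struct cb_node *node)
{
	node->next = atomic_load(&list->head);
	while (!atomic_compare_exchange_weak(&list->head, &node->next, node))
		;	/* the failed CAS refreshed node->next, just retry */
}

/* detach the whole list atomically, then run every callback on it */
static void cb_notify_all(struct cb_list *list)
{
	struct cb_node *node = atomic_exchange(&list->head, NULL);

	while (node) {
		struct cb_node *next = node->next;

		node->fn(node);
		node = next;
	}
}
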
221 static void __i915_request_fill(struct i915_request *rq, u8 val)
223 void *vaddr = rq->ring->vaddr;
226 head = rq->infix;
227 if (rq->postfix < head) {
228 memset(vaddr + head, val, rq->ring->size - head);
231 memset(vaddr + head, val, rq->postfix - head);
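
__i915_request_fill() overwrites the ring contents between rq->infix and rq->postfix (POISON_FREE on retire, zero on skip) and has to cope with the payload wrapping past the end of the ring. A standalone sketch of the same wrap handling; the ring/head/tail names are illustrative, not the driver's.

#include <stddef.h>
#include <string.h>

struct ring { unsigned char *vaddr; size_t size; };

/* fill [head, tail) in a circular buffer, handling a wrap past ring->size */
static void ring_fill(struct ring *ring, size_t head, size_t tail, unsigned char val)
{
	if (tail < head) {
		memset(ring->vaddr + head, val, ring->size - head);
		head = 0;
	}
	memset(ring->vaddr + head, val, tail - head);
}
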
236 * @rq: request to inspect
245 i915_request_active_engine(struct i915_request *rq,
255 * Note that rq->engine is unstable, and so we double
258 locked = READ_ONCE(rq->engine);
260 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
266 if (i915_request_is_active(rq)) {
267 if (!__i915_request_is_complete(rq))
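
i915_request_active_engine() must take a per-engine lock while rq->engine can be rewritten underneath it (a virtual engine may move the request between siblings), so it reads the pointer, locks the engine it read, then re-checks that the pointer is unchanged and retries if not. A userspace analogue of that lock-then-revalidate loop; the one-mutex-per-engine layout and the type names are assumptions made for the sketch.

#include <pthread.h>
#include <stdatomic.h>

struct engine { pthread_mutex_t lock; };
struct request { _Atomic(struct engine *) engine; };

/* return the request's engine with its lock held, even though
 * rq->engine may be rewritten concurrently */
static struct engine *lock_active_engine(struct request *rq)
{
	struct engine *locked, *e;

	locked = atomic_load(&rq->engine);
	pthread_mutex_lock(&locked->lock);
	while ((e = atomic_load(&rq->engine)) != locked) {
		/* the request migrated; chase it to its new engine */
		pthread_mutex_unlock(&locked->lock);
		locked = e;
		pthread_mutex_lock(&locked->lock);
	}

	return locked;	/* caller unlocks locked->lock */
}
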
277 static void __rq_init_watchdog(struct i915_request *rq)
279 rq->watchdog.timer.function = NULL;
284 struct i915_request *rq =
286 struct intel_gt *gt = rq->engine->gt;
288 if (!i915_request_completed(rq)) {
289 if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
292 i915_request_put(rq);
298 static void __rq_arm_watchdog(struct i915_request *rq)
300 struct i915_request_watchdog *wdg = &rq->watchdog;
301 struct intel_context *ce = rq->context;
306 i915_request_get(rq);
317 static void __rq_cancel_watchdog(struct i915_request *rq)
319 struct i915_request_watchdog *wdg = &rq->watchdog;
322 i915_request_put(rq);
357 bool i915_request_retire(struct i915_request *rq)
359 if (!__i915_request_is_complete(rq))
362 RQ_TRACE(rq, "\n");
364 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
365 trace_i915_request_retire(rq);
366 i915_request_mark_complete(rq);
368 __rq_cancel_watchdog(rq);
379 GEM_BUG_ON(!list_is_first(&rq->link,
380 &i915_request_timeline(rq)->requests));
383 __i915_request_fill(rq, POISON_FREE);
384 rq->ring->head = rq->postfix;
386 if (!i915_request_signaled(rq)) {
387 spin_lock_irq(&rq->lock);
388 dma_fence_signal_locked(&rq->fence);
389 spin_unlock_irq(&rq->lock);
392 if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
393 intel_rps_dec_waiters(&rq->engine->gt->rps);
405 rq->engine->remove_active_request(rq);
406 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
408 __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
410 intel_context_exit(rq->context);
411 intel_context_unpin(rq->context);
413 i915_sched_node_fini(&rq->sched);
414 i915_request_put(rq);
419 void i915_request_retire_upto(struct i915_request *rq)
421 struct intel_timeline * const tl = i915_request_timeline(rq);
424 RQ_TRACE(rq, "\n");
425 GEM_BUG_ON(!__i915_request_is_complete(rq));
430 } while (i915_request_retire(tmp) && tmp != rq);
441 struct i915_request * const *port, *rq;
488 (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
490 if (rq->context == signal->context) {
491 inflight = i915_seqno_passed(rq->fence.seqno,
502 __await_execution(struct i915_request *rq,
515 cb->fence = &rq->submit;
553 void __i915_request_skip(struct i915_request *rq)
555 GEM_BUG_ON(!fatal_error(rq->fence.error));
557 if (rq->infix == rq->postfix)
560 RQ_TRACE(rq, "error: %d\n", rq->fence.error);
567 __i915_request_fill(rq, 0);
568 rq->infix = rq->postfix;
571 bool i915_request_set_error_once(struct i915_request *rq, int error)
577 if (i915_request_signaled(rq))
580 old = READ_ONCE(rq->fence.error);
584 } while (!try_cmpxchg(&rq->fence.error, &old, error));
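
i915_request_set_error_once() records an error in rq->fence.error only if no fatal error is already stored, using a try_cmpxchg() loop so concurrent setters cannot overwrite each other. The same first-error-wins idea in portable C11; the driver filters which old values count as fatal, which this sketch reduces to "any non-zero value".

#include <stdatomic.h>
#include <stdbool.h>

struct fence { _Atomic int error; };	/* 0 means "no error recorded" */

/* record @error only if nothing was recorded before;
 * returns true if this call installed the error */
static bool fence_set_error_once(struct fence *f, int error)
{
	int old = atomic_load(&f->error);

	do {
		if (old)	/* someone else already recorded an error */
			return false;
	} while (!atomic_compare_exchange_weak(&f->error, &old, error));

	return true;
}
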
589 struct i915_request *i915_request_mark_eio(struct i915_request *rq)
591 if (__i915_request_is_complete(rq))
594 GEM_BUG_ON(i915_request_signaled(rq));
597 rq = i915_request_get(rq);
599 i915_request_set_error_once(rq, -EIO);
600 i915_request_mark_complete(rq);
602 return rq;
762 void i915_request_cancel(struct i915_request *rq, int error)
764 if (!i915_request_set_error_once(rq, error))
767 set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
769 intel_context_cancel_request(rq->context, rq);
811 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
818 i915_request_put(rq);
827 struct i915_request *rq, *rn;
829 list_for_each_entry_safe(rq, rn, &tl->requests, link)
830 if (!i915_request_retire(rq))
839 struct i915_request *rq;
843 rq = xchg(rsvd, NULL);
844 if (!rq) /* Use the normal failure path for one final WARN */
847 return rq;
854 rq = list_first_entry(&tl->requests, typeof(*rq), link);
855 i915_request_retire(rq);
857 rq = kmem_cache_alloc(slab_requests,
859 if (rq)
860 return rq;
863 rq = list_last_entry(&tl->requests, typeof(*rq), link);
864 cond_synchronize_rcu(rq->rcustate);
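
request_alloc_slow() (lines 839-864) has two fallbacks visible above: claim a single reserved request set aside for memory pressure via xchg(), or retire the oldest request on the timeline to recycle memory before retrying the slab. A tiny sketch of the "claim the emergency reserve exactly once" half, with atomic_exchange() standing in for xchg(); the retire-and-retry half is omitted and the names are invented.

#include <stdatomic.h>
#include <stdlib.h>

struct request { int payload; };

/* one request set aside at init time for allocation-failure paths */
static _Atomic(struct request *) reserved_rq;

static struct request *alloc_request_slow(void)
{
	/* at most one caller can win the reserve; later callers see NULL */
	struct request *rq = atomic_exchange(&reserved_rq, NULL);

	if (rq)
		return rq;

	/* no reserve left: fall back to the normal allocator (the driver
	 * instead retires old requests and retries its slab cache here) */
	return malloc(sizeof(*rq));
}
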
875 struct i915_request *rq = arg;
877 spin_lock_init(&rq->lock);
878 i915_sched_node_init(&rq->sched);
879 i915_sw_fence_init(&rq->submit, submit_notify);
880 i915_sw_fence_init(&rq->semaphore, semaphore_notify);
882 clear_capture_list(rq);
883 rq->batch_res = NULL;
885 init_llist_head(&rq->execute_cb);
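
__i915_request_ctor() is the kmem_cache constructor: locks, sw-fences and lists are initialized once, when the backing memory is first created, and only selectively re-initialized on reuse (see the i915_sw_fence_reinit()/i915_sched_node_reinit() calls at 968-971). A sketch of the same one-time-constructor idea for a simple userspace object pool; the pool is single-threaded and its layout is invented for the example.

#include <pthread.h>
#include <stdlib.h>

struct object {
	pthread_mutex_t lock;	/* constructed once, survives reuse */
	int payload;		/* re-initialized on every allocation */
	struct object *free_next;
};

static struct object *free_list;	/* single-threaded pool for brevity */

static struct object *object_alloc(void)
{
	struct object *obj = free_list;

	if (obj) {
		free_list = obj->free_next;
	} else {
		obj = malloc(sizeof(*obj));
		if (!obj)
			return NULL;
		/* "constructor": runs only when fresh memory is created */
		pthread_mutex_init(&obj->lock, NULL);
	}

	obj->payload = 0;	/* per-allocation (re)initialization */
	return obj;
}

static void object_free(struct object *obj)
{
	/* the lock is deliberately left initialized for the next user */
	obj->free_next = free_list;
	free_list = obj;
}
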
898 struct i915_request *rq;
936 rq = kmem_cache_alloc(slab_requests,
938 if (unlikely(!rq)) {
939 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
940 if (!rq) {
946 rq->context = ce;
947 rq->engine = ce->engine;
948 rq->ring = ce->ring;
949 rq->execution_mask = ce->engine->mask;
950 rq->i915 = ce->engine->i915;
952 ret = intel_timeline_get_seqno(tl, rq, &seqno);
956 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
959 RCU_INIT_POINTER(rq->timeline, tl);
960 rq->hwsp_seqno = tl->hwsp_seqno;
961 GEM_BUG_ON(__i915_request_is_complete(rq));
963 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
965 rq->guc_prio = GUC_PRIO_INIT;
968 i915_sw_fence_reinit(&i915_request_get(rq)->submit);
969 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
971 i915_sched_node_reinit(&rq->sched);
974 clear_batch_ptr(rq);
975 __rq_init_watchdog(rq);
976 assert_capture_list_is_null(rq);
977 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
978 GEM_BUG_ON(rq->batch_res);
992 rq->reserved_space =
993 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
1001 rq->head = rq->ring->emit;
1003 ret = rq->engine->request_alloc(rq);
1007 rq->infix = rq->ring->emit; /* end of header; start of user payload */
1010 list_add_tail_rcu(&rq->link, &tl->requests);
1012 return rq;
1015 ce->ring->emit = rq->head;
1018 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
1019 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
1022 kmem_cache_free(slab_requests, rq);
1031 struct i915_request *rq;
1039 rq = list_first_entry(&tl->requests, typeof(*rq), link);
1040 if (!list_is_last(&rq->link, &tl->requests))
1041 i915_request_retire(rq);
1044 rq = __i915_request_create(ce, GFP_KERNEL);
1046 if (IS_ERR(rq))
1050 rq->cookie = lockdep_pin_lock(&tl->mutex);
1052 return rq;
1056 return rq;
1060 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
1065 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1116 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
1117 err = i915_sw_fence_await_dma_fence(&rq->submit,
1126 already_busywaiting(struct i915_request *rq)
1140 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1335 static void mark_external(struct i915_request *rq)
1345 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1349 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1351 mark_external(rq);
1352 return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1353 i915_fence_context_timeout(rq->i915,
1359 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1365 return __i915_request_await_external(rq, fence);
1371 err = __i915_request_await_external(rq, iter);
1375 err = i915_request_await_dma_fence(rq, chain->fence);
1384 static inline bool is_parallel_rq(struct i915_request *rq)
1386 return intel_context_is_parallel(rq->context);
1389 static inline struct intel_context *request_to_parent(struct i915_request *rq)
1391 return intel_context_to_parent(rq->context);
1404 i915_request_await_execution(struct i915_request *rq,
1426 if (fence->context == rq->fence.context)
1435 if (is_same_parallel_context(rq, to_request(fence)))
1437 ret = __i915_request_await_execution(rq,
1440 ret = i915_request_await_external(rq, fence);
1501 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1533 if (fence->context == rq->fence.context)
1538 intel_timeline_sync_is_later(i915_request_timeline(rq),
1543 if (is_same_parallel_context(rq, to_request(fence)))
1545 ret = i915_request_await_request(rq, to_request(fence));
1547 ret = i915_request_await_external(rq, fence);
1554 intel_timeline_sync_set(i915_request_timeline(rq),
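
i915_request_await_dma_fence() skips fences the timeline has already synced against (intel_timeline_sync_is_later() before the await, intel_timeline_sync_set() after), so repeated waits on the same fence context collapse into a single dependency. A sketch of that filter using a fixed-size table keyed by fence context; the table, its linear scan and the helper names are purely illustrative.

#include <stdbool.h>
#include <stdint.h>

#define SYNC_SLOTS 64

struct sync_point { uint64_t context; uint32_t seqno; bool used; };
struct sync_map { struct sync_point slot[SYNC_SLOTS]; };

/* seqno comparison that tolerates wrap-around, like i915_seqno_passed() */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* have we already synced against @context up to @seqno or later? */
static bool sync_is_later(struct sync_map *map, uint64_t context, uint32_t seqno)
{
	for (unsigned int i = 0; i < SYNC_SLOTS; i++)
		if (map->slot[i].used && map->slot[i].context == context)
			return seqno_passed(map->slot[i].seqno, seqno);
	return false;
}

/* remember that we are now synced against @context up to @seqno */
static void sync_set(struct sync_map *map, uint64_t context, uint32_t seqno)
{
	unsigned int free_slot = SYNC_SLOTS;

	for (unsigned int i = 0; i < SYNC_SLOTS; i++) {
		if (map->slot[i].used && map->slot[i].context == context) {
			if (!seqno_passed(map->slot[i].seqno, seqno))
				map->slot[i].seqno = seqno;
			return;
		}
		if (!map->slot[i].used && free_slot == SYNC_SLOTS)
			free_slot = i;
	}
	if (free_slot < SYNC_SLOTS) {
		map->slot[free_slot] = (struct sync_point){
			.context = context, .seqno = seqno, .used = true,
		};
	}
}

A caller would check sync_is_later() before waiting on a fence and call sync_set() once the wait has been queued, mirroring the is_later/set pair at lines 1538 and 1554.
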
1564 * @rq: request we are wishing to use
1569 int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
1574 err = i915_request_await_dma_fence(rq, deps->fences[i]);
1621 static void i915_request_await_huc(struct i915_request *rq)
1623 struct intel_huc *huc = &rq->context->engine->gt->uc.huc;
1626 if (!rcu_access_pointer(rq->context->gem_context))
1630 i915_sw_fence_await_sw_fence(&rq->submit,
1632 &rq->hucq);
1636 __i915_request_ensure_parallel_ordering(struct i915_request *rq,
1641 GEM_BUG_ON(!is_parallel_rq(rq));
1643 prev = request_to_parent(rq)->parallel.last_rq;
1646 i915_sw_fence_await_sw_fence(&rq->submit,
1648 &rq->submitq);
1650 if (rq->engine->sched_engine->schedule)
1651 __i915_sched_node_add_dependency(&rq->sched,
1653 &rq->dep,
1659 request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
1667 &rq->fence));
1671 __i915_request_ensure_ordering(struct i915_request *rq,
1676 GEM_BUG_ON(is_parallel_rq(rq));
1679 &rq->fence));
1682 bool uses_guc = intel_engine_uses_guc(rq->engine);
1684 rq->engine->mask);
1685 bool same_context = prev->context == rq->context;
1695 rq->fence.seqno));
1698 i915_sw_fence_await_sw_fence(&rq->submit,
1700 &rq->submitq);
1702 __i915_sw_fence_await_dma_fence(&rq->submit,
1704 &rq->dmaq);
1705 if (rq->engine->sched_engine->schedule)
1706 __i915_sched_node_add_dependency(&rq->sched,
1708 &rq->dep,
1720 __i915_request_add_to_timeline(struct i915_request *rq)
1722 struct intel_timeline *timeline = i915_request_timeline(rq);
1732 if (rq->engine->class == VIDEO_DECODE_CLASS)
1733 i915_request_await_huc(rq);
1765 if (likely(!is_parallel_rq(rq)))
1766 prev = __i915_request_ensure_ordering(rq, timeline);
1768 prev = __i915_request_ensure_parallel_ordering(rq, timeline);
1777 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1787 struct i915_request *__i915_request_commit(struct i915_request *rq)
1789 struct intel_engine_cs *engine = rq->engine;
1790 struct intel_ring *ring = rq->ring;
1793 RQ_TRACE(rq, "\n");
1800 GEM_BUG_ON(rq->reserved_space > ring->space);
1801 rq->reserved_space = 0;
1802 rq->emitted_jiffies = jiffies;
1810 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1812 rq->postfix = intel_ring_offset(rq, cs);
1814 return __i915_request_add_to_timeline(rq);
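
The create path reserves ring space for the closing breadcrumb up front (rq->reserved_space = 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32), lines 992-993), and __i915_request_commit() drops that reservation before emitting the breadcrumb, so a request that has started can always be closed out even when the ring is otherwise full. A toy version of the reserve-then-release accounting, with the ring bookkeeping reduced to byte counters; the structure and names are not the driver's.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct ring { size_t size, used, reserved; };

/* request creation: set aside room for the closing breadcrumb */
static void ring_reserve(struct ring *r, size_t breadcrumb_bytes)
{
	assert(r->used + breadcrumb_bytes <= r->size);
	r->reserved = breadcrumb_bytes;
}

/* normal emission may not eat into the reserved tail space */
static bool ring_emit(struct ring *r, size_t bytes)
{
	if (r->used + r->reserved + bytes > r->size)
		return false;	/* caller must wait for old requests to retire */
	r->used += bytes;
	return true;
}

/* closing the request: drop the reservation, then emit the breadcrumb,
 * which is now guaranteed to fit */
static void ring_emit_breadcrumb(struct ring *r, size_t breadcrumb_bytes)
{
	assert(breadcrumb_bytes <= r->reserved);
	r->reserved = 0;
	r->used += breadcrumb_bytes;
}
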
1817 void __i915_request_queue_bh(struct i915_request *rq)
1819 i915_sw_fence_commit(&rq->semaphore);
1820 i915_sw_fence_commit(&rq->submit);
1823 void __i915_request_queue(struct i915_request *rq,
1837 if (attr && rq->engine->sched_engine->schedule)
1838 rq->engine->sched_engine->schedule(rq, attr);
1841 __i915_request_queue_bh(rq);
1845 void i915_request_add(struct i915_request *rq)
1847 struct intel_timeline * const tl = i915_request_timeline(rq);
1852 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1854 trace_i915_request_add(rq);
1855 __i915_request_commit(rq);
1859 ctx = rcu_dereference(rq->context->gem_context);
1864 __i915_request_queue(rq, &attr);
1902 static bool __i915_spin_request(struct i915_request * const rq, int state)
1918 if (!i915_request_is_running(rq))
1932 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1935 if (dma_fence_is_signaled(&rq->fence))
1964 * @rq: the request to wait upon
1981 long i915_request_wait_timeout(struct i915_request *rq,
1992 if (dma_fence_is_signaled(&rq->fence))
1998 trace_i915_request_wait_begin(rq, flags);
2006 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
2032 __i915_spin_request(rq, state))
2047 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
2048 intel_rps_boost(rq);
2051 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
2069 if (i915_request_is_ready(rq))
2070 __intel_engine_flush_submission(rq->engine, false);
2075 if (dma_fence_is_signaled(&rq->fence))
2093 dma_fence_remove_callback(&rq->fence, &wait.cb);
2097 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
2098 trace_i915_request_wait_end(rq);
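
i915_request_wait_timeout() first busy-spins for a short, per-engine-tunable period (__i915_spin_request(), bounded by max_busywait_duration_ns) in the hope that an almost-finished request completes without a context switch, and only then installs a dma-fence callback and sleeps. A userspace sketch of that spin-then-block shape using a C11 atomic flag plus a mutex/condvar; the spin budget passed by the caller stands in for the engine property.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct waitable {
	_Atomic bool signaled;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void wait_for(struct waitable *w, uint64_t spin_budget_ns)
{
	uint64_t deadline = now_ns() + spin_budget_ns;

	/* optimistic spin: cheap if the event is about to fire anyway */
	while (now_ns() < deadline)
		if (atomic_load(&w->signaled))
			return;

	/* spin budget exhausted: block until signaled */
	pthread_mutex_lock(&w->lock);
	while (!atomic_load(&w->signaled))
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
}

static void signal_waitable(struct waitable *w)
{
	pthread_mutex_lock(&w->lock);
	atomic_store(&w->signaled, true);
	pthread_cond_broadcast(&w->cond);
	pthread_mutex_unlock(&w->lock);
}
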
2104 * @rq: the request to wait upon
2120 long i915_request_wait(struct i915_request *rq,
2124 long ret = i915_request_wait_timeout(rq, flags, timeout);
2147 static char queue_status(const struct i915_request *rq)
2149 if (i915_request_is_active(rq))
2152 if (i915_request_is_ready(rq))
2153 return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
2158 static const char *run_status(const struct i915_request *rq)
2160 if (__i915_request_is_complete(rq))
2163 if (__i915_request_has_started(rq))
2166 if (!i915_sw_fence_signaled(&rq->semaphore))
2172 static const char *fence_status(const struct i915_request *rq)
2174 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
2177 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
2184 const struct i915_request *rq,
2188 const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
2222 x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2226 queue_status(rq),
2227 rq->fence.context, rq->fence.seqno,
2228 run_status(rq),
2229 fence_status(rq),
2231 jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2235 static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
2239 return ring == i915_ggtt_offset(rq->ring->vma);
2242 static bool match_ring(struct i915_request *rq)
2248 if (!intel_engine_is_virtual(rq->engine))
2249 return engine_match_ring(rq->engine, rq);
2253 while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
2254 found = engine_match_ring(engine, rq);
2262 enum i915_request_state i915_test_request_state(struct i915_request *rq)
2264 if (i915_request_completed(rq))
2267 if (!i915_request_started(rq))
2270 if (match_ring(rq))