Cross-reference hits for "execlists" in the i915 GPU driver, grouped by kernel tree, directory and file. Each entry below reads "source line: matched text", with the enclosing function (and whether the hit is an argument, local variable or struct member) noted in brackets; snippets are quoted as found and some are truncated.

/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/uc/
intel_guc_submission.c
  228: * When we're doing submissions using regular execlists backend, writing to
  270: * (ce->inflight = rq->engine). It is only used by the execlists  [in schedule_in()]
  289: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in __guc_dequeue()]
  290: struct i915_request **first = execlists->inflight;  [in __guc_dequeue()]
  291: struct i915_request ** const last_port = first + execlists->port_mask;  [in __guc_dequeue()]
  307: * We write directly into the execlists->inflight queue and don't use  [in __guc_dequeue()]
  308: * the execlists->pending queue, as we don't have a distinct switch  [in __guc_dequeue()]
  312: while ((rb = rb_first_cached(&execlists->queue))) {  [in __guc_dequeue()]
  323: port - execlists  [in __guc_dequeue()]
  350: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in guc_submission_tasklet()]
  375: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in guc_reset_prepare()]
  392: cancel_port_requests(struct intel_engine_execlists * const execlists)  [argument of cancel_port_requests()]
  406: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in guc_reset_rewind()]
  431: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in guc_reset_cancel()]
  489: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in guc_reset_finish()]
  (further matches omitted)
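
The comments at lines 307-308 carry the key design point for this file: the GuC backend has no distinct context-switch event of its own, so __guc_dequeue() fills execlists->inflight[] directly instead of staging requests in execlists->pending[] first. A minimal sketch of that pattern, assuming a simplified i915_priolist that keeps its requests on a single list; the real 5.10 structure, the schedule-in accounting and the actual GuC submission are all elided:

static void sketch_guc_dequeue(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request **port = execlists->inflight;
        struct i915_request ** const last_port = port + execlists->port_mask;
        struct rb_node *rb;

        /* Consume the priority-sorted queue, highest priority first. */
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                struct i915_request *rq, *rn;

                /* Simplified: one request list per priority level. */
                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
                        if (port > last_port)
                                goto done;        /* both submission ports full */

                        list_del_init(&rq->sched.link);
                        *port++ = rq;             /* goes straight into inflight[] */
                }

                rb_erase_cached(&p->node, &execlists->queue);
                i915_priolist_free(p);
        }
done:
        /* ...then hand the new inflight[] contents to the GuC (elided). */
        return;
}
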
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/
intel_execlists_submission.c
  345: return max(virtual_prio(&engine->execlists),  [in need_preempt()]
  409: execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)  [argument of execlists_unwind_incomplete_requests()]
  412: container_of(execlists, typeof(*engine), execlists);  [in execlists_unwind_incomplete_requests()]
  517: ce->lrc.ccid |= engine->execlists.ccid;  [in __execlists_schedule_in()]
  726: static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)  [argument of write_desc()]
  728: if (execlists->ctrl_reg) {  [in write_desc()]
  729: writel(lower_32_bits(desc), execlists->submit_reg + port * 2);  [in write_desc()]
  730: writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);  [in write_desc()]
  732: writel(upper_32_bits(desc), execlists  [in write_desc()]
  756: trace_ports(const struct intel_engine_execlists *execlists, const char *msg, struct i915_request * const *ports)  [argument of trace_ports()]
  779: assert_pending_valid(const struct intel_engine_execlists *execlists, const char *msg)  [argument of assert_pending_valid()]
  910: struct intel_engine_execlists *execlists = &engine->execlists;  [local in execlists_submit_ports()]
  1234: record_preemption(struct intel_engine_execlists *execlists)  [argument of record_preemption()]
  1275: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_dequeue()]
  1641: cancel_port_requests(struct intel_engine_execlists * const execlists, struct i915_request **inactive)  [argument of cancel_port_requests()]
  1819: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in process_csb()]
  2528: __execlists_kick(struct intel_engine_execlists *execlists)  [argument of __execlists_kick()]
  2809: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in reset_csb_pointers()]
  3015: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in reset_csb()]
  3108: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_reset_csb()]
  3151: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_reset_cancel()]
  3239: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_reset_finish()]
  3541: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in intel_execlists_submission_setup()]
  4087: const struct intel_engine_execlists *execlists = &engine->execlists;  [local in intel_execlists_show_requests()]
  (further matches omitted)
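
The write_desc() hits at lines 726-732 show both register interfaces for loading a context descriptor. With the enhanced execlists control register present, each 64-bit descriptor is written as a low/high dword pair into per-port submit registers; without it, both halves go to the single legacy ELSP register, upper dword first. The snippet at line 732 is cut short, so treat the else branch below as a reconstruction of what the visible lines imply rather than a verbatim quote:

static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
        if (execlists->ctrl_reg) {
                /* Enhanced execlists: one register pair per submission port. */
                writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
                writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
        } else {
                /* Legacy ELSP: both dwords funnel through one register. */
                writel(upper_32_bits(desc), execlists->submit_reg);
                writel(lower_32_bits(desc), execlists->submit_reg);
        }
}

Note that submit_reg is an ioremapped u32 pointer, so the "+ port * 2" arithmetic steps in 32-bit register units; on the enhanced path these writes only stage the descriptors, and execlists_submit_ports() (line 910) is then responsible for telling the hardware to load them.
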
intel_engine.h
  105: execlists_num_ports(const struct intel_engine_execlists * const execlists)  [argument of execlists_num_ports()]
  107: return execlists->port_mask + 1;  [in execlists_num_ports()]
  111: execlists_active(const struct intel_engine_execlists *execlists)  [argument of execlists_active()]
  115: cur = READ_ONCE(execlists->active);  [in execlists_active()]
  121: cur = READ_ONCE(execlists->active);  [in execlists_active()]
  130: execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
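
execlists_active() (lines 111-121) is the lockless way to peek at the request currently believed to be executing: it reads the active pointer, dereferences it, then re-reads the pointer and retries if it moved in between, much like a seqlock read side. A sketch of that loop following the two READ_ONCE() sites shown above; the barrier comments are a reading aid, with the exact pairing living in process_csb():

static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
        struct i915_request * const *cur, * const *old, *active;

        cur = READ_ONCE(execlists->active);
        smp_rmb(); /* pairs with the overwrite protection in process_csb() */
        do {
                old = cur;

                active = READ_ONCE(*cur);
                cur = READ_ONCE(execlists->active);

                smp_rmb(); /* and complete the seqlock-style retry */
        } while (unlikely(cur != old));

        return active;
}
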
intel_engine_stats.h
  18: struct intel_engine_execlists_stats *stats = &engine->stats.execlists;  [in intel_engine_context_in()]
  41: struct intel_engine_execlists_stats *stats = &engine->stats.execlists;  [in intel_engine_context_out()]
intel_engine_cs.c
  39: * never be saved (power context is stored elsewhere, and execlists don't work
  1027: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in intel_engine_init_execlists()]
  1029: execlists->port_mask = 1;  [in intel_engine_init_execlists()]
  1030: GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));  [in intel_engine_init_execlists()]
  1031: GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);  [in intel_engine_init_execlists()]
  1033: memset(execlists->pending, 0, sizeof(execlists->pending));  [in intel_engine_init_execlists()]
  1034: execlists->active =  [in intel_engine_init_execlists()]
  1035: memset(execlists  [in intel_engine_init_execlists()]
  2062: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in intel_engine_print_registers()]
  (further matches omitted)
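
intel_engine_init_execlists() (lines 1027-1035) establishes the defaults the other hits rely on: port_mask is 1, so execlists_num_ports() reports two submission ports, pending[] starts empty, and active is pointed at a cleared inflight[] array. The snippet at line 1035 is truncated; the sketch below completes it with a memset over inflight[], which is an inference from the assignment on line 1034 rather than quoted source:

static void sketch_init_execlists(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;

        execlists->port_mask = 1; /* execlists_num_ports() == port_mask + 1 == 2 */
        GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

        memset(execlists->pending, 0, sizeof(execlists->pending));
        execlists->active =
                memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
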
sysfs_engines.c
  226: if (execlists_active(&engine->execlists))  [in timeslice_store()]
  227: set_timer_ms(&engine->execlists.timer, duration);  [in timeslice_store()]
  332: if (READ_ONCE(engine->execlists.pending[0]))  [in preempt_timeout_store()]
  333: set_timer_ms(&engine->execlists.preempt, timeout);  [in preempt_timeout_store()]
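
Both sysfs hits follow the same pattern: after the new value is stored, the callback re-arms the corresponding execlists timer if the engine is busy at that moment, so a shorter timeslice or preemption timeout applies to the workload already running rather than waiting for the next submission. A rough sketch of the timeslice case; kobj_to_engine(), the props field, and the parsing and clamping details are assumptions about surrounding code the snippets do not show:

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

        /* Apply to the request already on the HW, not just future ones. */
        if (execlists_active(&engine->execlists))
                set_timer_ms(&engine->execlists.timer, duration);

        return count;
}
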
selftest_execlists.c
  58: if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))  [in wait_for_submit()]
  78: if (READ_ONCE(engine->execlists.pending[0]))  [in wait_for_reset()]
  631: GEM_BUG_ON(execlists_active(&engine->execlists) != rq);  [in live_hold_reset()]
  1201: del_timer(&engine->execlists.timer);  [in live_timeslice_rewind()]
  1367: GEM_BUG_ON(execlists_active(&engine->execlists) != rq);  [in live_timeslice_queue()]
  1378: } while (READ_ONCE(engine->execlists.pending[0]));  [in live_timeslice_queue()]
  1978: engine->execlists.preempt_hang.count = 0;  [in live_nopreempt()]
  2025: if (engine->execlists.preempt_hang.count) {  [in live_nopreempt()]
  2027: engine->execlists.preempt_hang.count);  [in live_nopreempt()]
  2358: while (!engine->execlists  [in __cancel_fail()]
  (further matches omitted)
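
The wait_for_submit() hit at line 58 is the selftests' working definition of "the request reached the hardware": nothing left staged in execlists->pending[] and the request reported active. A hedged sketch of such a poll loop; is_active() is a small helper local to the selftest, and the exact ordering of checks in the real file may differ:

static int wait_for_submit(struct intel_engine_cs *engine,
                           struct i915_request *rq,
                           unsigned long timeout)
{
        timeout += jiffies;
        do {
                bool done = time_after(jiffies, timeout);

                if (i915_request_completed(rq)) /* that was quick! */
                        return 0;

                /* Wait for the HW to acknowledge the submission (or error). */
                intel_engine_flush_submission(engine);
                if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
                        return 0;

                if (done)
                        return -ETIME;

                cond_resched();
        } while (1);
}
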
intel_engine_types.h
  229: * @ctrl_reg: the enhanced execlists control register, used to load the
  551: struct intel_engine_execlists execlists;  [struct member]
  605: struct intel_engine_execlists_stats execlists;  [struct member]
selftest_lrc.c
  68: if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))  [in wait_for_submit()]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
intel_lrc.c
  437: static int queue_prio(const struct intel_engine_execlists *execlists)  [argument of queue_prio()]
  442: rb = rb_first_cached(&execlists->queue);  [in queue_prio()]
  485: if (engine->execlists.queue_priority_hint <= last_prio)  [in need_preempt()]
  525: return queue_prio(&engine->execlists) > last_prio;  [in need_preempt()]
  1137: GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));  [in __unwind_incomplete_requests()]
  1162: execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)  [argument of execlists_unwind_incomplete_requests()]
  1165: container_of(execlists, typeof(*engine), execlists);  [in execlists_unwind_incomplete_requests()]
  1367: ce->lrc.ccid |= engine->execlists.ccid;  [in __execlists_schedule_in()]
  1405: tasklet_hi_schedule(&ve->base.execlists  [in kick_siblings()]
  1526: write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)  [argument of write_desc()]
  1556: trace_ports(const struct intel_engine_execlists *execlists, const char *msg, struct i915_request * const *ports)  [argument of trace_ports()]
  1573: reset_in_progress(const struct intel_engine_execlists *execlists)  [argument of reset_in_progress()]
  1579: assert_pending_valid(const struct intel_engine_execlists *execlists, const char *msg)  [argument of assert_pending_valid()]
  1692: struct intel_engine_execlists *execlists = &engine->execlists;  [local in execlists_submit_ports()]
  1984: const struct intel_engine_execlists *execlists = &engine->execlists;  [local in active_timeslice()]
  2011: struct intel_engine_execlists *execlists = &engine->execlists;  [local in start_timeslice()]
  2032: record_preemption(struct intel_engine_execlists *execlists)  [argument of record_preemption()]
  2075: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_dequeue()]
  2449: cancel_port_requests(struct intel_engine_execlists * const execlists)  [argument of cancel_port_requests()]
  2547: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in process_csb()]
  3182: __execlists_kick(struct intel_engine_execlists *execlists)  [argument of __execlists_kick()]
  3212: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in __submit_queue_imm()]
  3223: struct intel_engine_execlists *execlists = &engine->execlists;  [local in submit_queue()]
  4018: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in reset_csb_pointers()]
  4185: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_reset_prepare()]
  4246: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in __execlists_reset()]
  4360: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_reset_cancel()]
  4442: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in execlists_reset_finish()]
  5163: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in intel_execlists_submission_setup()]
  5999: const struct intel_engine_execlists *execlists = &engine->execlists;  [local in intel_execlists_show_requests()]
  (further matches omitted)
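
The need_preempt() hits (lines 485 and 525) show the preemption decision being taken in two steps: the cached queue_priority_hint gives a cheap early-out, and only if that suggests something better is waiting does the code look at the priority of the first request actually sitting in the rbtree. A simplified sketch of that comparison; rq_prio() here stands for the running request's effective priority, and the real 5.10 function also accounts for virtual engines, holds and timeslicing, which are skipped:

static int queue_prio(const struct intel_engine_execlists *execlists)
{
        struct rb_node *rb;

        rb = rb_first_cached(&execlists->queue);
        if (!rb)
                return INT_MIN;

        /* The highest-priority priolist is the cached leftmost node. */
        return rb_entry(rb, struct i915_priolist, node)->priority;
}

static bool sketch_need_preempt(const struct intel_engine_cs *engine,
                                const struct i915_request *rq)
{
        int last_prio = rq_prio(rq);

        /* Cheap check first: the hint bounds what could be queued. */
        if (engine->execlists.queue_priority_hint <= last_prio)
                return false;

        /* Preempt only for something strictly more important. */
        return queue_prio(&engine->execlists) > last_prio;
}
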
intel_engine.h
  102: execlists_num_ports(const struct intel_engine_execlists * const execlists)  [argument of execlists_num_ports()]
  104: return execlists->port_mask + 1;  [in execlists_num_ports()]
  108: execlists_active(const struct intel_engine_execlists *execlists)  [argument of execlists_active()]
  112: cur = READ_ONCE(execlists->active);  [in execlists_active()]
  118: cur = READ_ONCE(execlists->active);  [in execlists_active()]
  127: execlists_active_lock_bh(struct intel_engine_execlists *execlists)  [argument of execlists_active_lock_bh()]
  130: tasklet_lock(&execlists->tasklet);  [in execlists_active_lock_bh()]
  134: execlists_active_unlock_bh(struct intel_engine_execlists *execlists)  [argument of execlists_active_unlock_bh()]
  136: tasklet_unlock(&execlists->tasklet);  [in execlists_active_unlock_bh()]
  141: execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
  (further matches omitted)
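
Beyond the helpers shared with the 6.6 header, the 5.10 version carries execlists_active_lock_bh()/unlock_bh() (lines 127-136), which serialize a caller against the submission tasklet: disabling bottom halves keeps the tasklet from running (or from deadlocking on the lock) on the local CPU, while the tasklet lock excludes it on every other CPU. Reconstructed from the snippets above; the inline comments are mine:

static inline void
execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
        local_bh_disable(); /* prevent local softirq and lock recursion */
        tasklet_lock(&execlists->tasklet);
}

static inline void
execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
        tasklet_unlock(&execlists->tasklet);
        local_bh_enable(); /* restore softirq, kicking the tasklet if pending */
}
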
intel_engine_cs.c
  46: * never be saved (power context is stored elsewhere, and execlists don't work
  587: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in intel_engine_init_execlists()]
  589: execlists->port_mask = 1;  [in intel_engine_init_execlists()]
  590: GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));  [in intel_engine_init_execlists()]
  591: GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);  [in intel_engine_init_execlists()]
  593: memset(execlists->pending, 0, sizeof(execlists->pending));  [in intel_engine_init_execlists()]
  594: execlists->active =  [in intel_engine_init_execlists()]
  595: memset(execlists  [in intel_engine_init_execlists()]
  1427: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in intel_engine_print_registers()]
  (further matches omitted)
intel_gt_irq.c
  39: WRITE_ONCE(engine->execlists.error_interrupt, eir);  [in cs_irq_handler()]
  45: WRITE_ONCE(engine->execlists.yield,  [in cs_irq_handler()]
  48: engine->execlists.yield);  [in cs_irq_handler()]
  49: if (del_timer(&engine->execlists.timer))  [in cs_irq_handler()]
  62: tasklet_hi_schedule(&engine->execlists.tasklet);  [in cs_irq_handler()]
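
These hits show the interrupt-side split of work: cs_irq_handler() only records state on the engine (a command streamer error EIR, or a semaphore-yield event that also cancels the running timeslice timer) and then kicks the execlists tasklet, leaving CSB processing to softirq context. A hedged sketch of that shape; the interrupt bit names and the two read_*() helpers are assumptions about surrounding code the snippets do not show:

static void sketch_cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
        bool tasklet = false;

        if (iir & GT_CS_MASTER_ERROR_INTERRUPT) {
                /* Latch the error source for the tasklet/reset path to report. */
                WRITE_ONCE(engine->execlists.error_interrupt, read_error_eir(engine));
                tasklet = true;
        }

        if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
                /* Remember which context volunteered to yield its timeslice... */
                WRITE_ONCE(engine->execlists.yield, read_yield_ccid(engine));
                /* ...and expire that timeslice right away if it was ticking. */
                if (del_timer(&engine->execlists.timer))
                        tasklet = true;
        }

        if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
                tasklet = true;

        if (tasklet)
                tasklet_hi_schedule(&engine->execlists.tasklet);
}
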
intel_engine_pm.c
  254: GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);  [in __engine_park()]
  259: engine->execlists.no_priolist = false;  [in __engine_park()]
sysfs_engines.c
  230: if (execlists_active(&engine->execlists))  [in timeslice_store()]
  231: set_timer_ms(&engine->execlists.timer, duration);  [in timeslice_store()]
  334: if (READ_ONCE(engine->execlists.pending[0]))  [in preempt_timeout_store()]
  335: set_timer_ms(&engine->execlists.preempt, timeout);  [in preempt_timeout_store()]
selftest_lrc.c
  82: if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))  [in wait_for_submit()]
  102: if (READ_ONCE(engine->execlists.pending[0]))  [in wait_for_reset()]
  632: tasklet_disable(&engine->execlists.tasklet);  [in live_hold_reset()]
  634: engine->execlists.tasklet.func(engine->execlists.tasklet.data);  [in live_hold_reset()]
  635: GEM_BUG_ON(execlists_active(&engine->execlists) != rq);  [in live_hold_reset()]
  644: tasklet_enable(&engine->execlists.tasklet);  [in live_hold_reset()]
  1205: del_timer(&engine->execlists.timer);  [in live_timeslice_rewind()]
  1206: tasklet_hi_schedule(&engine->execlists.tasklet);  [in live_timeslice_rewind()]
  1373: GEM_BUG_ON(execlists_active(&engine->execlists) !  [in live_timeslice_queue()]
  (further matches omitted)
selftest_reset.c
  323: tasklet_disable(&engine->execlists.tasklet);  [in igt_atomic_engine_reset()]
  342: tasklet_enable(&engine->execlists.tasklet);  [in igt_atomic_engine_reset()]
intel_engine_types.h
  206: * @ctrl_reg: the enhanced execlists control register, used to load the
  475: struct intel_engine_execlists execlists;  [struct member]
selftest_hangcheck.c
  1572: struct tasklet_struct * const t = &engine->execlists.tasklet;  [in __igt_atomic_reset_engine()]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/
i915_scheduler.c
  43: static void assert_priolists(struct intel_engine_execlists * const execlists)  [argument of assert_priolists()]
  51: GEM_BUG_ON(rb_first_cached(&execlists->queue) !=  [in assert_priolists()]
  52: rb_first(&execlists->queue.rb_root));  [in assert_priolists()]
  55: for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {  [in assert_priolists()]
  74: struct intel_engine_execlists * const execlists = &engine->execlists;  [local in i915_sched_lookup_priolist()]
  81: assert_priolists(execlists);  [in i915_sched_lookup_priolist()]
  86: if (unlikely(execlists->no_priolist))  [in i915_sched_lookup_priolist()]
  92: parent = &execlists->queue.rb_root.rb_node;  [in i915_sched_lookup_priolist()]
  107: p = &execlists  [in i915_sched_lookup_priolist()]
  (further matches omitted)
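
i915_sched_lookup_priolist() walks the engine's rbtree of priority levels and inserts a new i915_priolist node when the requested priority has no level yet; the no_priolist test at line 86 is the fallback for an allocation failure, after which everything is funnelled through one preallocated default list until the engine idles and __engine_park() clears the flag (see intel_engine_pm.c above). A much-simplified sketch of the lookup-or-insert, assuming a priolist with a single request list and a default_priolist embedded in the execlists struct; both are simplifications of the real 5.10 layout:

static struct list_head *
sketch_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct rb_node **parent, *rb;
        struct i915_priolist *p;
        bool first;

        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* Most positive priority sits leftmost; equal priorities run FIFO. */
        rb = NULL;
        first = true;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = rb_entry(rb, typeof(*p), node);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        return &p->requests; /* reuse the existing level */
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmalloc(sizeof(*p), GFP_ATOMIC);
                if (unlikely(!p)) {
                        /*
                         * Allocation failure: stop keeping separate levels and
                         * funnel everything through the preallocated default
                         * list until the engine idles again.
                         */
                        execlists->no_priolist = true;
                        prio = I915_PRIORITY_NORMAL;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        INIT_LIST_HEAD(&p->requests);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);

        return &p->requests;
}
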
i915_request.c
  356: return READ_ONCE(engine->execlists.active);  [in __engine_active()]
  385: * may either perform a context switch to the second inflight execlists,  [in __request_in_flight()]
  386: * or it may switch to the pending set of execlists. In the case of the  [in __request_in_flight()]
  395: * Note that the read of *execlists->active may race with the promotion  [in __request_in_flight()]
  396: * of execlists->pending[] to execlists->inflight[], overwritting  [in __request_in_flight()]
  397: * the value at *execlists->active. This is fine. The promotion implies  [in __request_in_flight()]
  531: * updating the payload, and execlists can even skip submitting  [in __i915_request_submit()]
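
The quoted comment (lines 385-397) is justifying a lockless scan of the execution ports: __engine_active() is nothing more than READ_ONCE() of execlists->active, and even if that pointer is promoted from the pending[] set to the inflight[] set mid-scan, every request reachable through it was genuinely submitted to the hardware at some point, which is all the caller needs. A heavily reduced sketch of the scan the comment defends; the real __request_in_flight() additionally copes with virtual engines, resets in progress and RCU protection of the request pointers:

static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
        return READ_ONCE(engine->execlists.active);
}

static bool sketch_request_in_flight(const struct i915_request *signal)
{
        struct i915_request * const *port, *rq;

        /*
         * Walk the ports currently believed to be executing; the array is
         * NULL-terminated, so stop at the first empty slot.
         */
        for (port = __engine_active(signal->engine); (rq = READ_ONCE(*port)); port++) {
                if (rq->context != signal->context)
                        continue;

                /* Our context is on HW; has it reached (or passed) signal yet? */
                return i915_seqno_passed(rq->fence.seqno, signal->fence.seqno);
        }

        return false;
}
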
i915_gpu_error.c
  1251: const struct intel_engine_execlists * const el = &ee->engine->execlists;  [in engine_record_execlists()]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
i915_live_selftests.h
  46: selftest(execlists, intel_execlists_live_selftests)
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/
i915_live_selftests.h
  47: selftest(execlists, intel_execlists_live_selftests)
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/
i915_request.c
  436: return READ_ONCE(engine->execlists.active);  [in __engine_active()]
  465: * may either perform a context switch to the second inflight execlists,  [in __request_in_flight()]
  466: * or it may switch to the pending set of execlists. In the case of the  [in __request_in_flight()]
  475: * Note that the read of *execlists->active may race with the promotion  [in __request_in_flight()]
  476: * of execlists->pending[] to execlists->inflight[], overwritting  [in __request_in_flight()]
  477: * the value at *execlists->active. This is fine. The promotion implies  [in __request_in_flight()]
  620: * updating the payload, and execlists can even skip submitting  [in __i915_request_submit()]