/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/hrtimer.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include "i915_vma_resource.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
struct i915_deps;
struct i915_request;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
	struct i915_vma_resource *vma_res;
	struct i915_capture_list *next;
};

void i915_request_free_capture_list(struct i915_capture_list *capture);
#else
#define i915_request_free_capture_list(_a) do {} while (0)
#endif

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
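
/*
 * Illustrative sketch (assumes "rq" is any valid struct i915_request
 * pointer; the message is made up): RQ_TRACE() prefixes an ENGINE_TRACE()
 * on rq->engine with the request's fence context:seqno and the breadcrumb
 * currently visible in the HWSP, e.g.
 *
 *	RQ_TRACE(rq, "semaphore wait complete\n");
 */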

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and start of the
	 * user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking the
	 * GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,

	/*
	 * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
	 * parent-child relationship (parallel submission, multi-lrc) should
	 * trigger a submission to the GuC rather than just moving the context
	 * tail.
	 */
	I915_FENCE_FLAG_SUBMIT_PARALLEL,

	/*
	 * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
	 * parent-child relationship (parallel submission, multi-lrc) that
	 * hit an error while generating requests in the execbuf IOCTL.
	 * Indicates this request should be skipped as another request in
	 * the submission / relationship encountered an error.
	 */
	I915_FENCE_FLAG_SKIP_PARALLEL,

	/*
	 * I915_FENCE_FLAG_COMPOSITE - Indicates the fence is part of a
	 * composite fence (dma_fence_array) that i915 generated for parallel
	 * submission.
	 */
	I915_FENCE_FLAG_COMPOSITE,
};

/*
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	struct drm_i915_private *i915;

	/*
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;
	/*
	 * complete submit fence from an IRQ if needed for locking hierarchy
	 * reasons.
	 */
	struct irq_work submit_work;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/* Position in the ring of the start of the request */
	u32 head;

	/* Position in the ring of the start of the user packets */
	u32 infix;

	/*
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/* Position in the ring of the end of the whole request */
	u32 tail;

	/* Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/* Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/* Batch buffer pointer for selftest internal use. */
	I915_SELFTEST_DECLARE(struct i915_vma *batch);

	struct i915_vma_resource *batch_res;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/*
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
#endif

	/* Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/* timeline->request entry for this request */
	struct list_head link;

	/* Watchdog support fields. */
	struct i915_request_watchdog {
		struct llist_node link;
		struct hrtimer timer;
	} watchdog;

	/*
	 * Requests may need to be stalled when using GuC submission waiting for
	 * certain GuC operations to complete. If that is the case, stalled
	 * requests are added to a per context list of stalled requests. The
	 * below list_head is the link in that list. Protected by
	 * ce->guc_state.lock.
	 */
	struct list_head guc_fence_link;

	/*
	 * Priority level while the request is in flight. Differs
	 * from i915 scheduler priority. See comment above
	 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
	 * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
	 * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
	 * if the priority has not been initialized yet or if no more updates
	 * are possible because the request has completed.
	 */
#define	GUC_PRIO_INIT	0xff
#define	GUC_PRIO_FINI	0xfe
	u8 guc_prio;

	/*
	 * wait queue entry used to wait on the HuC load to complete
	 */
	wait_queue_entry_t hucq;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

void __i915_request_skip(struct i915_request *rq);
bool i915_request_set_error_once(struct i915_request *rq, int error);
struct i915_request *i915_request_mark_eio(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);
void __i915_request_queue_bh(struct i915_request *rq);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
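
/*
 * Illustrative sketch ("fence" and "rq" are assumed locals, not part of
 * this header): only downcast a dma_fence after checking its ops, e.g.
 *
 *	if (dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 *
 * to_request(NULL) also yields NULL, as the fence is the first member of
 * struct i915_request.
 */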

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);
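
/*
 * Illustrative construction flow (a sketch; "ce", "fence" and the error
 * handling are assumptions, not taken from this header):
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, fence);
 *	if (err)
 *		i915_request_set_error_once(rq, err);
 *
 *	i915_request_add(rq);
 *
 * i915_request_create() operates on an intel_context (assumed already
 * usable by the caller in this sketch), the await calls order this request
 * after its dependencies, and i915_request_add() commits it to the timeline
 * and queues it for execution.
 */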

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

void i915_request_cancel(struct i915_request *rq, int error);

long i915_request_wait_timeout(struct i915_request *rq,
			       unsigned int flags,
			       long timeout)
	__attribute__((nonnull(1)));

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
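
/*
 * Illustrative wait sketch (the surrounding code is an assumption): hold a
 * reference on the request for the duration of the wait,
 *
 *	rq = i915_request_get(rq);
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				MAX_SCHEDULE_TIMEOUT);
 *	i915_request_put(rq);
 *
 * i915_request_wait() returns the remaining timeout (in jiffies) if the
 * request completed, or a negative error code otherwise.
 */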

void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent);

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/*
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
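
/*
 * Note (a sketch of the arithmetic): the unsigned subtraction keeps the
 * comparison safe across u32 wraparound, provided the two seqnos are within
 * 2^31 of each other, e.g.
 *
 *	i915_seqno_passed(1, 0xffffffff) == true  (1 follows ~0U)
 *	i915_seqno_passed(2, 3)          == false (2 precedes 3)
 */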

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page shows that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
					 test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->sched_engine->lock));
}

static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	/*
	 * Because of wraparound, we cannot simply take tl->hwsp_offset,
	 * but instead use the fact that the offset of the seqno within its
	 * page is the same for the vaddr as for hwsp_offset. Take the top
	 * bits from tl->hwsp_offset and combine them with the relative
	 * offset in rq->hwsp_seqno.
	 *
	 * As rq->hwsp_seqno is rewritten when signaled, this only works
	 * when the request isn't signaled yet, but at that point you
	 * no longer need the offset.
	 */

	return hwsp_phys_base + hwsp_relative_offset;
}
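
/*
 * Worked example with hypothetical numbers (an illustration, not taken from
 * the driver): if hwsp_offset were 0x12345040, page_mask_bits() yields a
 * base of 0x12345000; with the seqno located 0x40 bytes into its page,
 * offset_in_page() yields 0x40 and the returned value is 0x12345040.
 */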

bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active);

void i915_request_notify_execute_cb_imm(struct i915_request *rq);

enum i915_request_state {
	I915_REQUEST_UNKNOWN = 0,
	I915_REQUEST_COMPLETE,
	I915_REQUEST_PENDING,
	I915_REQUEST_QUEUED,
	I915_REQUEST_ACTIVE,
};

enum i915_request_state i915_test_request_state(struct i915_request *rq);

void i915_request_module_exit(void);
int i915_request_module_init(void);

#endif /* I915_REQUEST_H */