/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/lockdep.h>

#include <uapi/drm/i915_drm.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
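
/*
 * Example usage (illustrative): call sites pass a printf-style message,
 * which is emitted alongside the request's fence id and the breadcrumb
 * most recently written back by the HW, e.g.
 *
 *	RQ_TRACE(rq, "submitted\n");
 */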

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and start of the
	 * user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be initialised explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * memory pressure there are sufficient RCU ticks for us to reclaim
	 * our RCU-protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed, and is guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
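
/*
 * A minimal sketch (illustrative only, not part of the driver) of the
 * lockless RCU lookup described in the struct documentation above: since
 * the slab may recycle a request while we peek at it, any pointer read
 * under the RCU read lock must be revalidated by conditionally acquiring
 * a reference before use. The @slot parameter is a hypothetical location
 * holding a request pointer.
 */
static inline struct i915_request *
example_lookup_request_rcu(struct i915_request **slot)
{
	struct i915_request *rq;

	rcu_read_lock();
	rq = READ_ONCE(*slot);
	if (rq && !dma_fence_get_rcu(&rq->fence))
		rq = NULL; /* raced against free/reuse */
	if (rq && READ_ONCE(*slot) != rq) {
		/* The slot was reassigned while we looked; drop our ref. */
		dma_fence_put(&rq->fence);
		rq = NULL;
	}
	rcu_read_unlock();

	return rq;
}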

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);
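
/*
 * Illustrative sketch (not part of the driver): constructing a request
 * that must not execute until an external dma-fence has signaled. This
 * mirrors the common create/await/add pattern; error handling is
 * deliberately simplified, marking a failed await on the request before
 * committing it.
 */
static inline int
example_emit_after_fence(struct intel_context *ce, struct dma_fence *in)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Queue rq behind the external fence before committing it. */
	err = i915_request_await_dma_fence(rq, in);
	if (err)
		i915_request_set_error_once(rq, err);

	/*
	 * i915_request_add() commits the request and drops our construction
	 * hold; rq must not be used afterwards without a reference.
	 */
	i915_request_add(rq);

	return err;
}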

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
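
/*
 * Illustrative sketch (not part of the driver): waiting for a request to
 * complete while holding our own reference, so the request cannot be
 * retired and freed beneath us. i915_request_wait() follows dma-fence
 * semantics, returning the remaining jiffies on success or a negative
 * error code; MAX_SCHEDULE_TIMEOUT is assumed available via the existing
 * includes.
 */
static inline int example_wait_for_request(struct i915_request *rq)
{
	long ret;

	rq = i915_request_get(rq);
	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return ret < 0 ? ret : 0;
}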

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than or equal to seq2, using wrapping
 * u32 arithmetic so that the comparison remains correct across seqno
 * wraparound.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
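
/*
 * For example, with seq1 = 2 just after wraparound and seq2 = 0xfffffffe,
 * the u32 difference seq1 - seq2 is 4, and (s32)4 >= 0, so seq1 is
 * correctly judged to have passed seq2. A plain seq1 >= seq2 comparison
 * would get this case wrong.
 */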

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page holds that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}
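
/*
 * For example, a request has been executed by the HW once
 * i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno) is true; this is
 * exactly the check performed by __i915_request_is_complete() below.
 */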

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started, as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}
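
/*
 * Illustrative sketch (not part of the driver): a cheap non-blocking
 * completion check before committing to a sleep, the same check-then-wait
 * pattern used by object waits. @timeout follows i915_request_wait()
 * semantics (jiffies remaining, or a negative error code).
 */
static inline long example_wait_if_busy(struct i915_request *rq, long timeout)
{
	/* Avoid the overhead of setting up a wait if already complete. */
	if (i915_request_completed(rq))
		return timeout;

	return i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, timeout);
}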

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */