/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	kmem_cache_free(global.slab_ce, ce);
}

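/*
 * The context may still be referenced under rcu_read_lock(), so defer
 * the actual kmem_cache_free() until after an RCU grace period.
 */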
void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}

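/*
 * Allocate and initialise a fresh intel_context for @engine. Returns
 * ERR_PTR(-ENOMEM) if the slab allocation fails.
 */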
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

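/*
 * Allocate the backing state (e.g. the logical ring context image) for
 * @ce exactly once, serialised by ce->pin_mutex. Banned contexts are
 * refused with -EIO.
 */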
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

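/*
 * Mark @ce as active and, unless the context is itself a barrier,
 * preallocate the barrier nodes that will be consumed when it is
 * later retired.
 */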
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}

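/*
 * Pin the ring, the timeline and (if present) the context state ahead
 * of first use, unwinding in reverse order on failure.
 */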
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

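/* Release everything taken in intel_context_pre_pin(), in reverse order. */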
static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}

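/*
 * Pin @ce for use on the GPU under an already initialised ww acquire
 * context. The backing state is allocated on demand, the backing
 * objects (hwsp, ring and, if present, state) are locked under @ww,
 * and the first pin takes the active reference before making the pin
 * visible; later pins simply bump ce->pin_count. Returns -EDEADLK if
 * the caller must back off and retry the ww transaction.
 */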
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}

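/*
 * Convenience wrapper around __intel_context_do_pin_ww() that supplies
 * its own ww acquire context and handles the -EDEADLK backoff/retry
 * loop.
 */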
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

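/*
 * Drop a pin on @ce; the final unpin releases the backend mapping and
 * the active reference, after which the context may be retired
 * asynchronously.
 */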
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we
	 * finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

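/*
 * Active-tracker retire callback, run once the last request using this
 * context has been retired: report the accumulated runtime, flag the
 * saved context state as valid and release the resources taken for
 * pinning.
 */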
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}

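/*
 * Active-tracker activation callback: take an extra reference on the
 * context and grab nested pins on the ring, timeline and state that
 * were already pinned by intel_context_pre_pin().
 */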
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}

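/*
 * One-time setup of an intel_context: bind it to @engine, inherit the
 * engine's context operations, sseu and GT address space, and prepare
 * the locks and active tracker used for pinning.
 */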
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

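/*
 * Engine-backend enter/exit hooks: hold an engine pm wakeref and enter
 * the context's timeline while the context is actively used, releasing
 * both again in intel_context_exit_engine().
 */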
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

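/*
 * Prepare @rq, built on another context, to modify this context @ce:
 * order it after @ce's current activity (unless the two share a
 * timeline) and keep @ce's image and timeline pinned until @rq is
 * retired.
 */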
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by tracking it in the ce
	 * activity tracker.
	 *
	 * We only need to take one pin on their account; in other words,
	 * the pinned ce is transferred to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

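/*
 * Helper to create a request on @ce, taking care of pinning the
 * context with ww locking and -EDEADLK backoff, and of the timeline
 * lockdep bookkeeping needed by the selftests.
 */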
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as the outer
	 * lock here. Hack around this to shut up lockdep in the selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif