// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2018 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"

#define TIMELINE_SEQNO_BYTES 8

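/*
 * Allocate one page of internal memory for the HW status page (HWSP),
 * mark it LLC-coherent and wrap it in a GGTT vma. On failure the object
 * is released here; on success its reference travels with the vma.
 */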
static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

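/*
 * i915_active callbacks: while the timeline has requests in flight, keep
 * the HWSP pinned and hold a reference on the timeline; both are dropped
 * again once the last request is retired.
 */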
static void __timeline_retire(struct i915_active *active)
{
	struct intel_timeline *tl =
		container_of(active, typeof(*tl), active);

	i915_vma_unpin(tl->hwsp_ggtt);
	intel_timeline_put(tl);
}

static int __timeline_active(struct i915_active *active)
{
	struct intel_timeline *tl =
		container_of(active, typeof(*tl), active);

	__i915_vma_pin(tl->hwsp_ggtt);
	intel_timeline_get(tl);
	return 0;
}

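/*
 * Map the HWSP object and zero this timeline's seqno slot, flushing the
 * write out of the CPU cache for non-coherent platforms.
 */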
I915_SELFTEST_EXPORT int
intel_timeline_pin_map(struct intel_timeline *timeline)
{
	struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
	u32 ofs = offset_in_page(timeline->hwsp_offset);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	timeline->hwsp_map = vaddr;
	timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
	drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);

	return 0;
}

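/*
 * Initialise a timeline, either sharing the caller's HWSP vma at @offset
 * (e.g. an engine's status page) or allocating a private page, in which
 * case the timeline also carries an initial breadcrumb.
 */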
static int intel_timeline_init(struct intel_timeline *timeline,
			       struct intel_gt *gt,
			       struct i915_vma *hwsp,
			       unsigned int offset)
{
	kref_init(&timeline->kref);
	atomic_set(&timeline->pin_count, 0);

	timeline->gt = gt;

	if (hwsp) {
		timeline->hwsp_offset = offset;
		timeline->hwsp_ggtt = i915_vma_get(hwsp);
	} else {
		timeline->has_initial_breadcrumb = true;
		hwsp = hwsp_alloc(gt);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);
		timeline->hwsp_ggtt = hwsp;
	}

	timeline->hwsp_map = NULL;
	timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;

	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_FENCE(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);
	i915_active_init(&timeline->active, __timeline_active,
			 __timeline_retire, 0);

	return 0;
}

void intel_gt_init_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	spin_lock_init(&timelines->lock);
	INIT_LIST_HEAD(&timelines->active_list);
}

static void intel_timeline_fini(struct rcu_head *rcu)
{
	struct intel_timeline *timeline =
		container_of(rcu, struct intel_timeline, rcu);

	if (timeline->hwsp_map)
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
	i915_active_fini(&timeline->active);

	/*
	 * A small race exists between intel_gt_retire_requests_timeout and
	 * intel_timeline_exit which could result in the syncmap not getting
	 * free'd. Rather than work too hard to seal this race, simply clean
	 * up the syncmap on fini.
	 */
	i915_syncmap_free(&timeline->sync);

	kfree(timeline);
}

struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
			struct i915_vma *global_hwsp,
			unsigned int offset)
{
	struct intel_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = intel_timeline_init(timeline, gt, global_hwsp, offset);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	return timeline;
}

struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
				  unsigned int offset)
{
	struct i915_vma *hwsp = engine->status_page.vma;
	struct intel_timeline *tl;

	tl = __intel_timeline_create(engine->gt, hwsp, offset);
	if (IS_ERR(tl))
		return tl;

	/* Borrow a nearby lock; we only create these timelines during init */
	mutex_lock(&hwsp->vm->mutex);
	list_add_tail(&tl->engine_link, &engine->status_page.timelines);
	mutex_unlock(&hwsp->vm->mutex);

	return tl;
}

void __intel_timeline_pin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_inc(&tl->pin_count);
}

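/*
 * The first pin maps the HWSP and binds it into the GGTT; hwsp_offset is
 * then rebased onto the GGTT address. Concurrent first pins are resolved
 * via pin_count: the loser drops its extra vma pin and active reference.
 */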
int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
{
	int err;

	if (atomic_add_unless(&tl->pin_count, 1, 0))
		return 0;

	if (!tl->hwsp_map) {
		err = intel_timeline_pin_map(tl);
		if (err)
			return err;
	}

	err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
	if (err)
		return err;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	i915_active_acquire(&tl->active);
	if (atomic_fetch_inc(&tl->pin_count)) {
		i915_active_release(&tl->active);
		__i915_vma_unpin(tl->hwsp_ggtt);
	}

	return 0;
}

void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
	u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
	/* Must be pinned to be writable, and no requests in flight. */
	GEM_BUG_ON(!atomic_read(&tl->pin_count));

	memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
	WRITE_ONCE(*hwsp_seqno, tl->seqno);
	drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}

void intel_timeline_enter(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/*
	 * Pretend we are serialised by the timeline->mutex.
	 *
	 * While generally true, there are a few exceptions to the rule
	 * for the engine->kernel_context being used to manage power
	 * transitions. As the engine_park may be called from under any
	 * timeline, it uses the power mutex as a global serialisation
	 * lock to prevent any other request entering its timeline.
	 *
	 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
	 *
	 * However, intel_gt_retire_requests() does not know which engine
	 * it is retiring along and so cannot partake in the engine-pm
	 * barrier, and there we use the tl->active_count as a means to
	 * pin the timeline in the active_list while the locks are dropped.
	 * Ergo, as that is outside of the engine-pm barrier, we need to
	 * use atomic to manipulate tl->active_count.
	 */
	lockdep_assert_held(&tl->mutex);

	if (atomic_add_unless(&tl->active_count, 1, 0))
		return;

	spin_lock(&timelines->lock);
	if (!atomic_fetch_inc(&tl->active_count)) {
		/*
		 * The HWSP is volatile, and may have been lost while inactive,
		 * e.g. across suspend/resume. Be paranoid, and ensure that
		 * the HWSP value matches our seqno so we don't proclaim
		 * the next request as already complete.
		 */
		intel_timeline_reset_seqno(tl);
		list_add_tail(&tl->link, &timelines->active_list);
	}
	spin_unlock(&timelines->lock);
}

void intel_timeline_exit(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/* See intel_timeline_enter() */
	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->active_count));
	if (atomic_add_unless(&tl->active_count, -1, 1))
		return;

	spin_lock(&timelines->lock);
	if (atomic_dec_and_test(&tl->active_count))
		list_del(&tl->link);
	spin_unlock(&timelines->lock);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);
}

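/*
 * Each request consumes one seqno, plus one more when the timeline emits
 * an initial breadcrumb; the BUG_ON checks that parity is preserved.
 */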
static u32 timeline_advance(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

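/*
 * Slow path for seqno wraparound: move hwsp_offset/hwsp_seqno to the next
 * slot within the HWSP page and reset it before handing out the wrapped
 * seqno, so waiters on the old slot keep seeing its final value.
 */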
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
			   u32 *seqno)
{
	u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);

	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
		next_ofs = offset_in_page(next_ofs + BIT(5));

	tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
	tl->hwsp_seqno = tl->hwsp_map + next_ofs;
	intel_timeline_reset_seqno(tl);

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;
}

int intel_timeline_get_seqno(struct intel_timeline *tl,
			     struct i915_request *rq,
			     u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->has_initial_breadcrumb))
		return __intel_timeline_get_seqno(tl, seqno);

	return 0;
}

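/*
 * Report the GGTT address of @from's seqno slot so that @to can wait on it
 * with a HW semaphore; the timeline is kept alive for the duration of @to.
 * Returns 1 if @from has already completed and no wait is needed.
 */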
int intel_timeline_read_hwsp(struct i915_request *from,
			     struct i915_request *to,
			     u32 *hwsp)
{
	struct intel_timeline *tl;
	int err;

	rcu_read_lock();
	tl = rcu_dereference(from->timeline);
	if (i915_request_signaled(from) ||
	    !i915_active_acquire_if_busy(&tl->active))
		tl = NULL;

	if (tl) {
		/* hwsp_offset may wraparound, so use from->hwsp_seqno */
		*hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
			offset_in_page(from->hwsp_seqno);
	}

	/* ensure we wait on the right request, if not, we completed */
	if (tl && __i915_request_is_complete(from)) {
		i915_active_release(&tl->active);
		tl = NULL;
	}
	rcu_read_unlock();

	if (!tl)
		return 1;

	/* Can't do semaphore waits on kernel context */
	if (!tl->has_initial_breadcrumb) {
		err = -EINVAL;
		goto out;
	}

	err = i915_active_add_request(&tl->active, to);

out:
	i915_active_release(&tl->active);
	return err;
}

void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (!atomic_dec_and_test(&tl->pin_count))
		return;

	i915_active_release(&tl->active);
	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __intel_timeline_free(struct kref *kref)
{
	struct intel_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	GEM_BUG_ON(atomic_read(&timeline->pin_count));
	GEM_BUG_ON(!list_empty(&timeline->requests));
	GEM_BUG_ON(timeline->retire);

	call_rcu(&timeline->rcu, intel_timeline_fini);
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	GEM_BUG_ON(!list_empty(&timelines->active_list));
}

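/*
 * Debug dump of all active timelines. Each timeline is pinned on the
 * active_list (via active_count) and referenced so the spinlock can be
 * dropped while its requests are printed under tl->mutex.
 */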
void intel_gt_show_timelines(struct intel_gt *gt,
			     struct drm_printer *m,
			     void (*show_request)(struct drm_printer *m,
						  const struct i915_request *rq,
						  const char *prefix,
						  int indent))
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	LIST_HEAD(free);

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		unsigned long count, ready, inflight;
		struct i915_request *rq, *rn;
		struct dma_fence *fence;

		if (!mutex_trylock(&tl->mutex)) {
			drm_printf(m, "Timeline %llx: busy; skipping\n",
				   tl->fence_context);
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		count = 0;
		ready = 0;
		inflight = 0;
		list_for_each_entry_safe(rq, rn, &tl->requests, link) {
			if (i915_request_completed(rq))
				continue;

			count++;
			if (i915_request_is_ready(rq))
				ready++;
			if (i915_request_is_active(rq))
				inflight++;
		}

		drm_printf(m, "Timeline %llx: { ", tl->fence_context);
		drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
			   count, ready, inflight);
		drm_printf(m, ", seqno: { current: %d, last: %d }",
			   *tl->hwsp_seqno, tl->seqno);
		fence = i915_active_fence_get(&tl->last_request);
		if (fence) {
			drm_printf(m, ", engine: %s",
				   to_request(fence)->engine->name);
			dma_fence_put(fence);
		}
		drm_printf(m, " }\n");

		if (show_request) {
			list_for_each_entry_safe(rq, rn, &tl->requests, link)
				show_request(m, rq, "", 2);
		}

		mutex_unlock(&tl->mutex);
		spin_lock(&timelines->lock);

		/* Resume list iteration after reacquiring spinlock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif