/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct kmem_cache *slab_dependencies;
static struct kmem_cache *slab_priorities;

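/*
 * The global schedule_lock protects the dependency graph, i.e. the
 * signalers/waiters lists hanging off every i915_sched_node, across
 * all engines.
 */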
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static void assert_priolists(struct i915_sched_engine * const sched_engine)
{
	struct rb_node *rb;
	long last_prio;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&sched_engine->queue) !=
		   rb_first(&sched_engine->queue.rb_root));

	last_prio = INT_MAX;
	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority > last_prio);
		last_prio = p->priority;
	}
}

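/*
 * Find, or if necessary allocate, the list of requests for the given
 * priority level, keeping the queue sorted with the most positive
 * priority first. The caller must hold sched_engine->lock.
 */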
struct list_head *
i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
{
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;

	lockdep_assert_held(&sched_engine->lock);
	assert_priolists(sched_engine);

	if (unlikely(sched_engine->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &sched_engine->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			return &p->requests;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &sched_engine->default_priolist;
	} else {
		p = kmem_cache_alloc(slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/*
			 * To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			sched_engine->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);

	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &sched_engine->queue, first);

	return &p->requests;
}

void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(slab_priorities, p);
}

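/*
 * Cache the priolist lookup while walking a dependency chain so that
 * requeuing several nodes at the same priority does not repeat the
 * rbtree search; lock_sched_engine() clears the cache whenever we
 * swap one engine lock for another.
 */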
struct sched_cache {
	struct list_head *priolist;
};

static struct i915_sched_engine *
lock_sched_engine(struct i915_sched_node *node,
		  struct i915_sched_engine *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct i915_sched_engine *sched_engine;

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
		spin_unlock(&locked->lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&sched_engine->lock);
		locked = sched_engine;
	}

	GEM_BUG_ON(locked != sched_engine);
	return locked;
}

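/*
 * Bump @node to at least attr->priority and propagate that priority,
 * transitively, to every incomplete request that @node waits upon, so
 * that nothing we depend on continues to run at a lower priority than
 * ourselves. Caller must hold the global schedule_lock; the per-engine
 * locks are taken (and swapped) as we walk the chain.
 */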
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	const int prio = max(attr->priority, node->attr.priority);
	struct i915_sched_engine *sched_engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	sched_engine = node_to_request(node)->engine->sched_engine;
	spin_lock(&sched_engine->lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	sched_engine = lock_sched_engine(node, sched_engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_request *from = container_of(dep->signaler,
							 struct i915_request,
							 sched);
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		sched_engine = lock_sched_engine(node, sched_engine, &cache);
		lockdep_assert_held(&sched_engine->lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
			   sched_engine);

		/* Must be called before changing the node's priority */
		if (sched_engine->bump_inflight_request_prio)
			sched_engine->bump_inflight_request_prio(from, prio);

		WRITE_ONCE(node->attr.priority, prio);

		/*
		 * Once the request is ready, it will be placed into the
		 * priority lists and then onto the HW runlist. Before the
		 * request is ready, it does not contribute to our preemption
		 * decisions and we can safely ignore it, as it, and any
		 * preemption required, will be dealt with upon submission.
		 * See engine->submit_request()
		 */
		if (list_empty(&node->link))
			continue;

		if (i915_request_in_priority_queue(node_to_request(node))) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(sched_engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		}

		/* Defer (tasklet) submission until after all of our updates. */
		if (sched_engine->kick_backend)
			sched_engine->kick_backend(node_to_request(node), prio);
	}

	spin_unlock(&sched_engine->lock);
}

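/*
 * i915_schedule() applies @attr to @rq (and, transitively, to the
 * requests @rq depends upon) under the global schedule_lock. A minimal
 * usage sketch, with an illustrative priority value only:
 *
 *	struct i915_sched_attr attr = { .priority = I915_PRIORITY_NORMAL };
 *
 *	i915_schedule(rq, &attr);
 */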
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}

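/*
 * i915_sched_node_init() prepares a freshly allocated node for the
 * scheduler; i915_sched_node_reinit() resets a node being recycled,
 * asserting that its dependency lists have already been emptied.
 */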
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}

void i915_sched_node_reinit(struct i915_sched_node *node)
{
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(slab_dependencies, dep);
}

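/*
 * Record that @node must wait upon @signal, using the caller-provided
 * @dep as the link. Returns true if the dependency was tracked, or
 * false if @signal has already completed and no link was required.
 */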
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

		/* All set, now publish. Beware the lockless walkers. */
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

		/* Propagate the chains */
		node->flags |= signal->flags;
		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

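/*
 * As above, but allocates the i915_dependency on the caller's behalf.
 * Note that 0 is returned even if @signal has already completed and
 * the link was dropped; only allocation failure is reported as an
 * error.
 */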
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      flags | I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

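/*
 * Detach @node from the dependency graph, in both directions, freeing
 * any links that were allocated on its behalf.
 */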
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}

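/*
 * Debug pretty-printer: show @rq and, indented beneath it, each of its
 * still-incomplete signalers from other timelines.
 */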
void i915_request_show_with_schedule(struct drm_printer *m,
				     const struct i915_request *rq,
				     const char *prefix,
				     int indent)
{
	struct i915_dependency *dep;

	i915_request_show(m, rq, prefix, indent);
	if (i915_request_completed(rq))
		return;

	rcu_read_lock();
	for_each_signaler(dep, rq) {
		const struct i915_request *signaler =
			node_to_request(dep->signaler);

		/* Dependencies along the same timeline are expected. */
		if (signaler->timeline == rq->timeline)
			continue;

		if (__i915_request_is_complete(signaler))
			continue;

		i915_request_show(m, signaler, prefix, indent + 2);
	}
	rcu_read_unlock();
}

static void default_destroy(struct kref *kref)
{
	struct i915_sched_engine *sched_engine =
		container_of(kref, typeof(*sched_engine), ref);

	tasklet_kill(&sched_engine->tasklet); /* flush the callback */
	kfree(sched_engine);
}

static bool default_disabled(struct i915_sched_engine *sched_engine)
{
	return false;
}

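/*
 * Allocate and initialise a new sched_engine, with the default
 * destroy/disabled callbacks and an empty priority queue. @subclass
 * distinguishes nested sched_engine locks for lockdep.
 */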
struct i915_sched_engine *
i915_sched_engine_create(unsigned int subclass)
{
	struct i915_sched_engine *sched_engine;

	sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL);
	if (!sched_engine)
		return NULL;

	kref_init(&sched_engine->ref);

	sched_engine->queue = RB_ROOT_CACHED;
	sched_engine->queue_priority_hint = INT_MIN;
	sched_engine->destroy = default_destroy;
	sched_engine->disabled = default_disabled;

	INIT_LIST_HEAD(&sched_engine->requests);
	INIT_LIST_HEAD(&sched_engine->hold);

	spin_lock_init(&sched_engine->lock);
	lockdep_set_subclass(&sched_engine->lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&sched_engine->lock.dep_map);
	lock_map_release(&sched_engine->lock.dep_map);
	local_irq_enable();
#endif

	return sched_engine;
}

void i915_scheduler_module_exit(void)
{
	kmem_cache_destroy(slab_dependencies);
	kmem_cache_destroy(slab_priorities);
}

int __init i915_scheduler_module_init(void)
{
	slab_dependencies = KMEM_CACHE(i915_dependency,
				       SLAB_HWCACHE_ALIGN |
				       SLAB_TYPESAFE_BY_RCU);
	if (!slab_dependencies)
		return -ENOMEM;

	slab_priorities = KMEM_CACHE(i915_priolist, 0);
	if (!slab_priorities)
		goto err_priorities;

	return 0;

err_priorities:
	/* slab_priorities failed to allocate; unwind slab_dependencies */
	kmem_cache_destroy(slab_dependencies);
	return -ENOMEM;
}