/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queue using either a FIFO (the default) or a
 * round-robin policy. The scheduler provides dependency handling features
 * among jobs. The driver is supposed to provide callback functions for
 * backend operations to the scheduler, like submitting a job to the hardware
 * run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed. A rough usage sketch follows this comment.
 *
 * Note that once a job has been taken from the entity's queue and pushed to
 * the hardware, i.e. the pending queue, the entity must not be referenced
 * anymore through the job's entity pointer.
 */
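
/*
 * A rough sketch of typical driver usage (assumed names, error handling
 * omitted); the exact flow varies per driver:
 *
 *	drm_sched_init(&sched, &ops, hw_submission, hang_limit, timeout,
 *		       NULL, NULL, "ring0", dev);
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      &sched_ptr, 1, NULL);
 *
 *	drm_sched_job_init(&job->base, &entity, owner);
 *	drm_sched_job_add_resv_dependencies(&job->base, bo->resv,
 *					    DMA_RESV_USAGE_WRITE);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */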

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

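/* Map the spsc_queue node embedded in a job back to its drm_sched_job. */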
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default entity scheduling policy in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

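/*
 * rb-tree ordering function for FIFO scheduling: entities are ordered by the
 * submit timestamp of their oldest waiting job.
 */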
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

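/* Remove @entity from the FIFO rb tree of its run queue; rq->lock must be held. */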
static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

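/**
 * drm_sched_rq_update_fifo - re-position an entity in the FIFO rb tree
 * @entity: entity to update
 * @ts: submit timestamp of the entity's oldest waiting job
 *
 * Removes @entity from its run queue's rb tree (if present) and re-inserts it
 * ordered by @ts, so that FIFO selection picks the entity with the oldest
 * waiting job first.
 */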
void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect against a concurrent
	 * drm_sched_entity_select_rq() changing entity->rq and the other to
	 * protect the rb tree while it is updated.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 * @result: the job's completion status
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

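/* Append the job to the scheduler's pending list and arm the timeout handler. */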
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

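/*
 * Timeout handler: take the oldest job off the pending list, hand it to the
 * driver's timedout_job callback and, unless the device is gone
 * (DRM_GPU_SCHED_STAT_ENODEV), restart the timeout timer.
 */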
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job(). It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed
		 * See drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the pending list.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier ones and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the timeout timer in flight as we rearm it in drm_sched_start().
	 * This prevents the pending timeout work from firing right after this
	 * TDR finishes and before the newly restarted jobs have had a chance
	 * to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}

EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out to not work very well. First of all, there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);

/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_can_queue -- Can we queue more to the hardware?
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup_if_can_queue - Wake up the scheduler
 * @sched: scheduler instance
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_can_queue(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_can_queue(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

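	/* The scheduler thread runs with low real-time (SCHED_FIFO) priority. */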
	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

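		/*
		 * Sleep until there is a finished job to clean up, a ready
		 * entity to run (unless asked to park) or the thread should
		 * stop.
		 */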
		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

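		/*
		 * Hand the job to the driver backend; the returned fence is
		 * the hardware fence that signals when the job completes.
		 */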
		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence, fence);

		if (!IS_ERR_OR_NULL(fence)) {
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					  r);
		} else {
			drm_sched_job_done(sched_job, IS_ERR(fence) ?
					   PTR_ERR(fence) : 0);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);