Lines matching 'sched' (each entry is prefixed with its line number in the source file):
49 #include <linux/sched.h>
51 #include <uapi/linux/sched/types.h>
72 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
78 rq->sched = sched;
95 atomic_inc(&rq->sched->score);
114 atomic_dec(&rq->sched->score);
176 struct drm_gpu_scheduler *sched = entity->rq->sched;
184 if (s_fence && s_fence->sched == sched)
194 * @sched: scheduler instance to start the worker for
198 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
200 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
201 !list_empty(&sched->ring_mirror_list))
202 schedule_delayed_work(&sched->work_tdr, sched->timeout);
208 * @sched: scheduler where the timeout handling should be started.
212 void drm_sched_fault(struct drm_gpu_scheduler *sched)
214 mod_delayed_work(system_wq, &sched->work_tdr, 0);
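drm_sched_fault() kicks the timeout worker immediately instead of waiting for the full timeout to expire, which is why drivers call it from their error or fault interrupt path. A minimal sketch of that usage, assuming a hypothetical my_gpu driver structure that embeds the scheduler (none of the my_gpu_* names below come from this file):

    #include <drm/gpu_scheduler.h>
    #include <linux/interrupt.h>

    /* Hypothetical driver state used in these sketches. */
    struct my_gpu {
            struct drm_gpu_scheduler sched;
            /* ... MMIO, clocks, fence context, etc. ... */
    };

    static irqreturn_t my_gpu_fault_irq(int irq, void *data)
    {
            struct my_gpu *gpu = data;

            /* Fire the TDR right away: this mod_delayed_work()s
             * sched->work_tdr with a zero delay. */
            drm_sched_fault(&gpu->sched);

            return IRQ_HANDLED;
    }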
221 * @sched: scheduler instance for which to suspend the timeout
230 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
234 sched_timeout = sched->work_tdr.timer.expires;
240 if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
244 return sched->timeout;
251 * @sched: scheduler instance for which to resume the timeout
256 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
259 spin_lock(&sched->job_list_lock);
261 if (list_empty(&sched->ring_mirror_list))
262 cancel_delayed_work(&sched->work_tdr);
264 mod_delayed_work(system_wq, &sched->work_tdr, remaining);
266 spin_unlock(&sched->job_list_lock);
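drm_sched_suspend_timeout() hands back the time left on the timeout timer so the caller can pass it to drm_sched_resume_timeout() once it is safe to re-arm the TDR, for example while resetting hardware shared with another ring. A hedged sketch of that pairing; my_gpu_hw_reset() is a placeholder for driver-specific work:

    static void my_gpu_quiesce_and_reset(struct my_gpu *gpu)
    {
            unsigned long remaining;

            /* Park the TDR timer and remember how much time was left on it. */
            remaining = drm_sched_suspend_timeout(&gpu->sched);

            my_gpu_hw_reset(gpu);           /* hypothetical hardware reset */

            /* Re-arm the TDR with whatever time remained. */
            drm_sched_resume_timeout(&gpu->sched, remaining);
    }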
272 struct drm_gpu_scheduler *sched = s_job->sched;
274 spin_lock(&sched->job_list_lock);
275 list_add_tail(&s_job->node, &sched->ring_mirror_list);
276 drm_sched_start_timeout(sched);
277 spin_unlock(&sched->job_list_lock);
282 struct drm_gpu_scheduler *sched;
285 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
288 spin_lock(&sched->job_list_lock);
289 job = list_first_entry_or_null(&sched->ring_mirror_list,
295 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
299 spin_unlock(&sched->job_list_lock);
301 job->sched->ops->timedout_job(job);
307 if (sched->free_guilty) {
308 job->sched->ops->free_job(job);
309 sched->free_guilty = false;
312 spin_unlock(&sched->job_list_lock);
315 spin_lock(&sched->job_list_lock);
316 drm_sched_start_timeout(sched);
317 spin_unlock(&sched->job_list_lock);
326 * limit of the scheduler then the respective sched entity is marked guilty and
334 struct drm_gpu_scheduler *sched = bad->sched;
344 struct drm_sched_rq *rq = &sched->sched_rq[i];
351 bad->sched->hang_limit)
368 * @sched: scheduler instance
377 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
381 kthread_park(sched->thread);
390 if (bad && bad->sched == sched)
395 list_add(&bad->node, &sched->ring_mirror_list);
401 * This iteration is thread safe as sched thread is stopped.
403 list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
407 atomic_dec(&sched->hw_rq_count);
413 spin_lock(&sched->job_list_lock);
415 spin_unlock(&sched->job_list_lock);
431 sched->ops->free_job(s_job);
433 sched->free_guilty = true;
443 cancel_delayed_work(&sched->work_tdr);
451 * @sched: scheduler instance
452 * @full_recovery: proceed with complete sched restart
455 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
461 * Locking the list is not required here as the sched thread is parked
465 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
468 atomic_inc(&sched->hw_rq_count);
486 spin_lock(&sched->job_list_lock);
487 drm_sched_start_timeout(sched);
488 spin_unlock(&sched->job_list_lock);
491 kthread_unpark(sched->thread);
498 * @sched: scheduler instance
501 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
508 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
511 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
520 fence = sched->ops->run_job(s_job);
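drm_sched_stop(), drm_sched_increase_karma(), drm_sched_resubmit_jobs() and drm_sched_start() together form the recovery sequence most drivers run from their timedout_job callback, which is what drm_sched_job_timedout() above invokes. A sketch of that pattern under the same hypothetical my_gpu assumptions as earlier:

    static void my_gpu_timedout_job(struct drm_sched_job *sched_job)
    {
            struct my_gpu *gpu = container_of(sched_job->sched,
                                              struct my_gpu, sched);

            /* Park the scheduler thread and detach the mirror-list jobs
             * from their hardware fences. */
            drm_sched_stop(&gpu->sched, sched_job);

            /* Record one more hang against the offending job and, past
             * hang_limit, mark its entity guilty. */
            drm_sched_increase_karma(sched_job);

            my_gpu_hw_reset(gpu);           /* hypothetical hardware reset */

            /* Re-queue the unfinished jobs and unpark the thread. */
            drm_sched_resubmit_jobs(&gpu->sched);
            drm_sched_start(&gpu->sched, true);
    }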
552 struct drm_gpu_scheduler *sched;
558 sched = entity->rq->sched;
560 job->sched = sched;
562 job->s_priority = entity->rq - sched->sched_rq;
566 job->id = atomic64_inc_return(&sched->job_id_count);
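drm_sched_job_init() binds a job to an entity and assigns its id and priority from the entity's run queue; pushing the job to the entity afterwards makes it visible to the scheduler thread. A minimal submission sketch; struct my_job and my_gpu_submit() are illustrative names, not part of this file:

    /* Hypothetical job wrapper; base must be the drm_sched_job. */
    struct my_job {
            struct drm_sched_job base;
            /* ... command stream, BO list, ... */
    };

    static int my_gpu_submit(struct my_job *job, struct drm_sched_entity *entity,
                             void *owner)
    {
            int ret;

            ret = drm_sched_job_init(&job->base, entity, owner);
            if (ret)
                    return ret;

            /* The scheduler thread will pick the job up, call
             * ops->run_job() and track the returned hardware fence. */
            drm_sched_entity_push_job(&job->base, entity);

            return 0;
    }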
589 * @sched: scheduler instance
593 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
595 return atomic_read(&sched->hw_rq_count) <
596 sched->hw_submission_limit;
602 * @sched: scheduler instance
605 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
607 if (drm_sched_ready(sched))
608 wake_up_interruptible(&sched->wake_up_worker);
614 * @sched: scheduler instance
619 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
624 if (!drm_sched_ready(sched))
629 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
649 struct drm_gpu_scheduler *sched = s_fence->sched;
651 atomic_dec(&sched->hw_rq_count);
652 atomic_dec(&sched->score);
659 wake_up_interruptible(&sched->wake_up_worker);
665 * @sched: scheduler instance
671 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
679 if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
680 !cancel_delayed_work(&sched->work_tdr)) ||
684 spin_lock(&sched->job_list_lock);
686 job = list_first_entry_or_null(&sched->ring_mirror_list,
695 drm_sched_start_timeout(sched);
698 spin_unlock(&sched->job_list_lock);
704 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
708 * Returns pointer of the sched with the least load or NULL if none of the
715 struct drm_gpu_scheduler *sched, *picked_sched = NULL;
720 sched = sched_list[i];
722 if (!sched->ready) {
724 sched->name);
728 num_score = atomic_read(&sched->score);
731 picked_sched = sched;
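drm_sched_pick_best() is the helper the entity code uses to land on the least loaded scheduler in its sched_list. A hedged example of creating such an entity with drm_sched_entity_init(); the single-scheduler case is shown, and the sched_list-based signature assumed here is the one this pick_best() helper implies:

    static int my_ctx_entity_init(struct my_gpu *gpu,
                                  struct drm_sched_entity *entity)
    {
            /* Only one scheduler here; with several in the list,
             * drm_sched_pick_best() selects the least loaded one. */
            struct drm_gpu_scheduler *sched_list[] = { &gpu->sched };

            return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                         sched_list, ARRAY_SIZE(sched_list),
                                         NULL /* no shared guilty counter */);
    }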
742 * @sched: scheduler instance
746 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
765 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
777 wait_event_interruptible(sched->wake_up_worker,
778 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
779 (!drm_sched_blocked(sched) &&
780 (entity = drm_sched_select_entity(sched))) ||
784 sched->ops->free_job(cleanup_job);
786 drm_sched_start_timeout(sched);
801 atomic_inc(&sched->hw_rq_count);
805 fence = sched->ops->run_job(sched_job);
825 wake_up(&sched->job_scheduled);
833 * @sched: scheduler instance
842 int drm_sched_init(struct drm_gpu_scheduler *sched,
850 sched->ops = ops;
851 sched->hw_submission_limit = hw_submission;
852 sched->name = name;
853 sched->timeout = timeout;
854 sched->hang_limit = hang_limit;
856 drm_sched_rq_init(sched, &sched->sched_rq[i]);
858 init_waitqueue_head(&sched->wake_up_worker);
859 init_waitqueue_head(&sched->job_scheduled);
860 INIT_LIST_HEAD(&sched->ring_mirror_list);
861 spin_lock_init(&sched->job_list_lock);
862 atomic_set(&sched->hw_rq_count, 0);
863 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
864 atomic_set(&sched->score, 0);
865 atomic64_set(&sched->job_id_count, 0);
868 sched->thread = kthread_run(drm_sched_main, sched, sched->name);
869 if (IS_ERR(sched->thread)) {
870 ret = PTR_ERR(sched->thread);
871 sched->thread = NULL;
876 sched->ready = true;
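drm_sched_init() needs only the backend ops, a hardware queue depth, a hang limit, a timeout and a name; the run queues, wait queues, TDR work and kthread are set up internally as the lines above show. A probe-time sketch with hypothetical callbacks whose bodies are elided:

    static const struct drm_sched_backend_ops my_gpu_sched_ops = {
            .dependency   = my_gpu_job_dependency,  /* optional job dependencies */
            .run_job      = my_gpu_run_job,         /* push to HW, return HW fence */
            .timedout_job = my_gpu_timedout_job,    /* see the recovery sketch above */
            .free_job     = my_gpu_free_job,
    };

    static int my_gpu_sched_init(struct my_gpu *gpu)
    {
            /* 16 jobs in flight, hang_limit of 1, 500 ms timeout. */
            return drm_sched_init(&gpu->sched, &my_gpu_sched_ops,
                                  16, 1, msecs_to_jiffies(500), "my_gpu");
    }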
884 * @sched: scheduler instance
888 void drm_sched_fini(struct drm_gpu_scheduler *sched)
893 if (sched->thread)
894 kthread_stop(sched->thread);
897 struct drm_sched_rq *rq = &sched->sched_rq[i];
915 wake_up_all(&sched->job_scheduled);
918 cancel_delayed_work_sync(&sched->work_tdr);
920 sched->ready = false;
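For teardown, per-context entities are normally destroyed first, and drm_sched_fini() then stops the scheduler thread and cancels the pending TDR work. A hedged remove-path sketch, reusing the hypothetical my_gpu and adding an equally hypothetical my_ctx:

    /* Hypothetical per-context state embedding an entity. */
    struct my_ctx {
            struct drm_sched_entity entity;
    };

    static void my_ctx_fini(struct my_ctx *ctx)
    {
            /* Flush outstanding jobs and remove the entity from its rq. */
            drm_sched_entity_destroy(&ctx->entity);
    }

    static void my_gpu_sched_fini(struct my_gpu *gpu)
    {
            /* Stops the kthread and cancels the pending TDR work. */
            drm_sched_fini(&gpu->sched);
    }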