Lines Matching refs:sched
53 #include <linux/sched.h>
56 #include <uapi/linux/sched/types.h>
122 * @sched: scheduler instance to associate with this run queue
127 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
134 rq->sched = sched;
153 atomic_inc(rq->sched->score);
175 atomic_dec(rq->sched->score);
268 struct drm_gpu_scheduler *sched = s_fence->sched;
270 atomic_dec(&sched->hw_rq_count);
271 atomic_dec(sched->score);
278 wake_up_interruptible(&sched->wake_up_worker);
296 * @sched: scheduler instance to start the worker for
300 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
302 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
303 !list_empty(&sched->pending_list))
304 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
310 * @sched: scheduler where the timeout handling should be started.
314 void drm_sched_fault(struct drm_gpu_scheduler *sched)
316 if (sched->timeout_wq)
317 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
324 * @sched: scheduler instance for which to suspend the timeout
333 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
337 sched_timeout = sched->work_tdr.timer.expires;
343 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
347 return sched->timeout;
354 * @sched: scheduler instance for which to resume the timeout
359 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
362 spin_lock(&sched->job_list_lock);
364 if (list_empty(&sched->pending_list))
365 cancel_delayed_work(&sched->work_tdr);
367 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
369 spin_unlock(&sched->job_list_lock);
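
For context, drm_sched_fault() and the suspend/resume helpers above are the driver-facing entry points into the timeout work (work_tdr). A minimal hedged sketch of a driver kicking the TDR from its fault interrupt; struct my_device and my_fault_irq() are illustrative and not part of the listing:

#include <linux/interrupt.h>
#include <drm/gpu_scheduler.h>

struct my_device {
        struct drm_gpu_scheduler sched;         /* illustrative embedding */
};

static irqreturn_t my_fault_irq(int irq, void *data)
{
        struct my_device *mdev = data;

        /* Run the timeout handler right away instead of waiting it out. */
        drm_sched_fault(&mdev->sched);
        return IRQ_HANDLED;
}
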
375 struct drm_gpu_scheduler *sched = s_job->sched;
377 spin_lock(&sched->job_list_lock);
378 list_add_tail(&s_job->list, &sched->pending_list);
379 drm_sched_start_timeout(sched);
380 spin_unlock(&sched->job_list_lock);
385 struct drm_gpu_scheduler *sched;
389 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
392 spin_lock(&sched->job_list_lock);
393 job = list_first_entry_or_null(&sched->pending_list,
399 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
403 spin_unlock(&sched->job_list_lock);
405 status = job->sched->ops->timedout_job(job);
411 if (sched->free_guilty) {
412 job->sched->ops->free_job(job);
413 sched->free_guilty = false;
416 spin_unlock(&sched->job_list_lock);
420 spin_lock(&sched->job_list_lock);
421 drm_sched_start_timeout(sched);
422 spin_unlock(&sched->job_list_lock);
429 * @sched: scheduler instance
438 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
442 kthread_park(sched->thread);
451 if (bad && bad->sched == sched)
456 list_add(&bad->list, &sched->pending_list);
462 * This iteration is thread safe as sched thread is stopped.
464 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
471 atomic_dec(&sched->hw_rq_count);
477 spin_lock(&sched->job_list_lock);
479 spin_unlock(&sched->job_list_lock);
495 sched->ops->free_job(s_job);
497 sched->free_guilty = true;
507 cancel_delayed_work(&sched->work_tdr);
515 * @sched: scheduler instance
516 * @full_recovery: proceed with complete sched restart
519 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
525 * Locking the list is not required here as the sched thread is parked
529 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
532 atomic_inc(&sched->hw_rq_count);
543 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
550 spin_lock(&sched->job_list_lock);
551 drm_sched_start_timeout(sched);
552 spin_unlock(&sched->job_list_lock);
555 kthread_unpark(sched->thread);
562 * @sched: scheduler instance
576 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
583 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
586 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
594 fence = sched->ops->run_job(s_job);
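
The stop/resubmit/start trio above is normally driven from a driver's timedout_job callback. A hedged sketch of that recovery pattern, assuming a driver-specific my_gpu_reset() stub:

#include <drm/gpu_scheduler.h>

static void my_gpu_reset(struct drm_gpu_scheduler *sched)
{
        /* driver-specific hardware reset; stub for illustration only */
}

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
{
        struct drm_gpu_scheduler *sched = bad->sched;

        drm_sched_stop(sched, bad);       /* park the thread, drop fence callbacks */
        my_gpu_reset(sched);              /* bring the hardware back */
        drm_sched_resubmit_jobs(sched);   /* replay jobs left on pending_list */
        drm_sched_start(sched, true);     /* re-arm callbacks, unpark the thread */

        return DRM_GPU_SCHED_STAT_NOMINAL;
}
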
666 struct drm_gpu_scheduler *sched;
671 sched = entity->rq->sched;
673 job->sched = sched;
674 job->s_priority = entity->rq - sched->sched_rq;
675 job->id = atomic64_inc_return(&sched->job_id_count);
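
These lines run when a job is armed against its entity's scheduler. A hedged sketch of the surrounding submission flow as a driver typically drives it; struct my_job and my_submit() are illustrative:

#include <drm/gpu_scheduler.h>

struct my_job {
        struct drm_sched_job base;      /* scheduler job embedded first */
        /* driver payload ... */
};

static int my_submit(struct my_job *job, struct drm_sched_entity *entity)
{
        int ret;

        ret = drm_sched_job_init(&job->base, entity, NULL);
        if (ret)
                return ret;

        /* arm and push must not race for the same entity */
        drm_sched_job_arm(&job->base);          /* binds job->sched, assigns job->id */
        drm_sched_entity_push_job(&job->base);  /* queue for the scheduler thread */
        return 0;
}
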
852 * @sched: scheduler instance
856 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
858 return atomic_read(&sched->hw_rq_count) <
859 sched->hw_submission_limit;
864 * @sched: scheduler instance
868 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
870 if (drm_sched_can_queue(sched))
871 wake_up_interruptible(&sched->wake_up_worker);
877 * @sched: scheduler instance
882 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
887 if (!drm_sched_can_queue(sched))
893 drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
894 drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
905 * @sched: scheduler instance
911 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
915 spin_lock(&sched->job_list_lock);
917 job = list_first_entry_or_null(&sched->pending_list,
925 cancel_delayed_work(&sched->work_tdr);
927 next = list_first_entry_or_null(&sched->pending_list,
934 drm_sched_start_timeout(sched);
940 spin_unlock(&sched->job_list_lock);
946 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
950 * Returns a pointer to the sched with the least load, or NULL if none of the
957 struct drm_gpu_scheduler *sched, *picked_sched = NULL;
962 sched = sched_list[i];
964 if (!sched->ready) {
966 sched->name);
970 num_score = atomic_read(sched->score);
973 picked_sched = sched;
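
drm_sched_pick_best() compares the per-scheduler score counters seen earlier; the entity code relies on it when an entity is created with more than one scheduler. A hedged sketch (the context structure is illustrative, and the sched_list array must outlive the entity):

#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

struct my_ctx {
        struct drm_sched_entity entity;
        struct drm_gpu_scheduler *sched_list[2];  /* must outlive the entity */
};

static int my_ctx_init(struct my_ctx *ctx, struct drm_gpu_scheduler *a,
                       struct drm_gpu_scheduler *b)
{
        ctx->sched_list[0] = a;
        ctx->sched_list[1] = b;

        /* The least-loaded ready scheduler ends up backing the entity. */
        return drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
                                     ctx->sched_list,
                                     ARRAY_SIZE(ctx->sched_list), NULL);
}
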
984 * @sched: scheduler instance
988 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
1007 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
1019 wait_event_interruptible(sched->wake_up_worker,
1020 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
1021 (!drm_sched_blocked(sched) &&
1022 (entity = drm_sched_select_entity(sched))) ||
1026 sched->ops->free_job(cleanup_job);
1040 atomic_inc(&sched->hw_rq_count);
1044 fence = sched->ops->run_job(sched_job);
1057 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
1064 wake_up(&sched->job_scheduled);
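
The main loop above calls ops->run_job() and attaches a completion callback to the fence it returns. A hedged sketch of such a callback, reusing the illustrative struct my_job from the earlier sketch; my_ring_emit() is assumed driver code:

#include <linux/container_of.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static struct dma_fence *my_ring_emit(struct my_job *job);  /* assumed driver helper */

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        /* Push to the hardware ring; return the hardware fence that signals
         * on completion (NULL or an ERR_PTR are also accepted by the core). */
        return my_ring_emit(job);
}
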
1072 * @sched: scheduler instance
1085 int drm_sched_init(struct drm_gpu_scheduler *sched,
1092 sched->ops = ops;
1093 sched->hw_submission_limit = hw_submission;
1094 sched->name = name;
1095 sched->timeout = timeout;
1096 sched->timeout_wq = timeout_wq ? : system_wq;
1097 sched->hang_limit = hang_limit;
1098 sched->score = score ? score : &sched->_score;
1099 sched->dev = dev;
1101 drm_sched_rq_init(sched, &sched->sched_rq[i]);
1103 init_waitqueue_head(&sched->wake_up_worker);
1104 init_waitqueue_head(&sched->job_scheduled);
1105 INIT_LIST_HEAD(&sched->pending_list);
1106 spin_lock_init(&sched->job_list_lock);
1107 atomic_set(&sched->hw_rq_count, 0);
1108 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1109 atomic_set(&sched->_score, 0);
1110 atomic64_set(&sched->job_id_count, 0);
1113 sched->thread = kthread_run(drm_sched_main, sched, sched->name);
1114 if (IS_ERR(sched->thread)) {
1115 ret = PTR_ERR(sched->thread);
1116 sched->thread = NULL;
1117 DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
1121 sched->ready = true;
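
Put together, a scheduler instance would typically be wired up as below, reusing the illustrative struct my_device and callbacks from the earlier sketches. The numeric limits and names are illustrative, and the parameter order is an assumption matching the drm_sched_init() shown above:

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job);
static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *sched_job);
static void my_free_job(struct drm_sched_job *sched_job);

static const struct drm_sched_backend_ops my_sched_ops = {
        .run_job      = my_run_job,
        .timedout_job = my_timedout_job,
        .free_job     = my_free_job,
};

static int my_sched_setup(struct my_device *mdev, struct device *dev)
{
        return drm_sched_init(&mdev->sched, &my_sched_ops,
                              64,                        /* hw_submission limit */
                              2,                         /* hang_limit */
                              msecs_to_jiffies(500),     /* timeout */
                              NULL,                      /* timeout_wq -> system_wq */
                              NULL,                      /* score -> &sched->_score */
                              "my-ring", dev);
}
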
1129 * @sched: scheduler instance
1133 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1138 if (sched->thread)
1139 kthread_stop(sched->thread);
1142 struct drm_sched_rq *rq = &sched->sched_rq[i];
1157 wake_up_all(&sched->job_scheduled);
1160 cancel_delayed_work_sync(&sched->work_tdr);
1162 sched->ready = false;
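
Teardown mirrors setup: entities are retired before the scheduler that feeds them, and drm_sched_fini() then cancels the remaining timeout work. A short hedged sketch, reusing the illustrative types from the sketches above:

#include <drm/gpu_scheduler.h>

static void my_sched_teardown(struct my_ctx *ctx, struct my_device *mdev)
{
        /* Retire entities before the scheduler they feed. */
        drm_sched_entity_destroy(&ctx->entity);
        drm_sched_fini(&mdev->sched);
}
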
1172 * limit of the scheduler, then the respective sched entity is marked guilty and
1180 struct drm_gpu_scheduler *sched = bad->sched;
1191 struct drm_sched_rq *rq = &sched->sched_rq[i];