/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm sched in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

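/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * embeds a drm_sched_entity in its per-context state and initializes it
 * against one or more schedulers before any job is submitted. The "my_ctx"
 * and "my_device" names below are hypothetical; only the
 * drm_sched_entity_init() call itself reflects this API. Note that with more
 * than one scheduler the passed array is referenced (see the assignment of
 * entity->sched_list above), so it must stay valid for the entity's lifetime.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct my_device *mdev, struct my_ctx *ctx)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { &mdev->sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list,
 *					     ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 *
 * The entity is later torn down with drm_sched_entity_destroy() or with
 * drm_sched_entity_flush() followed by drm_sched_entity_fini().
 */
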
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm sched in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);

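/*
 * Usage sketch (illustrative, not part of this file): a driver can poll
 * drm_sched_entity_error() to notice that an earlier job on this entity
 * failed or was cancelled, for example to reject further submissions on a
 * banned context. The "my_ctx" type is hypothetical. In this scheduler a
 * guilty entity reports -ECANCELED and a killed entity -ESRCH; the value is
 * only a snapshot and can change as soon as new jobs reach the hardware.
 *
 *	static int my_ctx_check_healthy(struct my_ctx *ctx)
 *	{
 *		return drm_sched_entity_error(&ctx->entity);
 *	}
 */
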
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
						    drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first part does
 * the waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disable any more IB enqueues right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this entity
	 * any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

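/*
 * Teardown sketch (illustrative, not part of this file): most drivers simply
 * call drm_sched_entity_destroy() when the context or file descriptor goes
 * away, which flushes with MAX_WAIT_SCHED_ENTITY_Q_EMPTY and then finalizes.
 * A driver that wants a bounded wait can open-code the two steps with its own
 * timeout, as allowed by the split documented above. The "my_ctx" type and
 * the one second timeout are hypothetical.
 *
 *	static void my_ctx_fini(struct my_ctx *ctx)
 *	{
 *		drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));
 *		drm_sched_entity_fini(&ctx->entity);
 *	}
 */
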
/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup_if_can_queue(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

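/*
 * Usage sketch (illustrative, not part of this file): a driver exposing a
 * per-context priority override, e.g. through an ioctl, can simply forward
 * the new value; the entity moves to the matching runqueue the next time the
 * scheduler selects a runqueue for it. The "my_ctx" type and the two-level
 * mapping are hypothetical.
 *
 *	static void my_ctx_set_priority(struct my_ctx *ctx, bool high)
 *	{
 *		drm_sched_entity_set_priority(&ctx->entity,
 *					      high ? DRM_SCHED_PRIORITY_HIGH :
 *						     DRM_SCHED_PRIORITY_NORMAL);
 *	}
 */
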
/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

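/*
 * Dependency sketch (illustrative, not part of this file): the xarray walked
 * above is filled by the driver before the job is armed, typically with
 * drm_sched_job_add_dependency() or one of the drm_sched_job_add_*_dependencies()
 * helpers from sched_main.c. The "my_job" type below is hypothetical.
 *
 *	static int my_job_add_in_fence(struct my_job *job, struct dma_fence *in)
 *	{
 *		return drm_sched_job_add_dependency(&job->base, in);
 *	}
 *
 * Once the job is pushed, drm_sched_job_dependency() hands these fences back
 * one at a time and the entity stalls until each of them is signaled (or, for
 * fences from the same scheduler, at least scheduled).
 */
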
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup_if_can_queue(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
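
/*
 * Submission sketch (illustrative, not part of this file): the push must be
 * ordered against drm_sched_job_arm() under a lock common to the entity, as
 * documented above. The "my_ctx"/"my_job" types and the submit_lock are
 * hypothetical; drm_sched_job_init(), drm_sched_job_arm() and
 * drm_sched_entity_push_job() are the real entry points (the exact
 * drm_sched_job_init() arguments depend on the kernel version).
 *
 *	static struct dma_fence *my_submit(struct my_ctx *ctx,
 *					   struct my_job *job)
 *	{
 *		struct dma_fence *fence;
 *		int r;
 *
 *		r = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *		if (r)
 *			return ERR_PTR(r);
 *
 *		// ... add dependencies, pin buffers, etc. ...
 *
 *		mutex_lock(&ctx->submit_lock);
 *		drm_sched_job_arm(&job->base);
 *		fence = dma_fence_get(&job->base.s_fence->finished);
 *		drm_sched_entity_push_job(&job->base);
 *		mutex_unlock(&ctx->submit_lock);
 *
 *		return fence;
 *	}
 */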