// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

/*
 * Panfrost job submission and scheduling.
 *
 * Each hardware job slot (JS) gets its own drm_gpu_scheduler instance.
 * Userspace jobs are pushed through the DRM scheduler, submitted to the
 * hardware in panfrost_job_run()/panfrost_job_hw_submit(), and completed
 * (or faulted) from the job IRQ handler. Timeouts trigger a full device
 * reset via panfrost_reset().
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_mmu.h"

/* Scheduler timeout for a single job, in milliseconds. */
#define JOB_TIMEOUT_MS 500

/* MMIO accessors for the JOB register block. */
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

/*
 * Per-queue scheduler state machine. Transitions are done with atomic
 * xchg/cmpxchg on panfrost_queue_state::status so the IRQ handler, the
 * timeout handler and the reset worker can coordinate without holding
 * the queue lock:
 *
 *  ACTIVE        - queue is running normally.
 *  STOPPED       - drm_sched_stop() has been called (timeout or reset).
 *  STARTING      - queue is being restarted after a reset; faults seen in
 *                  this window are deferred (see FAULT_PENDING).
 *  FAULT_PENDING - a job fault arrived while STARTING; reported via
 *                  drm_sched_fault() once the queue is ACTIVE again.
 */
enum panfrost_queue_status {
	PANFROST_QUEUE_STATUS_ACTIVE,
	PANFROST_QUEUE_STATUS_STOPPED,
	PANFROST_QUEUE_STATUS_STARTING,
	PANFROST_QUEUE_STATUS_FAULT_PENDING,
};

struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;
	/* One of enum panfrost_queue_status; updated with atomic xchg. */
	atomic_t status;
	/* Serializes stop/start transitions of this queue. */
	struct mutex lock;
	/* dma_fence context for fences emitted on this slot. */
	u64 fence_context;
	/* Monotonic seqno for the next fence on this slot. */
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	/* Protects fence signalling and pfdev->jobs[] (shared by all slots). */
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

/* Hardware-completion fence, signalled from the job IRQ handler. */
struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	/* Job slot index this fence belongs to (0..NUM_JOB_SLOTS-1). */
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

/*
 * Allocate a done-fence for a job on slot @js_num. The fence is signalled
 * from the IRQ handler under js->job_lock, which is also the dma_fence
 * lock passed to dma_fence_init().
 */
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

/* Map a job's requirement flags to a hardware job-slot index. */
static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

/* Program the shader-core affinity for the next job on slot @js. */
static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

/*
 * Write a job's descriptors into the JS_*_NEXT registers of slot @js and
 * kick the hardware. Called from panfrost_job_run() in scheduler context.
 */
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;
	/*
	 * NOTE(review): on the error return above, pm_runtime_get_sync()
	 * has still taken a usage reference and devfreq was already marked
	 * busy; neither is released here. Presumably recovered by the
	 * timeout/reset path — TODO confirm against the reset handler.
	 */

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		/*
		 * NOTE(review): the _NEXT slot is expected to be empty
		 * because the scheduler submits at most one job per slot.
		 * This bail-out also keeps the pm_runtime/devfreq refs
		 * taken above — same caveat as the early return above.
		 */
		return;
	}

	/* Returns the AS number the job's MMU context was bound to. */
	cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
				job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

/*
 * Snapshot the exclusive (write) fence of each BO so the scheduler can
 * wait on them as implicit dependencies before running the job.
 */
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

/*
 * Install the job's render-done fence as the new exclusive fence on every
 * BO, so later users implicitly wait for this job.
 */
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

/*
 * Queue a validated job on the scheduler entity of its slot.
 *
 * Takes one extra job reference (dropped by panfrost_job_free() when the
 * scheduler is done with the job). All BO reservations are held across
 * fence acquisition and push so the implicit-sync bookkeeping is atomic
 * with respect to other submitters.
 *
 * Returns 0 on success or a negative errno.
 */
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					    &acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

/*
 * Final release of a job once its refcount drops to zero: drop all fence
 * references, release GEM mappings/objects and free the job itself.
 */
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			/*
			 * mappings[] is filled front-to-back, so the first
			 * NULL entry marks the end of the valid range.
			 */
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}

/* Drop a job reference; frees the job when the last one goes away. */
void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

/* drm_sched free_job callback: drop the reference taken in job_push(). */
static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

/*
 * drm_sched dependency callback: hand back one unsignalled dependency at
 * a time (explicit in-fences first, then implicit BO fences), transferring
 * the reference to the scheduler and NULLing our slot so each fence is
 * returned exactly once.
 */
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

/*
 * drm_sched run_job callback: create the hardware done-fence, record the
 * job as in-flight on its slot and submit it to the hardware.
 *
 * Returns the done-fence, or NULL if the job was already cancelled or the
 * fence allocation failed (the scheduler treats NULL as "nothing to wait
 * for").
 */
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	/* Skip jobs whose scheduler fence already carries an error
	 * (e.g. cancelled after a reset). */
	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	/* A resubmitted job (after reset) already has a done_fence;
	 * replace it with a fresh one. */
	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

/* Clear and unmask the DONE and ERR interrupts of every job slot. */
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		irq_mask |= MK_JS_MASK(j);
	}

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

/*
 * Stop a queue's scheduler (idempotently) in preparation for a reset.
 *
 * @bad: the job that triggered the stop, or NULL when stopping from the
 *       reset worker; its karma is increased so repeat offenders get
 *       their context banned by the scheduler.
 *
 * Returns true if this caller performed the stop, false if the queue was
 * already stopped by someone else (timeout handler vs. reset worker race).
 */
static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
				    struct drm_sched_job *bad)
{
	enum panfrost_queue_status old_status;
	bool stopped = false;

	mutex_lock(&queue->lock);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_STOPPED);
	if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
		goto out;

	WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
	drm_sched_stop(&queue->sched, bad);
	if (bad)
		drm_sched_increase_karma(bad);

	stopped = true;

	/*
	 * Set the timeout to max so the timer doesn't get started
	 * when we return from the timeout handler (restored in
	 * panfrost_scheduler_start()).
	 */
	queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;

out:
	mutex_unlock(&queue->lock);

	return stopped;
}

/*
 * Restart a stopped queue after a reset: restore the job timeout,
 * resubmit the pending jobs and report any fault that arrived while the
 * queue was in the STARTING window (see panfrost_job_irq_handler()).
 */
static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
{
	enum panfrost_queue_status old_status;

	mutex_lock(&queue->lock);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_STARTING);
	WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);

	/* Restore the original timeout before starting the scheduler. */
	queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
	drm_sched_resubmit_jobs(&queue->sched);
	drm_sched_start(&queue->sched, true);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_ACTIVE);
	if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING)
		drm_sched_fault(&queue->sched);

	mutex_unlock(&queue->lock);
}

/*
 * drm_sched timeout callback: dump the slot state, stop the queue and
 * schedule the device-reset worker (unless a reset is already pending).
 */
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);

	/*
	 * If the GPU managed to complete this jobs fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	/* Scheduler is already stopped, nothing to do. */
	if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
		return;

	/* Schedule a reset if there's no reset in progress. */
	if (!atomic_xchg(&pfdev->reset.pending, 1))
		schedule_work(&pfdev->reset.work);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};

/*
 * Job IRQ handler: for each slot with a pending interrupt, either report
 * a fault to the scheduler (deferring it if the queue is restarting) or
 * complete the in-flight job by signalling its done-fence.
 */
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			enum panfrost_queue_status old_status;

			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			/*
			 * When the queue is being restarted we don't report
			 * faults directly to avoid races between the timeout
			 * and reset handlers. panfrost_scheduler_start() will
			 * call drm_sched_fault() after the queue has been
			 * started if status == FAULT_PENDING.
			 */
			old_status = atomic_cmpxchg(&pfdev->js->queue[j].status,
						    PANFROST_QUEUE_STATUS_STARTING,
						    PANFROST_QUEUE_STATUS_FAULT_PENDING);
			if (old_status == PANFROST_QUEUE_STATUS_ACTIVE)
				drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
				panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

				/* job_lock is the fence lock, so the
				 * _locked variant must be used here. */
				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

/*
 * Reset worker: stop every queue, reclaim the pm_runtime/devfreq
 * references of in-flight jobs, reset the device and restart the queues.
 * Runs in a dma-fence signalling critical section because the scheduler
 * resubmission path can signal fences.
 */
static void panfrost_reset(struct work_struct *work)
{
	struct panfrost_device *pfdev = container_of(work,
						     struct panfrost_device,
						     reset.work);
	unsigned long flags;
	unsigned int i;
	bool cookie;

	cookie = dma_fence_begin_signalling();
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/*
		 * We want pending timeouts to be handled before we attempt
		 * to stop the scheduler. If we don't do that and the timeout
		 * handler is in flight, it might have removed the bad job
		 * from the list, and we'll lose this job if the reset handler
		 * enters the critical section in panfrost_scheduler_stop()
		 * before the timeout handler.
		 *
		 * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need
		 * something big enough to make sure the timer will not expire
		 * before we manage to stop the scheduler, but we can't use
		 * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job()
		 * considers that as 'timer is not running' and will dequeue
		 * the job without making sure the timeout handler is not
		 * running.
		 */
		pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1;
		cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr);
		panfrost_scheduler_stop(&pfdev->js->queue[i], NULL);
	}

	/* All timers have been stopped, we can safely reset the pending state. */
	atomic_set(&pfdev->reset.pending, 0);

	/* Jobs still marked in-flight will never get a DONE irq now; drop
	 * the pm_runtime/devfreq refs the IRQ handler would have dropped. */
	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		panfrost_scheduler_start(&pfdev->js->queue[i]);

	dma_fence_end_signalling(cookie);
}

/*
 * Allocate the job-slot state, request the "job" interrupt and create one
 * DRM scheduler per slot. Returns 0 on success or a negative errno.
 */
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	INIT_WORK(&pfdev->reset.work, panfrost_reset);

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		mutex_init(&js->queue[j].lock);

		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	/* Tear down only the schedulers created so far. */
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

/* Mask all job interrupts and tear down the per-slot schedulers. */
void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		drm_sched_fini(&js->queue[j].sched);
		mutex_destroy(&js->queue[j].lock);
	}

}

/*
 * Per-file open: create one scheduler entity per job slot for this
 * client. Returns 0 on success or a negative errno.
 */
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

/* Per-file close: destroy this client's scheduler entities. */
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

/* Return true (non-zero) when no slot has jobs queued on the hardware. */
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}