// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/debugfs.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "a5xx_gpu.h"

/*
 * The CP state dumps below use indexed debug register pairs: write an index
 * to the *_ADDR register, then read the word back from the matching *_DATA
 * register.
 */
static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "PFP state:\n");

	for (i = 0; i < 36; i++) {
		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
		drm_printf(p, "  %02x: %08x\n", i,
			gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
	}
}

static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "ME state:\n");

	for (i = 0; i < 29; i++) {
		gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
		drm_printf(p, "  %02x: %08x\n", i,
			gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
	}
}

static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "MEQ state:\n");

	/* Reset the debug read pointer; it advances on each DBG_DATA read */
	gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);

	for (i = 0; i < 64; i++) {
		drm_printf(p, "  %02x: %08x\n", i,
			gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
	}
}

static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
	int i;

	drm_printf(p, "ROQ state:\n");

	/* Reset the debug read pointer; it advances on each DBG_DATA read */
	gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);

	/* Dump 512 words, four per line */
	for (i = 0; i < 512 / 4; i++) {
		uint32_t val[4];
		int j;

		for (j = 0; j < 4; j++)
			val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
		drm_printf(p, "  %02x: %08x %08x %08x %08x\n", i,
			val[0], val[1], val[2], val[3]);
	}
}

/*
 * Common seq_file show callback: node->info_ent->data holds a pointer to one
 * of the *_print helpers above.
 */
static int show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);
	void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
		node->info_ent->data;

	show(priv->gpu, &p);
	return 0;
}

#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
static struct drm_info_list a5xx_debugfs_list[] = {
	ENT(pfp),
	ENT(me),
	ENT(meq),
	ENT(roq),
};

/*
 * For debugfs files that can be written to, we can't use the drm helper:
 * writing to "reset" drops the cached PM4/PFP firmware and its backing
 * buffer objects, then forces a GPU recovery so the microcode is reloaded
 * from scratch on the next hw_init.
 */
static int
reset_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	if (!capable(CAP_SYS_ADMIN))
		return -EINVAL;

	/* TODO do we care about trying to make sure the GPU is idle?
	 * Since this is just a debug feature limited to CAP_SYS_ADMIN,
	 * maybe it is fine to let the user keep both pieces if they
	 * try to reset an active GPU.
	 */

	mutex_lock(&dev->struct_mutex);

	release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
	adreno_gpu->fw[ADRENO_FW_PM4] = NULL;

	release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
	adreno_gpu->fw[ADRENO_FW_PFP] = NULL;

	if (a5xx_gpu->pm4_bo) {
		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
		drm_gem_object_put_locked(a5xx_gpu->pm4_bo);
		a5xx_gpu->pm4_bo = NULL;
	}

	if (a5xx_gpu->pfp_bo) {
		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
		drm_gem_object_put_locked(a5xx_gpu->pfp_bo);
		a5xx_gpu->pfp_bo = NULL;
	}

	gpu->needs_hw_init = true;

	pm_runtime_get_sync(&gpu->pdev->dev);
	gpu->funcs->recover(gpu);

	pm_runtime_put_sync(&gpu->pdev->dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");

void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
	struct drm_device *dev;

	if (!minor)
		return;

	dev = minor->dev;

	/* Read-only state dumps: pfp, me, meq, roq */
	drm_debugfs_create_files(a5xx_debugfs_list,
				 ARRAY_SIZE(a5xx_debugfs_list),
				 minor->debugfs_root, minor);

	/* Write-only knob to force a firmware reload and GPU recovery */
	debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
			    &reset_fops);
}
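
/*
 * Usage sketch (an assumption for illustration, not part of the driver):
 * with debugfs mounted at /sys/kernel/debug and the a5xx device registered
 * as DRM minor 0, the entries created above would typically be exercised
 * from userspace as:
 *
 *   cat /sys/kernel/debug/dri/0/pfp            # PFP state dump
 *   cat /sys/kernel/debug/dri/0/me             # ME state dump
 *   cat /sys/kernel/debug/dri/0/meq            # MEQ state dump
 *   cat /sys/kernel/debug/dri/0/roq            # ROQ state dump
 *   echo 1 > /sys/kernel/debug/dri/0/reset     # reload firmware, recover GPU
 *
 * The exact minor number and debugfs mount point depend on the system, and
 * the reset write requires CAP_SYS_ADMIN.
 */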