// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

/* Gate for ioctls whose UAPI is not yet stable (see panfrost_unstable_ioctl_check()). */
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

/*
 * GET_PARAM ioctl: report one value from the cached GPU feature registers
 * (pfdev->features) selected by param->param.
 */
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;

	if (param->pad != 0)
		return -EINVAL;

/* Map a single-valued feature to its switch case. */
#define PANFROST_FEATURE(name, member)			\
	case DRM_PANFROST_PARAM_ ## name: \
		param->value = pfdev->features.member; \
		break
/* Map an indexed feature array (NAME0..NAMEmax) to a case range. */
#define PANFROST_FEATURE_ARRAY(name, member, max) \
	case DRM_PANFROST_PARAM_ ## name ## 0 ... \
		DRM_PANFROST_PARAM_ ## name ## max: \
		param->value = pfdev->features.member[param->param - \
			DRM_PANFROST_PARAM_ ## name ## 0]; \
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * CREATE_BO ioctl: allocate a GEM buffer object, create a userspace handle
 * for it and return the GPU VA of its mapping in args->offset.
 */
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;
	int ret;

	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
		return -EINVAL;

	/* Heaps should never be executable */
	if ((args->flags & PANFROST_BO_HEAP) &&
	    !(args->flags & PANFROST_BO_NOEXEC))
		return -EINVAL;

	bo = panfrost_gem_create(dev, args->size, args->flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (ret)
		goto out;

	mapping = panfrost_gem_mapping_get(bo, priv);
	if (mapping) {
		args->offset = mapping->mmnode.start << PAGE_SHIFT;
		panfrost_gem_mapping_put(mapping);
	} else {
		/* This can only happen if the handle from
		 * drm_gem_handle_create() has already been guessed and freed
		 * by user space
		 */
		ret = -EINVAL;
	}

out:
	/* Drop the creation reference; the handle (if any) keeps the BO alive. */
	drm_gem_object_put(&bo->base.base);
	return ret;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	if (!job->bo_count)
		return 0;

	job->implicit_fences = kvmalloc_array(job->bo_count,
				  sizeof(struct dma_fence *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!job->implicit_fences)
		return -ENOMEM;

	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			ret = -EINVAL;
			break;
		}

		/* Pin against madvise-purge while the GPU may use this BO. */
		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->in_fence_count = args->in_sync_count;

	if (!job->in_fence_count)
		return 0;

	job->in_fences = kvmalloc_array(job->in_fence_count,
					sizeof(struct dma_fence *),
					GFP_KERNEL | __GFP_ZERO);
	if (!job->in_fences) {
		DRM_DEBUG("Failed to allocate job in fences\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   job->in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	for (i = 0; i < job->in_fence_count; i++) {
		ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
					     &job->in_fences[i]);
		/* NOTE(review): only -EINVAL aborts here; other error codes
		 * fall through to the next handle — confirm this is intended.
		 */
		if (ret == -EINVAL)
			goto fail;
	}

fail:
	kvfree(handles);
	return ret;
}

/*
 * SUBMIT ioctl: build a panfrost_job from the ioctl args (in-fences and BO
 * mappings), push it to the scheduler, and optionally install the job's
 * render-done fence into the caller's output syncobj.
 */
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_job *job;
	int ret = 0;

	if (!args->jc)
		return -EINVAL;

	if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file, args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto fail_out_sync;
	}

	kref_init(&job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->file_priv = file->driver_priv;

	ret = panfrost_copy_in_sync(dev, file, args, job);
	if (ret)
		goto fail_job;

	ret = panfrost_lookup_bos(dev, file, args, job);
	if (ret)
		goto fail_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto fail_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(sync_out, job->render_done_fence);

fail_job:
	panfrost_job_put(job);
fail_out_sync:
	if (sync_out)
		drm_syncobj_put(sync_out);

	return ret;
}

/*
 * WAIT_BO ioctl: wait (with timeout) for all fences attached to a BO's
 * reservation object to signal.
 */
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	long ret;
	struct drm_panfrost_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	if (args->pad)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj)
		return -ENOENT;

	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
						  true, timeout);
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	drm_gem_object_put(gem_obj);

	return ret;
}

/*
 * MMAP_BO ioctl: return the fake mmap offset userspace should pass to
 * mmap(2) on the DRM fd to map this BO.
 */
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_panfrost_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	/* Don't allow mmapping of heap objects as pages are not pinned. */
	if (to_panfrost_bo(gem_obj)->is_heap) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
	drm_gem_object_put(gem_obj);
	return ret;
}

/*
 * GET_BO_OFFSET ioctl: look up the GPU VA of this file's mapping of a BO.
 */
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_panfrost_bo(gem_obj);

	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put(gem_obj);

	if (!mapping)
		return -EINVAL;

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
	return 0;
}

/*
 * MADVISE ioctl: mark a BO as needed/unneeded so the shrinker may purge it
 * under memory pressure. Takes shrinker_lock then the BO's mappings.lock.
 */
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

	if (args->retained) {
		/* Track purgeable BOs on the device-wide shrinker list. */
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_move_tail(&bo->base.madv_list,
				       &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_object_put(gem_obj);
	return ret;
}

/*
 * Return 0 when unstable ioctls (perfcnt) are enabled via the module
 * parameter, -ENOSYS otherwise.
 */
int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;

	return 0;
}

/*
 * Per-fd open: allocate the file's private data, give it its own MMU
 * address-space context and open a job (scheduler) context.
 */
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *panfrost_priv;

	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
	if (!panfrost_priv)
		return -ENOMEM;

	panfrost_priv->pfdev = pfdev;
	file->driver_priv = panfrost_priv;

	panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
	if (IS_ERR(panfrost_priv->mmu)) {
		ret = PTR_ERR(panfrost_priv->mmu);
		goto err_free;
	}

	ret = panfrost_job_open(panfrost_priv);
	if (ret)
		goto err_job;

	return 0;

err_job:
	panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
	kfree(panfrost_priv);
	return ret;
}

/*
 * Per-fd close: tear down in the reverse order of panfrost_open(), plus
 * perfcnt state owned by this file.
 */
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct panfrost_file_priv *panfrost_priv = file->driver_priv;

	panfrost_perfcnt_close(file);
	panfrost_job_close(panfrost_priv);

	panfrost_mmu_ctx_put(panfrost_priv->mmu);
	kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 */
static struct drm_driver panfrost_drm_driver = {
	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open			= panfrost_open,
	.postclose		= panfrost_postclose,
	.ioctls			= panfrost_drm_driver_ioctls,
	.num_ioctls		= ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops			= &panfrost_drm_driver_fops,
	.name			= "panfrost",
	.desc			= "panfrost DRM",
	.date			= "20180908",
	.major			= 1,
	.minor			= 1,

	.gem_create_object	= panfrost_gem_create_object,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};

/*
 * Platform probe: set up the panfrost device, runtime PM (autosuspend),
 * register the DRM device and the GEM shrinker.
 */
static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->pdev = pdev;
	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, pfdev);

	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	pm_runtime_set_active(pfdev->dev);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_enable(pfdev->dev);
	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out1;

	panfrost_gem_shrinker_init(ddev);

	return 0;

err_out1:
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);
err_out0:
	drm_dev_put(ddev);
	return err;
}

/*
 * Platform remove: unregister, then tear down with the device resumed so
 * panfrost_device_fini() can touch the hardware.
 */
static int panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
	struct drm_device *ddev = pfdev->ddev;

	drm_dev_unregister(ddev);
	panfrost_gem_shrinker_cleanup(ddev);

	pm_runtime_get_sync(pfdev->dev);
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);

	drm_dev_put(ddev);
	return 0;
}

static const char * const default_supplies[] = { "mali" };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops panfrost_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove		= panfrost_remove,
	.driver		= {
		.name	= "panfrost",
		.pm	= &panfrost_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");