1// SPDX-License-Identifier: GPL-2.0-only 2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. 3 * Copyright (C) 2015 Linaro Ltd. 4 */ 5#include <linux/platform_device.h> 6#include <linux/init.h> 7#include <linux/cpumask.h> 8#include <linux/export.h> 9#include <linux/dma-mapping.h> 10#include <linux/module.h> 11#include <linux/types.h> 12#include <linux/qcom_scm.h> 13#include <linux/of.h> 14#include <linux/of_address.h> 15#include <linux/of_platform.h> 16#include <linux/clk.h> 17#include <linux/reset-controller.h> 18#include <linux/arm-smccc.h> 19 20#include "qcom_scm.h" 21 22static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); 23module_param(download_mode, bool, 0); 24 25#define SCM_HAS_CORE_CLK BIT(0) 26#define SCM_HAS_IFACE_CLK BIT(1) 27#define SCM_HAS_BUS_CLK BIT(2) 28 29struct qcom_scm { 30 struct device *dev; 31 struct clk *core_clk; 32 struct clk *iface_clk; 33 struct clk *bus_clk; 34 struct reset_controller_dev reset; 35 36 u64 dload_mode_addr; 37}; 38 39struct qcom_scm_current_perm_info { 40 __le32 vmid; 41 __le32 perm; 42 __le64 ctx; 43 __le32 ctx_size; 44 __le32 unused; 45}; 46 47struct qcom_scm_mem_map_info { 48 __le64 mem_addr; 49 __le64 mem_size; 50}; 51 52#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 53#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 54#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 55#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 56 57#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 58#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 59#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 60#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 61 62struct qcom_scm_wb_entry { 63 int flag; 64 void *entry; 65}; 66 67static struct qcom_scm_wb_entry qcom_scm_wb[] = { 68 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, 69 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, 70 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, 71 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, 72}; 73 74static const char *qcom_scm_convention_names[] = { 75 [SMC_CONVENTION_UNKNOWN] = "unknown", 76 [SMC_CONVENTION_ARM_32] = "smc arm 32", 77 [SMC_CONVENTION_ARM_64] = "smc arm 64", 78 [SMC_CONVENTION_LEGACY] = "smc legacy", 79}; 80 81static struct qcom_scm *__scm; 82 83static int qcom_scm_clk_enable(void) 84{ 85 int ret; 86 87 ret = clk_prepare_enable(__scm->core_clk); 88 if (ret) 89 goto bail; 90 91 ret = clk_prepare_enable(__scm->iface_clk); 92 if (ret) 93 goto disable_core; 94 95 ret = clk_prepare_enable(__scm->bus_clk); 96 if (ret) 97 goto disable_iface; 98 99 return 0; 100 101disable_iface: 102 clk_disable_unprepare(__scm->iface_clk); 103disable_core: 104 clk_disable_unprepare(__scm->core_clk); 105bail: 106 return ret; 107} 108 109static void qcom_scm_clk_disable(void) 110{ 111 clk_disable_unprepare(__scm->core_clk); 112 clk_disable_unprepare(__scm->iface_clk); 113 clk_disable_unprepare(__scm->bus_clk); 114} 115 116enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; 117static DEFINE_SPINLOCK(scm_query_lock); 118 119static enum qcom_scm_convention __get_convention(void) 120{ 121 unsigned long flags; 122 struct qcom_scm_desc desc = { 123 .svc = QCOM_SCM_SVC_INFO, 124 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, 125 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO, 126 QCOM_SCM_INFO_IS_CALL_AVAIL) | 127 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT), 128 .arginfo = QCOM_SCM_ARGS(1), 129 .owner = ARM_SMCCC_OWNER_SIP, 130 }; 131 struct qcom_scm_res res; 132 enum qcom_scm_convention probed_convention; 133 int ret; 134 bool forced = false; 135 136 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) 137 return qcom_scm_convention; 138 139 /* 140 * Per the "SMC calling convention specification", the 64-bit calling 141 * convention can only be used when the client is 64-bit, otherwise 142 * system will encounter the undefined behaviour. 143 */ 144#if IS_ENABLED(CONFIG_ARM64) 145 /* 146 * Device isn't required as there is only one argument - no device 147 * needed to dma_map_single to secure world 148 */ 149 probed_convention = SMC_CONVENTION_ARM_64; 150 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); 151 if (!ret && res.result[0] == 1) 152 goto found; 153 154 /* 155 * Some SC7180 firmwares didn't implement the 156 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64 157 * calling conventions on these firmwares. Luckily we don't make any 158 * early calls into the firmware on these SoCs so the device pointer 159 * will be valid here to check if the compatible matches. 160 */ 161 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) { 162 forced = true; 163 goto found; 164 } 165#endif 166 167 probed_convention = SMC_CONVENTION_ARM_32; 168 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); 169 if (!ret && res.result[0] == 1) 170 goto found; 171 172 probed_convention = SMC_CONVENTION_LEGACY; 173found: 174 spin_lock_irqsave(&scm_query_lock, flags); 175 if (probed_convention != qcom_scm_convention) { 176 qcom_scm_convention = probed_convention; 177 pr_info("qcom_scm: convention: %s%s\n", 178 qcom_scm_convention_names[qcom_scm_convention], 179 forced ? " (forced)" : ""); 180 } 181 spin_unlock_irqrestore(&scm_query_lock, flags); 182 183 return qcom_scm_convention; 184} 185 186/** 187 * qcom_scm_call() - Invoke a syscall in the secure world 188 * @dev: device 189 * @svc_id: service identifier 190 * @cmd_id: command identifier 191 * @desc: Descriptor structure containing arguments and return values 192 * 193 * Sends a command to the SCM and waits for the command to finish processing. 194 * This should *only* be called in pre-emptible context. 195 */ 196static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, 197 struct qcom_scm_res *res) 198{ 199 might_sleep(); 200 switch (__get_convention()) { 201 case SMC_CONVENTION_ARM_32: 202 case SMC_CONVENTION_ARM_64: 203 return scm_smc_call(dev, desc, res, false); 204 case SMC_CONVENTION_LEGACY: 205 return scm_legacy_call(dev, desc, res); 206 default: 207 pr_err("Unknown current SCM calling convention.\n"); 208 return -EINVAL; 209 } 210} 211 212/** 213 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() 214 * @dev: device 215 * @svc_id: service identifier 216 * @cmd_id: command identifier 217 * @desc: Descriptor structure containing arguments and return values 218 * @res: Structure containing results from SMC/HVC call 219 * 220 * Sends a command to the SCM and waits for the command to finish processing. 221 * This can be called in atomic context. 222 */ 223static int qcom_scm_call_atomic(struct device *dev, 224 const struct qcom_scm_desc *desc, 225 struct qcom_scm_res *res) 226{ 227 switch (__get_convention()) { 228 case SMC_CONVENTION_ARM_32: 229 case SMC_CONVENTION_ARM_64: 230 return scm_smc_call(dev, desc, res, true); 231 case SMC_CONVENTION_LEGACY: 232 return scm_legacy_call_atomic(dev, desc, res); 233 default: 234 pr_err("Unknown current SCM calling convention.\n"); 235 return -EINVAL; 236 } 237} 238 239static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, 240 u32 cmd_id) 241{ 242 int ret; 243 struct qcom_scm_desc desc = { 244 .svc = QCOM_SCM_SVC_INFO, 245 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, 246 .owner = ARM_SMCCC_OWNER_SIP, 247 }; 248 struct qcom_scm_res res; 249 250 desc.arginfo = QCOM_SCM_ARGS(1); 251 switch (__get_convention()) { 252 case SMC_CONVENTION_ARM_32: 253 case SMC_CONVENTION_ARM_64: 254 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) | 255 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); 256 break; 257 case SMC_CONVENTION_LEGACY: 258 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id); 259 break; 260 default: 261 pr_err("Unknown SMC convention being used\n"); 262 return false; 263 } 264 265 ret = qcom_scm_call(dev, &desc, &res); 266 267 return ret ? false : !!res.result[0]; 268} 269 270/** 271 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus 272 * @entry: Entry point function for the cpus 273 * @cpus: The cpumask of cpus that will use the entry point 274 * 275 * Set the Linux entry point for the SCM to transfer control to when coming 276 * out of a power down. CPU power down may be executed on cpuidle or hotplug. 277 */ 278int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) 279{ 280 int ret; 281 int flags = 0; 282 int cpu; 283 struct qcom_scm_desc desc = { 284 .svc = QCOM_SCM_SVC_BOOT, 285 .cmd = QCOM_SCM_BOOT_SET_ADDR, 286 .arginfo = QCOM_SCM_ARGS(2), 287 }; 288 289 /* 290 * Reassign only if we are switching from hotplug entry point 291 * to cpuidle entry point or vice versa. 292 */ 293 for_each_cpu(cpu, cpus) { 294 if (entry == qcom_scm_wb[cpu].entry) 295 continue; 296 flags |= qcom_scm_wb[cpu].flag; 297 } 298 299 /* No change in entry function */ 300 if (!flags) 301 return 0; 302 303 desc.args[0] = flags; 304 desc.args[1] = virt_to_phys(entry); 305 306 ret = qcom_scm_call(__scm->dev, &desc, NULL); 307 if (!ret) { 308 for_each_cpu(cpu, cpus) 309 qcom_scm_wb[cpu].entry = entry; 310 } 311 312 return ret; 313} 314EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); 315 316/** 317 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus 318 * @entry: Entry point function for the cpus 319 * @cpus: The cpumask of cpus that will use the entry point 320 * 321 * Set the cold boot address of the cpus. Any cpu outside the supported 322 * range would be removed from the cpu present mask. 323 */ 324int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) 325{ 326 int flags = 0; 327 int cpu; 328 int scm_cb_flags[] = { 329 QCOM_SCM_FLAG_COLDBOOT_CPU0, 330 QCOM_SCM_FLAG_COLDBOOT_CPU1, 331 QCOM_SCM_FLAG_COLDBOOT_CPU2, 332 QCOM_SCM_FLAG_COLDBOOT_CPU3, 333 }; 334 struct qcom_scm_desc desc = { 335 .svc = QCOM_SCM_SVC_BOOT, 336 .cmd = QCOM_SCM_BOOT_SET_ADDR, 337 .arginfo = QCOM_SCM_ARGS(2), 338 .owner = ARM_SMCCC_OWNER_SIP, 339 }; 340 341 if (!cpus || (cpus && cpumask_empty(cpus))) 342 return -EINVAL; 343 344 for_each_cpu(cpu, cpus) { 345 if (cpu < ARRAY_SIZE(scm_cb_flags)) 346 flags |= scm_cb_flags[cpu]; 347 else 348 set_cpu_present(cpu, false); 349 } 350 351 desc.args[0] = flags; 352 desc.args[1] = virt_to_phys(entry); 353 354 return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); 355} 356EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); 357 358/** 359 * qcom_scm_cpu_power_down() - Power down the cpu 360 * @flags - Flags to flush cache 361 * 362 * This is an end point to power down cpu. If there was a pending interrupt, 363 * the control would return from this function, otherwise, the cpu jumps to the 364 * warm boot entry point set for this cpu upon reset. 365 */ 366void qcom_scm_cpu_power_down(u32 flags) 367{ 368 struct qcom_scm_desc desc = { 369 .svc = QCOM_SCM_SVC_BOOT, 370 .cmd = QCOM_SCM_BOOT_TERMINATE_PC, 371 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK, 372 .arginfo = QCOM_SCM_ARGS(1), 373 .owner = ARM_SMCCC_OWNER_SIP, 374 }; 375 376 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); 377} 378EXPORT_SYMBOL(qcom_scm_cpu_power_down); 379 380int qcom_scm_set_remote_state(u32 state, u32 id) 381{ 382 struct qcom_scm_desc desc = { 383 .svc = QCOM_SCM_SVC_BOOT, 384 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE, 385 .arginfo = QCOM_SCM_ARGS(2), 386 .args[0] = state, 387 .args[1] = id, 388 .owner = ARM_SMCCC_OWNER_SIP, 389 }; 390 struct qcom_scm_res res; 391 int ret; 392 393 ret = qcom_scm_call(__scm->dev, &desc, &res); 394 395 return ret ? : res.result[0]; 396} 397EXPORT_SYMBOL(qcom_scm_set_remote_state); 398 399static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) 400{ 401 struct qcom_scm_desc desc = { 402 .svc = QCOM_SCM_SVC_BOOT, 403 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE, 404 .arginfo = QCOM_SCM_ARGS(2), 405 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE, 406 .owner = ARM_SMCCC_OWNER_SIP, 407 }; 408 409 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0; 410 411 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 412} 413 414static void qcom_scm_set_download_mode(bool enable) 415{ 416 bool avail; 417 int ret = 0; 418 419 avail = __qcom_scm_is_call_available(__scm->dev, 420 QCOM_SCM_SVC_BOOT, 421 QCOM_SCM_BOOT_SET_DLOAD_MODE); 422 if (avail) { 423 ret = __qcom_scm_set_dload_mode(__scm->dev, enable); 424 } else if (__scm->dload_mode_addr) { 425 ret = qcom_scm_io_writel(__scm->dload_mode_addr, 426 enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0); 427 } else { 428 dev_err(__scm->dev, 429 "No available mechanism for setting download mode\n"); 430 } 431 432 if (ret) 433 dev_err(__scm->dev, "failed to set download mode: %d\n", ret); 434} 435 436/** 437 * qcom_scm_pas_init_image() - Initialize peripheral authentication service 438 * state machine for a given peripheral, using the 439 * metadata 440 * @peripheral: peripheral id 441 * @metadata: pointer to memory containing ELF header, program header table 442 * and optional blob of data used for authenticating the metadata 443 * and the rest of the firmware 444 * @size: size of the metadata 445 * 446 * Returns 0 on success. 447 */ 448int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) 449{ 450 dma_addr_t mdata_phys; 451 void *mdata_buf; 452 int ret; 453 struct qcom_scm_desc desc = { 454 .svc = QCOM_SCM_SVC_PIL, 455 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, 456 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), 457 .args[0] = peripheral, 458 .owner = ARM_SMCCC_OWNER_SIP, 459 }; 460 struct qcom_scm_res res; 461 462 /* 463 * During the scm call memory protection will be enabled for the meta 464 * data blob, so make sure it's physically contiguous, 4K aligned and 465 * non-cachable to avoid XPU violations. 466 */ 467 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, 468 GFP_KERNEL); 469 if (!mdata_buf) { 470 dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); 471 return -ENOMEM; 472 } 473 memcpy(mdata_buf, metadata, size); 474 475 ret = qcom_scm_clk_enable(); 476 if (ret) 477 goto free_metadata; 478 479 desc.args[1] = mdata_phys; 480 481 ret = qcom_scm_call(__scm->dev, &desc, &res); 482 483 qcom_scm_clk_disable(); 484 485free_metadata: 486 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); 487 488 return ret ? : res.result[0]; 489} 490EXPORT_SYMBOL(qcom_scm_pas_init_image); 491 492/** 493 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral 494 * for firmware loading 495 * @peripheral: peripheral id 496 * @addr: start address of memory area to prepare 497 * @size: size of the memory area to prepare 498 * 499 * Returns 0 on success. 500 */ 501int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) 502{ 503 int ret; 504 struct qcom_scm_desc desc = { 505 .svc = QCOM_SCM_SVC_PIL, 506 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP, 507 .arginfo = QCOM_SCM_ARGS(3), 508 .args[0] = peripheral, 509 .args[1] = addr, 510 .args[2] = size, 511 .owner = ARM_SMCCC_OWNER_SIP, 512 }; 513 struct qcom_scm_res res; 514 515 ret = qcom_scm_clk_enable(); 516 if (ret) 517 return ret; 518 519 ret = qcom_scm_call(__scm->dev, &desc, &res); 520 qcom_scm_clk_disable(); 521 522 return ret ? : res.result[0]; 523} 524EXPORT_SYMBOL(qcom_scm_pas_mem_setup); 525 526/** 527 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware 528 * and reset the remote processor 529 * @peripheral: peripheral id 530 * 531 * Return 0 on success. 532 */ 533int qcom_scm_pas_auth_and_reset(u32 peripheral) 534{ 535 int ret; 536 struct qcom_scm_desc desc = { 537 .svc = QCOM_SCM_SVC_PIL, 538 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET, 539 .arginfo = QCOM_SCM_ARGS(1), 540 .args[0] = peripheral, 541 .owner = ARM_SMCCC_OWNER_SIP, 542 }; 543 struct qcom_scm_res res; 544 545 ret = qcom_scm_clk_enable(); 546 if (ret) 547 return ret; 548 549 ret = qcom_scm_call(__scm->dev, &desc, &res); 550 qcom_scm_clk_disable(); 551 552 return ret ? : res.result[0]; 553} 554EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset); 555 556/** 557 * qcom_scm_pas_shutdown() - Shut down the remote processor 558 * @peripheral: peripheral id 559 * 560 * Returns 0 on success. 561 */ 562int qcom_scm_pas_shutdown(u32 peripheral) 563{ 564 int ret; 565 struct qcom_scm_desc desc = { 566 .svc = QCOM_SCM_SVC_PIL, 567 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN, 568 .arginfo = QCOM_SCM_ARGS(1), 569 .args[0] = peripheral, 570 .owner = ARM_SMCCC_OWNER_SIP, 571 }; 572 struct qcom_scm_res res; 573 574 ret = qcom_scm_clk_enable(); 575 if (ret) 576 return ret; 577 578 ret = qcom_scm_call(__scm->dev, &desc, &res); 579 580 qcom_scm_clk_disable(); 581 582 return ret ? : res.result[0]; 583} 584EXPORT_SYMBOL(qcom_scm_pas_shutdown); 585 586/** 587 * qcom_scm_pas_supported() - Check if the peripheral authentication service is 588 * available for the given peripherial 589 * @peripheral: peripheral id 590 * 591 * Returns true if PAS is supported for this peripheral, otherwise false. 592 */ 593bool qcom_scm_pas_supported(u32 peripheral) 594{ 595 int ret; 596 struct qcom_scm_desc desc = { 597 .svc = QCOM_SCM_SVC_PIL, 598 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED, 599 .arginfo = QCOM_SCM_ARGS(1), 600 .args[0] = peripheral, 601 .owner = ARM_SMCCC_OWNER_SIP, 602 }; 603 struct qcom_scm_res res; 604 605 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, 606 QCOM_SCM_PIL_PAS_IS_SUPPORTED)) 607 return false; 608 609 ret = qcom_scm_call(__scm->dev, &desc, &res); 610 611 return ret ? false : !!res.result[0]; 612} 613EXPORT_SYMBOL(qcom_scm_pas_supported); 614 615static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) 616{ 617 struct qcom_scm_desc desc = { 618 .svc = QCOM_SCM_SVC_PIL, 619 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET, 620 .arginfo = QCOM_SCM_ARGS(2), 621 .args[0] = reset, 622 .args[1] = 0, 623 .owner = ARM_SMCCC_OWNER_SIP, 624 }; 625 struct qcom_scm_res res; 626 int ret; 627 628 ret = qcom_scm_call(__scm->dev, &desc, &res); 629 630 return ret ? : res.result[0]; 631} 632 633static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, 634 unsigned long idx) 635{ 636 if (idx != 0) 637 return -EINVAL; 638 639 return __qcom_scm_pas_mss_reset(__scm->dev, 1); 640} 641 642static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev, 643 unsigned long idx) 644{ 645 if (idx != 0) 646 return -EINVAL; 647 648 return __qcom_scm_pas_mss_reset(__scm->dev, 0); 649} 650 651static const struct reset_control_ops qcom_scm_pas_reset_ops = { 652 .assert = qcom_scm_pas_reset_assert, 653 .deassert = qcom_scm_pas_reset_deassert, 654}; 655 656int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) 657{ 658 struct qcom_scm_desc desc = { 659 .svc = QCOM_SCM_SVC_IO, 660 .cmd = QCOM_SCM_IO_READ, 661 .arginfo = QCOM_SCM_ARGS(1), 662 .args[0] = addr, 663 .owner = ARM_SMCCC_OWNER_SIP, 664 }; 665 struct qcom_scm_res res; 666 int ret; 667 668 669 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res); 670 if (ret >= 0) 671 *val = res.result[0]; 672 673 return ret < 0 ? ret : 0; 674} 675EXPORT_SYMBOL(qcom_scm_io_readl); 676 677int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) 678{ 679 struct qcom_scm_desc desc = { 680 .svc = QCOM_SCM_SVC_IO, 681 .cmd = QCOM_SCM_IO_WRITE, 682 .arginfo = QCOM_SCM_ARGS(2), 683 .args[0] = addr, 684 .args[1] = val, 685 .owner = ARM_SMCCC_OWNER_SIP, 686 }; 687 688 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 689} 690EXPORT_SYMBOL(qcom_scm_io_writel); 691 692/** 693 * qcom_scm_restore_sec_cfg_available() - Check if secure environment 694 * supports restore security config interface. 695 * 696 * Return true if restore-cfg interface is supported, false if not. 697 */ 698bool qcom_scm_restore_sec_cfg_available(void) 699{ 700 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 701 QCOM_SCM_MP_RESTORE_SEC_CFG); 702} 703EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); 704 705int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) 706{ 707 struct qcom_scm_desc desc = { 708 .svc = QCOM_SCM_SVC_MP, 709 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, 710 .arginfo = QCOM_SCM_ARGS(2), 711 .args[0] = device_id, 712 .args[1] = spare, 713 .owner = ARM_SMCCC_OWNER_SIP, 714 }; 715 struct qcom_scm_res res; 716 int ret; 717 718 ret = qcom_scm_call(__scm->dev, &desc, &res); 719 720 return ret ? : res.result[0]; 721} 722EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); 723 724int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) 725{ 726 struct qcom_scm_desc desc = { 727 .svc = QCOM_SCM_SVC_MP, 728 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, 729 .arginfo = QCOM_SCM_ARGS(1), 730 .args[0] = spare, 731 .owner = ARM_SMCCC_OWNER_SIP, 732 }; 733 struct qcom_scm_res res; 734 int ret; 735 736 ret = qcom_scm_call(__scm->dev, &desc, &res); 737 738 if (size) 739 *size = res.result[0]; 740 741 return ret ? : res.result[1]; 742} 743EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); 744 745int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) 746{ 747 struct qcom_scm_desc desc = { 748 .svc = QCOM_SCM_SVC_MP, 749 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, 750 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, 751 QCOM_SCM_VAL), 752 .args[0] = addr, 753 .args[1] = size, 754 .args[2] = spare, 755 .owner = ARM_SMCCC_OWNER_SIP, 756 }; 757 int ret; 758 759 ret = qcom_scm_call(__scm->dev, &desc, NULL); 760 761 /* the pg table has been initialized already, ignore the error */ 762 if (ret == -EPERM) 763 ret = 0; 764 765 return ret; 766} 767EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); 768 769int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, 770 u32 cp_nonpixel_start, 771 u32 cp_nonpixel_size) 772{ 773 int ret; 774 struct qcom_scm_desc desc = { 775 .svc = QCOM_SCM_SVC_MP, 776 .cmd = QCOM_SCM_MP_VIDEO_VAR, 777 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 778 QCOM_SCM_VAL, QCOM_SCM_VAL), 779 .args[0] = cp_start, 780 .args[1] = cp_size, 781 .args[2] = cp_nonpixel_start, 782 .args[3] = cp_nonpixel_size, 783 .owner = ARM_SMCCC_OWNER_SIP, 784 }; 785 struct qcom_scm_res res; 786 787 ret = qcom_scm_call(__scm->dev, &desc, &res); 788 789 return ret ? : res.result[0]; 790} 791EXPORT_SYMBOL(qcom_scm_mem_protect_video_var); 792 793static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, 794 size_t mem_sz, phys_addr_t src, size_t src_sz, 795 phys_addr_t dest, size_t dest_sz) 796{ 797 int ret; 798 struct qcom_scm_desc desc = { 799 .svc = QCOM_SCM_SVC_MP, 800 .cmd = QCOM_SCM_MP_ASSIGN, 801 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, 802 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, 803 QCOM_SCM_VAL, QCOM_SCM_VAL), 804 .args[0] = mem_region, 805 .args[1] = mem_sz, 806 .args[2] = src, 807 .args[3] = src_sz, 808 .args[4] = dest, 809 .args[5] = dest_sz, 810 .args[6] = 0, 811 .owner = ARM_SMCCC_OWNER_SIP, 812 }; 813 struct qcom_scm_res res; 814 815 ret = qcom_scm_call(dev, &desc, &res); 816 817 return ret ? : res.result[0]; 818} 819 820/** 821 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership 822 * @mem_addr: mem region whose ownership need to be reassigned 823 * @mem_sz: size of the region. 824 * @srcvm: vmid for current set of owners, each set bit in 825 * flag indicate a unique owner 826 * @newvm: array having new owners and corresponding permission 827 * flags 828 * @dest_cnt: number of owners in next set. 829 * 830 * Return negative errno on failure or 0 on success with @srcvm updated. 831 */ 832int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, 833 unsigned int *srcvm, 834 const struct qcom_scm_vmperm *newvm, 835 unsigned int dest_cnt) 836{ 837 struct qcom_scm_current_perm_info *destvm; 838 struct qcom_scm_mem_map_info *mem_to_map; 839 phys_addr_t mem_to_map_phys; 840 phys_addr_t dest_phys; 841 dma_addr_t ptr_phys; 842 size_t mem_to_map_sz; 843 size_t dest_sz; 844 size_t src_sz; 845 size_t ptr_sz; 846 int next_vm; 847 __le32 *src; 848 void *ptr; 849 int ret, i, b; 850 unsigned long srcvm_bits = *srcvm; 851 852 src_sz = hweight_long(srcvm_bits) * sizeof(*src); 853 mem_to_map_sz = sizeof(*mem_to_map); 854 dest_sz = dest_cnt * sizeof(*destvm); 855 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + 856 ALIGN(dest_sz, SZ_64); 857 858 ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); 859 if (!ptr) 860 return -ENOMEM; 861 862 /* Fill source vmid detail */ 863 src = ptr; 864 i = 0; 865 for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG) 866 src[i++] = cpu_to_le32(b); 867 868 /* Fill details of mem buff to map */ 869 mem_to_map = ptr + ALIGN(src_sz, SZ_64); 870 mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); 871 mem_to_map->mem_addr = cpu_to_le64(mem_addr); 872 mem_to_map->mem_size = cpu_to_le64(mem_sz); 873 874 next_vm = 0; 875 /* Fill details of next vmid detail */ 876 destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); 877 dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); 878 for (i = 0; i < dest_cnt; i++, destvm++, newvm++) { 879 destvm->vmid = cpu_to_le32(newvm->vmid); 880 destvm->perm = cpu_to_le32(newvm->perm); 881 destvm->ctx = 0; 882 destvm->ctx_size = 0; 883 next_vm |= BIT(newvm->vmid); 884 } 885 886 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, 887 ptr_phys, src_sz, dest_phys, dest_sz); 888 dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); 889 if (ret) { 890 dev_err(__scm->dev, 891 "Assign memory protection call failed %d\n", ret); 892 return -EINVAL; 893 } 894 895 *srcvm = next_vm; 896 return 0; 897} 898EXPORT_SYMBOL(qcom_scm_assign_mem); 899 900/** 901 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available 902 */ 903bool qcom_scm_ocmem_lock_available(void) 904{ 905 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, 906 QCOM_SCM_OCMEM_LOCK_CMD); 907} 908EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); 909 910/** 911 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM 912 * region to the specified initiator 913 * 914 * @id: tz initiator id 915 * @offset: OCMEM offset 916 * @size: OCMEM size 917 * @mode: access mode (WIDE/NARROW) 918 */ 919int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, 920 u32 mode) 921{ 922 struct qcom_scm_desc desc = { 923 .svc = QCOM_SCM_SVC_OCMEM, 924 .cmd = QCOM_SCM_OCMEM_LOCK_CMD, 925 .args[0] = id, 926 .args[1] = offset, 927 .args[2] = size, 928 .args[3] = mode, 929 .arginfo = QCOM_SCM_ARGS(4), 930 }; 931 932 return qcom_scm_call(__scm->dev, &desc, NULL); 933} 934EXPORT_SYMBOL(qcom_scm_ocmem_lock); 935 936/** 937 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM 938 * region from the specified initiator 939 * 940 * @id: tz initiator id 941 * @offset: OCMEM offset 942 * @size: OCMEM size 943 */ 944int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) 945{ 946 struct qcom_scm_desc desc = { 947 .svc = QCOM_SCM_SVC_OCMEM, 948 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD, 949 .args[0] = id, 950 .args[1] = offset, 951 .args[2] = size, 952 .arginfo = QCOM_SCM_ARGS(3), 953 }; 954 955 return qcom_scm_call(__scm->dev, &desc, NULL); 956} 957EXPORT_SYMBOL(qcom_scm_ocmem_unlock); 958 959/** 960 * qcom_scm_ice_available() - Is the ICE key programming interface available? 961 * 962 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and 963 * qcom_scm_ice_set_key() are available. 964 */ 965bool qcom_scm_ice_available(void) 966{ 967 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 968 QCOM_SCM_ES_INVALIDATE_ICE_KEY) && 969 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, 970 QCOM_SCM_ES_CONFIG_SET_ICE_KEY); 971} 972EXPORT_SYMBOL(qcom_scm_ice_available); 973 974/** 975 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key 976 * @index: the keyslot to invalidate 977 * 978 * The UFSHCI standard defines a standard way to do this, but it doesn't work on 979 * these SoCs; only this SCM call does. 980 * 981 * Return: 0 on success; -errno on failure. 982 */ 983int qcom_scm_ice_invalidate_key(u32 index) 984{ 985 struct qcom_scm_desc desc = { 986 .svc = QCOM_SCM_SVC_ES, 987 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY, 988 .arginfo = QCOM_SCM_ARGS(1), 989 .args[0] = index, 990 .owner = ARM_SMCCC_OWNER_SIP, 991 }; 992 993 return qcom_scm_call(__scm->dev, &desc, NULL); 994} 995EXPORT_SYMBOL(qcom_scm_ice_invalidate_key); 996 997/** 998 * qcom_scm_ice_set_key() - Set an inline encryption key 999 * @index: the keyslot into which to set the key 1000 * @key: the key to program 1001 * @key_size: the size of the key in bytes 1002 * @cipher: the encryption algorithm the key is for 1003 * @data_unit_size: the encryption data unit size, i.e. the size of each 1004 * individual plaintext and ciphertext. Given in 512-byte 1005 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. 1006 * 1007 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it 1008 * can then be used to encrypt/decrypt UFS I/O requests inline. 1009 * 1010 * The UFSHCI standard defines a standard way to do this, but it doesn't work on 1011 * these SoCs; only this SCM call does. 1012 * 1013 * Return: 0 on success; -errno on failure. 1014 */ 1015int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, 1016 enum qcom_scm_ice_cipher cipher, u32 data_unit_size) 1017{ 1018 struct qcom_scm_desc desc = { 1019 .svc = QCOM_SCM_SVC_ES, 1020 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY, 1021 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW, 1022 QCOM_SCM_VAL, QCOM_SCM_VAL, 1023 QCOM_SCM_VAL), 1024 .args[0] = index, 1025 .args[2] = key_size, 1026 .args[3] = cipher, 1027 .args[4] = data_unit_size, 1028 .owner = ARM_SMCCC_OWNER_SIP, 1029 }; 1030 void *keybuf; 1031 dma_addr_t key_phys; 1032 int ret; 1033 1034 /* 1035 * 'key' may point to vmalloc()'ed memory, but we need to pass a 1036 * physical address that's been properly flushed. The sanctioned way to 1037 * do this is by using the DMA API. But as is best practice for crypto 1038 * keys, we also must wipe the key after use. This makes kmemdup() + 1039 * dma_map_single() not clearly correct, since the DMA API can use 1040 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming 1041 * keys is normally rare and thus not performance-critical. 1042 */ 1043 1044 keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, 1045 GFP_KERNEL); 1046 if (!keybuf) 1047 return -ENOMEM; 1048 memcpy(keybuf, key, key_size); 1049 desc.args[1] = key_phys; 1050 1051 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1052 1053 memzero_explicit(keybuf, key_size); 1054 1055 dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); 1056 return ret; 1057} 1058EXPORT_SYMBOL(qcom_scm_ice_set_key); 1059 1060/** 1061 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 1062 * 1063 * Return true if HDCP is supported, false if not. 1064 */ 1065bool qcom_scm_hdcp_available(void) 1066{ 1067 bool avail; 1068 int ret = qcom_scm_clk_enable(); 1069 1070 if (ret) 1071 return ret; 1072 1073 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, 1074 QCOM_SCM_HDCP_INVOKE); 1075 1076 qcom_scm_clk_disable(); 1077 1078 return avail; 1079} 1080EXPORT_SYMBOL(qcom_scm_hdcp_available); 1081 1082/** 1083 * qcom_scm_hdcp_req() - Send HDCP request. 1084 * @req: HDCP request array 1085 * @req_cnt: HDCP request array count 1086 * @resp: response buffer passed to SCM 1087 * 1088 * Write HDCP register(s) through SCM. 1089 */ 1090int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) 1091{ 1092 int ret; 1093 struct qcom_scm_desc desc = { 1094 .svc = QCOM_SCM_SVC_HDCP, 1095 .cmd = QCOM_SCM_HDCP_INVOKE, 1096 .arginfo = QCOM_SCM_ARGS(10), 1097 .args = { 1098 req[0].addr, 1099 req[0].val, 1100 req[1].addr, 1101 req[1].val, 1102 req[2].addr, 1103 req[2].val, 1104 req[3].addr, 1105 req[3].val, 1106 req[4].addr, 1107 req[4].val 1108 }, 1109 .owner = ARM_SMCCC_OWNER_SIP, 1110 }; 1111 struct qcom_scm_res res; 1112 1113 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) 1114 return -ERANGE; 1115 1116 ret = qcom_scm_clk_enable(); 1117 if (ret) 1118 return ret; 1119 1120 ret = qcom_scm_call(__scm->dev, &desc, &res); 1121 *resp = res.result[0]; 1122 1123 qcom_scm_clk_disable(); 1124 1125 return ret; 1126} 1127EXPORT_SYMBOL(qcom_scm_hdcp_req); 1128 1129int qcom_scm_qsmmu500_wait_safe_toggle(bool en) 1130{ 1131 struct qcom_scm_desc desc = { 1132 .svc = QCOM_SCM_SVC_SMMU_PROGRAM, 1133 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, 1134 .arginfo = QCOM_SCM_ARGS(2), 1135 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, 1136 .args[1] = en, 1137 .owner = ARM_SMCCC_OWNER_SIP, 1138 }; 1139 1140 1141 return qcom_scm_call_atomic(__scm->dev, &desc, NULL); 1142} 1143EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); 1144 1145static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) 1146{ 1147 struct device_node *tcsr; 1148 struct device_node *np = dev->of_node; 1149 struct resource res; 1150 u32 offset; 1151 int ret; 1152 1153 tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); 1154 if (!tcsr) 1155 return 0; 1156 1157 ret = of_address_to_resource(tcsr, 0, &res); 1158 of_node_put(tcsr); 1159 if (ret) 1160 return ret; 1161 1162 ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); 1163 if (ret < 0) 1164 return ret; 1165 1166 *addr = res.start + offset; 1167 1168 return 0; 1169} 1170 1171/** 1172 * qcom_scm_is_available() - Checks if SCM is available 1173 */ 1174bool qcom_scm_is_available(void) 1175{ 1176 return !!__scm; 1177} 1178EXPORT_SYMBOL(qcom_scm_is_available); 1179 1180static int qcom_scm_probe(struct platform_device *pdev) 1181{ 1182 struct qcom_scm *scm; 1183 unsigned long clks; 1184 int ret; 1185 1186 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); 1187 if (!scm) 1188 return -ENOMEM; 1189 1190 ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); 1191 if (ret < 0) 1192 return ret; 1193 1194 clks = (unsigned long)of_device_get_match_data(&pdev->dev); 1195 1196 scm->core_clk = devm_clk_get(&pdev->dev, "core"); 1197 if (IS_ERR(scm->core_clk)) { 1198 if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) 1199 return PTR_ERR(scm->core_clk); 1200 1201 if (clks & SCM_HAS_CORE_CLK) { 1202 dev_err(&pdev->dev, "failed to acquire core clk\n"); 1203 return PTR_ERR(scm->core_clk); 1204 } 1205 1206 scm->core_clk = NULL; 1207 } 1208 1209 scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); 1210 if (IS_ERR(scm->iface_clk)) { 1211 if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER) 1212 return PTR_ERR(scm->iface_clk); 1213 1214 if (clks & SCM_HAS_IFACE_CLK) { 1215 dev_err(&pdev->dev, "failed to acquire iface clk\n"); 1216 return PTR_ERR(scm->iface_clk); 1217 } 1218 1219 scm->iface_clk = NULL; 1220 } 1221 1222 scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); 1223 if (IS_ERR(scm->bus_clk)) { 1224 if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER) 1225 return PTR_ERR(scm->bus_clk); 1226 1227 if (clks & SCM_HAS_BUS_CLK) { 1228 dev_err(&pdev->dev, "failed to acquire bus clk\n"); 1229 return PTR_ERR(scm->bus_clk); 1230 } 1231 1232 scm->bus_clk = NULL; 1233 } 1234 1235 scm->reset.ops = &qcom_scm_pas_reset_ops; 1236 scm->reset.nr_resets = 1; 1237 scm->reset.of_node = pdev->dev.of_node; 1238 ret = devm_reset_controller_register(&pdev->dev, &scm->reset); 1239 if (ret) 1240 return ret; 1241 1242 /* vote for max clk rate for highest performance */ 1243 ret = clk_set_rate(scm->core_clk, INT_MAX); 1244 if (ret) 1245 return ret; 1246 1247 __scm = scm; 1248 __scm->dev = &pdev->dev; 1249 1250 __get_convention(); 1251 1252 /* 1253 * If requested enable "download mode", from this point on warmboot 1254 * will cause the the boot stages to enter download mode, unless 1255 * disabled below by a clean shutdown/reboot. 1256 */ 1257 if (download_mode) 1258 qcom_scm_set_download_mode(true); 1259 1260 return 0; 1261} 1262 1263static void qcom_scm_shutdown(struct platform_device *pdev) 1264{ 1265 /* Clean shutdown, disable download mode to allow normal restart */ 1266 qcom_scm_set_download_mode(false); 1267} 1268 1269static const struct of_device_id qcom_scm_dt_match[] = { 1270 { .compatible = "qcom,scm-apq8064", 1271 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */ 1272 }, 1273 { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK | 1274 SCM_HAS_IFACE_CLK | 1275 SCM_HAS_BUS_CLK) 1276 }, 1277 { .compatible = "qcom,scm-ipq4019" }, 1278 { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK }, 1279 { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK }, 1280 { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK | 1281 SCM_HAS_IFACE_CLK | 1282 SCM_HAS_BUS_CLK) 1283 }, 1284 { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK | 1285 SCM_HAS_IFACE_CLK | 1286 SCM_HAS_BUS_CLK) 1287 }, 1288 { .compatible = "qcom,scm-msm8994" }, 1289 { .compatible = "qcom,scm-msm8996" }, 1290 { .compatible = "qcom,scm" }, 1291 {} 1292}; 1293 1294static struct platform_driver qcom_scm_driver = { 1295 .driver = { 1296 .name = "qcom_scm", 1297 .of_match_table = qcom_scm_dt_match, 1298 }, 1299 .probe = qcom_scm_probe, 1300 .shutdown = qcom_scm_shutdown, 1301}; 1302 1303static int __init qcom_scm_init(void) 1304{ 1305 return platform_driver_register(&qcom_scm_driver); 1306} 1307subsys_initcall(qcom_scm_init); 1308