// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
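MODULE_PARM_DESC(download_mode, "Enable download mode at probe; cleared again on a clean shutdown");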

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL	BIT(1)

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

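/*
 * Singleton SCM device state, set up in qcom_scm_probe() and used by the
 * exported helpers below; callers can use qcom_scm_is_available() to check
 * whether it has been initialized.
 */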
static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	if (IS_ERR(__scm->path))
		return -EINVAL;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (IS_ERR_OR_NULL(__scm->path))
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

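/*
 * Probe which SMC calling convention the firmware implements by issuing a
 * harmless "is this call available" query, preferring the 64-bit convention
 * and falling back to the 32-bit and finally the legacy convention. The
 * result is cached in qcom_scm_convention.
 */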
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * A device isn't required as there is only one argument - no device
	 * is needed to dma_map_single anything to the secure world.
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on these firmwares. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in preemptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

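/*
 * Program a cold/warm boot address using the older per-CPU bitmask call.
 * The bit layout is given by qcom_scm_cpu_cold_bits/qcom_scm_cpu_warm_bits
 * above; newer firmware uses the multi-cluster variant further below.
 */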
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise, the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

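/**
 * qcom_scm_set_remote_state() - Set the state of a remote processor
 * @state: state to set the remote processor to
 * @id:    identifier of the remote processor
 *
 * Return: 0 on success, a negative errno or a firmware error code otherwise.
 */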
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

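/*
 * Tell the secure world to enter (or leave) download mode on the next reboot
 * via the dedicated SCM call; qcom_scm_set_download_mode() below falls back
 * to writing the TCSR register directly when this call is unavailable.
 */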
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

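/**
 * qcom_scm_io_readl() - Read a secure-world controlled register via SCM
 * @addr: physical address of the register to read
 * @val:  filled in with the value read on success
 *
 * Return: 0 on success, a negative errno otherwise.
 */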
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

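/**
 * qcom_scm_io_writel() - Write a secure-world controlled register via SCM
 * @addr: physical address of the register to write
 * @val:  value to write
 *
 * Return: 0 on success, a negative errno otherwise.
 */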
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

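/**
 * qcom_scm_restore_sec_cfg() - Restore the secure configuration of a device
 * @device_id: device identifier understood by the secure world
 * @spare:     spare argument
 *
 * Return: 0 on success, a negative errno or a firmware error code otherwise.
 */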
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap for the current set of owners, each set bit
 *            indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed.  The sanctioned way to
	 * do this is by using the DMA API.  But as is best practice for crypto
	 * keys, we also must wipe the key after use.  This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

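/*
 * Resolve the address of the TCSR "download mode" register from the optional
 * "qcom,dload-mode" property, which carries a phandle to the TCSR node plus
 * a register offset.
 */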
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

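/*
 * Firmware may park a busy SCM call on a wait queue and later raise the
 * "qcom-scm" interrupt to signal that waiters can be woken; the helpers
 * below validate the wait-queue context reported by firmware and complete
 * the matching waiters.
 */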
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/*
	 * FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	init_completion(&__scm->waitq_comp);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If "download mode" was requested, enable it now. From this point on
	 * a warm boot will cause the boot stages to enter download mode,
	 * unless it is disabled again by a clean shutdown/reboot below.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");