1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3 *
4 * (C) COPYRIGHT 2015, 2018, 2020-2021 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 #if IS_ENABLED(CONFIG_ARM64)
23
24 #include <mali_kbase.h>
25 #include <mali_kbase_smc.h>
26
27 #include <linux/compiler.h>
28
/* __asmeq is not available on Kernel versions >= 4.20 */
#ifndef __asmeq
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences. Apparently we can't trust the
 * compiler from one version to another so a bit of paranoia won't hurt. This
 * string is meant to be concatenated with the inline asm string and will
 * cause compilation to stop on mismatch. (for details, see gcc PR 15089)
 *
 * Expansion: the assembler directive ".ifnc x,y ; .err ; .endif" raises an
 * assembly-time error unless the two strings compare equal, i.e. unless the
 * operand %N was really placed in the named register.
 */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
#endif
40
/**
 * invoke_smc_fid - Issue a raw SMC (Secure Monitor Call) instruction.
 * @function_id: SMC function identifier, passed in x0.
 * @arg0: first call argument, passed in x1.
 * @arg1: second call argument, passed in x2.
 * @arg2: third call argument, passed in x3.
 *
 * Return: the value the secure monitor leaves in x0.
 *
 * noinline keeps this call in its own frame so the register pinning below
 * is not disturbed by inlining into a caller -- presumably; confirm against
 * the original driver history.
 */
static noinline u64 invoke_smc_fid(u64 function_id,
		u64 arg0, u64 arg1, u64 arg2)
{
	/* Pin each argument to the exact register the SMC ABI expects. */
	register u64 x0 asm("x0") = function_id;
	register u64 x1 asm("x1") = arg0;
	register u64 x2 asm("x2") = arg1;
	register u64 x3 asm("x3") = arg2;

	/*
	 * The __asmeq lines abort assembly if the compiler did not honour
	 * the register pinning above.  x0 is in/out ("+r"): the monitor's
	 * return value overwrites the function id.
	 *
	 * NOTE(review): no "memory" clobber is declared, so the compiler may
	 * cache memory across the SMC; verify the called firmware services
	 * never communicate through memory, or add the clobber.
	 */
	asm volatile(
			__asmeq("%0", "x0")
			__asmeq("%1", "x1")
			__asmeq("%2", "x2")
			__asmeq("%3", "x3")
			"smc #0\n"
			: "+r" (x0)
			: "r" (x1), "r" (x2), "r" (x3));

	return x0;
}
60
/**
 * kbase_invoke_smc_fid - Validate a fast-call function id and issue the SMC.
 * @fid:  fully-formed SMC function identifier.
 * @arg0: first call argument.
 * @arg1: second call argument.
 * @arg2: third call argument.
 *
 * Debug builds assert that @fid describes a fast call: bit 31
 * (SMC_FAST_CALL) must be set and bits 23:16 must be zero.
 *
 * Return: the value returned by the secure monitor in x0.
 */
u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2)
{
	/*
	 * Is fast call (bit 31 set).  The previous check was
	 * (fid & ~SMC_FAST_CALL), which tested that some bit OTHER than
	 * bit 31 was set and so never verified the stated condition.
	 */
	KBASE_DEBUG_ASSERT((fid & SMC_FAST_CALL) != 0);
	/* bits 16-23 must be zero for fast calls */
	KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0);

	return invoke_smc_fid(fid, arg0, arg1, arg2);
}
70
/**
 * kbase_invoke_smc - Build a fast-call SMC function id and invoke it.
 * @oen:             owning entity number, already shifted into bits 29:24
 *                   (must fit SMC_OEN_MASK).
 * @function_number: function number, occupies bits 15:0.
 * @smc64:           true to select the SMC64 calling convention (bit 30).
 * @arg0:            first call argument.
 * @arg1:            second call argument.
 * @arg2:            third call argument.
 *
 * Bits 23:16 are left zero, as required for fast calls.
 *
 * Return: the value returned by the secure monitor in x0.
 */
u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
		u64 arg0, u64 arg1, u64 arg2)
{
	/* Only the six bits allowed should be used. */
	KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0);

	/*
	 * Compose the id in one go: bit 31 = fast call, bit 30 = SMC64
	 * when requested, bits 29:24 = OEN, bits 15:0 = function number.
	 */
	u32 fid = SMC_FAST_CALL | oen | function_number;

	if (smc64)
		fid |= SMC_64;

	return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
}
88
89 #endif /* CONFIG_ARM64 */
90
91