1 /*
2 *
3 * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
13 *
14 */
15
16
17
18 #ifdef CONFIG_ARM64
19
20 #include <mali_kbase.h>
21 #include <mali_kbase_smc.h>
22
23 #include <linux/compiler.h>
24
/* __asmeq was removed from the kernel in v4.20; provide a local fallback. */
#ifndef __asmeq
/*
 * __asmeq(x, y) - emit an assembler-time check that inline-asm operand
 * @x (e.g. "%0") was actually allocated to register @y (e.g. "x0").
 *
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences. Apparently we can't trust the
 * compiler from one version to another so a bit of paranoia won't hurt. This
 * string is meant to be concatenated with the inline asm string and will
 * cause compilation to stop on mismatch. (for details, see gcc PR 15089)
 */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
#endif
36
/**
 * invoke_smc_fid - Issue a Secure Monitor Call (SMC) on ARM64.
 * @function_id: SMC function identifier, passed in x0
 * @arg0: first call argument, passed in x1
 * @arg1: second call argument, passed in x2
 * @arg2: third call argument, passed in x3
 *
 * The arguments are pinned to the registers required by the SMC Calling
 * Convention via explicit register variables, and the __asmeq checks abort
 * the build if the compiler ever fails to honour those allocations.
 *
 * NOTE(review): noinline presumably keeps the register setup adjacent to
 * the smc instruction rather than being rescheduled into callers — confirm.
 *
 * Return: the value the secure monitor leaves in x0.
 */
static noinline u64 invoke_smc_fid(u64 function_id,
		u64 arg0, u64 arg1, u64 arg2)
{
	register u64 x0 asm("x0") = function_id;
	register u64 x1 asm("x1") = arg0;
	register u64 x2 asm("x2") = arg1;
	register u64 x3 asm("x3") = arg2;

	asm volatile(
			__asmeq("%0", "x0")
			__asmeq("%1", "x1")
			__asmeq("%2", "x2")
			__asmeq("%3", "x3")
			"smc    #0\n"
			: "+r" (x0)
			: "r" (x1), "r" (x2), "r" (x3));

	return x0;
}
56
/**
 * kbase_invoke_smc_fid - Invoke the secure monitor with a raw function ID.
 * @fid: SMC function ID; must be a fast call (bit 31 set) with bits 23:16
 *       clear, per the SMC Calling Convention
 * @arg0: first call argument
 * @arg1: second call argument
 * @arg2: third call argument
 *
 * Return: the value the secure monitor returns in x0.
 */
u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2)
{
	/*
	 * Is fast call (bit 31 set).
	 * Fix: the previous test, (fid & ~SMC_FAST_CALL), examined every bit
	 * EXCEPT bit 31 and so never verified the fast-call bit at all; test
	 * SMC_FAST_CALL itself instead.
	 */
	KBASE_DEBUG_ASSERT(fid & SMC_FAST_CALL);
	/* bits 16-23 must be zero for fast calls */
	KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0);

	return invoke_smc_fid(fid, arg0, arg1, arg2);
}
66
/**
 * kbase_invoke_smc - Build a fast-call SMC function ID and invoke it.
 * @oen: Owning Entity Number, already positioned in bits 29:24 (one of the
 *       SMC_OEN_* values; anything outside SMC_OEN_MASK is rejected)
 * @function_number: function number for bits 15:0
 * @smc64: true selects the SMC64 calling convention (bit 30), false SMC32
 * @arg0: first call argument
 * @arg1: second call argument
 * @arg2: third call argument
 *
 * Return: the value the secure monitor returns in x0.
 */
u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
		u64 arg0, u64 arg1, u64 arg2)
{
	u32 fid;

	/* Only the six bits allowed should be used. */
	KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0);

	/*
	 * Assemble the function ID per the SMC Calling Convention:
	 * bit 31 fast call, bit 30 calling convention, bits 29:24 OEN
	 * (pre-shifted by the caller), bits 23:16 zero for fast calls,
	 * bits 15:0 function number.
	 */
	fid = SMC_FAST_CALL | oen | function_number;
	if (smc64)
		fid |= SMC_64;

	return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
}
84
85 #endif /* CONFIG_ARM64 */
86
87