1 /*
2  * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 * Description: functions for processing open/close session and invoke.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  */
14 #include "tz_pm.h"
15 #include <securec.h>
16 #include <linux/types.h>
17 #include <linux/vmalloc.h>
18 #include <linux/dma-mapping.h>
19 #include <asm/cacheflush.h>
20 #include "tc_ns_client.h"
21 #include "teek_ns_client.h"
22 #include "tc_ns_log.h"
23 #include "smc_call.h"
24 
/* S4 (hibernate) addresses are validated to stay below this 4G boundary */
#define S4_ADDR_4G			  				0xffffffff
/* reserved physical region owned by the secure OS; its contents are
 * encrypted out at suspend and restored at resume
 */
#define RESERVED_SECOS_PHYMEM_BASE			0x22800000
#define RESERVED_SECOS_PHYMEM_SIZE			(0x3000000)
/* middle (staging) buffer shared with the secure world during S4 */
#define RESERVED_SECOS_S4_BASE				0x27760000
#define RESERVED_SECOS_S4_SIZE				(0x100000)

/* vmalloc'd kernel-side copy of the whole secure region, set at suspend */
static char *g_s4_kernel_mem_addr;
/* kernel mapping, physical address and size of the S4 staging buffer */
static char *g_s4_buffer_vaddr;
static uint64_t g_s4_buffer_paddr;
static uint32_t g_s4_buffer_size;
35 
/*
 * Map the physical range [paddr, paddr + size) into kernel virtual
 * address space with normal (PAGE_KERNEL) attributes.
 *
 * Returns the virtual address corresponding to @paddr (the sub-page
 * offset is preserved), or NULL on allocation/mapping failure.
 * The caller unmaps with vunmap() on the page-aligned address.
 */
static void *tc_vmap(phys_addr_t paddr, size_t size)
{
	uint32_t i;
	void *vaddr = NULL;
	pgprot_t pgprot = PAGE_KERNEL;
	uintptr_t offset;
	uint32_t pages_count;
	struct page **pages = NULL;

	/* split the address into a page-aligned base plus in-page offset */
	offset = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	pages_count = (uint32_t)(PAGE_ALIGN(size + offset) / PAGE_SIZE);

	/*
	 * kcalloc checks the count * element-size multiplication for
	 * overflow, unlike the open-coded kzalloc(sizeof(p) * n) form.
	 * A zero count yields ZERO_SIZE_PTR, caught below.
	 */
	pages = kcalloc(pages_count, sizeof(struct page *), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pages))
		return NULL;

	for (i = 0; i < pages_count; i++)
		*(pages + i) = phys_to_page((uintptr_t)(paddr + PAGE_SIZE * i));

	vaddr = vmap(pages, pages_count, VM_MAP, pgprot);
	kfree(pages); /* vmap keeps its own reference; the array is temporary */
	if (vaddr == NULL)
		return NULL;

	return offset + (char *)vaddr;
}
63 
tc_s4_alloc_crypto_buffer(struct device *dev, char **kernel_mem_addr)64 static int tc_s4_alloc_crypto_buffer(struct device *dev,
65 	 char **kernel_mem_addr)
66 {
67 	(void)dev;
68 	if (RESERVED_SECOS_S4_BASE > S4_ADDR_4G) {
69 		tloge("addr is invalid\n");
70 		return -EFAULT;
71 	}
72 
73 	g_s4_buffer_vaddr = tc_vmap(RESERVED_SECOS_S4_BASE, RESERVED_SECOS_S4_SIZE);
74 	if (g_s4_buffer_vaddr == NULL) {
75 		tloge("vmap failed for s4\n");
76 		return -EFAULT;
77 	}
78 	g_s4_buffer_paddr = RESERVED_SECOS_S4_BASE;
79 	g_s4_buffer_size = RESERVED_SECOS_S4_SIZE;
80 
81 	*kernel_mem_addr = vmalloc(RESERVED_SECOS_PHYMEM_SIZE);
82 	if (*kernel_mem_addr == NULL) {
83 		vunmap(g_s4_buffer_vaddr);
84 		g_s4_buffer_paddr = 0;
85 		g_s4_buffer_vaddr = NULL;
86 		g_s4_buffer_size = 0;
87 		tloge("vmalloc failed for s4\n");
88 		return -ENOMEM;
89 	}
90 
91 	return 0;
92 }
93 
free_resource(const char *kernel_mem_addr)94 static void free_resource(const char *kernel_mem_addr)
95 {
96 	vunmap(g_s4_buffer_vaddr);
97 	vfree(kernel_mem_addr);
98 	g_s4_kernel_mem_addr = NULL;
99 	g_s4_buffer_paddr = 0;
100 	g_s4_buffer_vaddr = NULL;
101 	g_s4_buffer_size = 0;
102 }
103 
tc_s4_suspend_or_resume(uint32_t power_op)104 static uint64_t tc_s4_suspend_or_resume(uint32_t power_op)
105 {
106 	u64 smc_id = (u64)power_op;
107 	u64 smc_ret = 0xffff;
108 	struct smc_in_params in_param = { smc_id };
109 	struct smc_out_params out_param = { smc_ret };
110 	smc_req(&in_param, &out_param, 0);
111 	smc_ret = out_param.ret;
112 	return smc_ret;
113 }
114 
/*
 * Ask the secure world to encrypt or decrypt (per crypt_op) one chunk,
 * moving @size bytes between the staging buffer at @middle_mem_addr and
 * the secure region at @secos_mem; @index identifies the chunk.
 * Returns the SMC result (0 on success).
 */
static uint64_t tc_s4_crypto_and_copy(uint32_t crypt_op,
	uint64_t middle_mem_addr,
	uintptr_t secos_mem,
	uint32_t size, uint32_t index)
{
	struct smc_in_params in_param = {
		(u64)crypt_op,
		(u64)middle_mem_addr,
		(u64)secos_mem,
		(u64)size,
		(u64)index,
	};
	struct smc_out_params out_param = { 0xffff };

	smc_req(&in_param, &out_param, 0);
	return out_param.ret;
}
133 
/*
 * Stream the whole secure-OS region through the staging buffer, one
 * g_s4_buffer_size chunk at a time.
 *
 * Encrypt path (TSP_S4_ENCRYPT_AND_COPY): secure world fills the
 * staging buffer, then we copy it into kernel_mem_addr.
 * Decrypt path (TSP_S4_DECRYPT_AND_COPY): we fill the staging buffer
 * from kernel_mem_addr, then the secure world restores it.
 *
 * NOTE(review): assumes RESERVED_SECOS_PHYMEM_SIZE is an exact multiple
 * of g_s4_buffer_size (true for the current constants, 48 chunks);
 * otherwise the final chunk would over-read/over-write.
 *
 * Returns 0 on success, -EFAULT on copy or SMC failure.
 */
static int tc_s4_transfer_data(char *kernel_mem_addr, uint32_t crypt_op)
{
	uint32_t index = 0;
	uint32_t copied_size = 0;

	/*
	 * Guard against a zero-sized staging buffer (never allocated or
	 * already freed): the loop below would otherwise never advance
	 * copied_size and spin forever.
	 */
	if (g_s4_buffer_size == 0 || g_s4_buffer_vaddr == NULL) {
		tloge("s4 buffer is not ready\n");
		return -EFAULT;
	}

	while (copied_size < RESERVED_SECOS_PHYMEM_SIZE) {
		if (crypt_op == TSP_S4_DECRYPT_AND_COPY) {
			if (memcpy_s(g_s4_buffer_vaddr, g_s4_buffer_size,
				kernel_mem_addr + copied_size,
				g_s4_buffer_size) != EOK) {
				tloge("mem copy for decrypt failed\n");
				return -EFAULT;
			}
		}

		if (tc_s4_crypto_and_copy(crypt_op, g_s4_buffer_paddr,
			RESERVED_SECOS_PHYMEM_BASE + copied_size,
			g_s4_buffer_size, index) != 0) {
			tloge("crypto and copy failed\n");
			return -EFAULT;
		}

		if (crypt_op == TSP_S4_ENCRYPT_AND_COPY) {
			if (memcpy_s(kernel_mem_addr + copied_size,
				g_s4_buffer_size, g_s4_buffer_vaddr,
				g_s4_buffer_size) != EOK) {
				tloge("mem copy for encrypt failed\n");
				return -EFAULT;
			}
		}

		copied_size += g_s4_buffer_size;
		index++;
	}

	return 0;
}
171 
/*
 * Common S4 driver: on suspend, remember the kernel staging copy and
 * tell TEEOS to quiesce its pm drivers BEFORE encrypting the secure
 * region out; on resume, decrypt the region back first and only then
 * tell TEEOS to bring its pm drivers back up.
 * Returns 0 on success, a non-zero SMC/transfer error otherwise.
 */
static int tc_s4_pm_ops(struct device *dev, uint32_t power_op,
			uint32_t crypt_op, char *kernel_mem_addr)
{
	int ret;
	(void)dev;

	if (power_op == TSP_S4_SUSPEND) {
		/* stash the buffer so resume can find it again */
		g_s4_kernel_mem_addr = kernel_mem_addr;

		/* notify TEEOS to suspend all pm driver */
		ret = (int)tc_s4_suspend_or_resume(power_op);
		if (ret != 0) {
			tloge("tc s4 suspend failed\n");
			return ret;
		}
	} else {
		kernel_mem_addr = g_s4_kernel_mem_addr;
	}

	ret = tc_s4_transfer_data(kernel_mem_addr, crypt_op);
	if (ret != 0) {
		tloge("transfer data failed, power_op=0x%x\n", power_op);
		return ret;
	}

	/* notify TEEOS to resume all pm driver */
	if (power_op == TSP_S4_RESUME) {
		ret = (int)tc_s4_suspend_or_resume(power_op);
		if (ret != 0) {
			tloge("tc s4 resume failed\n");
			return ret;
		}
	}

	return 0;
}
209 
tc_s4_pm_suspend(struct device *dev)210 int tc_s4_pm_suspend(struct device *dev)
211 {
212 	int ret;
213 	char *kernel_mem_addr = NULL;
214 
215 	ret = tc_s4_alloc_crypto_buffer(dev, &kernel_mem_addr);
216 	if (ret != 0) {
217 		tloge("alloc buffer failed\n");
218 		return ret;
219 	}
220 
221 	ret = tc_s4_pm_ops(dev, TSP_S4_SUSPEND, TSP_S4_ENCRYPT_AND_COPY, kernel_mem_addr);
222 	if (ret != 0) {
223 		free_resource(kernel_mem_addr);
224 		tloge("s4 suspend failed\n");
225 	}
226 
227 	return ret;
228 }
229 
tc_s4_pm_resume(struct device *dev)230 int tc_s4_pm_resume(struct device *dev)
231 {
232 	int ret;
233 
234 	ret = tc_s4_pm_ops(dev, TSP_S4_RESUME, TSP_S4_DECRYPT_AND_COPY, g_s4_kernel_mem_addr);
235 	if (ret != 0)
236 		tloge("s4 resume failed\n");
237 
238 	free_resource(g_s4_kernel_mem_addr);
239 	return ret;
240 }
241