/*
 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 * Description: memory management for reserved memory with TEE.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "reserved_mempool.h"
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <securec.h>
#include <linux/vmalloc.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/io.h>

#include "teek_client_constants.h"
#include "tc_ns_log.h"
#include "smc_smp.h"

#define STATE_MODE 0440U
#define SLICE_RATE 4
#define MAX_SLICE 0x400000
#define MIN_RES_MEM_SIZE 0x400000

struct virt_page {
	unsigned long start;
};

struct reserved_page_t {
	struct list_head node;
	struct virt_page *page;
	int order;
	unsigned int count; /* whether the page is in use */
};

struct reserved_free_area_t {
	struct list_head page_list;
	int order;
};

struct reserved_zone_t {
	struct virt_page *all_pages;
	struct reserved_page_t *pages;
	struct reserved_free_area_t free_areas[0];
};

static struct reserved_zone_t *g_res_zone;
static struct mutex g_res_lock;
static int g_res_max_order;
static unsigned long g_start_vaddr = 0;
static unsigned long g_start_paddr;
static struct dentry *g_res_mem_dbg_dentry;
static unsigned int g_res_mem_size = 0;

static unsigned int get_res_page_size(void)
{
	return g_res_mem_size >> PAGE_SHIFT;
}

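/*
 * Round the reserved-memory size down to a power of two. The buddy
 * allocator below assumes a power-of-two pool size; values that are
 * already zero or a power of two are returned unchanged.
 */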
static unsigned int calc_res_mem_size(unsigned int rsize)
{
	unsigned int size = rsize;
	unsigned int idx = 0;

	if (size == 0 || (size & (size - 1)) == 0)
		return size;

	while (size != 0) {
		size = size >> 1;
		idx++;
	}
	return (1 << (idx - 1));
}

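/*
 * Slice size of the pool: 1/16 of the total reserved memory
 * (g_res_mem_size >> SLICE_RATE), capped at MAX_SLICE (4 MB).
 */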
unsigned int get_res_mem_slice_size(void)
{
	unsigned int size = (g_res_mem_size >> SLICE_RATE);
	return (size > MAX_SLICE) ? MAX_SLICE : size;
}

bool exist_res_mem(void)
{
	return (g_start_vaddr != 0) && (g_res_mem_size != 0);
}

unsigned long res_mem_virt_to_phys(unsigned long vaddr)
{
	return vaddr - g_start_vaddr + g_start_paddr;
}

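/*
 * Find the "tz_reserved" node in the device tree, ioremap its range and
 * record the base virtual/physical addresses. The usable pool size is
 * rounded down to a power of two; a missing node is not an error, the
 * pool is simply left disabled.
 */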
int load_reserved_mem(void)
{
	struct device_node *np = NULL;
	struct resource r;
	unsigned int res_size;
	int rc;
	void *p = NULL;

	np = of_find_compatible_node(NULL, NULL, "tz_reserved");
	if (np == NULL) {
		tlogd("can not find reserved memory.\n");
		return 0;
	}

	rc = of_address_to_resource(np, 0, &r);
	if (rc != 0) {
		tloge("of_address_to_resource error\n");
		return -ENODEV;
	}

	res_size = (unsigned int)resource_size(&r);
	if (res_size < MIN_RES_MEM_SIZE) {
		tloge("reserved memory size is too small\n");
		return -EINVAL;
	}

	p = ioremap(r.start, res_size);
	if (p == NULL) {
		tloge("io remap for reserved memory failed\n");
		return -ENOMEM;
	}
	g_start_vaddr = (unsigned long)(uintptr_t)p;
	g_start_paddr = (unsigned long)r.start;
	g_res_mem_size = calc_res_mem_size(res_size);

	return 0;
}

void unmap_res_mem(void)
{
	if (exist_res_mem()) {
		iounmap((void __iomem *)g_start_vaddr);
		g_start_vaddr = 0;
		g_res_mem_size = 0;
	}
}

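/*
 * Allocate the zone descriptor: one free area per order in
 * [0, g_res_max_order] plus one reserved_page_t for every page
 * in the pool.
 */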
static int create_zone(void)
{
	size_t zone_len;

	g_res_max_order = get_order(g_res_mem_size);
	zone_len = sizeof(struct reserved_free_area_t) * (g_res_max_order + 1) + sizeof(*g_res_zone);

	g_res_zone = kzalloc(zone_len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_res_zone)) {
		tloge("fail to create zone\n");
		return -ENOMEM;
	}

	g_res_zone->pages = kzalloc(sizeof(struct reserved_page_t) * get_res_page_size(), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_res_zone->pages)) {
		tloge("failed to alloc zone pages\n");
		kfree(g_res_zone);
		g_res_zone = NULL;
		return -ENOMEM;
	}
	return 0;
}

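/*
 * Build the per-page table of kernel virtual addresses, one virt_page
 * per PAGE_SIZE step starting from g_start_vaddr.
 */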
static struct virt_page *create_virt_pages(void)
{
	unsigned int i = 0;
	struct virt_page *pages = NULL;

	pages = kzalloc(get_res_page_size() * sizeof(struct virt_page), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pages)) {
		tloge("alloc pages failed\n");
		return NULL;
	}

	for (i = 0; i < get_res_page_size(); i++)
		pages[i].start = g_start_vaddr + i * PAGE_SIZE;
	return pages;
}

void free_reserved_mempool(void)
{
	if (!exist_res_mem())
		return;

	if (g_res_zone != NULL) {
		if (g_res_zone->all_pages != NULL) {
			kfree(g_res_zone->all_pages);
			g_res_zone->all_pages = NULL;
		}

		if (g_res_zone->pages != NULL) {
			kfree(g_res_zone->pages);
			g_res_zone->pages = NULL;
		}

		kfree(g_res_zone);
		g_res_zone = NULL;
	}

	if (!g_res_mem_dbg_dentry)
		return;
	debugfs_remove_recursive(g_res_mem_dbg_dentry);
	g_res_mem_dbg_dentry = NULL;
}

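/*
 * Dump every in-use page and the state of each free list; backs the
 * debugfs "state" node, so output goes through tloge.
 */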
static void show_res_mem_info(void)
{
	unsigned int i;
	struct reserved_page_t *pos = NULL;
	struct list_head *head = NULL;
	unsigned int used = 0;

	if (g_res_zone == NULL) {
		tloge("res zone is NULL\n");
		return;
	}

	tloge("################## reserved memory info ######################\n");
	mutex_lock(&g_res_lock);
	for (i = 0; i < get_res_page_size(); i++) {
		if (g_res_zone->pages[i].count != 0) {
			tloge("page[%02d], order=%02d, count=%d\n",
				i, g_res_zone->pages[i].order,
				g_res_zone->pages[i].count);
			used += (1 << (uint32_t)g_res_zone->pages[i].order);
		}
	}
	tloge("reserved memory total usage:%u/%u\n", used, get_res_page_size());
	tloge("--------------------------------------------------------------\n");

	for (i = 0; i <= (unsigned int)g_res_max_order; i++) {
		head = &g_res_zone->free_areas[i].page_list;
		if (list_empty(head) != 0) {
			tloge("order[%02d] is empty\n", i);
		} else {
			list_for_each_entry(pos, head, node)
				tloge("order[%02d]\n", i);
		}
	}
	mutex_unlock(&g_res_lock);

	tloge("#############################################################\n");
}


static ssize_t mb_res_mem_state_read(struct file *filp, char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	(void)filp;
	(void)ubuf;
	(void)cnt;
	(void)ppos;
	show_res_mem_info();
	return 0;
}

static const struct file_operations g_res_mem_dbg_state_fops = {
	.owner = THIS_MODULE,
	.read = mb_res_mem_state_read,
};

static void init_res_mem_dentry(void)
{
#ifdef DEF_ENG
	g_res_mem_dbg_dentry = debugfs_create_dir("tz_res_mem", NULL);
	debugfs_create_file("state", STATE_MODE, g_res_mem_dbg_dentry, NULL, &g_res_mem_dbg_state_fops);
#endif
}

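/*
 * Register the reserved pool with the TEE through the global
 * GLOBAL_CMD_ID_REGISTER_RESMEM SMC: the physical base address is split
 * into low/high words and the pool size is passed as the second value.
 */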
static int res_mem_register(unsigned long paddr, unsigned int size)
{
	struct tc_ns_operation *operation = NULL;
	struct tc_ns_smc_cmd *smc_cmd = NULL;
	int ret = 0;

	smc_cmd = kzalloc(sizeof(*smc_cmd), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)smc_cmd)) {
		tloge("alloc smc_cmd failed\n");
		return -ENOMEM;
	}

	operation = kzalloc(sizeof(*operation), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)operation)) {
		tloge("alloc operation failed\n");
		ret = -ENOMEM;
		goto free_smc_cmd;
	}

	operation->paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
		(TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
	operation->params[0].value.a = paddr;
	operation->params[0].value.b = paddr >> ADDR_TRANS_NUM;
	operation->params[1].value.a = size;

	smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd->cmd_id = GLOBAL_CMD_ID_REGISTER_RESMEM;
	smc_cmd->operation_phys = virt_to_phys(operation);
	smc_cmd->operation_h_phys = virt_to_phys(operation) >> ADDR_TRANS_NUM;

	if (tc_ns_smc(smc_cmd) != 0) {
		tloge("register res mem failed\n");
		ret = -EIO;
	}

	kfree(operation);
	operation = NULL;
free_smc_cmd:
	kfree(smc_cmd);
	smc_cmd = NULL;
	return ret;
}

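/*
 * Initialize the buddy metadata: mark every page free (order -1), reset
 * all free lists, then carve the pool into maximum-order blocks and
 * queue them on the top-order free list.
 */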
static void zone_init(struct virt_page *all_pages)
{
	int i;
	struct reserved_free_area_t *area = NULL;
	int max_order_cnt;
	struct reserved_page_t *res_page = NULL;

	for (i = 0; i < (int)get_res_page_size(); i++) {
		g_res_zone->pages[i].order = -1;
		g_res_zone->pages[i].count = 0;
		g_res_zone->pages[i].page = &all_pages[i];
	}

	for (i = 0; i <= g_res_max_order; i++) {
		area = &g_res_zone->free_areas[i];
		INIT_LIST_HEAD(&area->page_list);
		area->order = i;
	}

	max_order_cnt = (int)(get_res_page_size() / (1 << (unsigned int)g_res_max_order));
	g_res_zone->all_pages = all_pages;
	for (i = 0; i < max_order_cnt; i++) {
		int idx = i * (1 << (unsigned int)g_res_max_order);

		g_res_zone->pages[idx].order = g_res_max_order;
		res_page = &g_res_zone->pages[idx];
		/* area still points at the max-order free area after the loop above */
		list_add_tail(&res_page->node, &area->page_list);
	}
}

int reserved_mempool_init(void)
{
	struct virt_page *all_pages = NULL;
	int ret = 0;
	unsigned long paddr;

	if (!exist_res_mem())
		return 0;

	ret = create_zone();
	if (ret != 0)
		return ret;

	all_pages = create_virt_pages();
	if (all_pages == NULL) {
		kfree(g_res_zone->pages);
		g_res_zone->pages = NULL;
		kfree(g_res_zone);
		g_res_zone = NULL;
		return -ENOMEM;
	}

	paddr = g_start_paddr;
	ret = res_mem_register(paddr, g_res_mem_size);
	if (ret != 0) {
		kfree(all_pages);
		all_pages = NULL;
		kfree(g_res_zone->pages);
		g_res_zone->pages = NULL;
		kfree(g_res_zone);
		g_res_zone = NULL;
		return -EIO;
	}

	zone_init(all_pages);

	mutex_init(&g_res_lock);
	init_res_mem_dentry();
	return 0;
}

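/*
 * Buddy allocation: round the request up to a whole order, take the
 * first block from the smallest non-empty free list that is big enough,
 * and return the unused halves to the lower-order free lists. Returns
 * NULL if the request is invalid or no block is available.
 */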
void *reserved_mem_alloc(size_t size)
{
	int i, j;
	struct reserved_page_t *pos = NULL;
	struct list_head *head = NULL;
	int order = get_order(ALIGN(size, SZ_4K));
	unsigned long addr = 0;
	bool valid_param = (size > 0 && order <= g_res_max_order && order >= 0);

	if (!valid_param) {
		tloge("invalid alloc param, size %d, order %d, max %d\n", (int)size, order, g_res_max_order);
		return NULL;
	}

	mutex_lock(&g_res_lock);
	for (i = order; i <= g_res_max_order; i++) {
		head = &g_res_zone->free_areas[i].page_list;
		if (list_empty(head) != 0)
			continue;

		pos = list_first_entry(head, struct reserved_page_t, node);
		pos->count = 1;
		pos->order = order;

		for (j = order; j < i; j++) {
			struct reserved_page_t *new_page = NULL;

			new_page = pos + (1 << (unsigned int)j);
			new_page->count = 0;
			new_page->order = j;
			list_add_tail(&new_page->node, &g_res_zone->free_areas[j].page_list);
		}
		list_del(&pos->node);
		addr = pos->page->start;
		break;
	}
	mutex_unlock(&g_res_lock);
	return (void *)(uintptr_t)addr;
}

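/*
 * Translate a pool pointer back into its page index, or -1 if the
 * address does not belong to the reserved pool.
 */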
static int get_virt_page_index(const void *ptr)
{
	unsigned long vaddr = (unsigned long)(uintptr_t)ptr;
	unsigned long offset = vaddr - g_start_vaddr;
	int pg_idx = offset / (1 << PAGE_SHIFT);

	if ((unsigned int)pg_idx >= get_res_page_size() || pg_idx < 0)
		return -1;
	return pg_idx;
}

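/*
 * Coalesce a freed block with its buddy, order by order. If the buddy
 * is busy or split, the block is placed on the free list of the current
 * order and -1 is returned; otherwise merging continues up to the
 * maximum order and the index of the merged block is reported through
 * page_index.
 */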
static int buddy_merge(struct virt_page *vpage, int order, unsigned int *page_index)
{
	int i;
	unsigned int cur_idx;
	unsigned int buddy_idx;
	struct reserved_page_t *self = NULL;
	struct reserved_page_t *buddy = NULL;

	for (i = order; i < g_res_max_order; i++) {
		cur_idx = vpage - g_res_zone->all_pages;
		buddy_idx = cur_idx ^ (1 << (unsigned int)i);
		self = &g_res_zone->pages[cur_idx];
		buddy = &g_res_zone->pages[buddy_idx];
		self->count = 0;
		/* is buddy free */
		if (buddy->order == i && buddy->count == 0) {
			/* release buddy */
			list_del(&buddy->node);
			/* combine self and buddy */
			if (cur_idx > buddy_idx) {
				vpage = buddy->page;
				buddy->order = i + 1;
				self->order = -1;
			} else {
				self->order = i + 1;
				buddy->order = -1;
			}
		} else {
			/* release self */
			list_add_tail(&self->node,
				&g_res_zone->free_areas[i].page_list);
			return -1;
		}
	}

	if (order == g_res_max_order) {
		cur_idx = vpage - g_res_zone->all_pages;
		tlogd("no need to find buddy, cur is %u\n", cur_idx);
		*page_index = cur_idx;
		return 0;
	}
	*page_index = (cur_idx > buddy_idx) ? buddy_idx : cur_idx;
	return 0;
}

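/*
 * Free path: validate the pointer, reject double frees, then merge the
 * block with its buddies. A fully merged block is queued on the
 * maximum-order free list here; partially merged blocks are queued
 * inside buddy_merge() itself.
 */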
void reserved_mem_free(const void *ptr)
{
	struct reserved_page_t *self = NULL;
	int self_idx;
	unsigned int page_index;
	struct reserved_page_t *max_order_page = NULL;

	if (ptr == NULL) {
		tloge("invalid ptr\n");
		return;
	}

	mutex_lock(&g_res_lock);
	self_idx = get_virt_page_index(ptr);
	if (self_idx < 0) {
		mutex_unlock(&g_res_lock);
		tloge("invalid page\n");
		return;
	}
	self = &g_res_zone->pages[self_idx];
	if (self->count == 0) {
		tloge("already free in reserved mempool\n");
		mutex_unlock(&g_res_lock);
		return;
	}

	if (buddy_merge(self->page, self->order, &page_index) < 0) {
		mutex_unlock(&g_res_lock);
		return;
	}

	max_order_page = &g_res_zone->pages[page_index];
	list_add_tail(&max_order_page->node,
		&g_res_zone->free_areas[g_res_max_order].page_list);
	mutex_unlock(&g_res_lock);
}