/*
 * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd.
 * Description: dynamic ION memory allocation and freeing.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "dynamic_ion_mem.h"
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
#include <stdarg.h>
#else
#include <linux/stdarg.h>
#endif
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#ifndef CONFIG_DMABUF_MM
#include <linux/ion.h>
#endif
#include <linux/mm.h>
#include <linux/cma.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#if ((defined CONFIG_ION_MM) || (defined CONFIG_ION_MM_SECSG))
#include <linux/ion/mm_ion.h>
#endif
#ifdef CONFIG_DMABUF_MM
#include <linux/dmabuf/mm_dma_heap.h>
#endif
#include "tc_ns_log.h"
#include "tc_ns_client.h"
#include "smc_smp.h"
#include "gp_ops.h"
#include "teek_client_constants.h"
#include "mailbox_mempool.h"
#include "dynamic_ion_uuid.h"

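/* Serializes all access to g_dynamic_mem_list below. */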
static DEFINE_MUTEX(dynamic_mem_lock);
struct dynamic_mem_list {
	struct list_head list;
};

static const struct dynamic_mem_config g_dyn_mem_config[] = {
#ifdef DEF_ENG
	{TEE_SERVICE_UT, SEC_EID},
	{TEE_SERVICE_TEST_DYNION, SEC_AI_ION},
#endif
	{TEE_SECIDENTIFICATION1, SEC_EID},
	{TEE_SECIDENTIFICATION3, SEC_EID},
	{TEE_SERVICE_AI, SEC_AI_ION},
	{TEE_SERVICE_AI_TINY, SEC_AI_ION},
	{TEE_SERVICE_VCODEC, SEC_DRM_TEE},
};

static struct dynamic_mem_list g_dynamic_mem_list;
static const uint32_t g_dyn_mem_config_num = ARRAY_SIZE(g_dyn_mem_config);

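/*
 * Ask the TEE (via a global SMC command) to release the ION service
 * instance bound to this TA uuid, e.g. after registration has failed.
 */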
static int release_ion_srv(const struct tc_uuid *uuid)
{
	struct tc_ns_smc_cmd smc_cmd = {{0}, 0};

	smc_cmd.err_origin = TEEC_ORIGIN_COMMS;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_RELEASE_ION_SRV;
	if (memcpy_s(&smc_cmd.uuid, sizeof(smc_cmd.uuid), uuid, sizeof(*uuid))) {
		tloge("copy uuid failed\n");
		return -ENOMEM;
	}

	if (tc_ns_smc(&smc_cmd)) {
		tloge("send release ion srv cmd failed\n");
		return -EPERM;
	}
	return 0;
}

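/*
 * Build a page list for the TEE from the ION sg_table: a struct sglist
 * header followed by one ion_page_info entry per scatterlist segment,
 * allocated from mailbox memory so the TEE can read it by phys addr.
 */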
static int get_ion_sglist(struct dynamic_mem_item *mem_item)
{
	struct sglist *tmp_sglist = NULL;
	struct scatterlist *sg = NULL;
	struct page *page = NULL;
	uint32_t sglist_size;
	uint32_t i = 0;
	struct sg_table *ion_sg_table = mem_item->memory.dyn_sg_table;

	if (!ion_sg_table)
		return -EINVAL;

	if (ion_sg_table->nents <= 0 || ion_sg_table->nents > MAX_ION_NENTS)
		return -EINVAL;

	for_each_sg(ion_sg_table->sgl, sg, ion_sg_table->nents, i) {
		if (!sg) {
			tloge("invalid sg when getting ion sglist\n");
			return -EINVAL;
		}
	}

	sglist_size = sizeof(struct ion_page_info) * ion_sg_table->nents + sizeof(*tmp_sglist);
	tmp_sglist = (struct sglist *)mailbox_alloc(sglist_size, MB_FLAG_ZERO);
	if (!tmp_sglist) {
		tloge("mailbox alloc failed\n");
		return -ENOMEM;
	}

	tmp_sglist->sglist_size = (uint64_t)sglist_size;
	tmp_sglist->ion_size = (uint64_t)mem_item->size;
	tmp_sglist->info_length = (uint64_t)ion_sg_table->nents;
	for_each_sg(ion_sg_table->sgl, sg, ion_sg_table->nents, i) {
		page = sg_page(sg);
		tmp_sglist->page_info[i].phys_addr = page_to_phys(page);
		tmp_sglist->page_info[i].npages = sg->length / PAGE_SIZE;
	}
	mem_item->memory.ion_phys_addr = mailbox_virt_to_phys((uintptr_t)(void *)tmp_sglist);
	mem_item->memory.len = sglist_size;
	return 0;
}

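/*
 * Send the sglist to the TEE with a global command (add or delete a
 * dynamic ION region). The sglist and the command pack both live in
 * mailbox memory and are freed before returning.
 */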
static int send_dyn_ion_cmd(struct dynamic_mem_item *mem_item, unsigned int cmd_id, int32_t *ret_origin)
{
	struct tc_ns_smc_cmd smc_cmd = {{0}, 0};
	int ret;
	struct mb_cmd_pack *mb_pack = NULL;

	if (!mem_item) {
		tloge("mem_item is null\n");
		return -EINVAL;
	}

	ret = get_ion_sglist(mem_item);
	if (ret != 0)
		return ret;

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		mailbox_free(phys_to_virt(mem_item->memory.ion_phys_addr));
		tloge("alloc cmd pack failed\n");
		return -ENOMEM;
	}
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = cmd_id;
	smc_cmd.err_origin = TEEC_ORIGIN_COMMS;
	mb_pack->operation.paramtypes = teec_param_types(
		TEE_PARAM_TYPE_ION_SGLIST_INPUT,
		TEE_PARAM_TYPE_VALUE_INPUT,
		TEE_PARAM_TYPE_VALUE_INPUT,
		TEE_PARAM_TYPE_NONE);

	mb_pack->operation.params[0].memref.size = (uint32_t)mem_item->memory.len;
	mb_pack->operation.params[0].memref.buffer =
		(uint32_t)(mem_item->memory.ion_phys_addr & 0xFFFFFFFF);
	mb_pack->operation.buffer_h_addr[0] =
		(uint64_t)(mem_item->memory.ion_phys_addr) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[1].value.a = (uint32_t)mem_item->size;
	mb_pack->operation.params[2].value.a = mem_item->configid;
	smc_cmd.operation_phys = (unsigned int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys = (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;

	if (tc_ns_smc(&smc_cmd)) {
		if (ret_origin)
			*ret_origin = smc_cmd.err_origin;
		ret = -EPERM;
		tloge("send dyn ion cmd failed\n");
	}
	mailbox_free(phys_to_virt(mem_item->memory.ion_phys_addr));
	mailbox_free(mb_pack);
	return ret;
}

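/* List lookup helpers: callers must hold dynamic_mem_lock. */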
static struct dynamic_mem_item *find_memitem_by_configid_locked(uint32_t configid)
{
	struct dynamic_mem_item *item = NULL;

	list_for_each_entry(item, &g_dynamic_mem_list.list, head) {
		if (item->configid == configid)
			return item;
	}
	return NULL;
}

static struct dynamic_mem_item *find_memitem_by_uuid_locked(const struct tc_uuid *uuid)
{
	struct dynamic_mem_item *item = NULL;

	list_for_each_entry(item, &g_dynamic_mem_list.list, head) {
		if (!memcmp(&item->uuid, uuid, sizeof(*uuid)))
			return item;
	}
	return NULL;
}

#define BLOCK_64KB_SIZE (64 * 1024) /* 64KB */
#define BLOCK_64KB_MASK 0xFFFFFFFFFFFF0000
/* size should be aligned with 64KB */
#define BLOCK_64KB_SIZE_MASK (BLOCK_64KB_SIZE - 1)
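/*
 * Round the requested size up to the next 64KB boundary before carving
 * it out of the secure region, e.g. a 0x12345-byte request becomes a
 * 0x20000-byte allocation. The overflow check below guards the
 * round-up addition against wrapping.
 */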
static int proc_alloc_dyn_mem(struct dynamic_mem_item *mem_item)
{
	struct sg_table *ion_sg_table = NULL;

	if (mem_item->size + BLOCK_64KB_SIZE_MASK < mem_item->size) {
		tloge("ion size is error, size = %x\n", mem_item->size);
		return -EINVAL;
	}
	mem_item->memory.len = (mem_item->size + BLOCK_64KB_SIZE_MASK) & BLOCK_64KB_MASK;

	ion_sg_table = mm_secmem_alloc(mem_item->ddr_sec_region,
		mem_item->memory.len);
	if (!ion_sg_table) {
		tloge("failed to get ion page, configid = %d\n",
			mem_item->configid);
		return -ENOMEM;
	}
	mem_item->memory.dyn_sg_table = ion_sg_table;
	return 0;
}

static void proc_free_dyn_mem(struct dynamic_mem_item *mem_item)
{
	if (!mem_item->memory.dyn_sg_table) {
		tloge("dyn_sg_table is NULL\n");
		return;
	}
	mm_secmem_free(mem_item->ddr_sec_region,
		mem_item->memory.dyn_sg_table);
	mem_item->memory.dyn_sg_table = NULL;
}

int init_dynamic_mem(void)
{
	INIT_LIST_HEAD(&(g_dynamic_mem_list.list));
	return 0;
}

static int32_t find_ddr_sec_region_by_uuid(const struct tc_uuid *uuid,
	uint32_t *ddr_sec_region)
{
	uint32_t i;

	for (i = 0; i < g_dyn_mem_config_num; i++) {
		if (!memcmp(&(g_dyn_mem_config[i].uuid), uuid,
			sizeof(*uuid))) {
			*ddr_sec_region = g_dyn_mem_config[i].ddr_sec_region;
			return 0;
		}
	}
	return -EINVAL;
}
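/*
 * Allocate and fill a tracking item for one dynamic ION region; the
 * secure region id is looked up from the per-uuid config table above.
 */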

static struct dynamic_mem_item *alloc_dyn_mem_item(uint32_t configid,
	uint32_t cafd, const struct tc_uuid *uuid, uint32_t size)
{
	uint32_t ddr_sec_region;
	struct dynamic_mem_item *mem_item = NULL;
	int32_t result;

	result = find_ddr_sec_region_by_uuid(uuid, &ddr_sec_region);
	if (result != 0) {
		tloge("find ddr sec region failed\n");
		return NULL;
	}

	mem_item = kzalloc(sizeof(*mem_item), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)mem_item)) {
		tloge("alloc mem item failed\n");
		return NULL;
	}

	mem_item->ddr_sec_region = ddr_sec_region;
	mem_item->configid = configid;
	mem_item->size = size;
	mem_item->cafd = cafd;
	result = memcpy_s(&mem_item->uuid, sizeof(mem_item->uuid), uuid,
		sizeof(*uuid));
	if (result != EOK) {
		tloge("memcpy uuid failed\n");
		kfree(mem_item);
		return NULL;
	}
	return mem_item;
}

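/*
 * Allocate a secure region for this config id and register it with the
 * TEE; on success the item is queued on g_dynamic_mem_list. Fails if
 * the config id is already in use.
 */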
static int trans_configid2memid(uint32_t configid, uint32_t cafd,
	const struct tc_uuid *uuid, uint32_t size, int32_t *ret_origin)
{
	int result;

	if (!uuid)
		return -EINVAL;

	mutex_lock(&dynamic_mem_lock);
	do {
		struct dynamic_mem_item *mem_item =
			find_memitem_by_configid_locked(configid);
		if (mem_item) {
			result = -EINVAL;
			break;
		}

		mem_item = alloc_dyn_mem_item(configid, cafd, uuid, size);
		if (!mem_item) {
			tloge("alloc dyn mem item failed\n");
			result = -ENOMEM;
			break;
		}

		result = proc_alloc_dyn_mem(mem_item);
		if (result != 0) {
			tloge("alloc dyn mem failed, ret = %d\n", result);
			kfree(mem_item);
			break;
		}
		/* register to tee */
		result = send_dyn_ion_cmd(mem_item, GLOBAL_CMD_ID_ADD_DYNAMIC_ION, ret_origin);
		if (result != 0) {
			tloge("register to tee failed, result = %d\n", result);
			proc_free_dyn_mem(mem_item);
			kfree(mem_item);
			break;
		}
		list_add_tail(&mem_item->head, &g_dynamic_mem_list.list);
		tloge("log import: alloc ion configid=%d\n",
			mem_item->configid);
	} while (0);

	mutex_unlock(&dynamic_mem_lock);
	return result;
}

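/*
 * Unregister the region from the TEE, free the secure memory and drop
 * the item from the list. Caller must hold dynamic_mem_lock.
 */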
static void release_configid_mem_locked(uint32_t configid)
{
	int result;

	/* if the config id maps to a mem item, release it so the id can be reused */
	do {
		struct dynamic_mem_item *mem_item =
			find_memitem_by_configid_locked(configid);
		if (!mem_item) {
			tloge("fail to find memitem by configid\n");
			break;
		}

		result = send_dyn_ion_cmd(mem_item, GLOBAL_CMD_ID_DEL_DYNAMIC_ION, NULL);
		if (result != 0) {
			tloge("unregister_from_tee configid=%d, result = %d\n",
				mem_item->configid, result);
			break;
		}
		proc_free_dyn_mem(mem_item);
		list_del(&mem_item->head);
		kfree(mem_item);
		tloge("log import: free ion\n");
	} while (0);
}

int load_app_use_configid(uint32_t configid, uint32_t cafd,
	const struct tc_uuid *uuid, uint32_t size, int32_t *ret_origin)
{
	int result;

	if (!uuid)
		return -EINVAL;

	result = trans_configid2memid(configid, cafd, uuid, size, ret_origin);
	if (result != 0) {
		tloge("trans_configid2memid failed ret = %d\n", result);
		if (release_ion_srv(uuid) != 0)
			tloge("release ion srv failed\n");
	}
	return result;
}

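/*
 * Teardown hooks: release any dynamic ION regions still owned by a
 * dying TA (by uuid) or by a closing client fd (by cafd).
 */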
void kill_ion_by_uuid(const struct tc_uuid *uuid)
{
	if (!uuid) {
		tloge("uuid is null\n");
		return;
	}

	mutex_lock(&dynamic_mem_lock);
	do {
		struct dynamic_mem_item *mem_item =
			find_memitem_by_uuid_locked(uuid);
		if (!mem_item)
			break;
		tlogd("kill ION by UUID\n");
		release_configid_mem_locked(mem_item->configid);
	} while (0);
	mutex_unlock(&dynamic_mem_lock);
}

void kill_ion_by_cafd(unsigned int cafd)
{
	struct dynamic_mem_item *item = NULL;
	struct dynamic_mem_item *temp = NULL;

	tlogd("kill_ion_by_cafd:\n");
	mutex_lock(&dynamic_mem_lock);
	list_for_each_entry_safe(item, temp, &g_dynamic_mem_list.list, head) {
		if (item->cafd == cafd)
			release_configid_mem_locked(item->configid);
	}
	mutex_unlock(&dynamic_mem_lock);
}

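/*
 * Called after a TA image has been loaded: if the TEE reported a config
 * id and ION size in params[1], allocate and register the matching
 * dynamic ION region for that TA.
 */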
int load_image_for_ion(const struct load_img_params *params, int32_t *ret_origin)
{
	int ret = 0;

	if (!params)
		return -EFAULT;

	/* check whether dynamic ION memory needs to be added */
	uint32_t configid = params->mb_pack->operation.params[1].value.a;
	uint32_t ion_size = params->mb_pack->operation.params[1].value.b;
	int32_t check_result = (configid != 0 && ion_size != 0);

	tloge("check load result=%d, cfgid=%d, ion_size=%d, uuid=%x\n",
		check_result, configid, ion_size, params->uuid_return->time_low);
	if (check_result) {
		ret = load_app_use_configid(configid, params->dev_file->dev_file_id,
			params->uuid_return, ion_size, ret_origin);
		if (ret != 0) {
			tloge("load app use configid failed ret=%d\n", ret);
			return -EFAULT;
		}
	}
	return ret;
}

bool is_ion_param(uint32_t param_type)
{
	return param_type == TEEC_ION_INPUT ||
		param_type == TEEC_ION_SGLIST_INPUT;
}

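/*
 * Helpers for translating an ION sg_table into the flat page list the
 * TEE consumes: check_sg_list() validates every entry first, then
 * fill_sg_list() records each segment's physical address and page count.
 */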
static void fill_sg_list(struct sg_table *ion_table,
	uint32_t ion_list_num, struct sglist *tmp_sglist)
{
	uint32_t i;
	struct page *page = NULL;
	struct scatterlist *sg = NULL;

	for_each_sg(ion_table->sgl, sg, ion_list_num, i) {
		page = sg_page(sg);
		tmp_sglist->page_info[i].phys_addr = page_to_phys(page);
		tmp_sglist->page_info[i].npages = sg->length / PAGE_SIZE;
	}
}

static int check_sg_list(const struct sg_table *ion_table, uint32_t ion_list_num)
{
	struct scatterlist *sg = NULL;
	uint32_t i;

	for_each_sg(ion_table->sgl, sg, ion_list_num, i) {
		if (!sg) {
			tloge("invalid sg when getting ion sglist\n");
			return -EFAULT;
		}
	}
	return 0;
}

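/*
 * Resolve an ION shared fd to a TEE-readable sglist in mailbox memory.
 * For SEC_DRM_TEE buffers only the ion_id is passed through; for other
 * secure types the full page list is copied into the sglist.
 */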
static int get_ion_sg_list_from_fd(uint32_t ion_shared_fd,
	uint32_t ion_alloc_size, phys_addr_t *sglist_table,
	size_t *ion_sglist_size)
{
	struct sg_table *ion_table = NULL;
	struct sglist *tmp_sglist = NULL;
	uint64_t ion_id = 0;
	enum SEC_SVC ion_type = 0;
	uint32_t ion_list_num = 0;
	uint32_t sglist_size;

#ifdef CONFIG_DMABUF_MM
	if (mm_dma_heap_secmem_get_buffer(ion_shared_fd, &ion_table, &ion_id, &ion_type)) {
#else
	if (secmem_get_buffer(ion_shared_fd, &ion_table, &ion_id, &ion_type)) {
#endif
		tloge("get ion table failed\n");
		return -EFAULT;
	}

	if (ion_type != SEC_DRM_TEE) {
		if (ion_table->nents <= 0 || ion_table->nents > MAX_ION_NENTS)
			return -EFAULT;
		ion_list_num = (uint32_t)(ion_table->nents & INT_MAX);
		if (check_sg_list(ion_table, ion_list_num) != 0)
			return -EFAULT;
	}

	/* ion_list_num is less than 1024, so sglist_size won't overflow */
	sglist_size = sizeof(struct ion_page_info) * ion_list_num + sizeof(*tmp_sglist);
	tmp_sglist = (struct sglist *)mailbox_alloc(sglist_size, MB_FLAG_ZERO);
	if (!tmp_sglist) {
		tloge("sglist mem alloc failed\n");
		return -ENOMEM;
	}
	tmp_sglist->sglist_size = (uint64_t)sglist_size;
	tmp_sglist->ion_size = (uint64_t)ion_alloc_size;
	tmp_sglist->info_length = (uint64_t)ion_list_num;
	if (ion_type != SEC_DRM_TEE)
		fill_sg_list(ion_table, ion_list_num, tmp_sglist);
	else
		tmp_sglist->ion_id = ion_id;

	*sglist_table = mailbox_virt_to_phys((uintptr_t)tmp_sglist);
	*ion_sglist_size = sglist_size;
	return 0;
}

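/*
 * Marshal a TEEC_ION_SGLIST_INPUT parameter: read the ION fd (value.a)
 * and allocation size (value.b) from the client, build the sglist, and
 * hand its physical address and size to the TEE as a memref.
 */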
int alloc_for_ion_sglist(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	struct tc_ns_operation *operation = NULL;
	size_t ion_sglist_size = 0;
	phys_addr_t ion_sglist_addr = 0x0;
	union tc_ns_client_param *client_param = NULL;
	unsigned int ion_shared_fd = 0;
	unsigned int ion_alloc_size;
	uint64_t a_addr, b_addr;

	/* this never happens */
	if (index >= TEE_PARAM_NUM || !call_params || !op_params)
		return -EINVAL;

	operation = &op_params->mb_pack->operation;
	client_param = &(call_params->context->params[index]);
	a_addr = client_param->value.a_addr |
		((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
	b_addr = client_param->value.b_addr |
		((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);

	if (read_from_client(&operation->params[index].value.a,
		sizeof(operation->params[index].value.a),
		(void *)(uintptr_t)a_addr,
		sizeof(operation->params[index].value.a), kernel_params)) {
		tloge("value a copy failed\n");
		return -EFAULT;
	}
	if (read_from_client(&operation->params[index].value.b,
		sizeof(operation->params[index].value.b),
		(void *)(uintptr_t)b_addr,
		sizeof(operation->params[index].value.b), kernel_params)) {
		tloge("value b copy failed\n");
		return -EFAULT;
	}
	ion_shared_fd = operation->params[index].value.a;
	ion_alloc_size = operation->params[index].value.b;

	if (get_ion_sg_list_from_fd(ion_shared_fd, ion_alloc_size,
		&ion_sglist_addr, &ion_sglist_size)) {
		tloge("get ion sglist failed, fd=%u\n", ion_shared_fd);
		return -EFAULT;
	}
	op_params->local_tmpbuf[index].temp_buffer = phys_to_virt(ion_sglist_addr);
	op_params->local_tmpbuf[index].size = ion_sglist_size;

	operation->params[index].memref.buffer = (unsigned int)ion_sglist_addr;
	operation->buffer_h_addr[index] =
		(uint64_t)ion_sglist_addr >> ADDR_TRANS_NUM;
	operation->params[index].memref.size = (unsigned int)ion_sglist_size;
	op_params->trans_paramtype[index] = param_type;

	return 0;
}

static int transfer_ion_params(struct tc_ns_operation *operation,
	union tc_ns_client_param *client_param, uint8_t kernel_params,
	unsigned int index)
{
	uint64_t a_addr = client_param->value.a_addr |
		((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
	uint64_t b_addr = client_param->value.b_addr |
		((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);

	if (read_from_client(&operation->params[index].value.a,
		sizeof(operation->params[index].value.a),
		(void *)(uintptr_t)a_addr,
		sizeof(operation->params[index].value.a), kernel_params)) {
		tloge("value.a_addr copy failed\n");
		return -EFAULT;
	}

	if (read_from_client(&operation->params[index].value.b,
		sizeof(operation->params[index].value.b),
		(void *)(uintptr_t)b_addr,
		sizeof(operation->params[index].value.b), kernel_params)) {
		tloge("value.b_addr copy failed\n");
		return -EFAULT;
	}

	return 0;
}

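/*
 * Marshal a TEEC_ION_INPUT parameter: resolve the dma-buf fd to the
 * buffer's secure physical address and pass addr/size to the TEE as
 * plain values, clamping the size to what the client requested.
 */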
int alloc_for_ion(const struct tc_call_params *call_params,
	struct tc_op_params *op_params, uint8_t kernel_params,
	uint32_t param_type, unsigned int index)
{
	struct tc_ns_operation *operation = NULL;
	size_t drm_ion_size = 0;
	phys_addr_t drm_ion_phys = 0x0;
	struct dma_buf *drm_dma_buf = NULL;
	union tc_ns_client_param *client_param = NULL;
	unsigned int ion_shared_fd = 0;
	int ret = 0;

	/* this never happens */
	if (index >= TEE_PARAM_NUM || !call_params || !op_params)
		return -EINVAL;

	operation = &op_params->mb_pack->operation;
	client_param = &(call_params->context->params[index]);
	if (transfer_ion_params(operation, client_param, kernel_params, index))
		return -EFAULT;

	ion_shared_fd = operation->params[index].value.a;
	drm_dma_buf = dma_buf_get(ion_shared_fd);
	if (IS_ERR_OR_NULL(drm_dma_buf)) {
		tloge("drm dma buf is err, fd = %u\n", ion_shared_fd);
		return -EFAULT;
	}

#ifdef CONFIG_DMABUF_MM
	ret = mm_dma_heap_secmem_get_phys(drm_dma_buf, &drm_ion_phys, &drm_ion_size);
#else
	ret = ion_secmem_get_phys(drm_dma_buf, &drm_ion_phys, &drm_ion_size);
#endif
	if (ret != 0) {
		tloge("in %s err: ret=%d fd=%u\n", __func__, ret, ion_shared_fd);
		dma_buf_put(drm_dma_buf);
		return -EFAULT;
	}

	if (drm_ion_size > operation->params[index].value.b)
		drm_ion_size = operation->params[index].value.b;
	operation->params[index].value.a = (unsigned int)drm_ion_phys;
	operation->params[index].value.b = (unsigned int)drm_ion_size;
	op_params->trans_paramtype[index] = param_type;
	dma_buf_put(drm_dma_buf);

	return ret;
}