xref: /kernel/linux/linux-5.10/drivers/tee/tee_core.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015-2016, Linaro Limited
4 */
5
6#define pr_fmt(fmt) "%s: " fmt, __func__
7
8#include <linux/cdev.h>
9#include <linux/cred.h>
10#include <linux/fs.h>
11#include <linux/idr.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/tee_drv.h>
15#include <linux/uaccess.h>
16#include <crypto/hash.h>
17#include <crypto/sha.h>
18#include "tee_private.h"
19
20#define TEE_NUM_DEVICES	32
21
22#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
23
24#define TEE_UUID_NS_NAME_SIZE	128
25
26/*
27 * TEE Client UUID name space identifier (UUIDv4)
28 *
29 * Value here is random UUID that is allocated as name space identifier for
30 * forming Client UUID's for TEE environment using UUIDv5 scheme.
31 */
32static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
33						   0xa1, 0xb8, 0xec, 0x4b,
34						   0xc0, 0x8e, 0x01, 0xb6);
35
36/*
37 * Unprivileged devices in the lower half range and privileged devices in
38 * the upper half range.
39 */
40static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
41static DEFINE_SPINLOCK(driver_lock);
42
43static struct class *tee_class;
44static dev_t tee_devt;
45
46struct tee_context *teedev_open(struct tee_device *teedev)
47{
48	int rc;
49	struct tee_context *ctx;
50
51	if (!tee_device_get(teedev))
52		return ERR_PTR(-EINVAL);
53
54	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
55	if (!ctx) {
56		rc = -ENOMEM;
57		goto err;
58	}
59
60	kref_init(&ctx->refcount);
61	ctx->teedev = teedev;
62	rc = teedev->desc->ops->open(ctx);
63	if (rc)
64		goto err;
65
66	return ctx;
67err:
68	kfree(ctx);
69	tee_device_put(teedev);
70	return ERR_PTR(rc);
71
72}
73EXPORT_SYMBOL_GPL(teedev_open);
74
/*
 * Take an extra reference on @ctx, unless the final release has already
 * started (ctx->releasing is set by teedev_ctx_release()), in which case
 * the context must not be resurrected.
 */
void teedev_ctx_get(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_get(&ctx->refcount);
}
82
/*
 * Final kref release for a context: let the driver tear down its state,
 * then free the context itself.
 */
static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);
	/*
	 * Mark the context as releasing so that teedev_ctx_get()/put()
	 * become no-ops while the driver release callback runs below.
	 */
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}
91
/*
 * Drop a reference on @ctx; a no-op if the final release is already in
 * progress (mirrors the guard in teedev_ctx_get()).
 */
void teedev_ctx_put(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_put(&ctx->refcount, teedev_ctx_release);
}
99
/*
 * teedev_close_context() - Close a context opened with teedev_open()
 * @ctx:	Context to close
 *
 * Drops the context reference and the device user reference taken by
 * teedev_open().
 */
void teedev_close_context(struct tee_context *ctx)
{
	/* Save the device pointer first: the put below may free @ctx */
	struct tee_device *teedev = ctx->teedev;

	teedev_ctx_put(ctx);
	tee_device_put(teedev);
}
EXPORT_SYMBOL_GPL(teedev_close_context);
108
109static int tee_open(struct inode *inode, struct file *filp)
110{
111	struct tee_context *ctx;
112
113	ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
114	if (IS_ERR(ctx))
115		return PTR_ERR(ctx);
116
117	/*
118	 * Default user-space behaviour is to wait for tee-supplicant
119	 * if not present for any requests in this context.
120	 */
121	ctx->supp_nowait = false;
122	filp->private_data = ctx;
123	return 0;
124}
125
126static int tee_release(struct inode *inode, struct file *filp)
127{
128	teedev_close_context(filp->private_data);
129	return 0;
130}
131
/**
 * uuid_v5() - Calculate UUIDv5
 * @uuid: Resulting UUID
 * @ns: Name space ID for UUIDv5 function
 * @name: Name for UUIDv5 function
 * @size: Size of name
 *
 * UUIDv5 is specified in RFC 4122.
 *
 * This implements section (for SHA-1):
 * 4.3.  Algorithm for Creating a Name-Based UUID
 *
 * @returns 0 on success, a negative errno from the crypto API otherwise.
 */
static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
		   size_t size)
{
	unsigned char hash[SHA1_DIGEST_SIZE];
	struct crypto_shash *shash = NULL;
	struct shash_desc *desc = NULL;
	int rc;

	shash = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(shash)) {
		rc = PTR_ERR(shash);
		pr_err("shash(sha1) allocation failed\n");
		return rc;
	}

	/* shash_desc carries a variable-sized tfm-specific context */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_shash;
	}

	desc->tfm = shash;

	rc = crypto_shash_init(desc);
	if (rc < 0)
		goto out_free_desc;

	/* Per RFC 4122: hash the name space ID first, then the name */
	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_update(desc, (const u8 *)name, size);
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_final(desc, hash);
	if (rc < 0)
		goto out_free_desc;

	/* Truncate the 160-bit digest to the 128-bit UUID */
	memcpy(uuid->b, hash, UUID_SIZE);

	/* Tag for version 5 */
	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
	uuid->b[8] = (hash[8] & 0x3F) | 0x80;

out_free_desc:
	kfree(desc);

out_free_shash:
	crypto_free_shash(shash);
	return rc;
}
197
/**
 * tee_session_calc_client_uuid() - Calculate the client UUID for a session
 * @uuid:		Resulting UUID
 * @connection_method:	One of the TEE_IOCTL_LOGIN_* methods
 * @connection_data:	Raw connection data supplied with the login method
 *
 * For public and REE-kernel logins the nil UUID is used. Otherwise a
 * UUIDv5 is derived from a "uid=<euid>" or "gid=<gid>" name under the
 * TEE client name space ID. For group login the caller must actually be
 * a member of the requested group.
 *
 * @returns 0 on success, < 0 on failure.
 */
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
{
	gid_t ns_grp = (gid_t)-1;
	kgid_t grp = INVALID_GID;
	char *name = NULL;
	int name_len;
	int rc;

	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
	    connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
		/* Nil UUID to be passed to TEE environment */
		uuid_copy(uuid, &uuid_null);
		return 0;
	}

	/*
	 * In Linux environment client UUID is based on UUIDv5.
	 *
	 * Determine client UUID with following semantics for 'name':
	 *
	 * For TEEC_LOGIN_USER:
	 * uid=<uid>
	 *
	 * For TEEC_LOGIN_GROUP:
	 * gid=<gid>
	 *
	 */

	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	switch (connection_method) {
	case TEE_IOCTL_LOGIN_USER:
		/* The effective uid of the calling process is used */
		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
				    current_euid().val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	case TEE_IOCTL_LOGIN_GROUP:
		/*
		 * The requested gid comes from user space; map it into the
		 * caller's user namespace and verify group membership.
		 */
		memcpy(&ns_grp, connection_data, sizeof(gid_t));
		grp = make_kgid(current_user_ns(), ns_grp);
		if (!gid_valid(grp) || !in_egroup_p(grp)) {
			rc = -EPERM;
			goto out_free_name;
		}

		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
				    grp.val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	default:
		rc = -EINVAL;
		goto out_free_name;
	}

	rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
	kfree(name);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
269
270static int tee_ioctl_version(struct tee_context *ctx,
271			     struct tee_ioctl_version_data __user *uvers)
272{
273	struct tee_ioctl_version_data vers;
274
275	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
276
277	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
278		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
279
280	if (copy_to_user(uvers, &vers, sizeof(vers)))
281		return -EFAULT;
282
283	return 0;
284}
285
/*
 * TEE_IOC_SHM_ALLOC: allocate shared memory and return a dma-buf file
 * descriptor for it. On success the shm id/flags/size are written back
 * into @udata and the new fd is returned.
 */
static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Report the actual properties of the allocation back */
	data.id = shm->id;
	data.flags = shm->flags;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
321
/*
 * TEE_IOC_SHM_REGISTER: register an existing user-space buffer as shared
 * memory. On success the shm id/flags/length are written back into
 * @udata and a dma-buf file descriptor is returned.
 */
static int
tee_ioctl_shm_register(struct tee_context *ctx,
		       struct tee_ioctl_shm_register_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_register_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	/* The whole user range must be plausibly addressable */
	if (!access_ok((void __user *)(unsigned long)data.addr, data.length))
		return -EFAULT;

	shm = tee_shm_register(ctx, data.addr, data.length,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.length = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);
	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
361
/*
 * params_from_user() - Copy an ioctl parameter array from user space
 * @ctx:	Calling context, used to resolve shm ids
 * @params:	Destination kernel array of @num_params entries
 * @num_params:	Number of parameters to copy
 * @uparams:	Source user-space array
 *
 * Memref parameters are resolved into struct tee_shm pointers with an
 * elevated reference count; on success the caller owns one reference per
 * resolved pointer and must tee_shm_put() each of them.
 *
 * @returns 0 on success, -EFAULT/-EINVAL or a tee_shm_get_from_id() error.
 */
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			/* Nothing to copy in; output values are filled later */
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If a NULL pointer is passed to a TA in the TEE,
			 * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
			 * indicating a NULL memory reference.
			 */
			if (ip.c != TEE_MEMREF_NULL) {
				/*
				 * If we fail to get a pointer to a shared
				 * memory object (and increase the ref count)
				 * from an identifier we return an error. All
				 * pointers that has been added in params have
				 * an increased ref count. It's the caller's
				 * responsibility to do tee_shm_put() on all
				 * resolved pointers.
				 */
				shm = tee_shm_get_from_id(ctx, ip.c);
				if (IS_ERR(shm))
					return PTR_ERR(shm);

				/*
				 * Ensure offset + size does not overflow
				 * offset and does not overflow the size of
				 * the referred shared memory object.
				 */
				if ((ip.a + ip.b) < ip.a ||
				    (ip.a + ip.b) > shm->size) {
					tee_shm_put(shm);
					return -EINVAL;
				}
			} else if (ctx->cap_memref_null) {
				/* Pass NULL pointer to OP-TEE */
				shm = NULL;
			} else {
				return -EINVAL;
			}

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
440
441static int params_to_user(struct tee_ioctl_param __user *uparams,
442			  size_t num_params, struct tee_param *params)
443{
444	size_t n;
445
446	for (n = 0; n < num_params; n++) {
447		struct tee_ioctl_param __user *up = uparams + n;
448		struct tee_param *p = params + n;
449
450		switch (p->attr) {
451		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
452		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
453			if (put_user(p->u.value.a, &up->a) ||
454			    put_user(p->u.value.b, &up->b) ||
455			    put_user(p->u.value.c, &up->c))
456				return -EFAULT;
457			break;
458		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
459		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
460			if (put_user((u64)p->u.memref.size, &up->b))
461				return -EFAULT;
462		default:
463			break;
464		}
465	}
466	return 0;
467}
468
/*
 * TEE_IOC_OPEN_SESSION: open a session towards a trusted application.
 *
 * The variable-length argument struct (arg + num_params parameters) is
 * read from @ubuf. If the session opens but the results cannot be copied
 * back to user space, the session is closed again so it does not leak.
 */
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* The buffer must hold exactly the header plus num_params entries */
	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	/* REE-kernel login methods are reserved for in-kernel clients */
	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
		pr_debug("login method not allowed for user-space client\n");
		rc = -EPERM;
		goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
547
/*
 * TEE_IOC_INVOKE: invoke a function in an already opened session.
 *
 * Follows the same variable-length argument layout and shm reference
 * bookkeeping as tee_ioctl_open_session().
 */
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* The buffer must hold exactly the header plus num_params entries */
	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
608
609static int tee_ioctl_cancel(struct tee_context *ctx,
610			    struct tee_ioctl_cancel_arg __user *uarg)
611{
612	struct tee_ioctl_cancel_arg arg;
613
614	if (!ctx->teedev->desc->ops->cancel_req)
615		return -EINVAL;
616
617	if (copy_from_user(&arg, uarg, sizeof(arg)))
618		return -EFAULT;
619
620	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
621						  arg.session);
622}
623
624static int
625tee_ioctl_close_session(struct tee_context *ctx,
626			struct tee_ioctl_close_session_arg __user *uarg)
627{
628	struct tee_ioctl_close_session_arg arg;
629
630	if (!ctx->teedev->desc->ops->close_session)
631		return -EINVAL;
632
633	if (copy_from_user(&arg, uarg, sizeof(arg)))
634		return -EFAULT;
635
636	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
637}
638
/*
 * params_to_supp() - Export request parameters to the supplicant
 * @ctx:	Calling context (currently unused for the copy itself)
 * @uparams:	User-space (supplicant) destination array
 * @num_params:	Number of parameters to copy
 * @params:	Kernel source array
 *
 * Memrefs are translated into (offset, size, shm id) triplets; a missing
 * shm object is signalled with an invalid id of (u64)-1.
 *
 * @returns 0 on success or -EFAULT.
 */
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			/* Unknown/none types are exported as zeroes */
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
682
/*
 * TEE_IOC_SUPPL_RECV: supplicant waits for the next request from the TEE.
 *
 * The driver fills in @func and may shrink @num_params; both are written
 * back to the user-space argument struct together with the parameters.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/* The buffer must hold exactly the header plus num_params entries */
	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
733
/*
 * params_from_supp() - Import the supplicant's response parameters
 * @params:	Destination kernel array of @num_params entries
 * @num_params:	Number of parameters
 * @uparams:	User-space (supplicant) source array
 *
 * Only output/inout values and memref sizes may be updated by the
 * supplicant; no shm resolution happens here.
 *
 * @returns 0 on success, -EFAULT or -EINVAL.
 */
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
779
/*
 * TEE_IOC_SUPPL_SEND: supplicant returns the result of a previously
 * received request back to the driver.
 */
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/*
	 * Unlike supp_recv, the buffer only needs to be large enough for
	 * num_params entries (the supplicant may send fewer than it got).
	 */
	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
822
/* Top-level ioctl dispatcher for the TEE character devices */
static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct tee_context *ctx = filp->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case TEE_IOC_VERSION:
		return tee_ioctl_version(ctx, uarg);
	case TEE_IOC_SHM_ALLOC:
		return tee_ioctl_shm_alloc(ctx, uarg);
	case TEE_IOC_SHM_REGISTER:
		return tee_ioctl_shm_register(ctx, uarg);
	case TEE_IOC_OPEN_SESSION:
		return tee_ioctl_open_session(ctx, uarg);
	case TEE_IOC_INVOKE:
		return tee_ioctl_invoke(ctx, uarg);
	case TEE_IOC_CANCEL:
		return tee_ioctl_cancel(ctx, uarg);
	case TEE_IOC_CLOSE_SESSION:
		return tee_ioctl_close_session(ctx, uarg);
	case TEE_IOC_SUPPL_RECV:
		return tee_ioctl_supp_recv(ctx, uarg);
	case TEE_IOC_SUPPL_SEND:
		return tee_ioctl_supp_send(ctx, uarg);
	default:
		return -EINVAL;
	}
}
851
/* File operations for both /dev/tee* and /dev/teepriv* devices */
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
859
/*
 * Device release callback: runs when the last reference on the embedded
 * struct device is dropped. Returns the device id to the bitmap and frees
 * the tee_device.
 */
static void tee_release_device(struct device *dev)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	spin_lock(&driver_lock);
	clear_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);
	mutex_destroy(&teedev->mutex);
	idr_destroy(&teedev->idr);
	kfree(teedev);
}
871
/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver
 * @dev:	Parent device for this device
 * @pool:	Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc, max_id;
	int offs = 0;

	/* The descriptor must provide the mandatory callbacks */
	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release || !pool)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	/*
	 * Unprivileged devices get ids in the lower half of the bitmap,
	 * privileged devices in the upper half.
	 */
	max_id = TEE_NUM_DEVICES / 2;

	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
		offs = TEE_NUM_DEVICES / 2;
		max_id = TEE_NUM_DEVICES;
	}

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
	if (teedev->id < max_id)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	/* All ids in our half are taken */
	if (teedev->id >= max_id) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	/*
	 * NOTE(review): the chrdev region covering this devt was allocated
	 * once for all TEE_NUM_DEVICES in tee_init(); unregistering a
	 * single minor here looks questionable — confirm against tee_init().
	 */
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
969
970static ssize_t implementation_id_show(struct device *dev,
971				      struct device_attribute *attr, char *buf)
972{
973	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
974	struct tee_ioctl_version_data vers;
975
976	teedev->desc->ops->get_version(teedev, &vers);
977	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
978}
979static DEVICE_ATTR_RO(implementation_id);
980
/* Sysfs attributes exposed by every TEE device */
static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

ATTRIBUTE_GROUPS(tee_dev);
987
/**
 * tee_device_register() - Registers a TEE device
 * @teedev:	Device to register
 *
 * tee_device_unregister() need to be called to remove the @teedev if
 * this function fails.
 *
 * @returns < 0 on failure
 */
int tee_device_register(struct tee_device *teedev)
{
	int rc;

	/* Guard against double registration */
	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		dev_err(&teedev->dev, "attempt to register twice\n");
		return -EINVAL;
	}

	teedev->dev.groups = tee_dev_groups;

	/* Makes the cdev and the sysfs device visible atomically */
	rc = cdev_device_add(&teedev->cdev, &teedev->dev);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		return rc;
	}

	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_device_register);
1021
/*
 * tee_device_put() - Drop a user reference taken with tee_device_get()
 * @teedev:	Device to put
 *
 * When the last user goes away, desc is cleared to block new users and
 * c_no_users is completed to let tee_device_unregister() proceed.
 */
void tee_device_put(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	/* Shouldn't put in this state */
	if (!WARN_ON(!teedev->desc)) {
		teedev->num_users--;
		if (!teedev->num_users) {
			teedev->desc = NULL;
			complete(&teedev->c_no_users);
		}
	}
	mutex_unlock(&teedev->mutex);
}
1035
1036bool tee_device_get(struct tee_device *teedev)
1037{
1038	mutex_lock(&teedev->mutex);
1039	if (!teedev->desc) {
1040		mutex_unlock(&teedev->mutex);
1041		return false;
1042	}
1043	teedev->num_users++;
1044	mutex_unlock(&teedev->mutex);
1045	return true;
1046}
1047
/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev:	Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
		cdev_device_del(&teedev->cdev, &teedev->dev);

	/* Drop the initial reference and wait for all users to go away */
	tee_device_put(teedev);
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	/* May trigger tee_release_device() and free @teedev */
	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);
1077
/**
 * tee_get_drvdata() - Return driver_data pointer
 * @teedev:	Device containing the driver_data pointer
 * @returns the driver_data pointer supplied to tee_device_alloc().
 */
void *tee_get_drvdata(struct tee_device *teedev)
{
	return dev_get_drvdata(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_get_drvdata);
1088
/* Closure data for the class_find_device() callback in
 * tee_client_open_context(): version buffer, caller data and predicate.
 */
struct match_dev_data {
	struct tee_ioctl_version_data *vers;
	const void *data;
	int (*match)(struct tee_ioctl_version_data *, const void *);
};
1094
/*
 * class_find_device() callback: fetch the device's version data and let
 * the caller-supplied predicate decide whether it matches.
 */
static int match_dev(struct device *dev, const void *data)
{
	const struct match_dev_data *match_data = data;
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	teedev->desc->ops->get_version(teedev, match_data->vers);
	return match_data->match(match_data->vers, match_data->data);
}
1103
/*
 * tee_client_open_context() - Open a context towards a matching TEE device
 * @start:	Device to continue iterating from, or NULL to start over
 * @match:	Predicate applied to each device's version data
 * @data:	Opaque data passed to @match
 * @vers:	Optional buffer receiving the matched device's version data
 *
 * Iterates the tee class devices, opening a context on the first match;
 * devices that fail to open (other than -ENOMEM) are skipped.
 *
 * @returns a new context or an ERR_PTR (-ENOENT when nothing matched).
 */
struct tee_context *
tee_client_open_context(struct tee_context *start,
			int (*match)(struct tee_ioctl_version_data *,
				     const void *),
			const void *data, struct tee_ioctl_version_data *vers)
{
	struct device *dev = NULL;
	struct device *put_dev = NULL;
	struct tee_context *ctx = NULL;
	struct tee_ioctl_version_data v;
	struct match_dev_data match_data = { vers ? vers : &v, data, match };

	if (start)
		dev = &start->teedev->dev;

	do {
		/* class_find_device() takes a reference on the match */
		dev = class_find_device(tee_class, dev, &match_data, match_dev);
		if (!dev) {
			ctx = ERR_PTR(-ENOENT);
			break;
		}

		/* Release the previous candidate, keep the current one */
		put_device(put_dev);
		put_dev = dev;

		ctx = teedev_open(container_of(dev, struct tee_device, dev));
	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);

	put_device(put_dev);
	/*
	 * Default behaviour for in kernel client is to not wait for
	 * tee-supplicant if not present for any requests in this context.
	 * Also this flag could be configured again before call to
	 * tee_client_open_session() if any in kernel client requires
	 * different behaviour.
	 */
	if (!IS_ERR(ctx))
		ctx->supp_nowait = true;

	return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
1146
/* In-kernel client API: close a context from tee_client_open_context() */
void tee_client_close_context(struct tee_context *ctx)
{
	teedev_close_context(ctx);
}
EXPORT_SYMBOL_GPL(tee_client_close_context);
1152
/* In-kernel client API: query version data of the context's device */
void tee_client_get_version(struct tee_context *ctx,
			    struct tee_ioctl_version_data *vers)
{
	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
}
EXPORT_SYMBOL_GPL(tee_client_get_version);
1159
/*
 * In-kernel client API: open a session. Returns -EINVAL if the driver
 * does not implement open_session.
 */
int tee_client_open_session(struct tee_context *ctx,
			    struct tee_ioctl_open_session_arg *arg,
			    struct tee_param *param)
{
	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;
	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
}
EXPORT_SYMBOL_GPL(tee_client_open_session);
1169
/*
 * In-kernel client API: close a session. Returns -EINVAL if the driver
 * does not implement close_session.
 */
int tee_client_close_session(struct tee_context *ctx, u32 session)
{
	if (!ctx->teedev->desc->ops->close_session)
		return -EINVAL;
	return ctx->teedev->desc->ops->close_session(ctx, session);
}
EXPORT_SYMBOL_GPL(tee_client_close_session);
1177
/*
 * In-kernel client API: invoke a function in a session. Returns -EINVAL
 * if the driver does not implement invoke_func.
 */
int tee_client_invoke_func(struct tee_context *ctx,
			   struct tee_ioctl_invoke_arg *arg,
			   struct tee_param *param)
{
	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;
	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
}
EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1187
/*
 * In-kernel client API: request cancellation of an ongoing invocation.
 * Returns -EINVAL if the driver does not implement cancel_req.
 */
int tee_client_cancel_req(struct tee_context *ctx,
			  struct tee_ioctl_cancel_arg *arg)
{
	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;
	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
						  arg->session);
}
1196
1197static int tee_client_device_match(struct device *dev,
1198				   struct device_driver *drv)
1199{
1200	const struct tee_client_device_id *id_table;
1201	struct tee_client_device *tee_device;
1202
1203	id_table = to_tee_client_driver(drv)->id_table;
1204	tee_device = to_tee_client_device(dev);
1205
1206	while (!uuid_is_null(&id_table->uuid)) {
1207		if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1208			return 1;
1209		id_table++;
1210	}
1211
1212	return 0;
1213}
1214
/* Bus uevent callback: advertise the device UUID for module autoloading */
static int tee_client_device_uevent(struct device *dev,
				    struct kobj_uevent_env *env)
{
	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;

	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
}
1222
/* Bus type used to bind TEE client devices to TEE client drivers */
struct bus_type tee_bus_type = {
	.name		= "tee",
	.match		= tee_client_device_match,
	.uevent		= tee_client_device_uevent,
};
EXPORT_SYMBOL_GPL(tee_bus_type);
1229
/*
 * Subsystem init: create the tee class, reserve a char device region for
 * all possible TEE devices and register the tee bus.
 */
static int __init tee_init(void)
{
	int rc;

	tee_class = class_create(THIS_MODULE, "tee");
	if (IS_ERR(tee_class)) {
		pr_err("couldn't create class\n");
		return PTR_ERR(tee_class);
	}

	/* One region covering every possible TEE device minor */
	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		goto out_unreg_class;
	}

	rc = bus_register(&tee_bus_type);
	if (rc) {
		pr_err("failed to register tee bus\n");
		goto out_unreg_chrdev;
	}

	return 0;

out_unreg_chrdev:
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
	class_destroy(tee_class);
	tee_class = NULL;

	return rc;
}
1262
/* Subsystem exit: undo tee_init() in reverse order */
static void __exit tee_exit(void)
{
	bus_unregister(&tee_bus_type);
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
	class_destroy(tee_class);
	tee_class = NULL;
}
1270
1271subsys_initcall(tee_init);
1272module_exit(tee_exit);
1273
1274MODULE_AUTHOR("Linaro");
1275MODULE_DESCRIPTION("TEE Driver");
1276MODULE_VERSION("1.0");
1277MODULE_LICENSE("GPL v2");
1278