// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED) {
			unpin_user_pages(shm->pages, shm->num_pages);
		} else {
			size_t n;

			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
		}

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 * @flags:	Flags setting properties for the requested shared memory
 *
 * The @flags field uses the TEE_SHM_* bits defined in <linux/tee_drv.h>.
 * TEE_SHM_MAPPED must currently always be set. If TEE_SHM_DMA_BUF is set
 * the memory is allocated from the dma_buf pool manager and gets an id so
 * that it can later be exported to user space, else driver private memory
 * is used.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags | TEE_SHM_POOL;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}
	}

	teedev_ctx_get(ctx);

	return shm;
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
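
/*
 * Usage sketch: allocating a dma-buf backed shared memory buffer from an
 * existing TEE context and releasing it again. The context is assumed to
 * have been obtained elsewhere, e.g. via tee_client_open_context(), and
 * example_alloc_shm() is a hypothetical caller, not part of this file.
 *
 *	static int example_alloc_shm(struct tee_context *ctx)
 *	{
 *		struct tee_shm *shm;
 *		void *va;
 *
 *		shm = tee_shm_alloc(ctx, 4096,
 *				    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *		if (IS_ERR(shm))
 *			return PTR_ERR(shm);
 *
 *		va = tee_shm_get_va(shm, 0);
 *		if (!IS_ERR(va))
 *			memset(va, 0, 4096);
 *
 *		tee_shm_free(shm);
 *		return 0;
 *	}
 */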

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);

/**
 * tee_shm_register() - Register shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	Address of the buffer to register
 * @length:	Length of the buffer
 * @flags:	Flags setting properties for the requested shared memory
 *
 * The only supported @flags combinations are TEE_SHM_DMA_BUF together with
 * either TEE_SHM_USER_MAPPED (a user space buffer) or TEE_SHM_KERNEL_MAPPED
 * (a kernel buffer). The pages backing the buffer are pinned and registered
 * with secure world.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_user_flags && flags != req_kernel_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags | TEE_SHM_REGISTER;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	if (flags & TEE_SHM_USER_MAPPED) {
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	} else {
		struct kvec *kiov;
		int i;

		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
		if (!kiov) {
			ret = ERR_PTR(-ENOMEM);
			goto err;
		}

		for (i = 0; i < num_pages; i++) {
			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
			kiov[i].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
		kfree(kiov);
	}
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	return shm;
err:
	if (shm) {
		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		release_registered_pages(shm);
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
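
/*
 * Usage sketch: registering an existing kernel buffer so it can be passed
 * to secure world as a memory reference. example_register_buf() is a
 * hypothetical caller, not part of this file; the buffer and context are
 * assumed to be managed elsewhere and to outlive the returned handle.
 *
 *	static struct tee_shm *example_register_buf(struct tee_context *ctx,
 *						    void *buf, size_t len)
 *	{
 *		return tee_shm_register(ctx, (unsigned long)buf, len,
 *					TEE_SHM_DMA_BUF |
 *					TEE_SHM_KERNEL_MAPPED);
 *	}
 *
 * The registration is undone and the pinned pages are released when the
 * last reference is dropped, e.g. by tee_shm_free().
 */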

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by an application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
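
/*
 * Usage sketch: translating between the kernel mapping and the physical
 * address of a mapped shared memory buffer. example_shm_addrs() is a
 * hypothetical caller, not part of this file; the shm is assumed to have
 * been allocated with TEE_SHM_MAPPED set.
 *
 *	static int example_shm_addrs(struct tee_shm *shm)
 *	{
 *		phys_addr_t pa;
 *		void *va;
 *
 *		va = tee_shm_get_va(shm, 0);
 *		if (IS_ERR(va))
 *			return PTR_ERR(va);
 *
 *		return tee_shm_va2pa(shm, va, &pa);
 *	}
 */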

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->flags & TEE_SHM_DMA_BUF)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
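
/*
 * Usage sketch: resolving a shared memory id received from user space and
 * dropping the reference again when done. example_use_shm_id() is a
 * hypothetical caller, not part of this file; the id is assumed to refer
 * to shared memory owned by the given context.
 *
 *	static int example_use_shm_id(struct tee_context *ctx, int id)
 *	{
 *		struct tee_shm *shm;
 *		void *va;
 *
 *		shm = tee_shm_get_from_id(ctx, id);
 *		if (IS_ERR(shm))
 *			return PTR_ERR(shm);
 *
 *		va = tee_shm_get_va(shm, 0);
 *		if (!IS_ERR(va))
 *			memset(va, 0, shm->size);
 *
 *		tee_shm_put(shm);
 *		return 0;
 *	}
 */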