Lines matching refs: shm (drivers/tee/tee_shm.c)

static void release_registered_pages(struct tee_shm *shm)
{
        if (shm->pages) {
                if (shm->flags & TEE_SHM_USER_MAPPED)
                        unpin_user_pages(shm->pages, shm->num_pages);
                else
                        shm_put_kernel_pages(shm->pages, shm->num_pages);

                kfree(shm->pages);
        }
}

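The kernel-page helpers referenced here and in register_shm_helper() below are static functions in the same file whose bodies did not match the search. A minimal sketch of their contract, assuming linearly mapped (non-vmalloc) kernel memory; not the verbatim source:

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
        size_t n;

        /* Drop the reference shm_get_kernel_pages() took on each page. */
        for (n = 0; n < page_count; n++)
                put_page(pages[n]);
}

static int shm_get_kernel_pages(unsigned long start, size_t page_count,
                                struct page **pages)
{
        struct page *page = virt_to_page((void *)start);
        size_t n;

        /* Take a reference on each page backing the kernel buffer. */
        for (n = 0; n < page_count; n++) {
                pages[n] = page + n;
                get_page(pages[n]);
        }

        return page_count;
}
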
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_POOL) {
                teedev->pool->ops->free(teedev->pool, shm);
        } else if (shm->flags & TEE_SHM_DYNAMIC) {
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d", shm, rc);

                release_registered_pages(shm);
        }

        teedev_ctx_put(shm->ctx);
        kfree(shm);
        tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
                                        size_t align, u32 flags, int id)
{
        struct tee_shm *shm;
        int rc;

        /* tee_device_get()/put() bookkeeping elided. */
        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm)
                return ERR_PTR(-ENOMEM);

        refcount_set(&shm->refcount, 1);
        shm->flags = flags;
        shm->id = id;
        /*
         * We're assigning this as it is needed if the shm is to be
         * registered. If this function returns OK then the caller is
         * expected to call teedev_ctx_get() or clear shm->ctx in case
         * it's not needed any longer.
         */
        shm->ctx = ctx;

        rc = ctx->teedev->pool->ops->alloc(ctx->teedev->pool, shm, size, align);
        if (rc) {
                kfree(shm);
                return ERR_PTR(rc);
        }

        teedev_ctx_get(ctx);
        return shm;
}

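shm_alloc_helper() backs the exported allocators. For kernel-private buffers the wrapper is a one-liner; a sketch of tee_shm_alloc_kernel_buf(), close to but not guaranteed to be the verbatim source:

struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
        u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

        /* id == -1: this buffer is never published in the teedev IDR. */
        return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
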
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
        u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        void *ret;
        int id;

        /* ... reserve an id in teedev->idr (idr_alloc() with a NULL
         * pointer, under teedev->mutex) so shm->id is final before the
         * shm is published ... */

        shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
        if (IS_ERR(shm)) {
                /* ... drop the reserved id ... */
                return shm;
        }

        /* Publish the fully initialized shm (under teedev->mutex). */
        ret = idr_replace(&teedev->idr, shm, id);
        if (IS_ERR(ret)) {
                tee_shm_free(shm);
                return ret;
        }

        return shm;
}

static struct tee_shm *register_shm_helper(struct tee_context *ctx,
                                           unsigned long addr, size_t length,
                                           u32 flags, int id)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        unsigned long start;
        size_t num_pages;
        void *ret;
        int rc;

        /* tee_device_get()/teedev_ctx_get() bookkeeping elided. */
        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags;
        shm->ctx = ctx;
        shm->id = id;
        start = rounddown(addr, PAGE_SIZE);
        shm->offset = addr - start;
        shm->size = length;
        num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err_free_shm;
        }

        if (flags & TEE_SHM_USER_MAPPED)
                rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
                                         shm->pages);
        else
                rc = shm_get_kernel_pages(start, num_pages, shm->pages);
        if (rc > 0)
                shm->num_pages = rc;
        if (rc != num_pages) {
                if (rc >= 0)
                        rc = -ENOMEM;
                ret = ERR_PTR(rc);
                goto err_put_shm_pages;
        }

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_put_shm_pages;
        }

        return shm;

err_put_shm_pages:
        if (flags & TEE_SHM_USER_MAPPED)
                unpin_user_pages(shm->pages, shm->num_pages);
        else
                shm_put_kernel_pages(shm->pages, shm->num_pages);
        kfree(shm->pages);
err_free_shm:
        kfree(shm);
err:
        return ret;
}

struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
                                          unsigned long addr, size_t length)
{
        u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        void *ret;
        int id;

        /* ... access_ok() check and id reservation in teedev->idr,
         * as in tee_shm_alloc_user_buf() above ... */

        shm = register_shm_helper(ctx, addr, length, flags, id);
        if (IS_ERR(shm)) {
                /* ... drop the reserved id ... */
                return shm;
        }

        /* Publish under the reserved id (under teedev->mutex). */
        ret = idr_replace(&teedev->idr, shm, id);
        if (IS_ERR(ret)) {
                tee_shm_free(shm);
                return ret;
        }

        return shm;
}

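User space reaches tee_shm_register_user_buf() through the TEE_IOC_SHM_REGISTER ioctl on a TEE context fd. A hedged sketch of the calling side (error handling trimmed; register_buf() is a hypothetical wrapper):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

int register_buf(int ctx_fd, void *buf, size_t len)
{
        struct tee_ioctl_shm_register_data data = {
                .addr = (uintptr_t)buf,
                .length = len,
        };

        /* On success the ioctl returns an fd referring to the registered
         * shm and fills in data.id with its shared-memory id. */
        return ioctl(ctx_fd, TEE_IOC_SHM_REGISTER, &data);
}
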
static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct tee_shm *shm = filp->private_data;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        /* Check for overflowing the shared memory's size */
        if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

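This is the handler behind mmap() on the fd returned by TEE_IOC_SHM_ALLOC. A hedged user-space sketch of allocating a pool-backed shm and mapping it (alloc_and_map() is a hypothetical wrapper):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/tee.h>

void *alloc_and_map(int ctx_fd, size_t len)
{
        struct tee_ioctl_shm_alloc_data data = { .size = len };
        int shm_fd = ioctl(ctx_fd, TEE_IOC_SHM_ALLOC, &data);

        if (shm_fd < 0)
                return MAP_FAILED;
        /* data.size may have been rounded up by the driver. */
        return mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    shm_fd, 0);
}
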
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm: Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (shm->id < 0)
                return -EINVAL;

        /* Matched by tee_shm_put() in the release handler of tee_shm_fops. */
        refcount_inc(&shm->refcount);
        fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
                tee_shm_put(shm);
        return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm: Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        tee_shm_put(shm);
}

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:  Shared memory handle
 * @offs: Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 * the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!shm->kaddr)
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}

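A hedged example of a caller; stage_request(), msg and msg_len are hypothetical and not part of the file:

/* Hypothetical driver snippet: stage a request in shared memory. */
static int stage_request(struct tee_shm *shm, const void *msg, size_t msg_len)
{
        void *va = tee_shm_get_va(shm, 0);

        if (IS_ERR(va))
                return PTR_ERR(va);
        if (msg_len > shm->size)
                return -EINVAL;
        memcpy(va, msg, msg_len);
        return 0;
}
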
/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:  Shared memory handle
 * @offs: Offset from start of this shared memory
 * @pa:   Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 * error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}

/**
 * tee_shm_get_from_id() - Find shared memory object and increase
 * reference count
 * @ctx: Context owning the shared memory
 * @id:  Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        /* An unknown id, or an id owned by another context, is invalid. */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else
                refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
}

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm: Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        struct tee_device *teedev = shm->ctx->teedev;
        bool do_release = false;

        mutex_lock(&teedev->mutex);
        if (refcount_dec_and_test(&shm->refcount)) {
                /*
                 * The refcount has reached 0: remove the shm from the
                 * IDR before releasing the mutex, so the refcount_inc()
                 * in tee_shm_get_from_id() can never start from 0.
                 */
                if (shm->id >= 0)
                        idr_remove(&teedev->idr, shm->id);
                do_release = true;
        }
        mutex_unlock(&teedev->mutex);

        if (do_release)
                tee_shm_release(teedev, shm);
}
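
Taken together, tee_shm_get_from_id() and tee_shm_put() bracket any temporary use of a shm looked up by id. A hedged usage sketch; use_shm_by_id() and process_shm() are hypothetical:

static int use_shm_by_id(struct tee_context *ctx, int id)
{
        struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
        int rc;

        if (IS_ERR(shm))
                return PTR_ERR(shm);
        rc = process_shm(shm);  /* hypothetical worker */
        tee_shm_put(shm);       /* drop the reference taken by the lookup */
        return rc;
}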