Lines matching refs:shm in drivers/tee/tee_shm.c (Linux TEE subsystem); the leading number on each line is its line number in that file.
16 static void release_registered_pages(struct tee_shm *shm)
18 if (shm->pages) {
19 if (shm->flags & TEE_SHM_USER_MAPPED) {
20 unpin_user_pages(shm->pages, shm->num_pages);
24 for (n = 0; n < shm->num_pages; n++)
25 put_page(shm->pages[n]);
28 kfree(shm->pages);
32 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
34 if (shm->flags & TEE_SHM_POOL) {
37 if (shm->flags & TEE_SHM_DMA_BUF)
42 poolm->ops->free(poolm, shm);
43 } else if (shm->flags & TEE_SHM_REGISTER) {
44 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
48 "unregister shm %p failed: %d", shm, rc);
50 release_registered_pages(shm);
53 teedev_ctx_put(shm->ctx);
55 kfree(shm);
64 struct tee_shm *shm;
75 dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
88 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
89 if (!shm) {
94 refcount_set(&shm->refcount, 1);
95 shm->flags = flags | TEE_SHM_POOL;
96 shm->ctx = ctx;
102 rc = poolm->ops->alloc(poolm, shm, size);
110 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
112 if (shm->id < 0) {
113 ret = ERR_PTR(shm->id);
120 return shm;
122 poolm->ops->free(poolm, shm);
124 kfree(shm);
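For orientation, here is a minimal kernel-side sketch of how the pool allocation path above is typically consumed. It assumes the tee_shm_alloc() signature used by this version of the file, with TEE_SHM_MAPPED | TEE_SHM_DMA_BUF as the flag combination the invalid-flags check near line 75 accepts, and an already-open struct tee_context; the helper name is hypothetical.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

/* Hypothetical helper: allocate one page of pool-backed shared memory,
 * zero it through the kernel mapping, and return the handle.  Internally
 * this runs the path in the listing: kzalloc of the handle, the pool
 * manager's alloc op, then idr_alloc() to assign shm->id. */
static struct tee_shm *example_alloc_page(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc(ctx, PAGE_SIZE, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return shm;

	va = tee_shm_get_va(shm, 0);	/* valid because TEE_SHM_MAPPED is set */
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return ERR_CAST(va);
	}

	memset(va, 0, PAGE_SIZE);
	return shm;
}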
155 struct tee_shm *shm;
175 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
176 if (!shm) {
181 refcount_set(&shm->refcount, 1);
182 shm->flags = flags | TEE_SHM_REGISTER;
183 shm->ctx = ctx;
184 shm->id = -1;
187 shm->offset = addr - start;
188 shm->size = length;
190 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
191 if (!shm->pages) {
198 shm->pages);
214 rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
218 shm->num_pages = rc;
227 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
230 if (shm->id < 0) {
231 ret = ERR_PTR(shm->id);
235 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
236 shm->num_pages, start);
242 return shm;
244 if (shm) {
245 if (shm->id >= 0) {
247 idr_remove(&teedev->idr, shm->id);
250 release_registered_pages(shm);
252 kfree(shm);
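The registration path above either pins the caller's user pages (they are later released with unpin_user_pages() at line 20) or takes references on kernel pages via get_kernel_pages() (line 214), then hands the page list to the driver's shm_register() op (lines 235-236). A hedged kernel-side sketch of handing an existing kernel buffer to the TEE follows; the flag combination and the helper name are assumptions based on this version of the API.

#include <linux/tee_drv.h>

/* Hypothetical sketch: register an existing, page-aligned kernel buffer
 * with the TEE.  TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED is assumed to be
 * the combination tee_shm_register() accepts for kernel memory in this
 * version; on that path the listing takes page references with
 * get_kernel_pages() rather than pinning user pages. */
static struct tee_shm *example_register_kbuf(struct tee_context *ctx,
					     void *buf, size_t len)
{
	return tee_shm_register(ctx, (unsigned long)buf, len,
				TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
}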
267 struct tee_shm *shm = filp->private_data;
271 if (shm->flags & TEE_SHM_USER_MAPPED)
275 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
278 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
290 * @shm: Shared memory handle
293 int tee_shm_get_fd(struct tee_shm *shm)
297 if (!(shm->flags & TEE_SHM_DMA_BUF))
301 refcount_inc(&shm->refcount);
302 fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
304 tee_shm_put(shm);
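From user space the same objects are reached through the TEE character device: TEE_IOC_SHM_ALLOC returns the anonymous-inode file descriptor created by tee_shm_get_fd() above, and mmap() on that descriptor lands in the remap_pfn_range() call at line 278. A small illustrative program follows; the device node /dev/tee0 and the 4 KiB size are assumptions for the example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
	int tee_fd, shm_fd;
	void *p;

	tee_fd = open("/dev/tee0", O_RDWR | O_CLOEXEC);
	if (tee_fd < 0)
		return 1;

	/* The ioctl's return value is the fd from tee_shm_get_fd() */
	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);
	if (shm_fd < 0)
		return 1;

	/* mmap() on the shm fd ends up in tee_shm_mmap() */
	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 0, data.size);
	printf("shm id %d mapped at %p\n", data.id, p);

	munmap(p, data.size);
	close(shm_fd);
	close(tee_fd);
	return 0;
}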
310 * @shm: Handle to shared memory to free
312 void tee_shm_free(struct tee_shm *shm)
314 tee_shm_put(shm);
320 * @shm: Shared memory handle
325 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
327 if (!(shm->flags & TEE_SHM_MAPPED))
329 /* Check that we're in the range of the shm */
330 if ((char *)va < (char *)shm->kaddr)
332 if ((char *)va >= ((char *)shm->kaddr + shm->size))
336 shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
342 * @shm: Shared memory handle
347 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
349 if (!(shm->flags & TEE_SHM_MAPPED))
351 /* Check that we're in the range of the shm */
352 if (pa < shm->paddr)
354 if (pa >= (shm->paddr + shm->size))
358 void *v = tee_shm_get_va(shm, pa - shm->paddr);
370 * @shm: Shared memory handle
375 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
377 if (!(shm->flags & TEE_SHM_MAPPED))
379 if (offs >= shm->size)
381 return (char *)shm->kaddr + offs;
387 * @shm: Shared memory handle
393 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
395 if (offs >= shm->size)
398 *pa = shm->paddr + offs;
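The accessors above all work in terms of byte offsets or addresses within a mapped (TEE_SHM_MAPPED) buffer and fail with -EINVAL once the argument falls outside shm->size. A short hypothetical sketch of the offset-based pair:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/tee_drv.h>

/* Hypothetical helper: report both views of one offset inside the shm. */
static int example_describe(struct tee_shm *shm, size_t offs)
{
	phys_addr_t pa;
	void *va;
	int rc;

	va = tee_shm_get_va(shm, offs);		/* shm->kaddr + offs, or ERR_PTR */
	if (IS_ERR(va))
		return PTR_ERR(va);

	rc = tee_shm_get_pa(shm, offs, &pa);	/* shm->paddr + offs */
	if (rc)
		return rc;

	pr_info("offset %zu: va %p pa %pa\n", offs, va, &pa);
	return 0;
}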
413 struct tee_shm *shm;
420 shm = idr_find(&teedev->idr, id);
426 if (!shm || shm->ctx != ctx)
427 shm = ERR_PTR(-EINVAL);
429 refcount_inc(&shm->refcount);
431 return shm;
437 * @shm: Shared memory handle
439 void tee_shm_put(struct tee_shm *shm)
441 struct tee_device *teedev = shm->ctx->teedev;
445 if (refcount_dec_and_test(&shm->refcount)) {
452 if (shm->flags & TEE_SHM_DMA_BUF)
453 idr_remove(&teedev->idr, shm->id);
459 tee_shm_release(teedev, shm);
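tee_shm_get_from_id() and tee_shm_put() form the lookup/refcount pair shown above: the lookup bumps shm->refcount under the idr, and the final tee_shm_put() removes the id and calls tee_shm_release(). A hypothetical usage sketch, assuming a kernel client that only knows the shm id:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/tee_drv.h>

/* Hypothetical helper: look up a buffer by id, use it while the
 * reference is held, and balance the lookup with tee_shm_put(). */
static int example_use_by_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm;

	shm = tee_shm_get_from_id(ctx, id);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	pr_info("shm %d is %zu bytes\n", id, tee_shm_get_size(shm));
	/* ... use the buffer only while the reference is held ... */

	tee_shm_put(shm);
	return 0;
}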