Lines matching refs: bo — every source line in the ivpu GEM buffer-object code that references `bo`. The leading numbers are the original source line numbers, so gaps between entries are lines that did not match.

29 static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
35 static void prime_free_pages_locked(struct ivpu_bo *bo)
40 static int prime_map_pages_locked(struct ivpu_bo *bo)
42 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
45 sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
51 bo->sgt = sgt;
55 static void prime_unmap_pages_locked(struct ivpu_bo *bo)
57 dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
58 bo->sgt = NULL;
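
Read together, the prime_* fragments describe the dma-buf ("prime") backend: alloc/free are effectively no-ops because the pages belong to the exporter, and map/unmap move the attachment's sg_table in and out of bo->sgt. A minimal reconstruction of the map path; the IS_ERR check and the ivpu_err message filling the gap between lines 45 and 51 are inferred, not listed:

    static int prime_map_pages_locked(struct ivpu_bo *bo)
    {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        struct sg_table *sgt;

        /* Map through the dma-buf attachment created at import time */
        sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
            ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt)); /* inferred */
            return PTR_ERR(sgt);
        }

        bo->sgt = sgt;
        return 0;
    }

The unmap at lines 57-58 is the exact inverse: release the attachment mapping, then clear bo->sgt so the state checks elsewhere see the BO as unmapped.
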
70 static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
72 int npages = bo->base.size >> PAGE_SHIFT;
75 pages = drm_gem_get_pages(&bo->base);
79 if (bo->flags & DRM_IVPU_BO_WC)
81 else if (bo->flags & DRM_IVPU_BO_UNCACHED)
84 bo->pages = pages;
88 static void shmem_free_pages_locked(struct ivpu_bo *bo)
90 if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
91 set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
93 drm_gem_put_pages(&bo->base, bo->pages, true, false);
94 bo->pages = NULL;
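
The shmem backend gets its pages from the GEM shmem helper and then adjusts the kernel linear-map attributes to match the requested caching mode. A sketch of the allocation path under that reading; the IS_ERR branch and the set_pages_array_* call targets are inferred from the flag tests at lines 79-81 and the write-back restore at lines 90-91:

    static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
    {
        int npages = bo->base.size >> PAGE_SHIFT;
        struct page **pages;

        pages = drm_gem_get_pages(&bo->base);
        if (IS_ERR(pages))
            return PTR_ERR(pages);

        /* Match linear-map attributes to the BO's caching mode (inferred) */
        if (bo->flags & DRM_IVPU_BO_WC)
            set_pages_array_wc(pages, npages);
        else if (bo->flags & DRM_IVPU_BO_UNCACHED)
            set_pages_array_uc(pages, npages);

        bo->pages = pages;
        return 0;
    }

The free path at lines 90-94 is the mirror image: restore write-back before drm_gem_put_pages() so the pages return to the allocator with default attributes.
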
97 static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
99 int npages = bo->base.size >> PAGE_SHIFT;
100 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
104 sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
116 bo->sgt = sgt;
124 static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
126 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
128 dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
129 sg_free_table(bo->sgt);
130 kfree(bo->sgt);
131 bo->sgt = NULL;
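
Lines 97-131 are backend-independent: bo->pages is wrapped into an sg_table and DMA-mapped for the device, and the unmap path tears the same state down in reverse. A plausible reconstruction of the gap between lines 104 and 116; the dma_map_sgtable() call and its error handling are inferred from the dma_unmap_sgtable() at line 128:

    static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
    {
        int npages = bo->base.size >> PAGE_SHIFT;
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        struct sg_table *sgt;
        int ret;

        sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
        if (IS_ERR(sgt))
            return PTR_ERR(sgt);

        /* Inferred: make the pages visible to the device */
        ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret) {
            sg_free_table(sgt);
            kfree(sgt);
            return ret;
        }

        bo->sgt = sgt;
        return 0;
    }
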
143 static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
145 unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
149 pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
162 bo->pages = pages;
172 static void internal_free_pages_locked(struct ivpu_bo *bo)
174 unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
176 if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
177 set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
180 put_page(bo->pages[i]);
182 kvfree(bo->pages);
183 bo->pages = NULL;
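
The internal backend (firmware and other kernel-owned buffers) allocates pages one at a time instead of going through shmem. A sketch of the allocation loop filling the gap between lines 149 and 162; the alloc_page() call, its GFP flags, and the unwind label are assumptions, though the put_page() at line 180 confirms per-page references are held:

    static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
    {
        unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
        struct page **pages;

        pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
        if (!pages)
            return -ENOMEM;

        for (i = 0; i < npages; i++) {
            pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); /* flags assumed */
            if (!pages[i])
                goto err_put_pages;
        }

        bo->pages = pages;
        return 0;

    err_put_pages:
        while (i--)
            put_page(pages[i]);
        kvfree(pages);
        return -ENOMEM;
    }

The free path (lines 174-183) restores write-back caching first, then drops each page reference and frees the array.
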
195 static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
197 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
200 lockdep_assert_held(&bo->lock);
201 drm_WARN_ON(&vdev->drm, bo->sgt);
203 ret = bo->ops->alloc_pages(bo);
209 ret = bo->ops->map_pages(bo);
217 bo->ops->free_pages(bo);
221 static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
223 mutex_lock(&bo->lock);
225 WARN_ON(!bo->sgt);
226 bo->ops->unmap_pages(bo);
227 WARN_ON(bo->sgt);
228 bo->ops->free_pages(bo);
229 WARN_ON(bo->pages);
231 mutex_unlock(&bo->lock);
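
Lines 195-231 tie the three backends together through the ops table: alloc then map under bo->lock, with free_pages as the unwind, and the WARN_ONs in ivpu_bo_unmap_and_free_pages() asserting that each callback cleared the state it owns. Reconstructed, with only the early returns and error handling between the listed lines inferred:

    static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
    {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        int ret;

        lockdep_assert_held(&bo->lock);
        drm_WARN_ON(&vdev->drm, bo->sgt);

        ret = bo->ops->alloc_pages(bo);
        if (ret)
            return ret;

        ret = bo->ops->map_pages(bo);
        if (ret)
            goto err_free_pages;
        return 0;

    err_free_pages:
        bo->ops->free_pages(bo);
        return ret;
    }
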
241 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
243 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
246 mutex_lock(&bo->lock);
248 if (!bo->vpu_addr) {
250 bo->ctx->id, bo->handle);
255 if (!bo->sgt) {
256 ret = ivpu_bo_alloc_and_map_pages_locked(bo);
261 if (!bo->mmu_mapped) {
262 ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
263 ivpu_bo_is_snooped(bo));
268 bo->mmu_mapped = true;
272 mutex_unlock(&bo->lock);
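
ivpu_bo_pin() is idempotent by design: each of its three steps is guarded by a "not yet done" check, so pinning an already-pinned BO is a no-op. A reconstruction under that reading; the error paths and the unlock label are inferred around lines 243-272:

    int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
    {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        int ret = 0;

        mutex_lock(&bo->lock);

        if (!bo->vpu_addr) {          /* a VPU address must exist first */
            ret = -EINVAL;
            goto unlock;
        }

        if (!bo->sgt) {               /* backing pages, DMA-mapped */
            ret = ivpu_bo_alloc_and_map_pages_locked(bo);
            if (ret)
                goto unlock;
        }

        if (!bo->mmu_mapped) {        /* VPU-side MMU mapping */
            ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
                                           ivpu_bo_is_snooped(bo));
            if (ret)
                goto unlock;
            bo->mmu_mapped = true;
        }

    unlock:
        mutex_unlock(&bo->lock);
        return ret;
    }
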
278 ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
281 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
285 if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
287 else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
294 ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
296 bo->ctx = ctx;
297 bo->vpu_addr = bo->mm_node.start;
298 list_add_tail(&bo->ctx_node, &ctx->bo_list);
305 static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
307 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
308 struct ivpu_mmu_context *ctx = bo->ctx;
311 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
313 mutex_lock(&bo->lock);
315 if (bo->mmu_mapped) {
316 drm_WARN_ON(&vdev->drm, !bo->sgt);
317 ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
318 bo->mmu_mapped = false;
322 list_del(&bo->ctx_node);
323 bo->vpu_addr = 0;
324 bo->ctx = NULL;
325 ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
328 mutex_unlock(&bo->lock);
333 struct ivpu_bo *bo, *tmp;
335 list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
336 ivpu_bo_free_vpu_addr(bo);
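
Lines 278-336 cover the VPU address-space lifecycle. ivpu_bo_alloc_vpu_addr() picks an address range from the BO flags (SHAVE, DMA, or a default user range; the exact range field names are not listed here), inserts a node into the context's address allocator, and links the BO into ctx->bo_list. ivpu_bo_free_vpu_addr() undoes all of that, unmapping from the VPU MMU first. A sketch of the teardown, which the fragments cover almost completely; only the ctx-side locking around the node removal is inferred:

    static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
    {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        struct ivpu_mmu_context *ctx = bo->ctx;

        mutex_lock(&bo->lock);

        /* Unmap from the VPU MMU before releasing the address range */
        if (bo->mmu_mapped) {
            drm_WARN_ON(&vdev->drm, !bo->sgt);
            ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
            bo->mmu_mapped = false;
        }

        mutex_lock(&ctx->lock);                /* inferred */
        list_del(&bo->ctx_node);
        bo->vpu_addr = 0;
        bo->ctx = NULL;
        ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
        mutex_unlock(&ctx->lock);              /* inferred */

        mutex_unlock(&bo->lock);
    }

The bulk form at lines 333-336 runs when a context dies; the _safe iterator is required because ivpu_bo_free_vpu_addr() deletes each BO from the very list being walked.
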
344 struct ivpu_bo *bo;
359 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
360 if (!bo)
363 mutex_init(&bo->lock);
364 bo->base.funcs = &ivpu_gem_funcs;
365 bo->flags = flags;
366 bo->ops = ops;
367 bo->user_ptr = user_ptr;
370 ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
372 drm_gem_private_object_init(&vdev->drm, &bo->base, size);
380 ret = drm_gem_create_mmap_offset(&bo->base);
388 ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
395 return bo;
398 drm_gem_object_release(&bo->base);
400 kfree(bo);
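
The common constructor at lines 344-400: a shmem-backed object gets full drm_gem_object_init(), anything else (prime, internal) gets the private variant, an mmap offset is created only when needed, and a VPU address is assigned immediately when a context is supplied. A condensed sketch; the shmem/private condition, the MAPPABLE guard, and the error labels are inferred from the listed fragments:

    static struct ivpu_bo *
    ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
                  u64 size, u32 flags, const struct ivpu_bo_ops *ops,
                  const struct ivpu_addr_range *range, u64 user_ptr)
    {
        struct ivpu_bo *bo;
        int ret = 0;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
            return ERR_PTR(-ENOMEM);

        mutex_init(&bo->lock);
        bo->base.funcs = &ivpu_gem_funcs;
        bo->flags = flags;
        bo->ops = ops;
        bo->user_ptr = user_ptr;

        if (ops == &shmem_ops)                 /* condition inferred */
            ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
        else
            drm_gem_private_object_init(&vdev->drm, &bo->base, size);
        if (ret)
            goto err_free;

        if (flags & DRM_IVPU_BO_MAPPABLE) {    /* guard inferred */
            ret = drm_gem_create_mmap_offset(&bo->base);
            if (ret)
                goto err_release;
        }

        if (mmu_context) {
            ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
            if (ret)
                goto err_release;
        }

        return bo;

    err_release:
        drm_gem_object_release(&bo->base);
    err_free:
        kfree(bo);
        return ERR_PTR(ret);
    }
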
406 struct ivpu_bo *bo = to_ivpu_bo(obj);
407 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
409 if (bo->ctx)
411 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
414 (bool)bo->sgt, bo->mmu_mapped);
418 vunmap(bo->kvaddr);
420 if (bo->ctx)
421 ivpu_bo_free_vpu_addr(bo);
423 if (bo->sgt)
424 ivpu_bo_unmap_and_free_pages(bo);
426 if (bo->base.import_attach)
427 drm_prime_gem_destroy(&bo->base, bo->sgt);
429 drm_gem_object_release(&bo->base);
431 mutex_destroy(&bo->lock);
432 kfree(bo);
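
The GEM free callback (lines 406-432) releases everything in the reverse order of construction: kernel vmap, VPU address, pages/sgt, and, for imported objects, the dma-buf attachment via drm_prime_gem_destroy(). A sketch with the debug prints summarized; the ivpu_dbg() text behind lines 409-414 is paraphrased, not quoted:

    static void ivpu_bo_free(struct drm_gem_object *obj)
    {
        struct ivpu_bo *bo = to_ivpu_bo(obj);

        /* lines 409-414: ivpu_dbg() reports ctx id, vpu_addr, sgt/mmu state */

        vunmap(bo->kvaddr);           /* harmless when kvaddr is NULL */

        if (bo->ctx)
            ivpu_bo_free_vpu_addr(bo);

        if (bo->sgt)
            ivpu_bo_unmap_and_free_pages(bo);

        /* For prime BOs, bo->sgt is already NULL here; this only detaches */
        if (bo->base.import_attach)
            drm_prime_gem_destroy(&bo->base, bo->sgt);

        drm_gem_object_release(&bo->base);
        mutex_destroy(&bo->lock);
        kfree(bo);
    }
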
437 struct ivpu_bo *bo = to_ivpu_bo(obj);
438 struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
441 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);
451 vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
458 struct ivpu_bo *bo = to_ivpu_bo(obj);
462 mutex_lock(&bo->lock);
464 if (!bo->sgt)
465 ret = ivpu_bo_alloc_and_map_pages_locked(bo);
467 mutex_unlock(&bo->lock);
472 return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
479 struct ivpu_bo *bo = to_ivpu_bo(obj);
486 mutex_lock(&bo->lock);
488 if (!bo->sgt) {
489 err = ivpu_bo_alloc_and_map_pages_locked(bo);
501 page = bo->pages[page_offset];
506 mutex_unlock(&bo->lock);
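
Lines 437-506 are the remaining GEM object callbacks. mmap only fixes up vm_page_prot (line 451) because pages are inserted lazily; get_sg_table (lines 458-472) populates under bo->lock if needed, then hands the exporter a fresh sg_table; and the fault handler allocates on first CPU touch, computing the page index from the faulting address rather than vmf->pgoff, which holds the fake mmap offset. A reconstruction of the fault path; the bounds check and the vmf_insert_pfn() call around lines 488-506 are inferred:

    static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct ivpu_bo *bo = to_ivpu_bo(obj);
        loff_t num_pages = obj->size >> PAGE_SHIFT;
        pgoff_t page_offset;
        struct page *page;
        vm_fault_t ret;
        int err;

        mutex_lock(&bo->lock);

        if (!bo->sgt) {
            err = ivpu_bo_alloc_and_map_pages_locked(bo);
            if (err) {
                ret = vmf_error(err);
                goto unlock;
            }
        }

        /* vmf->pgoff carries the fake offset, so index by address instead */
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        if (page_offset >= num_pages) {
            ret = VM_FAULT_SIGBUS;
        } else {
            page = bo->pages[page_offset];
            ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
        }

    unlock:
        mutex_unlock(&bo->lock);
        return ret;
    }
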
531 struct ivpu_bo *bo;
540 bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
541 if (IS_ERR(bo)) {
543 bo, file_priv->ctx.id, args->size, args->flags);
544 return PTR_ERR(bo);
547 ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
549 args->vpu_addr = bo->vpu_addr;
550 args->handle = bo->handle;
553 drm_gem_object_put(&bo->base);
556 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
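
The BO_CREATE ioctl (lines 531-556) is a thin wrapper over ivpu_bo_alloc() with the shmem backend, followed by handle creation. Note that the drm_gem_object_put() at line 553 runs unconditionally: after drm_gem_handle_create() the handle owns a reference, and if handle creation failed the put frees the object. A sketch; the args validation and the file_priv/vdev accessors are assumptions:

    int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
    {
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;   /* accessor assumed */
        struct drm_ivpu_bo_create *args = data;
        u64 size = PAGE_ALIGN(args->size);
        struct ivpu_bo *bo;
        int ret;

        if (size == 0)
            return -EINVAL;

        bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
        if (IS_ERR(bo))
            return PTR_ERR(bo);

        ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
        if (!ret) {
            args->vpu_addr = bo->vpu_addr;
            args->handle = bo->handle;
        }

        /* Drop the creation reference; the handle (if any) keeps the BO alive */
        drm_gem_object_put(&bo->base);

        return ret;
    }
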
566 struct ivpu_bo *bo;
581 bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
582 if (IS_ERR(bo)) {
584 bo, vpu_addr, size, flags);
588 ret = ivpu_bo_pin(bo);
592 if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
593 drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
595 if (bo->flags & DRM_IVPU_BO_WC)
596 set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
597 else if (bo->flags & DRM_IVPU_BO_UNCACHED)
598 set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);
600 prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
601 bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
602 if (!bo->kvaddr) {
608 bo->vpu_addr, bo->base.size, flags);
610 return bo;
613 drm_gem_object_put(&bo->base);
617 void ivpu_bo_free_internal(struct ivpu_bo *bo)
619 drm_gem_object_put(&bo->base);
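
ivpu_bo_alloc_internal() (lines 566-613) builds kernel-owned buffers in the global context (&vdev->gctx) and immediately pins and kernel-maps them: pin, flush and retype the pages when not cached, then vmap() with a matching pgprot. A condensed sketch of that sequence; the function signature, the range handling, and the error labels are assumptions, while the pin/clflush/set_pages/vmap ordering follows lines 588-601. ivpu_bo_free_internal() (lines 617-619) is just the final reference drop; ivpu_bo_free() does the real teardown.

    struct ivpu_bo *
    ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
    {
        const struct ivpu_addr_range *range = NULL;   /* fixed-range handling omitted */
        struct ivpu_bo *bo;
        pgprot_t prot;
        int ret;

        bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
        if (IS_ERR(bo))
            return NULL;

        ret = ivpu_bo_pin(bo);   /* pages + DMA mapping + VPU MMU mapping */
        if (ret)
            goto err_put;

        /* CPU side: flush stale cachelines, then retype the linear map */
        if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
            drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

        if (bo->flags & DRM_IVPU_BO_WC)
            set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
        else if (bo->flags & DRM_IVPU_BO_UNCACHED)
            set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);

        /* Kernel mapping whose pgprot matches the BO's caching mode */
        prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
        bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
        if (!bo->kvaddr)
            goto err_put;

        return bo;

    err_put:
        drm_gem_object_put(&bo->base);
        return NULL;
    }
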
626 struct ivpu_bo *bo;
634 bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
635 if (IS_ERR(bo)) {
636 ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
640 lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
642 bo->base.import_attach = attach;
644 return &bo->base;
649 return ERR_CAST(bo);
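
Import (lines 626-649) wraps a foreign dma-buf in an ivpu_bo with the prime backend and no MMU context; the VPU address is assigned later, on first use. The separate lockdep class at line 640 presumably exists because a prime BO's bo->lock nests differently against dma-buf locks than a shmem BO's, which would otherwise trigger false lockdep reports. A reconstruction; the attach/get/detach/put calls outside the listed lines are inferred:

    struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
                                                 struct dma_buf *buf)
    {
        struct ivpu_device *vdev = to_ivpu_device(dev);   /* helper assumed */
        struct dma_buf_attachment *attach;
        struct ivpu_bo *bo;

        attach = dma_buf_attach(buf, dev->dev);           /* inferred */
        if (IS_ERR(attach))
            return ERR_CAST(attach);

        get_dma_buf(buf);                                 /* inferred */

        bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
        if (IS_ERR(bo)) {
            ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
            goto err_detach;
        }

        lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

        bo->base.import_attach = attach;

        return &bo->base;

    err_detach:
        dma_buf_detach(buf, attach);                      /* inferred */
        dma_buf_put(buf);                                 /* inferred */
        return ERR_CAST(bo);
    }
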
658 struct ivpu_bo *bo;
665 bo = to_ivpu_bo(obj);
667 mutex_lock(&bo->lock);
669 if (!bo->ctx) {
670 ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
677 args->flags = bo->flags;
679 args->vpu_addr = bo->vpu_addr;
682 mutex_unlock(&bo->lock);
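
BO_INFO (lines 658-682) shows the deferred address assignment for imported buffers: a BO that arrived without a context (prime import) is bound to the caller's context the first time it is queried. A sketch; the handle lookup, the exact set of args fields, and the final put are inferred around the listed lines:

    int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
    {
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_bo_info *args = data;
        struct drm_gem_object *obj;
        struct ivpu_bo *bo;
        int ret = 0;

        obj = drm_gem_object_lookup(file, args->handle);  /* inferred */
        if (!obj)
            return -ENOENT;

        bo = to_ivpu_bo(obj);

        mutex_lock(&bo->lock);

        if (!bo->ctx) {
            /* Imported BO: give it a VPU address in the caller's context */
            ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
            if (ret)
                goto unlock;
        }

        args->flags = bo->flags;
        args->vpu_addr = bo->vpu_addr;

    unlock:
        mutex_unlock(&bo->lock);
        drm_gem_object_put(obj);                          /* inferred */
        return ret;
    }
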
713 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
717 if (bo->base.dma_buf && bo->base.dma_buf->file)
718 dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
721 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
722 kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
730 struct ivpu_bo *bo;
736 list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
737 ivpu_bo_print_info(bo, p);
746 list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
747 ivpu_bo_print_info(bo, p);
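
Finally, the debugfs dump (lines 713-747) walks the global context's BO list and then each file context's list, printing one row per BO. The dma_refcount column comes from the dma-buf's backing file when the BO has been exported. A sketch; the drm_printf() format string below is a readable placeholder, not the driver's actual column layout:

    static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
    {
        unsigned long dma_refcount = 0;

        /* For exported BOs, report how many references the dma-buf file holds */
        if (bo->base.dma_buf && bo->base.dma_buf->file)
            dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

        drm_printf(p, "ctx %u handle %u vpu_addr 0x%llx size %zu refs %u dma_refs %lu type %s\n",
                   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
                   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
    }
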