Lines Matching refs:bo
52 static void tegra_bo_put(struct host1x_bo *bo)
54 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
59 static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
62 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
72 map->bo = host1x_bo_get(bo);
173 host1x_bo_put(map->bo);
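
The matches above appear to come from the Tegra DRM GEM implementation (drivers/gpu/drm/tegra/gem.c in the Linux kernel). Lines 72 and 173 are the reference-count pairing between tegra_bo_pin() and tegra_bo_unpin(): the host1x_bo_mapping takes a reference on the BO when it is created and drops it when the mapping is destroyed. A minimal sketch of that pairing, reconstructed from the matched lines; the map allocation, the kref/direction/dev fields and the actual DMA-mapping work are filled in from memory of the driver and should be treated as assumptions:

    static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
                                                  enum dma_data_direction direction)
    {
            struct tegra_bo *obj = host1x_to_tegra_bo(bo);
            struct host1x_bo_mapping *map;

            map = kzalloc(sizeof(*map), GFP_KERNEL);
            if (!map)
                    return ERR_PTR(-ENOMEM);

            kref_init(&map->ref);
            map->bo = host1x_bo_get(bo);    /* line 72: the mapping holds a BO reference */
            map->direction = direction;
            map->dev = dev;

            /* ... build the DMA mapping for @dev from obj's backing storage ... */

            return map;
    }

    static void tegra_bo_unpin(struct host1x_bo_mapping *map)
    {
            /* ... undo the DMA mapping created in tegra_bo_pin() ... */

            host1x_bo_put(map->bo);         /* line 173: drop the pin's BO reference */
            kfree(map);
    }
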
177 static void *tegra_bo_mmap(struct host1x_bo *bo)
179 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
194 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
196 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
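
Lines 177-196 are the host1x mmap/munmap ops, which produce (and tear down) a kernel virtual mapping of the buffer. Contiguous allocations already have a kernel address in obj->vaddr; page-backed objects get a write-combined vmap(). A sketch of the two paths visible here; the real functions also handle dma-buf imports via dma_buf_vmap(), which is omitted below:

    static void *tegra_bo_mmap(struct host1x_bo *bo)
    {
            struct tegra_bo *obj = host1x_to_tegra_bo(bo);

            /* contiguous (dma_alloc_wc) buffers are already mapped */
            if (obj->vaddr)
                    return obj->vaddr;

            /* shmem-backed buffers: write-combined mapping of their pages */
            return vmap(obj->pages, obj->num_pages, VM_MAP,
                        pgprot_writecombine(PAGE_KERNEL));
    }

    static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
    {
            struct tegra_bo *obj = host1x_to_tegra_bo(bo);

            /* nothing to undo for contiguous buffers */
            if (obj->vaddr)
                    return;

            vunmap(addr);
    }
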
207 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
209 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
213 return bo;
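
Lines 52-54 and 207-213 are the matching get/put ops: each converts the host1x_bo handle into the embedding tegra_bo and forwards to the GEM object's reference count. The drm_gem_object_get()/put() calls are not part of the matched lines but are the obvious bodies:

    static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
    {
            struct tegra_bo *obj = host1x_to_tegra_bo(bo);

            drm_gem_object_get(&obj->gem);

            return bo;
    }

    static void tegra_bo_put(struct host1x_bo *bo)
    {
            struct tegra_bo *obj = host1x_to_tegra_bo(bo);

            drm_gem_object_put(&obj->gem);
    }
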
225 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
230 if (bo->mm)
233 bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
234 if (!bo->mm)
240 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
247 bo->iova = bo->mm->start;
249 bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
250 if (!bo->size) {
261 drm_mm_remove_node(bo->mm);
264 kfree(bo->mm);
268 static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
270 if (!bo->mm)
274 iommu_unmap(tegra->domain, bo->iova, bo->size);
275 drm_mm_remove_node(bo->mm);
278 kfree(bo->mm);
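
Lines 225-278 are the IOVA management pair. tegra_bo_iommu_map() reserves a range in the shared drm_mm allocator, records its start in bo->iova, and maps the buffer's scatter-gather table into the IOMMU domain; tegra_bo_iommu_unmap() reverses those three steps. A reconstruction from the fragments; the locking around the drm_mm, the error messages, and the label names are omitted or assumed:

    static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
    {
            int prot = IOMMU_READ | IOMMU_WRITE;
            int err;

            if (bo->mm)                     /* already mapped (line 230) */
                    return -EBUSY;

            bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
            if (!bo->mm)
                    return -ENOMEM;

            /* reserve an IOVA range in the shared drm_mm allocator (line 240) */
            err = drm_mm_insert_node_generic(&tegra->mm, bo->mm,
                                             bo->gem.size, PAGE_SIZE, 0, 0);
            if (err < 0)
                    goto free;

            bo->iova = bo->mm->start;

            /* map the buffer's scatterlist at that IOVA (line 249) */
            bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
            if (!bo->size) {
                    err = -ENOMEM;
                    goto remove;
            }

            return 0;

    remove:
            drm_mm_remove_node(bo->mm);
    free:
            kfree(bo->mm);
            return err;
    }

    static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
    {
            if (!bo->mm)
                    return 0;

            /* undo the three steps of tegra_bo_iommu_map() in reverse order */
            iommu_unmap(tegra->domain, bo->iova, bo->size);
            drm_mm_remove_node(bo->mm);
            kfree(bo->mm);

            return 0;
    }
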
292 struct tegra_bo *bo;
295 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
296 if (!bo)
299 bo->gem.funcs = &tegra_gem_object_funcs;
301 host1x_bo_init(&bo->base, &tegra_bo_ops);
304 err = drm_gem_object_init(drm, &bo->gem, size);
308 err = drm_gem_create_mmap_offset(&bo->gem);
312 return bo;
315 drm_gem_object_release(&bo->gem);
317 kfree(bo);
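
Lines 292-317 cover tegra_bo_alloc_object(), which creates the tegra_bo shell: allocate the structure, hook up the GEM and host1x ops, initialize the GEM object and its mmap offset, and unwind in reverse order on failure. Reconstructed from the fragments; the page-size rounding and the label names are assumptions:

    static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, size_t size)
    {
            struct tegra_bo *bo;
            int err;

            bo = kzalloc(sizeof(*bo), GFP_KERNEL);
            if (!bo)
                    return ERR_PTR(-ENOMEM);

            bo->gem.funcs = &tegra_gem_object_funcs;

            host1x_bo_init(&bo->base, &tegra_bo_ops);
            size = round_up(size, PAGE_SIZE);       /* assumed */

            err = drm_gem_object_init(drm, &bo->gem, size);
            if (err < 0)
                    goto free;

            err = drm_gem_create_mmap_offset(&bo->gem);
            if (err < 0)
                    goto release;

            return bo;

    release:
            drm_gem_object_release(&bo->gem);
    free:
            kfree(bo);
            return ERR_PTR(err);
    }
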
321 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
323 if (bo->pages) {
324 dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
325 drm_gem_put_pages(&bo->gem, bo->pages, true, true);
326 sg_free_table(bo->sgt);
327 kfree(bo->sgt);
328 } else if (bo->vaddr) {
329 dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
333 static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
337 bo->pages = drm_gem_get_pages(&bo->gem);
338 if (IS_ERR(bo->pages))
339 return PTR_ERR(bo->pages);
341 bo->num_pages = bo->gem.size >> PAGE_SHIFT;
343 bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
344 if (IS_ERR(bo->sgt)) {
345 err = PTR_ERR(bo->sgt);
349 err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
356 sg_free_table(bo->sgt);
357 kfree(bo->sgt);
359 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
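
Lines 333-359 are tegra_bo_get_pages(): pin shmem pages for the GEM object, build a scatter-gather table from them, and DMA-map that table; the tail fragments (356-359) are the error unwinding in reverse order. Reconstructed from the fragments, with the goto labels assumed:

    static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
    {
            int err;

            bo->pages = drm_gem_get_pages(&bo->gem);
            if (IS_ERR(bo->pages))
                    return PTR_ERR(bo->pages);

            bo->num_pages = bo->gem.size >> PAGE_SHIFT;

            bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
            if (IS_ERR(bo->sgt)) {
                    err = PTR_ERR(bo->sgt);
                    goto put_pages;
            }

            err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
            if (err)
                    goto free_sgt;

            return 0;

    free_sgt:
            sg_free_table(bo->sgt);
            kfree(bo->sgt);
    put_pages:
            drm_gem_put_pages(&bo->gem, bo->pages, false, false);
            return err;
    }
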
363 static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
369 err = tegra_bo_get_pages(drm, bo);
373 err = tegra_bo_iommu_map(tegra, bo);
375 tegra_bo_free(drm, bo);
379 size_t size = bo->gem.size;
381 bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
383 if (!bo->vaddr) {
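
Lines 363-383 show tegra_bo_alloc() choosing between the two backing-store strategies: if an IOMMU domain is available, use discontiguous shmem pages and map them through tegra_bo_iommu_map(); otherwise fall back to a physically contiguous write-combined DMA allocation. A sketch assuming the tegra->domain check and the GFP flags, which are not part of the matched lines:

    static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
    {
            struct tegra_drm *tegra = drm->dev_private;
            int err;

            if (tegra->domain) {
                    /* discontiguous pages, made contiguous in IOVA space */
                    err = tegra_bo_get_pages(drm, bo);
                    if (err < 0)
                            return err;

                    err = tegra_bo_iommu_map(tegra, bo);
                    if (err < 0) {
                            tegra_bo_free(drm, bo);
                            return err;
                    }
            } else {
                    /* no IOMMU: fall back to a contiguous allocation */
                    size_t size = bo->gem.size;

                    bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
                                             GFP_KERNEL | __GFP_NOWARN);
                    if (!bo->vaddr)
                            return -ENOMEM;
            }

            return 0;
    }
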
397 struct tegra_bo *bo;
400 bo = tegra_bo_alloc_object(drm, size);
401 if (IS_ERR(bo))
402 return bo;
404 err = tegra_bo_alloc(drm, bo);
409 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
412 bo->flags |= TEGRA_BO_BOTTOM_UP;
414 return bo;
417 drm_gem_object_release(&bo->gem);
418 kfree(bo);
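
Lines 397-418 are tegra_bo_create(): allocate the object, back it via tegra_bo_alloc(), then translate the userspace creation flags into tiling mode and layout bits. The flag names below are the Tegra UAPI flags this driver uses, but the exact conditionals are not part of the matched lines:

    struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                     unsigned long flags)
    {
            struct tegra_bo *bo;
            int err;

            bo = tegra_bo_alloc_object(drm, size);
            if (IS_ERR(bo))
                    return bo;

            err = tegra_bo_alloc(drm, bo);
            if (err < 0)
                    goto release;

            if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                    bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

            if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                    bo->flags |= TEGRA_BO_BOTTOM_UP;

            return bo;

    release:
            drm_gem_object_release(&bo->gem);
            kfree(bo);
            return ERR_PTR(err);
    }
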
428 struct tegra_bo *bo;
431 bo = tegra_bo_create(drm, size, flags);
432 if (IS_ERR(bo))
433 return bo;
435 err = drm_gem_handle_create(file, &bo->gem, handle);
437 tegra_bo_free_object(&bo->gem);
441 drm_gem_object_put(&bo->gem);
443 return bo;
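
Lines 428-443 wrap that in tegra_bo_create_with_handle(): create the BO, publish it as a userspace handle, then drop the creation reference so the handle becomes the long-lived reference. Reconstructed from the fragments:

    static struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                                        struct drm_device *drm,
                                                        size_t size, unsigned long flags,
                                                        u32 *handle)
    {
            struct tegra_bo *bo;
            int err;

            bo = tegra_bo_create(drm, size, flags);
            if (IS_ERR(bo))
                    return bo;

            err = drm_gem_handle_create(file, &bo->gem, handle);
            if (err) {
                    tegra_bo_free_object(&bo->gem);
                    return ERR_PTR(err);
            }

            /* the handle now owns a reference; drop the creation reference */
            drm_gem_object_put(&bo->gem);

            return bo;
    }
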
451 struct tegra_bo *bo;
454 bo = tegra_bo_alloc_object(drm, buf->size);
455 if (IS_ERR(bo))
456 return bo;
466 bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
467 if (IS_ERR(bo->sgt)) {
468 err = PTR_ERR(bo->sgt);
473 err = tegra_bo_iommu_map(tegra, bo);
478 bo->gem.import_attach = attach;
480 return bo;
483 if (!IS_ERR_OR_NULL(bo->sgt))
484 dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
489 drm_gem_object_release(&bo->gem);
490 kfree(bo);
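
Lines 451-490 are tegra_bo_import(), the dma-buf import path: allocate a tegra_bo shell sized after the dma-buf, attach to the buffer, map its scatter-gather table for the device, and (when an IOMMU domain exists) give it an IOVA via tegra_bo_iommu_map(); the tail fragments (483-490) are the unwind path. The dma_buf_attach()/get_dma_buf()/dma_buf_detach() calls and the label names are filled in from the usual import pattern and are assumptions:

    static struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf)
    {
            struct tegra_drm *tegra = drm->dev_private;
            struct dma_buf_attachment *attach;
            struct tegra_bo *bo;
            int err;

            bo = tegra_bo_alloc_object(drm, buf->size);
            if (IS_ERR(bo))
                    return bo;

            attach = dma_buf_attach(buf, drm->dev);
            if (IS_ERR(attach)) {
                    err = PTR_ERR(attach);
                    goto free;
            }

            get_dma_buf(buf);

            bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
            if (IS_ERR(bo->sgt)) {
                    err = PTR_ERR(bo->sgt);
                    goto detach;
            }

            if (tegra->domain) {
                    err = tegra_bo_iommu_map(tegra, bo);
                    if (err < 0)
                            goto detach;
            }

            bo->gem.import_attach = attach;

            return bo;

    detach:
            if (!IS_ERR_OR_NULL(bo->sgt))
                    dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

            dma_buf_detach(buf, attach);
            dma_buf_put(buf);
    free:
            drm_gem_object_release(&bo->gem);
            kfree(bo);
            return ERR_PTR(err);
    }
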
498 struct tegra_bo *bo = to_tegra_bo(gem);
501 list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
510 tegra_bo_iommu_unmap(tegra, bo);
513 dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
517 tegra_bo_free(gem->dev, bo);
521 kfree(bo);
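
Lines 498-521 are tegra_bo_free_object(), the teardown that mirrors all of the above: drop any remaining host1x mappings, remove the IOVA mapping, then release either the imported dma-buf attachment or the locally allocated backing store before freeing the object. The guard conditions and the body of the mapping-cleanup loop are not in the matched lines, so the sketch below is a best guess at how the fragments fit together:

    void tegra_bo_free_object(struct drm_gem_object *gem)
    {
            struct tegra_drm *tegra = gem->dev->dev_private;
            struct host1x_bo_mapping *mapping, *tmp;
            struct tegra_bo *bo = to_tegra_bo(gem);

            /* drop mappings that are still cached for client devices */
            list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
                    /* ... unpin cached mappings, warn about stale ones ... */
            }

            if (tegra->domain)
                    tegra_bo_iommu_unmap(tegra, bo);

            if (gem->import_attach) {
                    dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
                                                      DMA_TO_DEVICE);
                    /* ... detach from and release the imported dma-buf ... */
            } else {
                    tegra_bo_free(gem->dev, bo);
            }

            drm_gem_object_release(gem);
            kfree(bo);
    }
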
529 struct tegra_bo *bo;
534 bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
536 if (IS_ERR(bo))
537 return PTR_ERR(bo);
546 struct tegra_bo *bo = to_tegra_bo(gem);
550 if (!bo->pages)
554 page = bo->pages[offset];
567 struct tegra_bo *bo = to_tegra_bo(gem);
569 if (!bo->pages) {
581 err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
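
Lines 546-554 are the page-fault handler for mmap'ed page-backed objects, and lines 567-581 show the corresponding mmap setup, where contiguous buffers are instead mapped up front with dma_mmap_wc(). A sketch of the fault handler; the vmf_insert_page() return and the surrounding declarations are assumptions:

    static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            struct drm_gem_object *gem = vma->vm_private_data;
            struct tegra_bo *bo = to_tegra_bo(gem);
            struct page *page;
            pgoff_t offset;

            /* contiguous buffers were fully mapped in mmap(), so no faults expected */
            if (!bo->pages)
                    return VM_FAULT_SIGBUS;

            offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
            page = bo->pages[offset];

            return vmf_insert_page(vma, vmf->address, page);
    }
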
619 struct tegra_bo *bo = to_tegra_bo(gem);
626 if (bo->pages) {
627 if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
631 if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
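
Lines 619-631 are from the dma-buf export side (map_dma_buf): a scatter-gather table is built either from the page array or, for contiguous buffers, via dma_get_sgtable(), and then DMA-mapped for the importing device. A sketch assuming the usual allocation and error handling around the matched calls:

    static struct sg_table *
    tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                                enum dma_data_direction dir)
    {
            struct drm_gem_object *gem = attach->dmabuf->priv;
            struct tegra_bo *bo = to_tegra_bo(gem);
            struct sg_table *sgt;

            sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
            if (!sgt)
                    return NULL;

            if (bo->pages) {
                    /* page-backed: build the table straight from the pages */
                    if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
                                                  0, gem->size, GFP_KERNEL) < 0)
                            goto free;
            } else {
                    /* contiguous: derive the table from the DMA allocation */
                    if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
                                        gem->size) < 0)
                            goto free;
            }

            if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                    goto free;

            return sgt;

    free:
            sg_free_table(sgt);
            kfree(sgt);
            return NULL;
    }
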
652 struct tegra_bo *bo = to_tegra_bo(gem);
654 if (bo->pages)
670 struct tegra_bo *bo = to_tegra_bo(gem);
673 if (bo->pages)
674 dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
683 struct tegra_bo *bo = to_tegra_bo(gem);
686 if (bo->pages)
687 dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
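
Lines 652-687 cover the exporter's CPU-access hooks: for page-backed objects the scatter-gather table is synced towards the CPU before access and back towards the device afterwards; contiguous write-combined buffers need no sync. Reconstructed from the fragments, with the dma_buf callback signatures assumed:

    static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
                                                enum dma_data_direction direction)
    {
            struct drm_gem_object *gem = buf->priv;
            struct tegra_bo *bo = to_tegra_bo(gem);
            struct drm_device *drm = gem->dev;

            if (bo->pages)
                    dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

            return 0;
    }

    static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
                                              enum dma_data_direction direction)
    {
            struct drm_gem_object *gem = buf->priv;
            struct tegra_bo *bo = to_tegra_bo(gem);
            struct drm_device *drm = gem->dev;

            if (bo->pages)
                    dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

            return 0;
    }
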
707 struct tegra_bo *bo = to_tegra_bo(gem);
710 vaddr = tegra_bo_mmap(&bo->base);
722 struct tegra_bo *bo = to_tegra_bo(gem);
724 tegra_bo_munmap(&bo->base, map->vaddr);
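
Lines 707-724 show the dma-buf vmap/vunmap hooks simply reusing the host1x mmap helpers from lines 177-196. The iosys_map handling around the matched calls is assumed and depends on the kernel version:

    static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
    {
            struct drm_gem_object *gem = buf->priv;
            struct tegra_bo *bo = to_tegra_bo(gem);
            void *vaddr;

            vaddr = tegra_bo_mmap(&bo->base);
            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);

            iosys_map_set_vaddr(map, vaddr);

            return 0;
    }

    static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
    {
            struct drm_gem_object *gem = buf->priv;
            struct tegra_bo *bo = to_tegra_bo(gem);

            tegra_bo_munmap(&bo->base, map->vaddr);
    }
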
756 struct tegra_bo *bo;
767 bo = tegra_bo_import(drm, buf);
768 if (IS_ERR(bo))
769 return ERR_CAST(bo);
771 return &bo->gem;
777 struct tegra_bo *bo;
783 bo = to_tegra_bo(gem);
784 return &bo->base;
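
Finally, lines 777-784 are tegra_gem_lookup(), which resolves a userspace GEM handle back to the host1x_bo embedded in the tegra_bo; this is how host1x job submission gets from handles to the refcounted objects that all of the matches above manage. The drm_gem_object_lookup() call is not part of the matched lines but is the standard way to do this:

    struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
    {
            struct drm_gem_object *gem;
            struct tegra_bo *bo;

            gem = drm_gem_object_lookup(file, handle);
            if (!gem)
                    return NULL;

            bo = to_tegra_bo(gem);

            return &bo->base;
    }
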