Lines matching refs: bo

23 static void tegra_bo_put(struct host1x_bo *bo)
25 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
53 static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
56 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
132 static void *tegra_bo_mmap(struct host1x_bo *bo)
134 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
145 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
147 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
157 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
159 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
163 return bo;
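
The get/put fragments above suggest that the host1x_bo reference operations simply delegate to the embedded GEM object's reference count. A minimal sketch of that pattern; the bodies beyond the visible lines are assumed:

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	/* Dropping the host1x reference just drops the GEM reference. */
	drm_gem_object_put(&obj->gem);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	/* Taking a host1x reference pins the underlying GEM object. */
	drm_gem_object_get(&obj->gem);

	return bo;
}
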
175 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
180 if (bo->mm)
183 bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
184 if (!bo->mm)
190 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
197 bo->iova = bo->mm->start;
199 bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
200 if (!bo->size) {
211 drm_mm_remove_node(bo->mm);
214 kfree(bo->mm);
218 static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
220 if (!bo->mm)
224 iommu_unmap(tegra->domain, bo->iova, bo->size);
225 drm_mm_remove_node(bo->mm);
228 kfree(bo->mm);
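
Lines 175-228 outline how a BO is mapped into the Tegra DRM IOMMU domain: reserve an IOVA range from the drm_mm allocator, record its start as bo->iova, then map the BO's sg_table with iommu_map_sgtable(). A sketch of that flow, with the driver's locking and error messages omitted; the goto labels, return codes, and protection bits are illustrative assumptions:

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;	/* assumed protection bits */
	int err;

	if (bo->mm)
		return -EBUSY;			/* already mapped */

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	/* Reserve an IOVA range large enough for the whole GEM object. */
	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0)
		goto free_node;

	bo->iova = bo->mm->start;

	/* Map the backing pages at that IOVA; returns the mapped size. */
	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		err = -ENOMEM;
		goto remove_node;
	}

	return 0;

remove_node:
	drm_mm_remove_node(bo->mm);
free_node:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	/* Tear down in the opposite order: unmap, release the IOVA, free. */
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}
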
236 struct tegra_bo *bo;
239 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
240 if (!bo)
243 host1x_bo_init(&bo->base, &tegra_bo_ops);
246 err = drm_gem_object_init(drm, &bo->gem, size);
250 err = drm_gem_create_mmap_offset(&bo->gem);
254 return bo;
257 drm_gem_object_release(&bo->gem);
259 kfree(bo);
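
tegra_bo_alloc_object() (lines 236-259) sets up the common object state before any backing storage exists: allocate the struct, initialize the host1x_bo wrapper, initialize the GEM object, and create the fake mmap offset, unwinding on failure. A sketch; the error-path labels and the page-size rounding are assumptions:

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* Make the object usable as a host1x_bo for job submission. */
	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	/* Reserve the fake offset userspace will pass to mmap(). */
	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
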
263 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
265 if (bo->pages) {
266 dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
267 drm_gem_put_pages(&bo->gem, bo->pages, true, true);
268 sg_free_table(bo->sgt);
269 kfree(bo->sgt);
270 } else if (bo->vaddr) {
271 dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
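
The free path (lines 263-271) mirrors the two allocation strategies: page-backed BOs undo the streaming DMA mapping and return their shmem pages, while contiguous BOs free a single write-combined DMA allocation. A sketch:

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		/* Page-backed BO: unmap, drop pages, free the sg_table. */
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		/* Contiguous BO: one write-combined allocation to release. */
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}
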
275 static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
279 bo->pages = drm_gem_get_pages(&bo->gem);
280 if (IS_ERR(bo->pages))
281 return PTR_ERR(bo->pages);
283 bo->num_pages = bo->gem.size >> PAGE_SHIFT;
285 bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
286 if (IS_ERR(bo->sgt)) {
287 err = PTR_ERR(bo->sgt);
291 err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
298 sg_free_table(bo->sgt);
299 kfree(bo->sgt);
301 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
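
tegra_bo_get_pages() (lines 275-301) builds the page-backed variant: get shmem pages from the GEM helpers, describe them in an sg_table, and create a streaming DMA mapping for the device, unwinding each step on failure. A sketch; the goto labels are illustrative:

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	/* Wrap the page array in a scatter-gather table. */
	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	/* Create the streaming DMA mapping used by the device. */
	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
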
305 static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
311 err = tegra_bo_get_pages(drm, bo);
315 err = tegra_bo_iommu_map(tegra, bo);
317 tegra_bo_free(drm, bo);
321 size_t size = bo->gem.size;
323 bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
325 if (!bo->vaddr) {
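
tegra_bo_alloc() (lines 305-325) is where the backing-store decision is made: with an IOMMU domain available the BO is built from discontiguous pages and mapped through the IOMMU, otherwise the driver falls back to a physically contiguous write-combined allocation. A sketch; the tegra->domain test and the GFP flags are assumptions based on the surrounding fragments:

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		/* IOMMU present: scattered pages, remapped to a linear IOVA. */
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		/* No IOMMU: the device needs physically contiguous memory. */
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr)
			return -ENOMEM;
	}

	return 0;
}
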
339 struct tegra_bo *bo;
342 bo = tegra_bo_alloc_object(drm, size);
343 if (IS_ERR(bo))
344 return bo;
346 err = tegra_bo_alloc(drm, bo);
351 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
354 bo->flags |= TEGRA_BO_BOTTOM_UP;
356 return bo;
359 drm_gem_object_release(&bo->gem);
360 kfree(bo);
370 struct tegra_bo *bo;
373 bo = tegra_bo_create(drm, size, flags);
374 if (IS_ERR(bo))
375 return bo;
377 err = drm_gem_handle_create(file, &bo->gem, handle);
379 tegra_bo_free_object(&bo->gem);
383 drm_gem_object_put(&bo->gem);
385 return bo;
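
tegra_bo_create_with_handle() (lines 370-385) shows the usual GEM handle dance: create the BO, create a userspace handle that takes its own reference, then drop the creation reference so the handle becomes the only owner. A sketch; the signature is inferred from the call sites:

static struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
						    struct drm_device *drm,
						    size_t size,
						    unsigned long flags,
						    u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* The handle now holds a reference; drop the creation reference. */
	drm_gem_object_put(&bo->gem);

	return bo;
}
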
393 struct tegra_bo *bo;
396 bo = tegra_bo_alloc_object(drm, buf->size);
397 if (IS_ERR(bo))
398 return bo;
408 bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
409 if (IS_ERR(bo->sgt)) {
410 err = PTR_ERR(bo->sgt);
415 err = tegra_bo_iommu_map(tegra, bo);
420 bo->gem.import_attach = attach;
422 return bo;
425 if (!IS_ERR_OR_NULL(bo->sgt))
426 dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
431 drm_gem_object_release(&bo->gem);
432 kfree(bo);
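
tegra_bo_import() (lines 393-432) wraps a foreign dma-buf in a tegra_bo: attach to the buffer, map the attachment to get an sg_table, and, when an IOMMU domain exists, map that table into the device's address space. A sketch; the attach/detach bookkeeping and the handling of the no-IOMMU case (which needs a single contiguous entry) are partly assumed:

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	/* Remember the attachment so the free path can undo the import. */
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
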
439 struct tegra_bo *bo = to_tegra_bo(gem);
442 tegra_bo_iommu_unmap(tegra, bo);
445 dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
449 tegra_bo_free(gem->dev, bo);
453 kfree(bo);
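
tegra_bo_free_object() (lines 439-453) is the common destructor: drop the IOMMU mapping if one exists, then either undo the dma-buf import or free the locally allocated backing store, and finally release the GEM object. A sketch; the import_attach check and the use of the DRM PRIME teardown helper are assumptions:

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		/* Imported dma-buf: unmap and let the PRIME helper detach. */
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		/* Locally allocated backing store. */
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
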
461 struct tegra_bo *bo;
466 bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
468 if (IS_ERR(bo))
469 return PTR_ERR(bo);
478 struct tegra_bo *bo = to_tegra_bo(gem);
482 if (!bo->pages)
486 page = bo->pages[offset];
499 struct tegra_bo *bo = to_tegra_bo(gem);
501 if (!bo->pages) {
513 err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
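
CPU mappings follow the same page-backed/contiguous split (lines 478-513): page-backed BOs install individual pages from bo->pages in the fault handler, while contiguous BOs are mapped in one go with dma_mmap_wc(). A sketch of both paths; tegra_gem_mmap_contiguous() is a hypothetical helper name used only for illustration, and the vm_flags and page-protection bookkeeping of the real driver is omitted:

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	/* Only page-backed BOs are faulted in page by page. */
	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

static int tegra_gem_mmap_contiguous(struct drm_gem_object *gem,
				     struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* Contiguous BO: hand the whole write-combined allocation to the
	 * DMA API, which sets up the VMA in one call. */
	return dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
			   gem->size);
}
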
552 struct tegra_bo *bo = to_tegra_bo(gem);
559 if (bo->pages) {
560 if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
564 if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
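
The exporter side (lines 552-564) builds an sg_table for an importing device: from the page array when the BO is page-backed, or via dma_get_sgtable() for the contiguous case, and then maps it for attach->dev. A sketch; the function name, the dma_map_sgtable() step, and the error labels are assumptions:

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;
	int err;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (bo->pages)
		/* Describe the (possibly scattered) backing pages. */
		err = sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
						0, gem->size, GFP_KERNEL);
	else
		/* Derive a single-entry table from the DMA allocation. */
		err = dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				      gem->size);
	if (err < 0)
		goto free_sgt;

	/* Map the table for the importing device. */
	err = dma_map_sgtable(attach->dev, sgt, dir, 0);
	if (err < 0)
		goto free_table;

	return sgt;

free_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
	return ERR_PTR(err);
}
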
585 struct tegra_bo *bo = to_tegra_bo(gem);
587 if (bo->pages)
603 struct tegra_bo *bo = to_tegra_bo(gem);
606 if (bo->pages)
607 dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
616 struct tegra_bo *bo = to_tegra_bo(gem);
619 if (bo->pages)
620 dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
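
The begin/end CPU-access callbacks (lines 603-620) only need cache maintenance for page-backed BOs, whose streaming DMA mapping may be non-coherent; contiguous write-combined allocations need none. A sketch; the callback names follow the dma_buf_ops convention and the surrounding code is assumed:

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	/* Make device writes visible to the CPU before it reads. */
	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	/* Flush CPU writes so the device sees them. */
	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}
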
640 struct tegra_bo *bo = to_tegra_bo(gem);
642 return bo->vaddr;
678 struct tegra_bo *bo;
689 bo = tegra_bo_import(drm, buf);
690 if (IS_ERR(bo))
691 return ERR_CAST(bo);
693 return &bo->gem;
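
Finally, the PRIME import entry point (lines 678-693) short-circuits re-imports of the driver's own exported buffers by taking another reference on the existing GEM object, and only falls back to tegra_bo_import() for foreign dma-bufs. A sketch; the comparison against tegra_gem_prime_dmabuf_ops is an assumption:

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		/* Importing our own export on the same device: reuse it. */
		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	/* Foreign buffer: wrap it in a new tegra_bo. */
	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
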