Lines matching refs:bo in drivers/gpu/drm/radeon/radeon_object.c
45 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
54 struct radeon_bo *bo;
56 bo = container_of(tbo, struct radeon_bo, tbo);
58 mutex_lock(&bo->rdev->gem.mutex);
59 list_del_init(&bo->list);
60 mutex_unlock(&bo->rdev->gem.mutex);
61 radeon_bo_clear_surface_reg(bo);
62 WARN_ON_ONCE(!list_empty(&bo->va));
63 if (bo->tbo.base.import_attach)
64 drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
65 drm_gem_object_release(&bo->tbo.base);
66 kfree(bo);
69 bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
71 if (bo->destroy == &radeon_ttm_bo_destroy)
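The fragments above are the buffer-object teardown and identity helpers: radeon_ttm_bo_destroy() unlinks the BO from the device's GEM list, drops any surface register, releases the GEM base and frees the wrapper, while radeon_ttm_bo_is_radeon_bo() recognizes a radeon BO among generic TTM objects by comparing the destroy callback. A minimal sketch of that identity idiom (the helper name is hypothetical; the container_of step mirrors line 56 above):

    /* Recover the radeon wrapper only when the TTM object really is
     * ours; a foreign TTM BO must not be cast. */
    static struct radeon_bo *to_radeon_bo(struct ttm_buffer_object *tbo)
    {
            if (!radeon_ttm_bo_is_radeon_bo(tbo))
                    return NULL;
            return container_of(tbo, struct radeon_bo, tbo);
    }
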
136 struct radeon_bo *bo;
152 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
153 if (bo == NULL)
155 drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
156 bo->rdev = rdev;
157 bo->surface_reg = -1;
158 INIT_LIST_HEAD(&bo->list);
159 INIT_LIST_HEAD(&bo->va);
160 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
164 bo->flags = flags;
167 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
173 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
179 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
190 if (bo->flags & RADEON_GEM_GTT_WC)
193 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
199 bo->flags &= ~RADEON_GEM_GTT_WC;
202 radeon_ttm_placement_from_domain(bo, domain);
205 r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
206 &bo->placement, page_align, !kernel, sg, resv,
212 *bo_ptr = bo;
214 trace_radeon_bo_create(bo);
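Lines 136-214 are the allocation path: kzalloc the wrapper, initialize the GEM base object, sanitize the write-combine/uncached flags on platforms that cannot honor them, derive a placement from the requested domain, and hand everything to ttm_bo_init_validate() in one step. A hedged usage sketch, assuming the driver's usual radeon_bo_create(rdev, size, byte_align, kernel, domain, flags, sg, resv, bo_ptr) signature:

    struct radeon_bo *bo;
    int r;

    /* One page in the GTT domain for kernel use; no sg table and no
     * shared reservation object. */
    r = radeon_bo_create(rdev, PAGE_SIZE, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &bo);
    if (r)
            return r;
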
219 int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
224 r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
229 if (bo->kptr) {
231 *ptr = bo->kptr;
235 r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
239 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
241 *ptr = bo->kptr;
243 radeon_bo_check_tiling(bo, 0, 0);
247 void radeon_bo_kunmap(struct radeon_bo *bo)
249 if (bo->kptr == NULL)
251 bo->kptr = NULL;
252 radeon_bo_check_tiling(bo, 0, 0);
253 ttm_bo_kunmap(&bo->kmap);
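radeon_bo_kmap() first waits for the kernel fences on the reservation object, returns the cached kernel pointer if the BO is already mapped, and otherwise maps the whole object through ttm_bo_kmap(); radeon_bo_kunmap() is its inverse and also re-checks the tiling state. Both expect the BO to be reserved. A usage sketch under that assumption:

    void *ptr;
    int r;

    r = radeon_bo_reserve(bo, false);
    if (r)
            return r;
    r = radeon_bo_kmap(bo, &ptr);
    if (!r) {
            memset(ptr, 0, radeon_bo_size(bo));     /* CPU access */
            radeon_bo_kunmap(bo);
    }
    radeon_bo_unreserve(bo);
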
256 struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
258 if (bo == NULL)
261 ttm_bo_get(&bo->tbo);
262 return bo;
265 void radeon_bo_unref(struct radeon_bo **bo)
269 if ((*bo) == NULL)
271 tbo = &((*bo)->tbo);
273 *bo = NULL;
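radeon_bo_ref() and radeon_bo_unref() are thin wrappers around ttm_bo_get()/ttm_bo_put(); unref additionally clears the caller's pointer so a stale reference cannot be reused. Sketch (the consumer function is hypothetical):

    struct radeon_bo *ref;

    ref = radeon_bo_ref(bo);        /* returns NULL if bo is NULL */
    hand_to_other_context(ref);     /* hypothetical consumer */
    radeon_bo_unref(&ref);          /* drops the ref; ref is NULL after */
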
276 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
282 if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
285 if (bo->tbo.pin_count) {
286 ttm_bo_pin(&bo->tbo);
288 *gpu_addr = radeon_bo_gpu_offset(bo);
294 domain_start = bo->rdev->mc.vram_start;
296 domain_start = bo->rdev->mc.gtt_start;
298 (radeon_bo_gpu_offset(bo) - domain_start));
303 if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
308 radeon_ttm_placement_from_domain(bo, domain);
309 for (i = 0; i < bo->placement.num_placement; i++) {
311 if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
312 !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
313 (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
314 bo->placements[i].lpfn =
315 bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
317 bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
320 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
322 ttm_bo_pin(&bo->tbo);
324 *gpu_addr = radeon_bo_gpu_offset(bo);
326 bo->rdev->vram_pin_size += radeon_bo_size(bo);
328 bo->rdev->gart_pin_size += radeon_bo_size(bo);
330 dev_err(bo->rdev->dev, "%p pin failed\n", bo);
335 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
337 return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
340 void radeon_bo_unpin(struct radeon_bo *bo)
342 ttm_bo_unpin(&bo->tbo);
343 if (!bo->tbo.pin_count) {
344 if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
345 bo->rdev->vram_pin_size -= radeon_bo_size(bo);
347 bo->rdev->gart_pin_size -= radeon_bo_size(bo);
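radeon_bo_pin_restricted() either bumps an existing pin count, optionally returning the GPU offset, or builds a placement restricted to max_offset (and, for CPU-accessible VRAM, to the visible window), validates, pins, and accounts the pinned bytes. radeon_bo_pin() is the unrestricted wrapper; radeon_bo_unpin() reverses the accounting once the last pin drops. Usage sketch, with the BO reserved around each call:

    u64 gpu_addr;
    int r;

    r = radeon_bo_reserve(bo, false);
    if (r)
            return r;
    r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    radeon_bo_unreserve(bo);
    if (r)
            return r;

    /* ... point the hardware at gpu_addr ... */

    radeon_bo_reserve(bo, false);
    radeon_bo_unpin(bo);
    radeon_bo_unreserve(bo);
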
372 struct radeon_bo *bo, *n;
378 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
380 &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
381 *((unsigned long *)&bo->tbo.base.refcount));
382 mutex_lock(&bo->rdev->gem.mutex);
383 list_del_init(&bo->list);
384 mutex_unlock(&bo->rdev->gem.mutex);
385 /* this should unref the ttm bo */
386 drm_gem_object_put(&bo->tbo.base);
488 struct radeon_bo *bo = lobj->robj;
489 if (!bo->tbo.pin_count) {
493 radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
499 * any size, because it doesn't take the current "bo"
511 radeon_ttm_placement_from_domain(bo, domain);
513 radeon_uvd_force_into_uvd_segment(bo, allowed);
516 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
530 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
531 lobj->tiling_flags = bo->tiling_flags;
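Lines 488-531 belong to the command-stream validation loop: each unpinned BO on the reloc list is first validated into its preferred domain and, on failure, retried with the allowed domains (forcing the UVD segment where required) before the error is propagated. The retry shape, roughly:

    domain = lobj->preferred_domains;
    retry:
            radeon_ttm_placement_from_domain(bo, domain);
            r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
            if (unlikely(r)) {
                    if (r != -ERESTARTSYS &&
                        domain != lobj->allowed_domains) {
                            domain = lobj->allowed_domains;
                            goto retry;
                    }
                    return r;
            }
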
542 int radeon_bo_get_surface_reg(struct radeon_bo *bo)
544 struct radeon_device *rdev = bo->rdev;
550 dma_resv_assert_held(bo->tbo.base.resv);
552 if (!bo->tiling_flags)
555 if (bo->surface_reg >= 0) {
556 i = bo->surface_reg;
564 if (!reg->bo)
567 old_object = reg->bo;
578 old_object = reg->bo;
586 bo->surface_reg = i;
587 reg->bo = bo;
590 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
591 bo->tbo.resource->start << PAGE_SHIFT,
592 bo->tbo.base.size);
596 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
598 struct radeon_device *rdev = bo->rdev;
601 if (bo->surface_reg == -1)
604 reg = &rdev->surface_regs[bo->surface_reg];
605 radeon_clear_surface_reg(rdev, bo->surface_reg);
607 reg->bo = NULL;
608 bo->surface_reg = -1;
611 int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
614 struct radeon_device *rdev = bo->rdev;
662 r = radeon_bo_reserve(bo, false);
665 bo->tiling_flags = tiling_flags;
666 bo->pitch = pitch;
667 radeon_bo_unreserve(bo);
671 void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
675 dma_resv_assert_held(bo->tbo.base.resv);
678 *tiling_flags = bo->tiling_flags;
680 *pitch = bo->pitch;
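radeon_bo_set_tiling_flags() validates the tiling parameters (the elided lines 615-661 check them on newer ASICs), then reserves the BO and stores the flags and pitch; radeon_bo_get_tiling_flags() reads them back and asserts that the reservation is held. Sketch:

    uint32_t flags, pitch_out;
    int r;

    r = radeon_bo_set_tiling_flags(bo, RADEON_TILING_MACRO, pitch);
    if (r)
            return r;

    radeon_bo_reserve(bo, false);
    radeon_bo_get_tiling_flags(bo, &flags, &pitch_out);
    radeon_bo_unreserve(bo);
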
683 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
687 dma_resv_assert_held(bo->tbo.base.resv);
689 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
693 radeon_bo_clear_surface_reg(bo);
697 if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
701 if (bo->surface_reg >= 0)
702 radeon_bo_clear_surface_reg(bo);
706 if ((bo->surface_reg >= 0) && !has_moved)
709 return radeon_bo_get_surface_reg(bo);
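radeon_bo_check_tiling() is the one place that keeps the surface-register state consistent with the tiling flags and the current placement: without RADEON_TILING_SURFACE it does nothing, force_drop clears the register, a BO that has moved out of VRAM gets its register cleared, an already-registered, unmoved BO is left alone, and otherwise a register is (re)acquired via radeon_bo_get_surface_reg(). The kmap paths above call it like this:

    /* BO is reserved here, so the dma_resv_assert_held() inside
     * check_tiling is satisfied; (0, 0) means "not moved, don't
     * force-drop", as at lines 243 and 252 above. */
    radeon_bo_check_tiling(bo, 0, 0);
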
712 void radeon_bo_move_notify(struct ttm_buffer_object *bo)
716 if (!radeon_ttm_bo_is_radeon_bo(bo))
719 rbo = container_of(bo, struct radeon_bo, tbo);
724 vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
732 if (!radeon_ttm_bo_is_radeon_bo(bo))
734 rbo = container_of(bo, struct radeon_bo, tbo);
737 if (bo->resource->mem_type != TTM_PL_VRAM)
740 size = bo->resource->size;
741 offset = bo->resource->start << PAGE_SHIFT;
758 r = ttm_bo_validate(bo, &rbo->placement, &ctx);
761 r = ttm_bo_validate(bo, &rbo->placement, &ctx);
763 offset = bo->resource->start << PAGE_SHIFT;
774 ttm_bo_move_to_lru_tail_unlocked(bo);
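radeon_bo_fault_reserve_notify() handles a CPU fault on a BO whose VRAM offset lies beyond the CPU-visible aperture: it clamps the VRAM placement to the visible window, revalidates, falls back to GTT if VRAM is exhausted, and finally bumps the BO to the LRU tail. A simplified reconstruction of the recovery step, matching the two validate calls at lines 758 and 761:

    /* Fault on VRAM beyond the CPU-visible window: clamp every VRAM
     * placement to the visible size, then retry in GTT on -ENOMEM. */
    radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
    for (i = 0; i < rbo->placement.num_placement; i++)
            if (rbo->placements[i].mem_type == TTM_PL_VRAM)
                    rbo->placements[i].lpfn =
                            rdev->mc.visible_vram_size >> PAGE_SHIFT;
    r = ttm_bo_validate(bo, &rbo->placement, &ctx);
    if (r == -ENOMEM) {
            radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
            r = ttm_bo_validate(bo, &rbo->placement, &ctx);
    }
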
781 * @bo: buffer object in question
786 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
789 struct dma_resv *resv = bo->tbo.base.resv;
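radeon_bo_fence() publishes a radeon fence on the BO's reservation object so later waiters (kmap, eviction, CS synchronization) observe the pending GPU work; the shared flag distinguishes read from write usage. Call-site sketch (the preceding submit step is assumed, not shown):

    /* After queueing a GPU write into the BO, attach its fence while
     * the BO is still reserved, then release the reservation. */
    radeon_bo_fence(bo, fence, false);      /* false: exclusive/write */
    radeon_bo_unreserve(bo);
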