Lines matching refs:bo (references to the identifier bo in the TTM buffer-object core; each fragment below is prefixed with its line number in the source file)

49 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
60 man = ttm_manager_type(bo->bdev, mem_type);
68 * @bo: The buffer object.
74 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
76 dma_resv_assert_held(bo->base.resv);
78 if (bo->resource)
79 ttm_resource_move_to_lru_tail(bo->resource);
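ttm_bo_move_to_lru_tail() only asserts the reservation lock; the bdev->lru_lock protecting the LRU lists must already be held by the caller. The _unlocked helper referenced further down (file line 644) wraps it roughly like the following sketch, which assumes <drm/ttm/ttm_bo.h>:

static inline void my_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
        spin_lock(&bo->bdev->lru_lock);         /* LRU lists live under lru_lock */
        ttm_bo_move_to_lru_tail(bo);            /* still requires bo->base.resv held */
        spin_unlock(&bo->bdev->lru_lock);
}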
86 * @bo: The buffer object.
97 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
100 dma_resv_assert_held(bo->base.resv);
102 if (bo->bulk_move == bulk)
105 spin_lock(&bo->bdev->lru_lock);
106 if (bo->resource)
107 ttm_resource_del_bulk_move(bo->resource, bo);
108 bo->bulk_move = bulk;
109 if (bo->resource)
110 ttm_resource_add_bulk_move(bo->resource, bo);
111 spin_unlock(&bo->bdev->lru_lock);
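A hedged usage sketch for ttm_bo_set_bulk_move(): a driver that keeps all BOs of one VM in a shared bulk LRU section might attach and detach them as below. struct my_vm and the helper names are made up; the locking rule (hold bo->base.resv) and the need to clear the bulk pointer before the final unref (see the WARN_ON_ONCE at file line 327) follow from the listing.

struct my_vm {
        struct ttm_lru_bulk_move lru_bulk_move; /* set up with ttm_lru_bulk_move_init() */
};

static void my_vm_attach_bo(struct my_vm *vm, struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
}

static void my_vm_detach_bo(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        ttm_bo_set_bulk_move(bo, NULL);         /* must happen before the last ttm_bo_put() */
}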
115 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
120 struct ttm_device *bdev = bo->bdev;
124 old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
127 ttm_bo_unmap_virtual(bo);
137 ret = ttm_tt_create(bo, old_use_tt);
142 ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
148 ret = dma_resv_reserve_fences(bo->base.resv, 1);
152 ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
159 ctx->bytes_moved += bo->base.size;
164 ttm_bo_tt_destroy(bo);
170 * Call bo::reserved.
174 * Will release the bo::reserved lock.
177 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
179 if (bo->bdev->funcs->delete_mem_notify)
180 bo->bdev->funcs->delete_mem_notify(bo);
182 ttm_bo_tt_destroy(bo);
183 ttm_resource_free(bo, &bo->resource);
186 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
190 if (bo->base.resv == &bo->base._resv)
193 BUG_ON(!dma_resv_trylock(&bo->base._resv));
195 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
196 dma_resv_unlock(&bo->base._resv);
200 if (bo->type != ttm_bo_type_sg) {
205 spin_lock(&bo->bdev->lru_lock);
206 bo->base.resv = &bo->base._resv;
207 spin_unlock(&bo->bdev->lru_lock);
213 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
215 struct dma_resv *resv = &bo->base._resv;
229 * If bo idle, remove from lru lists, and unref.
235 * @bo: The buffer object to clean-up
241 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
245 struct dma_resv *resv = &bo->base._resv;
257 dma_resv_unlock(bo->base.resv);
258 spin_unlock(&bo->bdev->lru_lock);
269 spin_lock(&bo->bdev->lru_lock);
270 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
279 spin_unlock(&bo->bdev->lru_lock);
287 dma_resv_unlock(bo->base.resv);
288 spin_unlock(&bo->bdev->lru_lock);
292 spin_unlock(&bo->bdev->lru_lock);
293 ttm_bo_cleanup_memtype_use(bo);
296 dma_resv_unlock(bo->base.resv);
307 struct ttm_buffer_object *bo;
309 bo = container_of(work, typeof(*bo), delayed_delete);
311 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
313 dma_resv_lock(bo->base.resv, NULL);
314 ttm_bo_cleanup_memtype_use(bo);
315 dma_resv_unlock(bo->base.resv);
316 ttm_bo_put(bo);
321 struct ttm_buffer_object *bo =
323 struct ttm_device *bdev = bo->bdev;
326 WARN_ON_ONCE(bo->pin_count);
327 WARN_ON_ONCE(bo->bulk_move);
329 if (!bo->deleted) {
330 ret = ttm_bo_individualize_resv(bo);
335 dma_resv_wait_timeout(bo->base.resv,
340 if (bo->bdev->funcs->release_notify)
341 bo->bdev->funcs->release_notify(bo);
343 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
344 ttm_mem_io_free(bdev, bo->resource);
346 if (!dma_resv_test_signaled(bo->base.resv,
348 (want_init_on_free() && (bo->ttm != NULL)) ||
349 !dma_resv_trylock(bo->base.resv)) {
351 ttm_bo_flush_all_fences(bo);
352 bo->deleted = true;
354 spin_lock(&bo->bdev->lru_lock);
364 if (bo->pin_count) {
365 bo->pin_count = 0;
366 ttm_resource_move_to_lru_tail(bo->resource);
369 kref_init(&bo->kref);
370 spin_unlock(&bo->bdev->lru_lock);
372 INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
373 queue_work(bdev->wq, &bo->delayed_delete);
377 ttm_bo_cleanup_memtype_use(bo);
378 dma_resv_unlock(bo->base.resv);
382 bo->destroy(bo);
388 * @bo: The buffer object.
392 void ttm_bo_put(struct ttm_buffer_object *bo)
394 kref_put(&bo->kref, ttm_bo_release);
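Because ttm_bo_put() may be the final unref that ends in ttm_bo_release(), code that hands a BO to asynchronous work takes its own reference first. A small sketch; the worker plumbing is hypothetical, only ttm_bo_get()/ttm_bo_put() are real helpers:

static void my_worker(struct ttm_buffer_object *bo)
{
        /* ... touch the BO safely, kept alive by the caller's reference ... */
        ttm_bo_put(bo);                         /* drop the worker's reference */
}

static void my_start_async_op(struct ttm_buffer_object *bo)
{
        ttm_bo_get(bo);                         /* reference handed to the worker */
        my_worker(bo);                          /* stand-in for queueing real async work */
}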
398 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
411 ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
415 ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
417 ttm_resource_free(bo, &hop_mem);
423 static int ttm_bo_evict(struct ttm_buffer_object *bo,
426 struct ttm_device *bdev = bo->bdev;
434 dma_resv_assert_held(bo->base.resv);
438 bdev->funcs->evict_flags(bo, &placement);
441 ret = ttm_bo_wait_ctx(bo, ctx);
449 return ttm_bo_pipeline_gutting(bo);
452 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
456 bo);
457 ttm_bo_mem_space_debug(bo, &placement);
463 ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
467 ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
471 ttm_resource_free(bo, &evict_mem);
482 * @bo: The buffer object to evict
487 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
490 struct ttm_resource *res = bo->resource;
491 struct ttm_device *bdev = bo->bdev;
493 dma_resv_assert_held(bo->base.resv);
494 if (bo->resource->mem_type == TTM_PL_SYSTEM)
500 return ttm_resource_intersects(bdev, res, place, bo->base.size);
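Drivers can override eviction_valuable in their struct ttm_device_funcs and fall back to this generic helper for the common case. A hedged sketch; my_bo_needed_for_scanout() is a placeholder:

static bool my_bo_needed_for_scanout(struct ttm_buffer_object *bo)
{
        return false;                   /* placeholder: a real driver checks its own state */
}

static bool my_eviction_valuable(struct ttm_buffer_object *bo,
                                 const struct ttm_place *place)
{
        if (my_bo_needed_for_scanout(bo))
                return false;           /* keep buffers the display still needs */

        return ttm_bo_eviction_valuable(bo, place);
}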
505 * Check the target bo is allowable to be evicted or swapout, including cases:
510 * or the target bo already is in delayed free list;
514 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
521 if (bo->pin_count) {
528 if (bo->base.resv == ctx->resv) {
529 dma_resv_assert_held(bo->base.resv);
536 ret = dma_resv_trylock(bo->base.resv);
542 if (ret && place && (bo->resource->mem_type != place->mem_type ||
543 !bo->bdev->funcs->eviction_valuable(bo, place))) {
546 dma_resv_unlock(bo->base.resv);
595 struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
605 if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
608 dma_resv_locking_ctx(res->bo->base.resv))
609 busy_bo = res->bo;
613 if (ttm_bo_get_unless_zero(res->bo)) {
614 bo = res->bo;
618 dma_resv_unlock(res->bo->base.resv);
621 if (!bo) {
631 if (bo->deleted) {
632 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
634 ttm_bo_put(bo);
640 ret = ttm_bo_evict(bo, ctx);
642 ttm_bo_unreserve(bo);
644 ttm_bo_move_to_lru_tail_unlocked(bo);
646 ttm_bo_put(bo);
652 * @bo: The buffer object to pin
655 * @bo must be unpinned again by calling ttm_bo_unpin().
657 void ttm_bo_pin(struct ttm_buffer_object *bo)
659 dma_resv_assert_held(bo->base.resv);
660 WARN_ON_ONCE(!kref_read(&bo->kref));
661 spin_lock(&bo->bdev->lru_lock);
662 if (bo->resource)
663 ttm_resource_del_bulk_move(bo->resource, bo);
664 ++bo->pin_count;
665 spin_unlock(&bo->bdev->lru_lock);
671 * @bo: The buffer object to unpin
675 void ttm_bo_unpin(struct ttm_buffer_object *bo)
677 dma_resv_assert_held(bo->base.resv);
678 WARN_ON_ONCE(!kref_read(&bo->kref));
679 if (WARN_ON_ONCE(!bo->pin_count))
682 spin_lock(&bo->bdev->lru_lock);
683 --bo->pin_count;
684 if (bo->resource)
685 ttm_resource_add_bulk_move(bo->resource, bo);
686 spin_unlock(&bo->bdev->lru_lock);
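Both ttm_bo_pin() and ttm_bo_unpin() require the BO to be reserved. A hedged sketch of the usual pattern for a buffer that must stay resident while hardware uses it; the my_* wrappers are invented, the TTM calls are the ones listed above:

static int my_bo_for_hw(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_operation_ctx *ctx)
{
        int ret;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_validate(bo, placement, ctx);      /* place it first */
        if (!ret)
                ttm_bo_pin(bo);                         /* now immune to eviction */

        ttm_bo_unreserve(bo);
        return ret;
}

static void my_bo_done_with_hw(struct ttm_buffer_object *bo)
{
        ttm_bo_reserve(bo, false, false, NULL);
        ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
}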
694 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
715 dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
717 ret = dma_resv_reserve_fences(bo->base.resv, 1);
726 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
731 struct ttm_device *bdev = bo->bdev;
737 ticket = dma_resv_locking_ctx(bo->base.resv);
739 ret = ttm_resource_alloc(bo, place, mem);
750 return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
756 * @bo: Pointer to a struct ttm_buffer_object. the data of which
762 * Allocate memory space for the buffer object pointed to by @bo, using
771 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
776 struct ttm_device *bdev = bo->bdev;
780 ret = dma_resv_reserve_fences(bo->base.resv, 1);
793 ret = ttm_resource_alloc(bo, place, mem);
799 ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
801 ttm_resource_free(bo, mem);
819 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
838 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
846 dma_resv_assert_held(bo->base.resv);
857 ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
861 ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
863 ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
871 ttm_resource_free(bo, &mem);
878 * @bo: The buffer object.
890 int ttm_bo_validate(struct ttm_buffer_object *bo,
896 dma_resv_assert_held(bo->base.resv);
902 return ttm_bo_pipeline_gutting(bo);
905 if (bo->resource && ttm_resource_compat(bo->resource, placement))
909 if (bo->pin_count)
912 ret = ttm_bo_move_buffer(bo, placement, ctx);
919 if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
920 ret = ttm_tt_create(bo, true);
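A hedged sketch of driving ttm_bo_validate() with a concrete placement, here a single VRAM placement with no range restriction. Exact struct ttm_placement fields differ slightly between kernel versions, so treat the initializers as illustrative:

static int my_move_to_vram(struct ttm_buffer_object *bo,
                           struct ttm_operation_ctx *ctx)
{
        struct ttm_place vram_place = {
                .fpfn = 0,
                .lpfn = 0,                      /* 0 = no range restriction */
                .mem_type = TTM_PL_VRAM,
                .flags = 0,
        };
        struct ttm_placement placement = {
                .num_placement = 1,
                .placement = &vram_place,
        };

        dma_resv_assert_held(bo->base.resv);
        return ttm_bo_validate(bo, &placement, ctx);
}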
932 * @bo: Pointer to a ttm_buffer_object to be initialized.
946 * On successful return, the caller owns an object kref to @bo. The kref and
948 * tasks may already be holding references to @bo as well.
953 * after a failure, dereferencing @bo is illegal and will likely cause memory
961 int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
969 kref_init(&bo->kref);
970 bo->bdev = bdev;
971 bo->type = type;
972 bo->page_alignment = alignment;
973 bo->destroy = destroy;
974 bo->pin_count = 0;
975 bo->sg = sg;
976 bo->bulk_move = NULL;
978 bo->base.resv = resv;
980 bo->base.resv = &bo->base._resv;
987 if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
988 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
989 PFN_UP(bo->base.size));
998 WARN_ON(!dma_resv_trylock(bo->base.resv));
1002 ret = ttm_bo_validate(bo, placement, ctx);
1010 dma_resv_unlock(bo->base.resv);
1013 ttm_bo_put(bo);
1022 * @bo: Pointer to a ttm_buffer_object to be initialized.
1041 * On successful return, the caller owns an object kref to @bo. The kref and
1043 * tasks may already be holding references to @bo as well.
1046 * after a failure, dereferencing @bo is illegal and will likely cause memory
1054 int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
1063 ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
1069 ttm_bo_unreserve(bo);
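A hedged sketch of creating a kernel-internal BO with ttm_bo_init_reserved(), which returns with the BO still reserved so it can be pinned before unreserving; ttm_bo_init_validate() is the variant that unreserves for the caller. The destroy callback is a placeholder, and bo->base (the embedded GEM object, including base.size) is assumed to be initialized already by the caller:

static void my_bo_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);                              /* placeholder: free the driver's wrapper */
}

static int my_create_pinned_bo(struct ttm_device *bdev,
                               struct ttm_buffer_object *bo,
                               struct ttm_placement *placement)
{
        struct ttm_operation_ctx ctx = { .interruptible = true };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, ttm_bo_type_kernel, placement,
                                   0 /* page alignment */, &ctx, NULL, NULL,
                                   my_bo_destroy);
        if (ret)
                return ret;                     /* on failure the BO has already been put */

        ttm_bo_pin(bo);                         /* still holding the reservation here */
        ttm_bo_unreserve(bo);
        return 0;
}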
1082 * @bo: tear down the virtual mappings for this BO
1084 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1086 struct ttm_device *bdev = bo->bdev;
1088 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1089 ttm_mem_io_free(bdev, bo->resource);
1096 * @bo: The buffer object.
1103 int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
1108 if (dma_resv_test_signaled(bo->base.resv,
1115 ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
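A hedged sketch of waiting for all GPU work on a BO before CPU access; ttm_bo_wait_ctx() and the ttm_operation_ctx fields are real, the wrapper is made up and assumes the reservation lock is held as at the TTM call sites above:

static int my_wait_for_idle(struct ttm_buffer_object *bo)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = true,          /* allow signals to interrupt the wait */
                .no_wait_gpu = false,           /* actually block until fences signal */
        };

        dma_resv_assert_held(bo->base.resv);
        return ttm_bo_wait_ctx(bo, &ctx);
}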
1125 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
1133 * While the bo may already reside in SYSTEM placement, set
1139 place.mem_type = bo->resource->mem_type;
1140 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
1143 if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
1144 bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
1145 bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
1146 !ttm_bo_get_unless_zero(bo)) {
1148 dma_resv_unlock(bo->base.resv);
1152 if (bo->deleted) {
1153 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1154 ttm_bo_put(bo);
1159 spin_unlock(&bo->bdev->lru_lock);
1164 if (bo->resource->mem_type != TTM_PL_SYSTEM) {
1170 ret = ttm_resource_alloc(bo, &place, &evict_mem);
1174 ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
1177 ttm_resource_free(bo, &evict_mem);
1185 ret = ttm_bo_wait_ctx(bo, ctx);
1189 ttm_bo_unmap_virtual(bo);
1195 if (bo->bdev->funcs->swap_notify)
1196 bo->bdev->funcs->swap_notify(bo);
1198 if (ttm_tt_is_populated(bo->ttm))
1199 ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
1207 dma_resv_unlock(bo->base.resv);
1208 ttm_bo_put(bo);
1212 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1214 if (bo->ttm == NULL)
1217 ttm_tt_unpopulate(bo->bdev, bo->ttm);
1218 ttm_tt_destroy(bo->bdev, bo->ttm);
1219 bo->ttm = NULL;