Lines Matching refs:bo in drivers/gpu/drm/ttm/ttm_bo.c (numbers are line positions in that file)
62 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
64 kfree(bo);
67 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
75 bo, bo->mem.num_pages, bo->mem.size >> 10,
76 bo->mem.size >> 20);
81 man = ttm_manager_type(bo->bdev, mem_type);
112 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
115 struct ttm_bo_device *bdev = bo->bdev;
118 if (!list_empty(&bo->lru) || bo->pin_count)
125 list_add_tail(&bo->lru, &man->lru[bo->priority]);
127 if (man->use_tt && bo->ttm &&
128 !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
130 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
134 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
136 struct ttm_bo_device *bdev = bo->bdev;
139 if (!list_empty(&bo->swap)) {
140 list_del_init(&bo->swap);
143 if (!list_empty(&bo->lru)) {
144 list_del_init(&bo->lru);
149 bdev->driver->del_from_lru_notify(bo);
153 struct ttm_buffer_object *bo)
156 pos->first = bo;
157 pos->last = bo;
160 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
163 dma_resv_assert_held(bo->base.resv);
165 ttm_bo_del_from_lru(bo);
166 ttm_bo_add_mem_to_lru(bo, &bo->mem);
168 if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT) &&
169 !bo->pin_count) {
170 switch (bo->mem.mem_type) {
172 ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
176 ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
179 if (bo->ttm && !(bo->ttm->page_flags &
181 ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
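The ttm_bo_move_to_lru_tail() lines above take an optional struct ttm_lru_bulk_move so a caller can batch LRU updates. A minimal sketch of that pattern, assuming the ~5.10-era ttm_bo_bulk_move_lru_tail() helper and the global ttm_bo_glob.lru_lock (neither visible in this listing); my_vm and my_vm_move_bos_to_lru_tail() are hypothetical driver names:

#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical per-VM state; only the bulk-move tracker matters here. */
struct my_vm {
        struct ttm_lru_bulk_move lru_bulk_move;
};

/* Each bo must already be reserved (its dma_resv held) by the caller. */
static void my_vm_move_bos_to_lru_tail(struct my_vm *vm,
                                       struct ttm_buffer_object **bos,
                                       unsigned int count)
{
        unsigned int i;

        /* Reset the tracker before reuse. */
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

        spin_lock(&ttm_bo_glob.lru_lock);
        /* Record the first/last bo per priority in the bulk tracker. */
        for (i = 0; i < count; ++i)
                ttm_bo_move_to_lru_tail(bos[i], &vm->lru_bulk_move);
        /* Splice each recorded range to its LRU tail in one list operation. */
        ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
        spin_unlock(&ttm_bo_glob.lru_lock);
}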
236 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
240 struct ttm_bo_device *bdev = bo->bdev;
241 struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
245 ttm_bo_unmap_virtual(bo);
255 ret = ttm_tt_create(bo, old_man->use_tt);
259 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
264 ret = ttm_tt_populate(bdev, bo->ttm, ctx);
268 ret = ttm_bo_tt_bind(bo, mem);
273 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
275 bdev->driver->move_notify(bo, evict, mem);
276 bo->mem = *mem;
282 bdev->driver->move_notify(bo, evict, mem);
285 ret = ttm_bo_move_ttm(bo, ctx, mem);
287 ret = bdev->driver->move(bo, evict, ctx, mem);
289 ret = ttm_bo_move_memcpy(bo, ctx, mem);
293 swap(*mem, bo->mem);
294 bdev->driver->move_notify(bo, false, mem);
295 swap(*mem, bo->mem);
302 ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
306 new_man = ttm_manager_type(bdev, bo->mem.mem_type);
308 ttm_bo_tt_destroy(bo);
314 * Call bo::reserved.
318 * Will release the bo::reserved lock.
321 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
323 if (bo->bdev->driver->move_notify)
324 bo->bdev->driver->move_notify(bo, false, NULL);
326 ttm_bo_tt_destroy(bo);
327 ttm_resource_free(bo, &bo->mem);
330 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
334 if (bo->base.resv == &bo->base._resv)
337 BUG_ON(!dma_resv_trylock(&bo->base._resv));
339 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
340 dma_resv_unlock(&bo->base._resv);
344 if (bo->type != ttm_bo_type_sg) {
350 bo->base.resv = &bo->base._resv;
357 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
359 struct dma_resv *resv = &bo->base._resv;
381 * If bo idle, remove from lru lists, and unref.
392 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
396 struct dma_resv *resv = &bo->base._resv;
408 dma_resv_unlock(bo->base.resv);
420 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
435 if (ret || unlikely(list_empty(&bo->ddestroy))) {
437 dma_resv_unlock(bo->base.resv);
442 ttm_bo_del_from_lru(bo);
443 list_del_init(&bo->ddestroy);
445 ttm_bo_cleanup_memtype_use(bo);
448 dma_resv_unlock(bo->base.resv);
450 ttm_bo_put(bo);
469 struct ttm_buffer_object *bo;
471 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
473 list_move_tail(&bo->ddestroy, &removed);
474 if (!ttm_bo_get_unless_zero(bo))
477 if (remove_all || bo->base.resv != &bo->base._resv) {
479 dma_resv_lock(bo->base.resv, NULL);
482 ttm_bo_cleanup_refs(bo, false, !remove_all, true);
484 } else if (dma_resv_trylock(bo->base.resv)) {
485 ttm_bo_cleanup_refs(bo, false, !remove_all, true);
490 ttm_bo_put(bo);
512 struct ttm_buffer_object *bo =
514 struct ttm_bo_device *bdev = bo->bdev;
515 size_t acc_size = bo->acc_size;
518 if (!bo->deleted) {
519 ret = ttm_bo_individualize_resv(bo);
524 dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
528 if (bo->bdev->driver->release_notify)
529 bo->bdev->driver->release_notify(bo);
531 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
532 ttm_mem_io_free(bdev, &bo->mem);
535 if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
536 !dma_resv_trylock(bo->base.resv)) {
538 ttm_bo_flush_all_fences(bo);
539 bo->deleted = true;
548 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT || bo->pin_count) {
549 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
550 bo->pin_count = 0;
551 ttm_bo_del_from_lru(bo);
552 ttm_bo_add_mem_to_lru(bo, &bo->mem);
555 kref_init(&bo->kref);
556 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
565 ttm_bo_del_from_lru(bo);
566 list_del(&bo->ddestroy);
569 ttm_bo_cleanup_memtype_use(bo);
570 dma_resv_unlock(bo->base.resv);
573 dma_fence_put(bo->moving);
574 if (!ttm_bo_uses_embedded_gem_object(bo))
575 dma_resv_fini(&bo->base._resv);
576 bo->destroy(bo);
580 void ttm_bo_put(struct ttm_buffer_object *bo)
582 kref_put(&bo->kref, ttm_bo_release);
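ttm_bo_put() above drops the reference taken at init time or via ttm_bo_get()/ttm_bo_get_unless_zero() (the latter appears in the eviction and swapout walkers further down). A minimal sketch of that lookup pattern; my_try_use_bo() is a hypothetical caller:

#include <drm/ttm/ttm_bo_api.h>

/*
 * Sketch: grab a reference before touching a bo found on a shared list,
 * and drop it with ttm_bo_put() when done; ttm_bo_release() runs once
 * the last reference is gone.
 */
static bool my_try_use_bo(struct ttm_buffer_object *bo)
{
        if (!ttm_bo_get_unless_zero(bo))
                return false;   /* already headed for ttm_bo_release() */

        /* ... use bo ... */

        ttm_bo_put(bo);
        return true;
}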
600 static int ttm_bo_evict(struct ttm_buffer_object *bo,
603 struct ttm_bo_device *bdev = bo->bdev;
608 dma_resv_assert_held(bo->base.resv);
612 bdev->driver->evict_flags(bo, &placement);
615 ttm_bo_wait(bo, false, false);
617 ttm_bo_cleanup_memtype_use(bo);
618 return ttm_tt_create(bo, false);
621 evict_mem = bo->mem;
626 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
630 bo);
631 ttm_bo_mem_space_debug(bo, &placement);
636 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
640 ttm_resource_free(bo, &evict_mem);
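ttm_bo_evict() above asks the driver for eviction placements through its evict_flags hook before calling ttm_bo_mem_space() and ttm_bo_handle_move_mem(). A minimal sketch of such a hook, assuming the ~5.10-era struct ttm_bo_driver field name evict_flags and a struct ttm_place with mem_type/flags members; my_bo_evict_flags() and my_sys_place are hypothetical:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Evictions from this (hypothetical) driver land in cached system memory. */
static const struct ttm_place my_sys_place = {
        .mem_type = TTM_PL_SYSTEM,
        .flags = TTM_PL_FLAG_CACHED,
};

static void my_bo_evict_flags(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement)
{
        placement->num_placement = 1;
        placement->placement = &my_sys_place;
        placement->num_busy_placement = 1;
        placement->busy_placement = &my_sys_place;
}

If the hook instead leaves both placement counts at zero, ttm_bo_evict() drops the backing store entirely, which is the path visible at lines 615-618 of the listing.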
646 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
652 if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
653 (place->lpfn && place->lpfn <= bo->mem.start))
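ttm_bo_eviction_valuable() above is the exported default behind the driver's eviction_valuable hook used by the eviction loop below: it only treats a bo as worth evicting if it overlaps the contended fpfn/lpfn range. A minimal sketch of a driver wrapper, assuming the ~5.10-era hook signature; my_bo_eviction_valuable() is hypothetical:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static bool my_bo_eviction_valuable(struct ttm_buffer_object *bo,
                                    const struct ttm_place *place)
{
        /* Driver-specific "keep this resident" checks would go here. */

        /* Fall back to the default overlap test shown above. */
        return ttm_bo_eviction_valuable(bo, place);
}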
661 * Check the target bo is allowable to be evicted or swapout, including cases:
666 * or the target bo already is in delayed free list;
670 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
675 if (bo->pin_count) {
682 if (bo->base.resv == ctx->resv) {
683 dma_resv_assert_held(bo->base.resv);
690 ret = dma_resv_trylock(bo->base.resv);
740 struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
747 list_for_each_entry(bo, &man->lru[i], lru) {
750 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
753 dma_resv_locking_ctx(bo->base.resv))
754 busy_bo = bo;
758 if (place && !bdev->driver->eviction_valuable(bo,
761 dma_resv_unlock(bo->base.resv);
764 if (!ttm_bo_get_unless_zero(bo)) {
766 dma_resv_unlock(bo->base.resv);
773 if (&bo->lru != &man->lru[i])
776 bo = NULL;
779 if (!bo) {
789 if (bo->deleted) {
790 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
792 ttm_bo_put(bo);
798 ret = ttm_bo_evict(bo, ctx);
800 ttm_bo_unreserve(bo);
802 ttm_bo_move_to_lru_tail_unlocked(bo);
804 ttm_bo_put(bo);
811 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
831 dma_resv_add_shared_fence(bo->base.resv, fence);
833 ret = dma_resv_reserve_shared(bo->base.resv, 1);
839 dma_fence_put(bo->moving);
840 bo->moving = fence;
848 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
853 struct ttm_bo_device *bdev = bo->bdev;
858 ticket = dma_resv_locking_ctx(bo->base.resv);
860 ret = ttm_resource_alloc(bo, place, mem);
871 return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
899 * @bo: BO to find memory for
908 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
913 struct ttm_bo_device *bdev = bo->bdev;
921 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
929 ttm_bo_del_from_lru(bo);
930 ttm_bo_add_mem_to_lru(bo, mem);
944 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
949 struct ttm_bo_device *bdev = bo->bdev;
953 ret = dma_resv_reserve_shared(bo->base.resv, 1);
961 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
966 ret = ttm_resource_alloc(bo, place, mem);
973 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
975 ttm_resource_free(bo, mem);
987 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
992 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
1007 if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
1008 ttm_bo_move_to_lru_tail_unlocked(bo);
1015 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1022 dma_resv_assert_held(bo->base.resv);
1024 mem.num_pages = bo->num_pages;
1026 mem.page_alignment = bo->mem.page_alignment;
1034 ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1037 ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1040 ttm_resource_free(bo, &mem);
1087 int ttm_bo_validate(struct ttm_buffer_object *bo,
1094 dma_resv_assert_held(bo->base.resv);
1100 ret = ttm_bo_pipeline_gutting(bo);
1104 return ttm_tt_create(bo, false);
1110 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1111 ret = ttm_bo_move_buffer(bo, placement, ctx);
1115 bo->mem.placement &= TTM_PL_MASK_CACHING;
1116 bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
1121 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1122 ret = ttm_tt_create(bo, true);
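ttm_bo_validate() above is what a driver calls, with the reservation held, to (re)place a buffer according to a ttm_placement. A minimal sketch, assuming the ~5.10-era ttm_bo_reserve()/ttm_bo_unreserve() helpers and a struct ttm_place with mem_type/flags members; my_bo_move_to_system() is hypothetical:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Sketch: reserve a bo and validate it into cached system memory. */
static int my_bo_move_to_system(struct ttm_buffer_object *bo)
{
        struct ttm_operation_ctx ctx = { .interruptible = true };
        struct ttm_place place = {
                .mem_type = TTM_PL_SYSTEM,
                .flags = TTM_PL_FLAG_CACHED,
        };
        struct ttm_placement placement = {
                .num_placement = 1,
                .placement = &place,
                .num_busy_placement = 1,
                .busy_placement = &place,
        };
        int ret;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        ttm_bo_unreserve(bo);
        return ret;
}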
1131 struct ttm_buffer_object *bo,
1151 (*destroy)(bo);
1153 kfree(bo);
1161 (*destroy)(bo);
1163 kfree(bo);
1167 bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1169 kref_init(&bo->kref);
1170 INIT_LIST_HEAD(&bo->lru);
1171 INIT_LIST_HEAD(&bo->ddestroy);
1172 INIT_LIST_HEAD(&bo->swap);
1173 bo->bdev = bdev;
1174 bo->type = type;
1175 bo->num_pages = num_pages;
1176 bo->mem.size = num_pages << PAGE_SHIFT;
1177 bo->mem.mem_type = TTM_PL_SYSTEM;
1178 bo->mem.num_pages = bo->num_pages;
1179 bo->mem.mm_node = NULL;
1180 bo->mem.page_alignment = page_alignment;
1181 bo->mem.bus.offset = 0;
1182 bo->mem.bus.addr = NULL;
1183 bo->moving = NULL;
1184 bo->mem.placement = TTM_PL_FLAG_CACHED;
1185 bo->acc_size = acc_size;
1186 bo->pin_count = 0;
1187 bo->sg = sg;
1189 bo->base.resv = resv;
1190 dma_resv_assert_held(bo->base.resv);
1192 bo->base.resv = &bo->base._resv;
1194 if (!ttm_bo_uses_embedded_gem_object(bo)) {
1196 * bo.gem is not initialized, so we have to setup the
1199 dma_resv_init(&bo->base._resv);
1200 drm_vma_node_reset(&bo->base.vma_node);
1208 if (bo->type == ttm_bo_type_device ||
1209 bo->type == ttm_bo_type_sg)
1210 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1211 bo->mem.num_pages);
1217 locked = dma_resv_trylock(bo->base.resv);
1222 ret = ttm_bo_validate(bo, placement, ctx);
1226 ttm_bo_unreserve(bo);
1228 ttm_bo_put(bo);
1232 ttm_bo_move_to_lru_tail_unlocked(bo);
1239 struct ttm_buffer_object *bo,
1253 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1260 ttm_bo_unreserve(bo);
1301 struct ttm_buffer_object *bo;
1305 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1306 if (unlikely(bo == NULL))
1310 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1314 *p_bo = bo;
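ttm_bo_create() above is the convenience wrapper around kzalloc() plus ttm_bo_init(). A minimal usage sketch, assuming the signature in this tree (bdev, size, type, placement, page_alignment, interruptible, p_bo) and reusing the cached system placement idea from the earlier sketches; my_create_bo() is hypothetical:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Sketch: create a one-page, cached, system-memory kernel bo. */
static int my_create_bo(struct ttm_bo_device *bdev,
                        struct ttm_buffer_object **p_bo)
{
        struct ttm_place place = {
                .mem_type = TTM_PL_SYSTEM,
                .flags = TTM_PL_FLAG_CACHED,
        };
        struct ttm_placement placement = {
                .num_placement = 1,
                .placement = &place,
                .num_busy_placement = 1,
                .busy_placement = &place,
        };

        /* page_alignment = 0: no special alignment; interruptible waits. */
        return ttm_bo_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
                             &placement, 0, true, p_bo);
}

The resulting buffer is released later through ttm_bo_put(), as shown above.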
1483 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1485 struct ttm_bo_device *bdev = bo->bdev;
1487 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1488 ttm_mem_io_free(bdev, &bo->mem);
1492 int ttm_bo_wait(struct ttm_buffer_object *bo,
1498 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1504 timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1512 dma_resv_add_excl_fence(bo->base.resv, NULL);
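ttm_bo_wait() above waits for (or, with no_wait, merely polls) the fences attached to the bo's reservation object. A minimal usage sketch; my_bo_wait_idle() is hypothetical, and the caller is assumed to hold the reservation as the other callers in this file do:

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/* Sketch: block (interruptibly) until all fences on a reserved bo signal. */
static int my_bo_wait_idle(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);

        /* interruptible = true, no_wait = false */
        return ttm_bo_wait(bo, true, false);
}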
1523 struct ttm_buffer_object *bo;
1530 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1531 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1535 if (!ttm_bo_get_unless_zero(bo)) {
1537 dma_resv_unlock(bo->base.resv);
1553 if (bo->deleted) {
1554 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1555 ttm_bo_put(bo);
1559 ttm_bo_del_from_lru(bo);
1566 if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1567 bo->ttm->caching_state != tt_cached) {
1571 evict_mem = bo->mem;
1576 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1585 ret = ttm_bo_wait(bo, false, false);
1589 ttm_bo_unmap_virtual(bo);
1596 if (bo->bdev->driver->swap_notify)
1597 bo->bdev->driver->swap_notify(bo);
1599 ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
1608 dma_resv_unlock(bo->base.resv);
1609 ttm_bo_put(bo);
1625 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1627 if (bo->ttm == NULL)
1630 ttm_tt_destroy(bo->bdev, bo->ttm);
1631 bo->ttm = NULL;
1634 int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
1636 return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
1639 void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
1641 bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);