Lines Matching refs:bdev
81 man = ttm_manager_type(bo->bdev, mem_type);
115 struct ttm_bo_device *bdev = bo->bdev;
124 man = ttm_manager_type(bdev, mem->mem_type);
136 struct ttm_bo_device *bdev = bo->bdev;
148 if (notify && bdev->driver->del_from_lru_notify)
149 bdev->driver->del_from_lru_notify(bo);
200 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
215 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
240 struct ttm_bo_device *bdev = bo->bdev;
241 struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
242 struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
264 ret = ttm_tt_populate(bdev, bo->ttm, ctx);
274 if (bdev->driver->move_notify)
275 bdev->driver->move_notify(bo, evict, mem);
281 if (bdev->driver->move_notify)
282 bdev->driver->move_notify(bo, evict, mem);
286 else if (bdev->driver->move)
287 ret = bdev->driver->move(bo, evict, ctx, mem);
292 if (bdev->driver->move_notify) {
294 bdev->driver->move_notify(bo, false, mem);
306 new_man = ttm_manager_type(bdev, bo->mem.mem_type);
323 if (bo->bdev->driver->move_notify)
324 bo->bdev->driver->move_notify(bo, false, NULL);
459 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
468 while (!list_empty(&bdev->ddestroy)) {
471 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
493 list_splice_tail(&removed, &bdev->ddestroy);
494 empty = list_empty(&bdev->ddestroy);
502 struct ttm_bo_device *bdev =
505 if (!ttm_bo_delayed_delete(bdev, false))
506 schedule_delayed_work(&bdev->wq,
514 struct ttm_bo_device *bdev = bo->bdev;
528 if (bo->bdev->driver->release_notify)
529 bo->bdev->driver->release_notify(bo);
531 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
532 ttm_mem_io_free(bdev, &bo->mem);
556 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
559 schedule_delayed_work(&bdev->wq,
586 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
588 return cancel_delayed_work_sync(&bdev->wq);
592 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
595 schedule_delayed_work(&bdev->wq,
603 struct ttm_bo_device *bdev = bo->bdev;
612 bdev->driver->evict_flags(bo, &placement);
734 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
758 if (place && !bdev->driver->eviction_valuable(bo,
853 struct ttm_bo_device *bdev = bo->bdev;
854 struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
865 ret = ttm_mem_evict_first(bdev, man, place, ctx,
913 struct ttm_bo_device *bdev = bo->bdev;
917 man = ttm_manager_type(bdev, place->mem_type);
949 struct ttm_bo_device *bdev = bo->bdev;
972 man = ttm_manager_type(bdev, mem->mem_type);
1130 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1173 bo->bdev = bdev;
1210 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1238 int ttm_bo_init(struct ttm_bo_device *bdev,
1253 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1266 static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1279 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1293 int ttm_bo_create(struct ttm_bo_device *bdev,
1309 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1310 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1320 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1322 struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
1334 return ttm_resource_manager_force_list_clean(bdev, man);
1398 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1405 man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1407 ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
1410 list_del(&bdev->device_list);
1413 cancel_delayed_work_sync(&bdev->wq);
1415 if (ttm_bo_delayed_delete(bdev, true))
1431 static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
1433 struct ttm_resource_manager *man = &bdev->sysman;
1442 ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1446 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1462 bdev->driver = driver;
1464 ttm_bo_init_sysman(bdev);
1466 bdev->vma_manager = vma_manager;
1467 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1468 INIT_LIST_HEAD(&bdev->ddestroy);
1469 bdev->dev_mapping = mapping;
1470 bdev->need_dma32 = need_dma32;
1472 list_add_tail(&bdev->device_list, &glob->device_list);
1485 struct ttm_bo_device *bdev = bo->bdev;
1487 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1488 ttm_mem_io_free(bdev, &bo->mem);
1596 if (bo->bdev->driver->swap_notify)
1597 bo->bdev->driver->swap_notify(bo);
1599 ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
1630 ttm_tt_destroy(bo->bdev, bo->ttm);
1636 return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
1641 bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
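All of the references above go through the per-device struct ttm_bo_device that a driver embeds and hands to the TTM buffer-object core. As a rough illustration of how the ttm_bo_device_init() (line 1446) and ttm_bo_device_release() (line 1398) entry points listed above are typically wired up, here is a minimal sketch; the struct my_drm_device wrapper, the my_* helper names and the unpopulated driver table are hypothetical placeholders, not code from the file.

/*
 * Minimal sketch (an assumption, not code from the listing): setting up and
 * tearing down the struct ttm_bo_device ("bdev") that the functions above
 * operate on. struct my_drm_device and the my_* helpers are hypothetical.
 */
#include <drm/drm_device.h>
#include <drm/ttm/ttm_bo_driver.h>

struct my_drm_device {
	struct drm_device drm;		/* embedded DRM device */
	struct ttm_bo_device bdev;	/* the "bdev" the listing refers to */
};

/* Callback table reached through bdev->driver->... in the lines above
 * (evict_flags, eviction_valuable, move, move_notify, swap_notify, ...).
 * The callbacks themselves are driver specific and omitted from this sketch. */
static struct ttm_bo_driver my_bo_driver;

static int my_ttm_init(struct my_drm_device *mdev)
{
	/* ttm_bo_device_init() (line 1446) stores the driver table, sets up
	 * the SYSTEM manager, the ddestroy list and the delayed workqueue. */
	return ttm_bo_device_init(&mdev->bdev, &my_bo_driver,
				  mdev->drm.anon_inode->i_mapping,
				  mdev->drm.vma_offset_manager,
				  false /* need_dma32 */);
}

static void my_ttm_fini(struct my_drm_device *mdev)
{
	/* ttm_bo_device_release() (line 1398) cancels the delayed work and
	 * drains any buffers still pending on the ddestroy list. */
	ttm_bo_device_release(&mdev->bdev);
}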