Lines Matching refs:nvbo
141 struct nouveau_bo *nvbo = nouveau_bo(bo);
143 WARN_ON(nvbo->bo.pin_count > 0);
145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
156 kfree(nvbo);
168 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
170 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
174 if (nvbo->mode) {
177 *size = roundup_64(*size, 64 * nvbo->mode);
181 *size = roundup_64(*size, 64 * nvbo->mode);
185 *size = roundup_64(*size, 64 * nvbo->mode);
189 *size = roundup_64(*size, 32 * nvbo->mode);
193 *size = roundup_64(*size, (1 << nvbo->page));
194 *align = max((1 << nvbo->page), *align);
205 struct nouveau_bo *nvbo;
215 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
216 if (!nvbo)
219 INIT_LIST_HEAD(&nvbo->head);
220 INIT_LIST_HEAD(&nvbo->entry);
221 INIT_LIST_HEAD(&nvbo->vma_list);
222 nvbo->bo.bdev = &drm->ttm.bdev;
233 nvbo->force_coherent = true;
236 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
240 nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
241 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
242 kfree(nvbo);
246 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
248 nvbo->kind = (tile_flags & 0x00007f00) >> 8;
249 nvbo->comp = (tile_flags & 0x00030000) >> 16;
250 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
251 kfree(nvbo);
255 nvbo->zeta = (tile_flags & 0x00000007);
257 nvbo->mode = tile_mode;
279 if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
288 kfree(nvbo);
293 if (nvbo->comp && !vmm->page[pi].comp) {
295 nvbo->kind = mmu->kind[nvbo->kind];
296 nvbo->comp = 0;
298 nvbo->page = vmm->page[pi].shift;
329 kfree(nvbo);
332 nvbo->page = vmm->page[pi].shift;
335 nouveau_bo_fixup_align(nvbo, align, size);
337 return nvbo;
341 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
352 nouveau_bo_placement_set(nvbo, domain, 0);
353 INIT_LIST_HEAD(&nvbo->io_reserve_lru);
355 ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
356 &nvbo->placement, align >> PAGE_SHIFT, &ctx,
364 ttm_bo_unreserve(&nvbo->bo);
375 struct nouveau_bo *nvbo;
378 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
380 if (IS_ERR(nvbo))
381 return PTR_ERR(nvbo);
383 nvbo->bo.base.size = size;
384 dma_resv_init(&nvbo->bo.base._resv);
385 drm_vma_node_reset(&nvbo->bo.base.vma_node);
390 drm_gem_gpuva_init(&nvbo->bo.base);
392 ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
396 *pnvbo = nvbo;
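
The hits above cover the allocation path: nouveau_bo_new() wraps nouveau_bo_alloc() (which sizes and aligns the object) and nouveau_bo_init() (which hands it to TTM). A minimal caller sketch follows; the nouveau_bo_new() parameter list shifts between kernel versions, so the signature and cleanup shown here are assumptions inferred from the lines above, not a verbatim in-tree caller.

    /* Sketch: allocate a page-aligned 64 KiB VRAM buffer, then drop it.
     * Assumes the nouveau_bo_new()/nouveau_bo_ref() helpers; error paths
     * beyond the allocation itself are omitted. */
    struct nouveau_bo *nvbo = NULL;
    int ret;

    ret = nouveau_bo_new(cli, 64 * 1024, PAGE_SIZE,
                         NOUVEAU_GEM_DOMAIN_VRAM,
                         0 /* tile_mode */, 0 /* tile_flags */,
                         NULL /* sg */, NULL /* robj */, &nvbo);
    if (ret)
        return ret;

    /* ... use the buffer ... */

    nouveau_bo_ref(NULL, &nvbo);    /* release the reference */
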
422 set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
424 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
429 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
430 nvbo->bo.base.size < vram_size / 4) {
437 if (nvbo->zeta) {
444 for (i = 0; i < nvbo->placement.num_placement; ++i) {
445 nvbo->placements[i].fpfn = fpfn;
446 nvbo->placements[i].lpfn = lpfn;
448 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
449 nvbo->busy_placements[i].fpfn = fpfn;
450 nvbo->busy_placements[i].lpfn = lpfn;
456 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
459 struct ttm_placement *pl = &nvbo->placement;
461 pl->placement = nvbo->placements;
462 set_placement_list(nvbo->placements, &pl->num_placement, domain);
464 pl->busy_placement = nvbo->busy_placements;
465 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
468 set_placement_range(nvbo, domain);
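
nouveau_bo_placement_set() only rewrites the TTM placement arrays; the actual migration happens when the BO is subsequently validated under its reservation. A hedged sketch of that pairing, modelled on the driver's own pin/evict paths (the reserve/unreserve bracketing here is an assumption about caller context):

    /* Sketch: move an already-allocated BO into GART. */
    ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
    if (ret)
        return ret;

    nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
    ret = nouveau_bo_validate(nvbo, false, false);

    ttm_bo_unreserve(&nvbo->bo);
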
472 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
474 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
475 struct ttm_buffer_object *bo = &nvbo->bo;
485 if (!nvbo->contig) {
486 nvbo->contig = true;
492 if (nvbo->bo.pin_count) {
512 ttm_bo_pin(&nvbo->bo);
517 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
518 ret = nouveau_bo_validate(nvbo, false, false);
523 nouveau_bo_placement_set(nvbo, domain, 0);
524 ret = nouveau_bo_validate(nvbo, false, false);
528 ttm_bo_pin(&nvbo->bo);
543 nvbo->contig = false;
549 nouveau_bo_unpin(struct nouveau_bo *nvbo)
551 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
552 struct ttm_buffer_object *bo = &nvbo->bo;
559 ttm_bo_unpin(&nvbo->bo);
560 if (!nvbo->bo.pin_count) {
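
nouveau_bo_pin() and nouveau_bo_unpin() nest through TTM's pin_count, and the contig flag above forces a contiguous VRAM allocation for the duration of the pin. A short usage sketch (assumed caller pattern, not copied from the driver):

    /* Sketch: pin a buffer into contiguous VRAM, e.g. for scanout, and
     * release it again. Every successful pin must be balanced by an unpin. */
    ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true /* contig */);
    if (ret)
        return ret;

    /* ... hardware uses the buffer at nvbo->offset ... */

    nouveau_bo_unpin(nvbo);
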
578 nouveau_bo_map(struct nouveau_bo *nvbo)
582 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
586 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
588 ttm_bo_unreserve(&nvbo->bo);
593 nouveau_bo_unmap(struct nouveau_bo *nvbo)
595 if (!nvbo)
598 ttm_bo_kunmap(&nvbo->kmap);
602 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
604 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
605 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
616 if (nvbo->force_coherent)
638 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
640 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
641 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
652 if (nvbo->force_coherent)
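
The two sync helpers bracket CPU access to DMA-mapped system-memory pages and, as the force_coherent checks above show, degrade to no-ops for coherent buffers. A sketch of the intended ordering (an assumed usage pattern, with the actual data access elided):

    /* Sketch: CPU touches a GART-backed buffer on a non-coherent platform. */
    nouveau_bo_sync_for_cpu(nvbo);      /* make device writes visible to the CPU */

    /* ... read and/or modify the buffer through its kernel mapping ... */

    nouveau_bo_sync_for_device(nvbo);   /* flush CPU writes before GPU access */
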
676 struct nouveau_bo *nvbo = nouveau_bo(bo);
679 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
686 struct nouveau_bo *nvbo = nouveau_bo(bo);
689 list_del_init(&nvbo->io_reserve_lru);
694 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
700 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
704 nouveau_bo_sync_for_device(nvbo);
710 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
713 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
724 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
727 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
738 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
741 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
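
The rd32/wr32/wr16 accessors operate on the kmap established by nouveau_bo_map() and, judging by the helpers above, index the buffer in word-sized units rather than bytes. A sketch of the full map/access/unmap sequence (assumed usage, error handling trimmed):

    /* Sketch: CPU-access a BO through its TTM kernel mapping. */
    u32 val;
    int ret;

    ret = nouveau_bo_map(nvbo);
    if (ret)
        return ret;

    nouveau_bo_wr32(nvbo, 4, 0xdeadbeef);   /* write word 4 (byte offset 16) */
    val = nouveau_bo_rd32(nvbo, 4);         /* read it back */

    nouveau_bo_unmap(nvbo);
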
798 struct nouveau_bo *nvbo = nouveau_bo(bo);
802 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
806 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
810 *pl = nvbo->placement;
977 struct nouveau_bo *nvbo = nouveau_bo(bo);
988 mem->mem.page == nvbo->page) {
989 list_for_each_entry(vma, &nvbo->vma_list, head) {
992 nouveau_uvmm_bo_map_all(nvbo, mem);
994 list_for_each_entry(vma, &nvbo->vma_list, head) {
1001 nouveau_uvmm_bo_unmap_all(nvbo);
1005 nvbo->offset = (new_reg->start << PAGE_SHIFT);
1015 struct nouveau_bo *nvbo = nouveau_bo(bo);
1024 nvbo->mode, nvbo->zeta);
1057 struct nouveau_bo *nvbo = nouveau_bo(bo);
1128 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1248 struct nouveau_bo *nvbo;
1250 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
1251 typeof(*nvbo),
1253 if (nvbo) {
1254 list_del_init(&nvbo->io_reserve_lru);
1255 drm_vma_node_unmap(&nvbo->bo.base.vma_node,
1257 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
1258 nvbo->bo.resource->bus.offset = 0;
1259 nvbo->bo.resource->bus.addr = NULL;
1281 struct nouveau_bo *nvbo = nouveau_bo(bo);
1291 !nvbo->kind)
1297 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
1305 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1306 nvbo->placements[i].fpfn = 0;
1307 nvbo->placements[i].lpfn = mappable;
1310 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1311 nvbo->busy_placements[i].fpfn = 0;
1312 nvbo->busy_placements[i].lpfn = mappable;
1315 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
1318 ret = nouveau_bo_validate(nvbo, false, false);
1382 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1384 struct dma_resv *resv = nvbo->bo.base.resv;
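
Finally, nouveau_bo_fence() publishes a nouveau_fence on the BO's dma_resv so later users wait for the work that produced it. A minimal sketch of a caller, under the assumption that a fence was just emitted on a channel (fence emission itself is not shown):

    /* Sketch: after emitting GPU work that writes into nvbo, attach the
     * resulting fence as an exclusive (write) fence on the BO. */
    nouveau_bo_fence(nvbo, fence, true /* exclusive */);
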