Lines matching refs:nvbo (nouveau_bo.c)

140 struct nouveau_bo *nvbo = nouveau_bo(bo);
142 WARN_ON(nvbo->pin_refcnt > 0);
144 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
153 kfree(nvbo);
165 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
167 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
171 if (nvbo->mode) {
174 *size = roundup_64(*size, 64 * nvbo->mode);
178 *size = roundup_64(*size, 64 * nvbo->mode);
182 *size = roundup_64(*size, 64 * nvbo->mode);
186 *size = roundup_64(*size, 32 * nvbo->mode);
190 *size = roundup_64(*size, (1 << nvbo->page));
191 *align = max((1 << nvbo->page), *align);
202 struct nouveau_bo *nvbo;
212 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
213 if (!nvbo)
215 INIT_LIST_HEAD(&nvbo->head);
216 INIT_LIST_HEAD(&nvbo->entry);
217 INIT_LIST_HEAD(&nvbo->vma_list);
218 nvbo->bo.bdev = &drm->ttm.bdev;
229 nvbo->force_coherent = true;
233 nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
234 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
235 kfree(nvbo);
239 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
242 nvbo->kind = (tile_flags & 0x00007f00) >> 8;
243 nvbo->comp = (tile_flags & 0x00030000) >> 16;
244 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
245 kfree(nvbo);
249 nvbo->zeta = (tile_flags & 0x00000007);
251 nvbo->mode = tile_mode;
252 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
274 if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
283 kfree(nvbo);
288 if (nvbo->comp && !vmm->page[pi].comp) {
290 nvbo->kind = mmu->kind[nvbo->kind];
291 nvbo->comp = 0;
293 nvbo->page = vmm->page[pi].shift;
295 nouveau_bo_fixup_align(nvbo, align, size);
297 return nvbo;
301 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
308 acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
310 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
311 nouveau_bo_placement_set(nvbo, domain, 0);
312 INIT_LIST_HEAD(&nvbo->io_reserve_lru);
314 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
315 &nvbo->placement, align >> PAGE_SHIFT, false,
331 struct nouveau_bo *nvbo;
334 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
336 if (IS_ERR(nvbo))
337 return PTR_ERR(nvbo);
339 ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
343 *pnvbo = nvbo;
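
The nouveau_bo_alloc()/nouveau_bo_init()/nouveau_bo_new() matches above (lines 202-343) form the allocation path. A minimal caller sketch, assuming the usual nouveau driver includes and a valid struct nouveau_cli; the helper name and domain choice are illustrative, not taken from the file:

    static int
    example_bo_alloc(struct nouveau_cli *cli, u64 size,
                     struct nouveau_bo **pnvbo)
    {
            /* tile_mode = 0 and tile_flags = 0: no tiling or compression,
             * so nouveau_bo_alloc() leaves nvbo->kind/nvbo->mode at zero.
             * sg and robj are NULL for an ordinary driver-owned buffer. */
            return nouveau_bo_new(cli, size, 0,
                                  NOUVEAU_GEM_DOMAIN_VRAM |
                                  NOUVEAU_GEM_DOMAIN_GART,
                                  0, 0, NULL, NULL, pnvbo);
    }
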
382 set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
384 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
389 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
390 nvbo->bo.mem.num_pages < vram_pages / 4) {
397 if (nvbo->zeta) {
404 for (i = 0; i < nvbo->placement.num_placement; ++i) {
405 nvbo->placements[i].fpfn = fpfn;
406 nvbo->placements[i].lpfn = lpfn;
408 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
409 nvbo->busy_placements[i].fpfn = fpfn;
410 nvbo->busy_placements[i].lpfn = lpfn;
416 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
419 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
420 struct ttm_placement *pl = &nvbo->placement;
421 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
423 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
425 pl->placement = nvbo->placements;
426 set_placement_list(drm, nvbo->placements, &pl->num_placement,
429 pl->busy_placement = nvbo->busy_placements;
430 set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
433 set_placement_range(nvbo, domain);
437 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
439 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
440 struct ttm_buffer_object *bo = &nvbo->bo;
450 if (!nvbo->contig) {
451 nvbo->contig = true;
457 if (nvbo->pin_refcnt) {
476 nvbo->pin_refcnt++;
481 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
482 ret = nouveau_bo_validate(nvbo, false, false);
487 nvbo->pin_refcnt++;
488 nouveau_bo_placement_set(nvbo, domain, 0);
494 nvbo->pin_refcnt--;
495 ret = nouveau_bo_validate(nvbo, false, false);
498 nvbo->pin_refcnt++;
513 nvbo->contig = false;
519 nouveau_bo_unpin(struct nouveau_bo *nvbo)
521 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
522 struct ttm_buffer_object *bo = &nvbo->bo;
529 ref = --nvbo->pin_refcnt;
536 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
539 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
545 ret = nouveau_bo_validate(nvbo, false, false);
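
nouveau_bo_pin() and nouveau_bo_unpin() (lines 437-545) bump and drop pin_refcnt and must be paired. A minimal sketch of that pairing, assuming the buffer came from nouveau_bo_new(); the helper name is illustrative:

    static int
    example_use_pinned(struct nouveau_bo *nvbo)
    {
            int ret;

            ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
            if (ret)
                    return ret;

            /* While pin_refcnt is non-zero the buffer cannot be evicted,
             * so e.g. nvbo->offset stays valid for the hardware. */

            return nouveau_bo_unpin(nvbo);
    }
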
565 nouveau_bo_map(struct nouveau_bo *nvbo)
569 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
573 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
575 ttm_bo_unreserve(&nvbo->bo);
580 nouveau_bo_unmap(struct nouveau_bo *nvbo)
582 if (!nvbo)
585 ttm_bo_kunmap(&nvbo->kmap);
589 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
591 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
592 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
599 if (nvbo->force_coherent)
609 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
611 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
612 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
619 if (nvbo->force_coherent)
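
nouveau_bo_sync_for_device() and nouveau_bo_sync_for_cpu() (lines 589-619) bracket CPU access to a buffer backed by a ttm_dma_tt, and are skipped for force_coherent buffers. A sketch of the intended pairing, loosely modelled on the driver's cpu_prep/cpu_fini paths; the helper name is illustrative:

    static void
    example_cpu_access(struct nouveau_bo *nvbo)
    {
            /* Sync the streaming DMA mappings so device writes are
             * visible before the CPU touches the pages. */
            nouveau_bo_sync_for_cpu(nvbo);

            /* ... CPU reads/writes the buffer contents here ... */

            /* Write CPU changes back before the GPU uses the pages again. */
            nouveau_bo_sync_for_device(nvbo);
    }
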
630 struct nouveau_bo *nvbo = nouveau_bo(bo);
633 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
640 struct nouveau_bo *nvbo = nouveau_bo(bo);
643 list_del_init(&nvbo->io_reserve_lru);
648 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
654 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
658 nouveau_bo_sync_for_device(nvbo);
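
The placement_set + validate pairing (see also lines 481-482 and 1283-1286 above) is how the driver migrates a buffer: choose the desired domains, then let TTM move it. A hedged sketch of that pattern, assuming the caller already holds the BO's reservation as ttm_bo_validate() requires; the helper name is illustrative:

    static int
    example_migrate_to_gart(struct nouveau_bo *nvbo)
    {
            nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
            return nouveau_bo_validate(nvbo, true /* interruptible */, false);
    }
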
664 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
667 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
678 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
681 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
692 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
695 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
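
nouveau_bo_wr16()/rd32()/wr32() (lines 664-695) read and write through the kmap set up by nouveau_bo_map(), so the buffer must be mapped (and normally pinned) first. A small sketch; the helper name and test value are purely illustrative:

    static int
    example_poke(struct nouveau_bo *nvbo)
    {
            u32 val;
            int ret;

            ret = nouveau_bo_map(nvbo);             /* fills nvbo->kmap */
            if (ret)
                    return ret;

            nouveau_bo_wr32(nvbo, 0, 0xcafe0001);   /* index is in 32-bit words */
            val = nouveau_bo_rd32(nvbo, 0);

            nouveau_bo_unmap(nvbo);
            return val == 0xcafe0001 ? 0 : -EIO;
    }
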
752 struct nouveau_bo *nvbo = nouveau_bo(bo);
756 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
760 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
764 *pl = nvbo->placement;
999 struct nouveau_bo *nvbo = nouveau_bo(bo);
1009 mem->mem.page == nvbo->page) {
1010 list_for_each_entry(vma, &nvbo->vma_list, head) {
1014 list_for_each_entry(vma, &nvbo->vma_list, head) {
1022 nvbo->offset = (new_reg->start << PAGE_SHIFT);
1024 nvbo->offset = 0;
1035 struct nouveau_bo *nvbo = nouveau_bo(bo);
1044 nvbo->mode, nvbo->zeta);
1069 struct nouveau_bo *nvbo = nouveau_bo(bo);
1078 if (nvbo->pin_refcnt)
1079 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1121 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1130 struct nouveau_bo *nvbo = nouveau_bo(bo);
1132 return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
1237 struct nouveau_bo *nvbo;
1239 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
1240 typeof(*nvbo),
1242 if (nvbo) {
1243 list_del_init(&nvbo->io_reserve_lru);
1244 drm_vma_node_unmap(&nvbo->bo.base.vma_node,
1246 nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
1269 struct nouveau_bo *nvbo = nouveau_bo(bo);
1279 !nvbo->kind)
1283 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
1286 ret = nouveau_bo_validate(nvbo, false, false);
1298 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1299 nvbo->placements[i].fpfn = 0;
1300 nvbo->placements[i].lpfn = mappable;
1303 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1304 nvbo->busy_placements[i].fpfn = 0;
1305 nvbo->busy_placements[i].lpfn = mappable;
1308 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
1309 return nouveau_bo_validate(nvbo, false, false);
1398 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1400 struct dma_resv *resv = nvbo->bo.base.resv;
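
nouveau_bo_fence() (lines 1398-1400) records a nouveau_fence on the BO's dma_resv so later CPU waits and buffer moves serialize against in-flight GPU work. A sketch of a caller, assuming the BO is reserved and `fence` came from the channel that just used the buffer; the wrapper is illustrative:

    static void
    example_attach_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence,
                         bool wrote)
    {
            /* Writers install an exclusive fence, readers a shared one. */
            nouveau_bo_fence(nvbo, fence, wrote);
    }
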