Lines Matching refs:nvbo

43 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
44 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
55 drm_prime_gem_destroy(gem, nvbo->bo.sg);
57 ttm_bo_put(&nvbo->bo);
67 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
68 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
77 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
87 ret = nouveau_vma_new(nvbo, vmm, &vma);
91 ttm_bo_unreserve(&nvbo->bo);
117 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
144 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
145 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
154 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
158 vma = nouveau_vma_find(nvbo, vmm);
163 nouveau_gem_object_unmap(nvbo, vma);
169 ttm_bo_unreserve(&nvbo->bo);
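
The matches at lines 67-91 and 144-169 are the per-client open/close paths: the GEM object is resolved to its nouveau_bo, the TTM BO is reserved, a VMA in that client's VMM is created (open) or looked up and torn down (close), and the BO is unreserved. Below is a minimal sketch of the open side, reconstructed only from the calls visible above and assuming the driver-internal headers (nouveau_gem.h, nouveau_bo.h, nouveau_vmm.h); it illustrates the pattern, it is not the verbatim driver code.

/*
 * Open-side pattern from lines 67-91: map the BO into one client's VMM
 * while holding the TTM reservation.
 */
static int example_gem_object_open(struct drm_gem_object *gem,
				   struct nouveau_vmm *vmm)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);	/* line 67 */
	struct nouveau_vma *vma;
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);	/* line 77 */
	if (ret)
		return ret;

	ret = nouveau_vma_new(nvbo, vmm, &vma);			/* line 87 */

	ttm_bo_unreserve(&nvbo->bo);				/* line 91 */
	return ret;
}

The close side (lines 144-169) mirrors this: nouveau_vma_find() locates the client's VMA under the same reservation and nouveau_gem_object_unmap() releases it.
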
178 struct nouveau_bo *nvbo;
184 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
186 if (IS_ERR(nvbo))
187 return PTR_ERR(nvbo);
191 ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
193 drm_gem_object_release(&nvbo->bo.base);
194 kfree(nvbo);
198 ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
206 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
209 nvbo->valid_domains &= domain;
211 nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
212 *pnvbo = nvbo;
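
Lines 178-212 trace the creation order in the gem-new path: allocate the nouveau_bo, initialise the embedded GEM object, back it with TTM, then clamp the domains userspace may later place it in. A hedged sketch of that order, using the calls and argument lists shown above (error handling abbreviated; the real function additionally gates the domain clamp on the GPU generation):

/*
 * Creation order from lines 178-212.
 */
static int example_gem_new(struct nouveau_cli *cli, struct nouveau_drm *drm,
			   u64 size, int align, uint32_t domain,
			   uint32_t tile_mode, uint32_t tile_flags,
			   struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);				/* lines 186-187 */

	/* Initialise the GEM object embedded in the BO before the TTM side. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);		/* lines 193-194 */
		kfree(nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret)
		return ret;	/* the GEM reference now owns the BO */

	/* Restrict where userspace may later ask this BO to be placed. */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	nvbo->valid_domains &= domain;	/* gated on the chipset in the driver */

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;	/* line 211 */
	*pnvbo = nvbo;
	return 0;
}
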
221 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
225 if (is_power_of_2(nvbo->valid_domains))
226 rep->domain = nvbo->valid_domains;
227 else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
231 rep->offset = nvbo->offset;
233 vma = nouveau_vma_find(nvbo, vmm);
240 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
241 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
242 rep->tile_mode = nvbo->mode;
243 rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
245 rep->tile_flags |= nvbo->kind << 8;
248 rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
250 rep->tile_flags |= nvbo->zeta;
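
Lines 240-250 show how the GEM_INFO reply encodes size, mmap offset and tiling. Which of the kind/comp/zeta pieces ends up in tile_flags depends on the GPU generation; the sketch below folds that check into a hypothetical gen parameter rather than the driver's real chipset test:

/*
 * Reply packing from lines 240-250.  'gen' is a stand-in for the driver's
 * chipset-family check (2 = newest, 1 = mid, 0 = oldest).
 */
static void example_fill_info(struct nouveau_bo *nvbo,
			      struct drm_nouveau_gem_info *rep, int gen)
{
	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;		/* line 240 */
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (gen >= 2)
		rep->tile_flags |= nvbo->kind << 8;			/* line 245 */
	else if (gen == 1)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;	/* line 248 */
	else
		rep->tile_flags |= nvbo->zeta;				/* line 250 */
}
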
260 struct nouveau_bo *nvbo = NULL;
265 req->info.tile_flags, &nvbo);
269 ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
272 ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
278 drm_gem_object_put(&nvbo->bo.base);
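
Lines 260-278 are the ioctl-side idiom that follows creation: publish the new BO to userspace with drm_gem_handle_create(), fill the info reply, then drop the local reference; the handle created for file_priv holds its own. Sketch (example_gem_new is the hypothetical helper from the previous sketch; nouveau_gem_info is the driver helper visible at line 272):

/*
 * Handle-creation idiom from lines 260-278: the creation reference is
 * dropped unconditionally once the handle exists (or creation failed).
 */
static int example_gem_ioctl_new(struct drm_file *file_priv,
				 struct drm_nouveau_gem_new *req,
				 struct nouveau_cli *cli,
				 struct nouveau_drm *drm)
{
	struct nouveau_bo *nvbo = NULL;
	int ret;

	ret = example_gem_new(cli, drm, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);	/* line 265 */
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);		/* line 269 */
	if (ret == 0)
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);

	/* Drop the reference taken at creation; the handle keeps its own. */
	drm_gem_object_put(&nvbo->bo.base);			/* line 278 */
	return ret;
}
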
286 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
287 struct ttm_buffer_object *bo = &nvbo->bo;
288 uint32_t domains = valid_domains & nvbo->valid_domains &
311 nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
326 struct nouveau_bo *nvbo;
330 nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
331 b = &pbbo[nvbo->pbbo_index];
334 nouveau_bo_fence(nvbo, fence, !!b->write_domains);
345 if (unlikely(nvbo->validate_mapped)) {
346 ttm_bo_kunmap(&nvbo->kmap);
347 nvbo->validate_mapped = false;
350 list_del(&nvbo->entry);
351 nvbo->reserved_by = NULL;
352 ttm_bo_unreserve(&nvbo->bo);
353 drm_gem_object_put(&nvbo->bo.base);
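
Lines 326-353 are the per-BO teardown in the validate-fini path: attach the fence for the work just submitted, drop any kmap left behind by relocation patching, then unlink, unreserve and unreference the BO. A sketch of that cleanup for a single list entry, built only from the matches above:

/*
 * Per-BO cleanup from lines 326-353.  'fence' may be NULL when validation
 * failed before anything was submitted.
 */
static void example_validate_fini_one(struct nouveau_bo *nvbo,
				      struct drm_nouveau_gem_pushbuf_bo *b,
				      struct nouveau_fence *fence)
{
	if (fence)
		nouveau_bo_fence(nvbo, fence, !!b->write_domains);	/* line 334 */

	/* Undo a kmap set up while applying relocations. */
	if (unlikely(nvbo->validate_mapped)) {
		ttm_bo_kunmap(&nvbo->kmap);				/* line 346 */
		nvbo->validate_mapped = false;
	}

	list_del(&nvbo->entry);
	nvbo->reserved_by = NULL;
	ttm_bo_unreserve(&nvbo->bo);					/* line 352 */
	drm_gem_object_put(&nvbo->bo.base);
}
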
389 struct nouveau_bo *nvbo;
397 nvbo = nouveau_gem_object(gem);
398 if (nvbo == res_bo) {
404 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
412 ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
419 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
422 res_bo = nvbo;
433 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
442 b->user_priv = (uint64_t)(unsigned long)nvbo;
445 nvbo->reserved_by = file_priv;
446 nvbo->pbbo_index = i;
449 list_add_tail(&nvbo->entry, &both_list);
452 list_add_tail(&nvbo->entry, &vram_list);
455 list_add_tail(&nvbo->entry, &gart_list);
459 list_add_tail(&nvbo->entry, &both_list);
463 if (nvbo == res_bo)
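
Lines 389-463 are the reservation loop of validate_init(): every GEM handle in the pushbuf is resolved to its nouveau_bo, reserved against a shared ww_acquire_ctx, tagged with reserved_by/pbbo_index and sorted onto the VRAM/GART/both lists. The interesting part is the deadlock handling, reduced to a sketch below (list sorting and the duplicate-handle check are left out):

/*
 * Backoff handling from lines 389-463: reserve against the shared ticket,
 * and on -EDEADLK reserve the contended BO via the slowpath so the caller
 * can retry the whole loop with that BO already held.
 */
static int example_reserve_one(struct nouveau_bo *nvbo,
			       struct ww_acquire_ctx *ticket,
			       struct nouveau_bo **res_bo)
{
	int ret;

	if (nvbo == *res_bo) {
		/* Already reserved by the slowpath on the previous retry. */
		*res_bo = NULL;
		return 0;
	}

	ret = ttm_bo_reserve(&nvbo->bo, true, false, ticket);		/* line 412 */
	if (ret == -EDEADLK) {
		/* The driver first unreserves everything taken so far,
		 * then queue-jumps on the contended BO. */
		ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, ticket);	/* line 419 */
		if (ret == 0)
			*res_bo = nvbo;					/* line 422 */
	}
	return ret;
}
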
482 struct nouveau_bo *nvbo;
485 list_for_each_entry(nvbo, list, entry) {
486 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
488 ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
496 ret = nouveau_bo_validate(nvbo, true, false);
503 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
511 if (nvbo->offset == b->presumed.offset &&
512 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
514 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
518 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
522 b->presumed.offset = nvbo->offset;
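
Lines 482-522 are the per-BO validation pass: choose a placement from the requested domains, migrate with nouveau_bo_validate(), synchronise against work already using the BO, and if the buffer no longer matches userspace's presumed offset/domain, write the real values back so relocations get applied. One loop iteration, sketched from the matches (the real code counts how many entries need relocs; here that is signalled by the return value):

/*
 * One validation step from lines 482-522.  Returns <0 on error, 0 when the
 * presumed placement was still valid, 1 when relocations must be applied.
 */
static int example_validate_one(struct nouveau_channel *chan,
				struct nouveau_bo *nvbo,
				struct drm_nouveau_gem_pushbuf_bo *b)
{
	int ret;

	ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
				     b->write_domains, b->valid_domains);
	if (ret)
		return ret;

	ret = nouveau_bo_validate(nvbo, true, false);			/* line 496 */
	if (ret)
		return ret;

	/* Wait for (or pipeline behind) work already using this BO. */
	ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);	/* line 503 */
	if (ret)
		return ret;

	/* Presumed offset and domain still correct?  Nothing to patch. */
	if (nvbo->offset == b->presumed.offset &&
	    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
	      (b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)) ||
	     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
	      (b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))))
		return 0;

	/* Tell userspace where the BO really ended up. */
	b->presumed.domain = nvbo->bo.mem.mem_type == TTM_PL_TT ?
			     NOUVEAU_GEM_DOMAIN_GART : NOUVEAU_GEM_DOMAIN_VRAM;
	b->presumed.offset = nvbo->offset;				/* line 522 */
	b->presumed.valid = 0;
	return 1;
}
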
604 struct nouveau_bo *nvbo;
622 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
625 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
631 if (!nvbo->kmap.virtual) {
632 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
633 &nvbo->kmap);
638 nvbo->validate_mapped = true;
656 ret = ttm_bo_wait(&nvbo->bo, false, false);
662 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
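
Lines 604-662 are the relocation fixups: the target BO is recovered from user_priv, the reloc offset is range-checked, the BO is kmapped on demand (and flagged validate_mapped so the fini path unmaps it), the GPU is waited on, and the 32-bit value is written with nouveau_bo_wr32(). A single relocation write, sketched with 'data' already computed from the reloc flags:

/*
 * One relocation write from lines 604-662.
 */
static int example_apply_one_reloc(struct nouveau_bo *nvbo,
				   struct drm_nouveau_gem_pushbuf_reloc *r,
				   uint32_t data)
{
	int ret;

	/* Reject relocs pointing past the end of the target BO. */
	if (unlikely(r->reloc_bo_offset + 4 >
		     nvbo->bo.mem.num_pages << PAGE_SHIFT))		/* line 625 */
		return -EINVAL;

	/* Map the whole BO once; validate_fini drops the mapping. */
	if (!nvbo->kmap.virtual) {
		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
				  &nvbo->kmap);				/* lines 632-633 */
		if (ret)
			return ret;
		nvbo->validate_mapped = true;
	}

	/* Let the GPU finish with the BO before the CPU writes into it. */
	ret = ttm_bo_wait(&nvbo->bo, false, false);			/* line 656 */
	if (ret)
		return ret;

	nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);		/* line 662 */
	return 0;
}
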
798 struct nouveau_bo *nvbo = (void *)(unsigned long)
801 PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
812 struct nouveau_bo *nvbo = (void *)(unsigned long)
819 if (!nvbo->kmap.virtual) {
820 ret = ttm_bo_kmap(&nvbo->bo, 0,
821 nvbo->bo.mem.
823 &nvbo->kmap);
828 nvbo->validate_mapped = true;
831 nouveau_bo_wr32(nvbo, (push[i].offset +
835 PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
904 struct nouveau_bo *nvbo;
913 nvbo = nouveau_gem_object(gem);
915 lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
924 nouveau_bo_sync_for_cpu(nvbo);
936 struct nouveau_bo *nvbo;
941 nvbo = nouveau_gem_object(gem);
943 nouveau_bo_sync_for_device(nvbo);
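
The final two groups (lines 904-924 and 936-943) are the CPU access ioctls: cpu_prep waits on the BO's dma_resv fences (all of them when a write is intended, only the exclusive one otherwise) and then makes the backing storage coherent for the CPU; cpu_fini hands it back to the device with nouveau_bo_sync_for_device(). Sketch of the prep side; the 30-second timeout is an assumption, since line 915's continuation is not part of the matches:

/*
 * cpu_prep wait from lines 904-924.  'write' selects whether shared fences
 * are waited on too; 'no_wait' turns the wait into a poll.
 */
static int example_cpu_prep(struct drm_gem_object *gem, bool write, bool no_wait)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	long lret;

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);	/* line 915 */
	if (lret == 0)
		return -EBUSY;		/* still busy / timed out */
	if (lret < 0)
		return lret;

	/* Make the backing storage coherent for CPU access. */
	nouveau_bo_sync_for_cpu(nvbo);					/* line 924 */
	return 0;
}
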