Lines matching refs: nvbo
79 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
80 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
91 drm_prime_gem_destroy(gem, nvbo->bo.sg);
93 ttm_bo_put(&nvbo->bo);
103 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
104 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
114 if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
117 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
129 ret = nouveau_vma_new(nvbo, vmm, &vma);
135 ttm_bo_unreserve(&nvbo->bo);
161 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
188 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
189 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
201 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
205 vma = nouveau_vma_find(nvbo, vmm);
210 nouveau_gem_object_unmap(nvbo, vma);
216 ttm_bo_unreserve(&nvbo->bo);
241 struct nouveau_bo *nvbo;
254 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
256 if (IS_ERR(nvbo))
257 return PTR_ERR(nvbo);
259 nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
260 nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
264 ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
266 drm_gem_object_release(&nvbo->bo.base);
267 kfree(nvbo);
274 ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
286 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
289 nvbo->valid_domains &= domain;
291 *pnvbo = nvbo;
300 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
304 if (is_power_of_2(nvbo->valid_domains))
305 rep->domain = nvbo->valid_domains;
306 else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
310 rep->offset = nvbo->offset;
313 vma = nouveau_vma_find(nvbo, vmm);
321 rep->size = nvbo->bo.base.size;
322 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
323 rep->tile_mode = nvbo->mode;
324 rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
326 rep->tile_flags |= nvbo->kind << 8;
329 rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
331 rep->tile_flags |= nvbo->zeta;
341 struct nouveau_bo *nvbo = NULL;
351 req->info.tile_flags, &nvbo);
355 ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
358 ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
364 drm_gem_object_put(&nvbo->bo.base);
372 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
373 struct ttm_buffer_object *bo = &nvbo->bo;
374 uint32_t domains = valid_domains & nvbo->valid_domains &
397 nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
412 struct nouveau_bo *nvbo;
416 nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
417 b = &pbbo[nvbo->pbbo_index];
420 nouveau_bo_fence(nvbo, fence, !!b->write_domains);
431 if (unlikely(nvbo->validate_mapped)) {
432 ttm_bo_kunmap(&nvbo->kmap);
433 nvbo->validate_mapped = false;
436 list_del(&nvbo->entry);
437 nvbo->reserved_by = NULL;
438 ttm_bo_unreserve(&nvbo->bo);
439 drm_gem_object_put(&nvbo->bo.base);
475 struct nouveau_bo *nvbo;
483 nvbo = nouveau_gem_object(gem);
484 if (nvbo == res_bo) {
490 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
498 ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
505 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
508 res_bo = nvbo;
519 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
528 b->user_priv = (uint64_t)(unsigned long)nvbo;
531 nvbo->reserved_by = file_priv;
532 nvbo->pbbo_index = i;
535 list_add_tail(&nvbo->entry, &both_list);
538 list_add_tail(&nvbo->entry, &vram_list);
541 list_add_tail(&nvbo->entry, &gart_list);
545 list_add_tail(&nvbo->entry, &both_list);
549 if (nvbo == res_bo)
568 struct nouveau_bo *nvbo;
571 list_for_each_entry(nvbo, list, entry) {
572 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
574 ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
582 ret = nouveau_bo_validate(nvbo, true, false);
589 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
597 if (nvbo->offset == b->presumed.offset &&
598 ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
600 (nvbo->bo.resource->mem_type == TTM_PL_TT &&
604 if (nvbo->bo.resource->mem_type == TTM_PL_TT)
608 b->presumed.offset = nvbo->offset;
664 struct nouveau_bo *nvbo;
683 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
686 nvbo->bo.base.size)) {
692 if (!nvbo->kmap.virtual) {
693 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
694 &nvbo->kmap);
699 nvbo->validate_mapped = true;
717 lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
733 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
874 struct nouveau_bo *nvbo = (void *)(unsigned long)
877 PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
888 struct nouveau_bo *nvbo = (void *)(unsigned long)
895 if (!nvbo->kmap.virtual) {
896 ret = ttm_bo_kmap(&nvbo->bo, 0,
897 PFN_UP(nvbo->bo.base.size),
898 &nvbo->kmap);
903 nvbo->validate_mapped = true;
906 nouveau_bo_wr32(nvbo, (push[i].offset +
910 PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
979 struct nouveau_bo *nvbo;
988 nvbo = nouveau_gem_object(gem);
990 lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
1000 nouveau_bo_sync_for_cpu(nvbo);
1012 struct nouveau_bo *nvbo;
1017 nvbo = nouveau_gem_object(gem);
1019 nouveau_bo_sync_for_device(nvbo);
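
The matches around lines 188-216 (and the similar open path at 103-135) show one recurring pattern: take the buffer object's reservation with ttm_bo_reserve() before looking up or tearing down the per-client VMA, then drop it with ttm_bo_unreserve(). Below is a minimal sketch of that close-path pattern, assuming the static nouveau_gem_object_unmap() helper matched at line 161; the function name example_vma_teardown and the simplified error handling are illustrative only, not the verbatim kernel code.

static void
example_vma_teardown(struct drm_gem_object *gem, struct nouveau_vmm *vmm)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	/* The BO's reservation (dma-resv) lock protects its VMA list. */
	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma)
		nouveau_gem_object_unmap(nvbo, vma);	/* unmap and drop the VMA */

	ttm_bo_unreserve(&nvbo->bo);
}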