1bf215546Sopenharmony_ci 2bf215546Sopenharmony_ci#include "util/u_inlines.h" 3bf215546Sopenharmony_ci#include "util/u_memory.h" 4bf215546Sopenharmony_ci#include "util/u_math.h" 5bf215546Sopenharmony_ci#include "util/u_surface.h" 6bf215546Sopenharmony_ci 7bf215546Sopenharmony_ci#include "nouveau_screen.h" 8bf215546Sopenharmony_ci#include "nouveau_context.h" 9bf215546Sopenharmony_ci#include "nouveau_winsys.h" 10bf215546Sopenharmony_ci#include "nouveau_fence.h" 11bf215546Sopenharmony_ci#include "nouveau_buffer.h" 12bf215546Sopenharmony_ci#include "nouveau_mm.h" 13bf215546Sopenharmony_ci 14bf215546Sopenharmony_cistruct nouveau_transfer { 15bf215546Sopenharmony_ci struct pipe_transfer base; 16bf215546Sopenharmony_ci 17bf215546Sopenharmony_ci uint8_t *map; 18bf215546Sopenharmony_ci struct nouveau_bo *bo; 19bf215546Sopenharmony_ci struct nouveau_mm_allocation *mm; 20bf215546Sopenharmony_ci uint32_t offset; 21bf215546Sopenharmony_ci}; 22bf215546Sopenharmony_ci 23bf215546Sopenharmony_cistatic void * 24bf215546Sopenharmony_cinouveau_user_ptr_transfer_map(struct pipe_context *pipe, 25bf215546Sopenharmony_ci struct pipe_resource *resource, 26bf215546Sopenharmony_ci unsigned level, unsigned usage, 27bf215546Sopenharmony_ci const struct pipe_box *box, 28bf215546Sopenharmony_ci struct pipe_transfer **ptransfer); 29bf215546Sopenharmony_ci 30bf215546Sopenharmony_cistatic void 31bf215546Sopenharmony_cinouveau_user_ptr_transfer_unmap(struct pipe_context *pipe, 32bf215546Sopenharmony_ci struct pipe_transfer *transfer); 33bf215546Sopenharmony_ci 34bf215546Sopenharmony_cistatic inline struct nouveau_transfer * 35bf215546Sopenharmony_cinouveau_transfer(struct pipe_transfer *transfer) 36bf215546Sopenharmony_ci{ 37bf215546Sopenharmony_ci return (struct nouveau_transfer *)transfer; 38bf215546Sopenharmony_ci} 39bf215546Sopenharmony_ci 40bf215546Sopenharmony_cistatic inline bool 41bf215546Sopenharmony_cinouveau_buffer_malloc(struct nv04_resource *buf) 42bf215546Sopenharmony_ci{ 
43bf215546Sopenharmony_ci if (!buf->data) 44bf215546Sopenharmony_ci buf->data = align_malloc(buf->base.width0, NOUVEAU_MIN_BUFFER_MAP_ALIGN); 45bf215546Sopenharmony_ci return !!buf->data; 46bf215546Sopenharmony_ci} 47bf215546Sopenharmony_ci 48bf215546Sopenharmony_cistatic inline bool 49bf215546Sopenharmony_cinouveau_buffer_allocate(struct nouveau_screen *screen, 50bf215546Sopenharmony_ci struct nv04_resource *buf, unsigned domain) 51bf215546Sopenharmony_ci{ 52bf215546Sopenharmony_ci uint32_t size = align(buf->base.width0, 0x100); 53bf215546Sopenharmony_ci 54bf215546Sopenharmony_ci if (domain == NOUVEAU_BO_VRAM) { 55bf215546Sopenharmony_ci buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size, 56bf215546Sopenharmony_ci &buf->bo, &buf->offset); 57bf215546Sopenharmony_ci if (!buf->bo) 58bf215546Sopenharmony_ci return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART); 59bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0); 60bf215546Sopenharmony_ci } else 61bf215546Sopenharmony_ci if (domain == NOUVEAU_BO_GART) { 62bf215546Sopenharmony_ci buf->mm = nouveau_mm_allocate(screen->mm_GART, size, 63bf215546Sopenharmony_ci &buf->bo, &buf->offset); 64bf215546Sopenharmony_ci if (!buf->bo) 65bf215546Sopenharmony_ci return false; 66bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0); 67bf215546Sopenharmony_ci } else { 68bf215546Sopenharmony_ci assert(domain == 0); 69bf215546Sopenharmony_ci if (!nouveau_buffer_malloc(buf)) 70bf215546Sopenharmony_ci return false; 71bf215546Sopenharmony_ci } 72bf215546Sopenharmony_ci buf->domain = domain; 73bf215546Sopenharmony_ci if (buf->bo) 74bf215546Sopenharmony_ci buf->address = buf->bo->offset + buf->offset; 75bf215546Sopenharmony_ci 76bf215546Sopenharmony_ci util_range_set_empty(&buf->valid_buffer_range); 77bf215546Sopenharmony_ci 78bf215546Sopenharmony_ci return true; 79bf215546Sopenharmony_ci} 80bf215546Sopenharmony_ci 81bf215546Sopenharmony_cistatic inline 
void 82bf215546Sopenharmony_cirelease_allocation(struct nouveau_mm_allocation **mm, 83bf215546Sopenharmony_ci struct nouveau_fence *fence) 84bf215546Sopenharmony_ci{ 85bf215546Sopenharmony_ci nouveau_fence_work(fence, nouveau_mm_free_work, *mm); 86bf215546Sopenharmony_ci (*mm) = NULL; 87bf215546Sopenharmony_ci} 88bf215546Sopenharmony_ci 89bf215546Sopenharmony_ciinline void 90bf215546Sopenharmony_cinouveau_buffer_release_gpu_storage(struct nv04_resource *buf) 91bf215546Sopenharmony_ci{ 92bf215546Sopenharmony_ci assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR)); 93bf215546Sopenharmony_ci 94bf215546Sopenharmony_ci if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) { 95bf215546Sopenharmony_ci nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo); 96bf215546Sopenharmony_ci buf->bo = NULL; 97bf215546Sopenharmony_ci } else { 98bf215546Sopenharmony_ci nouveau_bo_ref(NULL, &buf->bo); 99bf215546Sopenharmony_ci } 100bf215546Sopenharmony_ci 101bf215546Sopenharmony_ci if (buf->mm) 102bf215546Sopenharmony_ci release_allocation(&buf->mm, buf->fence); 103bf215546Sopenharmony_ci 104bf215546Sopenharmony_ci if (buf->domain == NOUVEAU_BO_VRAM) 105bf215546Sopenharmony_ci NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0); 106bf215546Sopenharmony_ci if (buf->domain == NOUVEAU_BO_GART) 107bf215546Sopenharmony_ci NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0); 108bf215546Sopenharmony_ci 109bf215546Sopenharmony_ci buf->domain = 0; 110bf215546Sopenharmony_ci} 111bf215546Sopenharmony_ci 112bf215546Sopenharmony_cistatic inline bool 113bf215546Sopenharmony_cinouveau_buffer_reallocate(struct nouveau_screen *screen, 114bf215546Sopenharmony_ci struct nv04_resource *buf, unsigned domain) 115bf215546Sopenharmony_ci{ 116bf215546Sopenharmony_ci nouveau_buffer_release_gpu_storage(buf); 117bf215546Sopenharmony_ci 118bf215546Sopenharmony_ci nouveau_fence_ref(NULL, &buf->fence); 119bf215546Sopenharmony_ci 
nouveau_fence_ref(NULL, &buf->fence_wr); 120bf215546Sopenharmony_ci 121bf215546Sopenharmony_ci buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK; 122bf215546Sopenharmony_ci 123bf215546Sopenharmony_ci return nouveau_buffer_allocate(screen, buf, domain); 124bf215546Sopenharmony_ci} 125bf215546Sopenharmony_ci 126bf215546Sopenharmony_civoid 127bf215546Sopenharmony_cinouveau_buffer_destroy(struct pipe_screen *pscreen, 128bf215546Sopenharmony_ci struct pipe_resource *presource) 129bf215546Sopenharmony_ci{ 130bf215546Sopenharmony_ci struct nv04_resource *res = nv04_resource(presource); 131bf215546Sopenharmony_ci 132bf215546Sopenharmony_ci if (res->status & NOUVEAU_BUFFER_STATUS_USER_PTR) { 133bf215546Sopenharmony_ci FREE(res); 134bf215546Sopenharmony_ci return; 135bf215546Sopenharmony_ci } 136bf215546Sopenharmony_ci 137bf215546Sopenharmony_ci nouveau_buffer_release_gpu_storage(res); 138bf215546Sopenharmony_ci 139bf215546Sopenharmony_ci if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY)) 140bf215546Sopenharmony_ci align_free(res->data); 141bf215546Sopenharmony_ci 142bf215546Sopenharmony_ci nouveau_fence_ref(NULL, &res->fence); 143bf215546Sopenharmony_ci nouveau_fence_ref(NULL, &res->fence_wr); 144bf215546Sopenharmony_ci 145bf215546Sopenharmony_ci util_range_destroy(&res->valid_buffer_range); 146bf215546Sopenharmony_ci 147bf215546Sopenharmony_ci FREE(res); 148bf215546Sopenharmony_ci 149bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1); 150bf215546Sopenharmony_ci} 151bf215546Sopenharmony_ci 152bf215546Sopenharmony_ci/* Set up a staging area for the transfer. This is either done in "regular" 153bf215546Sopenharmony_ci * system memory if the driver supports push_data (nv50+) and the data is 154bf215546Sopenharmony_ci * small enough (and permit_pb == true), or in GART memory. 
155bf215546Sopenharmony_ci */ 156bf215546Sopenharmony_cistatic uint8_t * 157bf215546Sopenharmony_cinouveau_transfer_staging(struct nouveau_context *nv, 158bf215546Sopenharmony_ci struct nouveau_transfer *tx, bool permit_pb) 159bf215546Sopenharmony_ci{ 160bf215546Sopenharmony_ci const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK; 161bf215546Sopenharmony_ci const unsigned size = align(tx->base.box.width, 4) + adj; 162bf215546Sopenharmony_ci 163bf215546Sopenharmony_ci if (!nv->push_data) 164bf215546Sopenharmony_ci permit_pb = false; 165bf215546Sopenharmony_ci 166bf215546Sopenharmony_ci if ((size <= nv->screen->transfer_pushbuf_threshold) && permit_pb) { 167bf215546Sopenharmony_ci tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN); 168bf215546Sopenharmony_ci if (tx->map) 169bf215546Sopenharmony_ci tx->map += adj; 170bf215546Sopenharmony_ci } else { 171bf215546Sopenharmony_ci tx->mm = 172bf215546Sopenharmony_ci nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset); 173bf215546Sopenharmony_ci if (tx->bo) { 174bf215546Sopenharmony_ci tx->offset += adj; 175bf215546Sopenharmony_ci if (!nouveau_bo_map(tx->bo, 0, NULL)) 176bf215546Sopenharmony_ci tx->map = (uint8_t *)tx->bo->map + tx->offset; 177bf215546Sopenharmony_ci } 178bf215546Sopenharmony_ci } 179bf215546Sopenharmony_ci return tx->map; 180bf215546Sopenharmony_ci} 181bf215546Sopenharmony_ci 182bf215546Sopenharmony_ci/* Copies data from the resource into the transfer's temporary GART 183bf215546Sopenharmony_ci * buffer. Also updates buf->data if present. 184bf215546Sopenharmony_ci * 185bf215546Sopenharmony_ci * Maybe just migrate to GART right away if we actually need to do this. 
*/ 186bf215546Sopenharmony_cistatic bool 187bf215546Sopenharmony_cinouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx) 188bf215546Sopenharmony_ci{ 189bf215546Sopenharmony_ci struct nv04_resource *buf = nv04_resource(tx->base.resource); 190bf215546Sopenharmony_ci const unsigned base = tx->base.box.x; 191bf215546Sopenharmony_ci const unsigned size = tx->base.box.width; 192bf215546Sopenharmony_ci 193bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size); 194bf215546Sopenharmony_ci 195bf215546Sopenharmony_ci nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART, 196bf215546Sopenharmony_ci buf->bo, buf->offset + base, buf->domain, size); 197bf215546Sopenharmony_ci 198bf215546Sopenharmony_ci if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client)) 199bf215546Sopenharmony_ci return false; 200bf215546Sopenharmony_ci 201bf215546Sopenharmony_ci if (buf->data) 202bf215546Sopenharmony_ci memcpy(buf->data + base, tx->map, size); 203bf215546Sopenharmony_ci 204bf215546Sopenharmony_ci return true; 205bf215546Sopenharmony_ci} 206bf215546Sopenharmony_ci 207bf215546Sopenharmony_cistatic void 208bf215546Sopenharmony_cinouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, 209bf215546Sopenharmony_ci unsigned offset, unsigned size) 210bf215546Sopenharmony_ci{ 211bf215546Sopenharmony_ci struct nv04_resource *buf = nv04_resource(tx->base.resource); 212bf215546Sopenharmony_ci uint8_t *data = tx->map + offset; 213bf215546Sopenharmony_ci const unsigned base = tx->base.box.x + offset; 214bf215546Sopenharmony_ci const bool can_cb = !((base | size) & 3); 215bf215546Sopenharmony_ci 216bf215546Sopenharmony_ci if (buf->data) 217bf215546Sopenharmony_ci memcpy(data, buf->data + base, size); 218bf215546Sopenharmony_ci else 219bf215546Sopenharmony_ci buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY; 220bf215546Sopenharmony_ci 221bf215546Sopenharmony_ci if (buf->domain == NOUVEAU_BO_VRAM) 222bf215546Sopenharmony_ci 
NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size); 223bf215546Sopenharmony_ci if (buf->domain == NOUVEAU_BO_GART) 224bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size); 225bf215546Sopenharmony_ci 226bf215546Sopenharmony_ci if (tx->bo) 227bf215546Sopenharmony_ci nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain, 228bf215546Sopenharmony_ci tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size); 229bf215546Sopenharmony_ci else 230bf215546Sopenharmony_ci if (nv->push_cb && can_cb) 231bf215546Sopenharmony_ci nv->push_cb(nv, buf, 232bf215546Sopenharmony_ci base, size / 4, (const uint32_t *)data); 233bf215546Sopenharmony_ci else 234bf215546Sopenharmony_ci nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data); 235bf215546Sopenharmony_ci 236bf215546Sopenharmony_ci nouveau_fence_ref(nv->screen->fence.current, &buf->fence); 237bf215546Sopenharmony_ci nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr); 238bf215546Sopenharmony_ci} 239bf215546Sopenharmony_ci 240bf215546Sopenharmony_ci/* Does a CPU wait for the buffer's backing data to become reliably accessible 241bf215546Sopenharmony_ci * for write/read by waiting on the buffer's relevant fences. 
242bf215546Sopenharmony_ci */ 243bf215546Sopenharmony_cistatic inline bool 244bf215546Sopenharmony_cinouveau_buffer_sync(struct nouveau_context *nv, 245bf215546Sopenharmony_ci struct nv04_resource *buf, unsigned rw) 246bf215546Sopenharmony_ci{ 247bf215546Sopenharmony_ci if (rw == PIPE_MAP_READ) { 248bf215546Sopenharmony_ci if (!buf->fence_wr) 249bf215546Sopenharmony_ci return true; 250bf215546Sopenharmony_ci NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count, 251bf215546Sopenharmony_ci !nouveau_fence_signalled(buf->fence_wr)); 252bf215546Sopenharmony_ci if (!nouveau_fence_wait(buf->fence_wr, &nv->debug)) 253bf215546Sopenharmony_ci return false; 254bf215546Sopenharmony_ci } else { 255bf215546Sopenharmony_ci if (!buf->fence) 256bf215546Sopenharmony_ci return true; 257bf215546Sopenharmony_ci NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count, 258bf215546Sopenharmony_ci !nouveau_fence_signalled(buf->fence)); 259bf215546Sopenharmony_ci if (!nouveau_fence_wait(buf->fence, &nv->debug)) 260bf215546Sopenharmony_ci return false; 261bf215546Sopenharmony_ci 262bf215546Sopenharmony_ci nouveau_fence_ref(NULL, &buf->fence); 263bf215546Sopenharmony_ci } 264bf215546Sopenharmony_ci nouveau_fence_ref(NULL, &buf->fence_wr); 265bf215546Sopenharmony_ci 266bf215546Sopenharmony_ci return true; 267bf215546Sopenharmony_ci} 268bf215546Sopenharmony_ci 269bf215546Sopenharmony_cistatic inline bool 270bf215546Sopenharmony_cinouveau_buffer_busy(struct nv04_resource *buf, unsigned rw) 271bf215546Sopenharmony_ci{ 272bf215546Sopenharmony_ci if (rw == PIPE_MAP_READ) 273bf215546Sopenharmony_ci return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr)); 274bf215546Sopenharmony_ci else 275bf215546Sopenharmony_ci return (buf->fence && !nouveau_fence_signalled(buf->fence)); 276bf215546Sopenharmony_ci} 277bf215546Sopenharmony_ci 278bf215546Sopenharmony_cistatic inline void 279bf215546Sopenharmony_cinouveau_buffer_transfer_init(struct nouveau_transfer *tx, 280bf215546Sopenharmony_ci 
struct pipe_resource *resource, 281bf215546Sopenharmony_ci const struct pipe_box *box, 282bf215546Sopenharmony_ci unsigned usage) 283bf215546Sopenharmony_ci{ 284bf215546Sopenharmony_ci tx->base.resource = resource; 285bf215546Sopenharmony_ci tx->base.level = 0; 286bf215546Sopenharmony_ci tx->base.usage = usage; 287bf215546Sopenharmony_ci tx->base.box.x = box->x; 288bf215546Sopenharmony_ci tx->base.box.y = 0; 289bf215546Sopenharmony_ci tx->base.box.z = 0; 290bf215546Sopenharmony_ci tx->base.box.width = box->width; 291bf215546Sopenharmony_ci tx->base.box.height = 1; 292bf215546Sopenharmony_ci tx->base.box.depth = 1; 293bf215546Sopenharmony_ci tx->base.stride = 0; 294bf215546Sopenharmony_ci tx->base.layer_stride = 0; 295bf215546Sopenharmony_ci 296bf215546Sopenharmony_ci tx->bo = NULL; 297bf215546Sopenharmony_ci tx->map = NULL; 298bf215546Sopenharmony_ci} 299bf215546Sopenharmony_ci 300bf215546Sopenharmony_cistatic inline void 301bf215546Sopenharmony_cinouveau_buffer_transfer_del(struct nouveau_context *nv, 302bf215546Sopenharmony_ci struct nouveau_transfer *tx) 303bf215546Sopenharmony_ci{ 304bf215546Sopenharmony_ci if (tx->map) { 305bf215546Sopenharmony_ci if (likely(tx->bo)) { 306bf215546Sopenharmony_ci nouveau_fence_work(nv->screen->fence.current, 307bf215546Sopenharmony_ci nouveau_fence_unref_bo, tx->bo); 308bf215546Sopenharmony_ci if (tx->mm) 309bf215546Sopenharmony_ci release_allocation(&tx->mm, nv->screen->fence.current); 310bf215546Sopenharmony_ci } else { 311bf215546Sopenharmony_ci align_free(tx->map - 312bf215546Sopenharmony_ci (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK)); 313bf215546Sopenharmony_ci } 314bf215546Sopenharmony_ci } 315bf215546Sopenharmony_ci} 316bf215546Sopenharmony_ci 317bf215546Sopenharmony_ci/* Creates a cache in system memory of the buffer data. 
*/ 318bf215546Sopenharmony_cistatic bool 319bf215546Sopenharmony_cinouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) 320bf215546Sopenharmony_ci{ 321bf215546Sopenharmony_ci struct nouveau_transfer tx; 322bf215546Sopenharmony_ci bool ret; 323bf215546Sopenharmony_ci tx.base.resource = &buf->base; 324bf215546Sopenharmony_ci tx.base.box.x = 0; 325bf215546Sopenharmony_ci tx.base.box.width = buf->base.width0; 326bf215546Sopenharmony_ci tx.bo = NULL; 327bf215546Sopenharmony_ci tx.map = NULL; 328bf215546Sopenharmony_ci 329bf215546Sopenharmony_ci if (!buf->data) 330bf215546Sopenharmony_ci if (!nouveau_buffer_malloc(buf)) 331bf215546Sopenharmony_ci return false; 332bf215546Sopenharmony_ci if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY)) 333bf215546Sopenharmony_ci return true; 334bf215546Sopenharmony_ci nv->stats.buf_cache_count++; 335bf215546Sopenharmony_ci 336bf215546Sopenharmony_ci if (!nouveau_transfer_staging(nv, &tx, false)) 337bf215546Sopenharmony_ci return false; 338bf215546Sopenharmony_ci 339bf215546Sopenharmony_ci ret = nouveau_transfer_read(nv, &tx); 340bf215546Sopenharmony_ci if (ret) { 341bf215546Sopenharmony_ci buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY; 342bf215546Sopenharmony_ci memcpy(buf->data, tx.map, buf->base.width0); 343bf215546Sopenharmony_ci } 344bf215546Sopenharmony_ci nouveau_buffer_transfer_del(nv, &tx); 345bf215546Sopenharmony_ci return ret; 346bf215546Sopenharmony_ci} 347bf215546Sopenharmony_ci 348bf215546Sopenharmony_ci 349bf215546Sopenharmony_ci#define NOUVEAU_TRANSFER_DISCARD \ 350bf215546Sopenharmony_ci (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE) 351bf215546Sopenharmony_ci 352bf215546Sopenharmony_ci/* Checks whether it is possible to completely discard the memory backing this 353bf215546Sopenharmony_ci * resource. This can be useful if we would otherwise have to wait for a read 354bf215546Sopenharmony_ci * operation to complete on this data. 
355bf215546Sopenharmony_ci */ 356bf215546Sopenharmony_cistatic inline bool 357bf215546Sopenharmony_cinouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage) 358bf215546Sopenharmony_ci{ 359bf215546Sopenharmony_ci if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)) 360bf215546Sopenharmony_ci return false; 361bf215546Sopenharmony_ci if (unlikely(buf->base.bind & PIPE_BIND_SHARED)) 362bf215546Sopenharmony_ci return false; 363bf215546Sopenharmony_ci if (unlikely(usage & PIPE_MAP_PERSISTENT)) 364bf215546Sopenharmony_ci return false; 365bf215546Sopenharmony_ci return buf->mm && nouveau_buffer_busy(buf, PIPE_MAP_WRITE); 366bf215546Sopenharmony_ci} 367bf215546Sopenharmony_ci 368bf215546Sopenharmony_ci/* Returns a pointer to a memory area representing a window into the 369bf215546Sopenharmony_ci * resource's data. 370bf215546Sopenharmony_ci * 371bf215546Sopenharmony_ci * This may or may not be the _actual_ memory area of the resource. However 372bf215546Sopenharmony_ci * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory 373bf215546Sopenharmony_ci * area, the contents of the returned map are copied over to the resource. 374bf215546Sopenharmony_ci * 375bf215546Sopenharmony_ci * The usage indicates what the caller plans to do with the map: 376bf215546Sopenharmony_ci * 377bf215546Sopenharmony_ci * WRITE means that the user plans to write to it 378bf215546Sopenharmony_ci * 379bf215546Sopenharmony_ci * READ means that the user plans on reading from it 380bf215546Sopenharmony_ci * 381bf215546Sopenharmony_ci * DISCARD_WHOLE_RESOURCE means that the whole resource is going to be 382bf215546Sopenharmony_ci * potentially overwritten, and even if it isn't, the bits that aren't don't 383bf215546Sopenharmony_ci * need to be maintained. 384bf215546Sopenharmony_ci * 385bf215546Sopenharmony_ci * DISCARD_RANGE means that all the data in the specified range is going to 386bf215546Sopenharmony_ci * be overwritten. 
387bf215546Sopenharmony_ci * 388bf215546Sopenharmony_ci * The strategy for determining what kind of memory area to return is complex, 389bf215546Sopenharmony_ci * see comments inside of the function. 390bf215546Sopenharmony_ci */ 391bf215546Sopenharmony_civoid * 392bf215546Sopenharmony_cinouveau_buffer_transfer_map(struct pipe_context *pipe, 393bf215546Sopenharmony_ci struct pipe_resource *resource, 394bf215546Sopenharmony_ci unsigned level, unsigned usage, 395bf215546Sopenharmony_ci const struct pipe_box *box, 396bf215546Sopenharmony_ci struct pipe_transfer **ptransfer) 397bf215546Sopenharmony_ci{ 398bf215546Sopenharmony_ci struct nouveau_context *nv = nouveau_context(pipe); 399bf215546Sopenharmony_ci struct nv04_resource *buf = nv04_resource(resource); 400bf215546Sopenharmony_ci 401bf215546Sopenharmony_ci if (buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR) 402bf215546Sopenharmony_ci return nouveau_user_ptr_transfer_map(pipe, resource, level, usage, box, ptransfer); 403bf215546Sopenharmony_ci 404bf215546Sopenharmony_ci struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer); 405bf215546Sopenharmony_ci uint8_t *map; 406bf215546Sopenharmony_ci int ret; 407bf215546Sopenharmony_ci 408bf215546Sopenharmony_ci if (!tx) 409bf215546Sopenharmony_ci return NULL; 410bf215546Sopenharmony_ci nouveau_buffer_transfer_init(tx, resource, box, usage); 411bf215546Sopenharmony_ci *ptransfer = &tx->base; 412bf215546Sopenharmony_ci 413bf215546Sopenharmony_ci if (usage & PIPE_MAP_READ) 414bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1); 415bf215546Sopenharmony_ci if (usage & PIPE_MAP_WRITE) 416bf215546Sopenharmony_ci NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1); 417bf215546Sopenharmony_ci 418bf215546Sopenharmony_ci /* If we are trying to write to an uninitialized range, the user shouldn't 419bf215546Sopenharmony_ci * care what was there before. So we can treat the write as if the target 420bf215546Sopenharmony_ci * range were being discarded. 
Furthermore, since we know that even if this 421bf215546Sopenharmony_ci * buffer is busy due to GPU activity, because the contents were 422bf215546Sopenharmony_ci * uninitialized, the GPU can't care what was there, and so we can treat 423bf215546Sopenharmony_ci * the write as being unsynchronized. 424bf215546Sopenharmony_ci */ 425bf215546Sopenharmony_ci if ((usage & PIPE_MAP_WRITE) && 426bf215546Sopenharmony_ci !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) 427bf215546Sopenharmony_ci usage |= PIPE_MAP_DISCARD_RANGE | PIPE_MAP_UNSYNCHRONIZED; 428bf215546Sopenharmony_ci 429bf215546Sopenharmony_ci if (buf->domain == NOUVEAU_BO_VRAM) { 430bf215546Sopenharmony_ci if (usage & NOUVEAU_TRANSFER_DISCARD) { 431bf215546Sopenharmony_ci /* Set up a staging area for the user to write to. It will be copied 432bf215546Sopenharmony_ci * back into VRAM on unmap. */ 433bf215546Sopenharmony_ci if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) 434bf215546Sopenharmony_ci buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK; 435bf215546Sopenharmony_ci nouveau_transfer_staging(nv, tx, true); 436bf215546Sopenharmony_ci } else { 437bf215546Sopenharmony_ci if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { 438bf215546Sopenharmony_ci /* The GPU is currently writing to this buffer. Copy its current 439bf215546Sopenharmony_ci * contents to a staging area in the GART. This is necessary since 440bf215546Sopenharmony_ci * not the whole area being mapped is being discarded. 441bf215546Sopenharmony_ci */ 442bf215546Sopenharmony_ci if (buf->data) { 443bf215546Sopenharmony_ci align_free(buf->data); 444bf215546Sopenharmony_ci buf->data = NULL; 445bf215546Sopenharmony_ci } 446bf215546Sopenharmony_ci nouveau_transfer_staging(nv, tx, false); 447bf215546Sopenharmony_ci nouveau_transfer_read(nv, tx); 448bf215546Sopenharmony_ci } else { 449bf215546Sopenharmony_ci /* The buffer is currently idle. 
Create a staging area for writes, 450bf215546Sopenharmony_ci * and make sure that the cached data is up-to-date. */ 451bf215546Sopenharmony_ci if (usage & PIPE_MAP_WRITE) 452bf215546Sopenharmony_ci nouveau_transfer_staging(nv, tx, true); 453bf215546Sopenharmony_ci if (!buf->data) 454bf215546Sopenharmony_ci nouveau_buffer_cache(nv, buf); 455bf215546Sopenharmony_ci } 456bf215546Sopenharmony_ci } 457bf215546Sopenharmony_ci return buf->data ? (buf->data + box->x) : tx->map; 458bf215546Sopenharmony_ci } else 459bf215546Sopenharmony_ci if (unlikely(buf->domain == 0)) { 460bf215546Sopenharmony_ci return buf->data + box->x; 461bf215546Sopenharmony_ci } 462bf215546Sopenharmony_ci 463bf215546Sopenharmony_ci /* At this point, buf->domain == GART */ 464bf215546Sopenharmony_ci 465bf215546Sopenharmony_ci if (nouveau_buffer_should_discard(buf, usage)) { 466bf215546Sopenharmony_ci int ref = buf->base.reference.count - 1; 467bf215546Sopenharmony_ci nouveau_buffer_reallocate(nv->screen, buf, buf->domain); 468bf215546Sopenharmony_ci if (ref > 0) /* any references inside context possible ? */ 469bf215546Sopenharmony_ci nv->invalidate_resource_storage(nv, &buf->base, ref); 470bf215546Sopenharmony_ci } 471bf215546Sopenharmony_ci 472bf215546Sopenharmony_ci /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the 473bf215546Sopenharmony_ci * relevant flags. If buf->mm is set, that means this resource is part of a 474bf215546Sopenharmony_ci * larger slab bo that holds multiple resources. So in that case, don't 475bf215546Sopenharmony_ci * wait on the whole slab and instead use the logic below to return a 476bf215546Sopenharmony_ci * reasonable buffer for that case. 477bf215546Sopenharmony_ci */ 478bf215546Sopenharmony_ci ret = nouveau_bo_map(buf->bo, 479bf215546Sopenharmony_ci buf->mm ? 
0 : nouveau_screen_transfer_flags(usage), 480bf215546Sopenharmony_ci nv->client); 481bf215546Sopenharmony_ci if (ret) { 482bf215546Sopenharmony_ci FREE(tx); 483bf215546Sopenharmony_ci return NULL; 484bf215546Sopenharmony_ci } 485bf215546Sopenharmony_ci map = (uint8_t *)buf->bo->map + buf->offset + box->x; 486bf215546Sopenharmony_ci 487bf215546Sopenharmony_ci /* using kernel fences only if !buf->mm */ 488bf215546Sopenharmony_ci if ((usage & PIPE_MAP_UNSYNCHRONIZED) || !buf->mm) 489bf215546Sopenharmony_ci return map; 490bf215546Sopenharmony_ci 491bf215546Sopenharmony_ci /* If the GPU is currently reading/writing this buffer, we shouldn't 492bf215546Sopenharmony_ci * interfere with its progress. So instead we either wait for the GPU to 493bf215546Sopenharmony_ci * complete its operation, or set up a staging area to perform our work in. 494bf215546Sopenharmony_ci */ 495bf215546Sopenharmony_ci if (nouveau_buffer_busy(buf, usage & PIPE_MAP_READ_WRITE)) { 496bf215546Sopenharmony_ci if (unlikely(usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | 497bf215546Sopenharmony_ci PIPE_MAP_PERSISTENT))) { 498bf215546Sopenharmony_ci /* Discarding was not possible, must sync because 499bf215546Sopenharmony_ci * subsequent transfers might use UNSYNCHRONIZED. */ 500bf215546Sopenharmony_ci nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE); 501bf215546Sopenharmony_ci } else 502bf215546Sopenharmony_ci if (usage & PIPE_MAP_DISCARD_RANGE) { 503bf215546Sopenharmony_ci /* The whole range is being discarded, so it doesn't matter what was 504bf215546Sopenharmony_ci * there before. No need to copy anything over. 
*/ 505bf215546Sopenharmony_ci nouveau_transfer_staging(nv, tx, true); 506bf215546Sopenharmony_ci map = tx->map; 507bf215546Sopenharmony_ci } else 508bf215546Sopenharmony_ci if (nouveau_buffer_busy(buf, PIPE_MAP_READ)) { 509bf215546Sopenharmony_ci if (usage & PIPE_MAP_DONTBLOCK) 510bf215546Sopenharmony_ci map = NULL; 511bf215546Sopenharmony_ci else 512bf215546Sopenharmony_ci nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE); 513bf215546Sopenharmony_ci } else { 514bf215546Sopenharmony_ci /* It is expected that the returned buffer be a representation of the 515bf215546Sopenharmony_ci * data in question, so we must copy it over from the buffer. */ 516bf215546Sopenharmony_ci nouveau_transfer_staging(nv, tx, true); 517bf215546Sopenharmony_ci if (tx->map) 518bf215546Sopenharmony_ci memcpy(tx->map, map, box->width); 519bf215546Sopenharmony_ci map = tx->map; 520bf215546Sopenharmony_ci } 521bf215546Sopenharmony_ci } 522bf215546Sopenharmony_ci if (!map) 523bf215546Sopenharmony_ci FREE(tx); 524bf215546Sopenharmony_ci return map; 525bf215546Sopenharmony_ci} 526bf215546Sopenharmony_ci 527bf215546Sopenharmony_ci 528bf215546Sopenharmony_ci 529bf215546Sopenharmony_civoid 530bf215546Sopenharmony_cinouveau_buffer_transfer_flush_region(struct pipe_context *pipe, 531bf215546Sopenharmony_ci struct pipe_transfer *transfer, 532bf215546Sopenharmony_ci const struct pipe_box *box) 533bf215546Sopenharmony_ci{ 534bf215546Sopenharmony_ci struct nouveau_transfer *tx = nouveau_transfer(transfer); 535bf215546Sopenharmony_ci struct nv04_resource *buf = nv04_resource(transfer->resource); 536bf215546Sopenharmony_ci 537bf215546Sopenharmony_ci if (tx->map) 538bf215546Sopenharmony_ci nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width); 539bf215546Sopenharmony_ci 540bf215546Sopenharmony_ci util_range_add(&buf->base, &buf->valid_buffer_range, 541bf215546Sopenharmony_ci tx->base.box.x + box->x, 542bf215546Sopenharmony_ci tx->base.box.x + box->x + box->width); 
}

/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding
 */
void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   /* User-pointer resources never use a staging copy; take the trivial path. */
   if (buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR)
      return nouveau_user_ptr_transfer_unmap(pipe, transfer);

   struct nouveau_transfer *tx = nouveau_transfer(transfer);

   if (tx->base.usage & PIPE_MAP_WRITE) {
      if (!(tx->base.usage & PIPE_MAP_FLUSH_EXPLICIT)) {
         /* tx->map is only set when a staging/shadow copy was mapped; the
          * written range then still has to be pushed to the real resource. */
         if (tx->map)
            nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

         util_range_add(&buf->base, &buf->valid_buffer_range,
                        tx->base.box.x, tx->base.box.x + tx->base.box.width);
      }

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = true;
      }
   }

   /* No tx->bo means the write went directly through the resource mapping. */
   if (!tx->bo && (tx->base.usage & PIPE_MAP_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}


/* Copy @size bytes from src+srcx to dst+dstx. Uses a GPU copy when both
 * buffers have storage in a GPU domain, otherwise falls back to a CPU copy
 * via util_resource_copy_region.
 */
void
nouveau_copy_buffer(struct nouveau_context *nv,
                    struct nv04_resource *dst, unsigned dstx,
                    struct nv04_resource *src, unsigned srcx, unsigned size)
{
   assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);

   assert(!(dst->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
   assert(!(src->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   if (likely(dst->domain) && likely(src->domain)) {
      nv->copy_data(nv,
                    dst->bo, dst->offset + dstx, dst->domain,
                    src->bo, src->offset + srcx, src->domain, size);

      /* Record the GPU access on the current fence so later CPU maps sync. */
      dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);

      src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
      nouveau_fence_ref(nv->screen->fence.current, &src->fence);
   } else {
      struct pipe_box src_box;
      src_box.x = srcx;
      src_box.y = 0;
      src_box.z = 0;
      src_box.width = size;
      src_box.height = 1;
      src_box.depth = 1;
      util_resource_copy_region(&nv->pipe,
                                &dst->base, 0, dstx, 0, 0,
                                &src->base, 0, &src_box);
   }

   util_range_add(&dst->base, &dst->valid_buffer_range, dstx, dstx + size);
}


/* Return a CPU pointer to the buffer's data at byte @offset, syncing with
 * the GPU as required. @flags are NOUVEAU_BO_RD/WR access flags.
 */
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   /* User memory / user pointers are directly CPU-addressable. */
   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) ||
       unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_PTR))
      return res->data + offset;

   if (res->domain == NOUVEAU_BO_VRAM) {
      /* Refresh the system-memory shadow if missing or stale wrt GPU writes. */
      if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
         nouveau_buffer_cache(nv, res);
   }
   if (res->domain != NOUVEAU_BO_GART)
      return res->data + offset;

   if (res->mm) {
      /* Sub-allocated bo: wait on fences ourselves, then map non-blocking. */
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ?
         PIPE_MAP_WRITE : PIPE_MAP_READ;
      nouveau_buffer_sync(nv, res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}

/* Transfer map for user-pointer buffers: the data is already CPU-visible,
 * so just record the transfer and hand the pointer back.
 */
static void *
nouveau_user_ptr_transfer_map(struct pipe_context *pipe,
                              struct pipe_resource *resource,
                              unsigned level, unsigned usage,
                              const struct pipe_box *box,
                              struct pipe_transfer **ptransfer)
{
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;
   return nv04_resource(resource)->data;
}

/* Unmap for user-pointer buffers: nothing was staged, just free the
 * transfer object.
 */
static void
nouveau_user_ptr_transfer_unmap(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   FREE(tx);
}

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen =
nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   bool ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   /* Select the memory domain from flags, bindings and usage. */
   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
      /* Persistent/coherent mappings need CPU-visible (GART) storage. */
      buffer->domain = NOUVEAU_BO_GART;
   } else if (buffer->base.bind == 0 || (buffer->base.bind &
              (screen->vidmem_bindings & screen->sysmem_bindings))) {
      /* Either domain would satisfy the bindings; decide by usage. */
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      /* The bindings allow only one of the two domains. */
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NV_VRAM_DOMAIN(screen);
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }

   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == false)
      goto fail;

   /* Optionally keep a system-memory mirror of VRAM buffers. */
   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   util_range_init(&buffer->valid_buffer_range);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

/* Wrap an application-provided pointer as a buffer resource. */
struct pipe_resource *
nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
                                const struct pipe_resource *templ,
                                void *user_ptr)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   /* set address and data to the same thing for higher compatibility with
    * existing code. It's correct nonetheless as the same pointer is equally
    * valid on the CPU and the GPU.
    */
   buffer->address = (uintptr_t)user_ptr;
   buffer->data = user_ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_PTR;
   buffer->base.screen = pscreen;

   pipe_reference_init(&buffer->base.reference, 1);

   return &buffer->base;
}

/* Wrap user memory as an immutable R8 buffer resource; the whole range is
 * marked valid up front.
 */
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   util_range_init(&buffer->valid_buffer_range);
   util_range_add(&buffer->base, &buffer->valid_buffer_range, 0, bytes);

   return &buffer->base;
}

/* Read @size bytes at @offset from @bo into the buffer's system-memory copy,
 * allocating that copy first if necessary.
 */
static inline bool
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!nouveau_buffer_malloc(buf))
      return false;
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
      return false;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return true;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM.
*/ 812bf215546Sopenharmony_cibool 813bf215546Sopenharmony_cinouveau_buffer_migrate(struct nouveau_context *nv, 814bf215546Sopenharmony_ci struct nv04_resource *buf, const unsigned new_domain) 815bf215546Sopenharmony_ci{ 816bf215546Sopenharmony_ci assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR)); 817bf215546Sopenharmony_ci 818bf215546Sopenharmony_ci struct nouveau_screen *screen = nv->screen; 819bf215546Sopenharmony_ci struct nouveau_bo *bo; 820bf215546Sopenharmony_ci const unsigned old_domain = buf->domain; 821bf215546Sopenharmony_ci unsigned size = buf->base.width0; 822bf215546Sopenharmony_ci unsigned offset; 823bf215546Sopenharmony_ci int ret; 824bf215546Sopenharmony_ci 825bf215546Sopenharmony_ci assert(new_domain != old_domain); 826bf215546Sopenharmony_ci 827bf215546Sopenharmony_ci if (new_domain == NOUVEAU_BO_GART && old_domain == 0) { 828bf215546Sopenharmony_ci if (!nouveau_buffer_allocate(screen, buf, new_domain)) 829bf215546Sopenharmony_ci return false; 830bf215546Sopenharmony_ci ret = nouveau_bo_map(buf->bo, 0, nv->client); 831bf215546Sopenharmony_ci if (ret) 832bf215546Sopenharmony_ci return ret; 833bf215546Sopenharmony_ci memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size); 834bf215546Sopenharmony_ci align_free(buf->data); 835bf215546Sopenharmony_ci } else 836bf215546Sopenharmony_ci if (old_domain != 0 && new_domain != 0) { 837bf215546Sopenharmony_ci struct nouveau_mm_allocation *mm = buf->mm; 838bf215546Sopenharmony_ci 839bf215546Sopenharmony_ci if (new_domain == NOUVEAU_BO_VRAM) { 840bf215546Sopenharmony_ci /* keep a system memory copy of our data in case we hit a fallback */ 841bf215546Sopenharmony_ci if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size)) 842bf215546Sopenharmony_ci return false; 843bf215546Sopenharmony_ci if (nouveau_mesa_debug) 844bf215546Sopenharmony_ci debug_printf("migrating %u KiB to VRAM\n", size / 1024); 845bf215546Sopenharmony_ci } 846bf215546Sopenharmony_ci 847bf215546Sopenharmony_ci offset = 
buf->offset; 848bf215546Sopenharmony_ci bo = buf->bo; 849bf215546Sopenharmony_ci buf->bo = NULL; 850bf215546Sopenharmony_ci buf->mm = NULL; 851bf215546Sopenharmony_ci nouveau_buffer_allocate(screen, buf, new_domain); 852bf215546Sopenharmony_ci 853bf215546Sopenharmony_ci nv->copy_data(nv, buf->bo, buf->offset, new_domain, 854bf215546Sopenharmony_ci bo, offset, old_domain, buf->base.width0); 855bf215546Sopenharmony_ci 856bf215546Sopenharmony_ci nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo); 857bf215546Sopenharmony_ci if (mm) 858bf215546Sopenharmony_ci release_allocation(&mm, screen->fence.current); 859bf215546Sopenharmony_ci } else 860bf215546Sopenharmony_ci if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) { 861bf215546Sopenharmony_ci struct nouveau_transfer tx; 862bf215546Sopenharmony_ci if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM)) 863bf215546Sopenharmony_ci return false; 864bf215546Sopenharmony_ci tx.base.resource = &buf->base; 865bf215546Sopenharmony_ci tx.base.box.x = 0; 866bf215546Sopenharmony_ci tx.base.box.width = buf->base.width0; 867bf215546Sopenharmony_ci tx.bo = NULL; 868bf215546Sopenharmony_ci tx.map = NULL; 869bf215546Sopenharmony_ci if (!nouveau_transfer_staging(nv, &tx, false)) 870bf215546Sopenharmony_ci return false; 871bf215546Sopenharmony_ci nouveau_transfer_write(nv, &tx, 0, tx.base.box.width); 872bf215546Sopenharmony_ci nouveau_buffer_transfer_del(nv, &tx); 873bf215546Sopenharmony_ci } else 874bf215546Sopenharmony_ci return false; 875bf215546Sopenharmony_ci 876bf215546Sopenharmony_ci assert(buf->domain == new_domain); 877bf215546Sopenharmony_ci return true; 878bf215546Sopenharmony_ci} 879bf215546Sopenharmony_ci 880bf215546Sopenharmony_ci/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART. 881bf215546Sopenharmony_ci * We'd like to only allocate @size bytes here, but then we'd have to rebase 882bf215546Sopenharmony_ci * the vertex indices ... 
 */
bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   /* Allocate the full [0, base + size) range (see comment above). */
   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return false;

   ret = nouveau_bo_map(buf->bo, 0, nv->client);
   if (ret)
      return false;
   /* Only the requested sub-range is actually copied into the bo. */
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return true;
}

/* Invalidate underlying buffer storage, reset fences, reallocate to non-busy
 * buffer.
 */
void
nouveau_buffer_invalidate(struct pipe_context *pipe,
                          struct pipe_resource *resource)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   int ref = buf->base.reference.count - 1;

   assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   /* Shared buffers shouldn't get reallocated */
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return;

   /* If the buffer is sub-allocated and not currently being written, just
    * wipe the valid buffer range. Otherwise we have to create fresh
    * storage. (We don't keep track of fences for non-sub-allocated BO's.)
    */
   if (buf->mm && !nouveau_buffer_busy(buf, PIPE_MAP_WRITE)) {
      util_range_set_empty(&buf->valid_buffer_range);
   } else {
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }
}


/* Scratch data allocation.
 */

/* Allocate one CPU-mappable GART bo of @size bytes for scratch use. */
static inline int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

/* Fence-work callback: unreference all runout bos and free the runout
 * descriptor itself.
 */
static void
nouveau_scratch_unref_bos(void *d)
{
   struct runout *b = d;
   int i;

   for (i = 0; i < b->nr; ++i)
      nouveau_bo_ref(NULL, &b->bo[i]);

   FREE(b);
}

/* Schedule release of the runout bos for when the GPU is done with them
 * (deferred via the current fence).
 */
void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.runout)
      return;

   /* If the fence work cannot be scheduled, keep the runout for a retry. */
   if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
                           nv->scratch.runout))
      return;

   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
977bf215546Sopenharmony_ci */ 978bf215546Sopenharmony_cistatic inline bool 979bf215546Sopenharmony_cinouveau_scratch_runout(struct nouveau_context *nv, unsigned size) 980bf215546Sopenharmony_ci{ 981bf215546Sopenharmony_ci int ret; 982bf215546Sopenharmony_ci unsigned n; 983bf215546Sopenharmony_ci 984bf215546Sopenharmony_ci if (nv->scratch.runout) 985bf215546Sopenharmony_ci n = nv->scratch.runout->nr; 986bf215546Sopenharmony_ci else 987bf215546Sopenharmony_ci n = 0; 988bf215546Sopenharmony_ci nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 : 989bf215546Sopenharmony_ci (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)), 990bf215546Sopenharmony_ci sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *)); 991bf215546Sopenharmony_ci nv->scratch.runout->nr = n + 1; 992bf215546Sopenharmony_ci nv->scratch.runout->bo[n] = NULL; 993bf215546Sopenharmony_ci 994bf215546Sopenharmony_ci ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size); 995bf215546Sopenharmony_ci if (!ret) { 996bf215546Sopenharmony_ci ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL); 997bf215546Sopenharmony_ci if (ret) 998bf215546Sopenharmony_ci nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]); 999bf215546Sopenharmony_ci } 1000bf215546Sopenharmony_ci if (!ret) { 1001bf215546Sopenharmony_ci nv->scratch.current = nv->scratch.runout->bo[n]; 1002bf215546Sopenharmony_ci nv->scratch.offset = 0; 1003bf215546Sopenharmony_ci nv->scratch.end = size; 1004bf215546Sopenharmony_ci nv->scratch.map = nv->scratch.current->map; 1005bf215546Sopenharmony_ci } 1006bf215546Sopenharmony_ci return !ret; 1007bf215546Sopenharmony_ci} 1008bf215546Sopenharmony_ci 1009bf215546Sopenharmony_ci/* Continue to next scratch buffer, if available (no wrapping, large enough). 1010bf215546Sopenharmony_ci * Allocate it if it has not yet been created. 
 */
static inline bool
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   /* Fail if the request exceeds one scratch bo, or if advancing would wrap
    * onto a buffer the GPU may still be reading. */
   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return false;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      /* Scratch bos are created lazily on first use. */
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return false;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

/* Get more scratch space: try the next ring buffer first, fall back to a
 * dedicated runout allocation.
 */
static bool
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   bool ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}


/* Copy
data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      /* Doesn't fit at the current offset: move to fresh scratch space. */
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   /* Returned address is biased by -base, so address + base hits the data. */
   return (*bo)->offset + (bgn - base);
}

/* Reserve @size bytes of scratch space; returns the CPU pointer and fills
 * in the GPU address and bo.
 */
void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      /* No room left: grab a new scratch buffer and start at 0. */
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}