/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_context.h"
#include "ir3/ir3_cache.h"
#include "util/u_upload_mgr.h"
#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_fence.h"
#include "freedreno_gmem.h"
#include "freedreno_program.h"
#include "freedreno_query.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"
#include "freedreno_tracepoints.h"
#include "util/u_trace_gallium.h"

static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
                 unsigned flags) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_fence_handle *fence = NULL;
   struct fd_batch *batch = NULL;

   /* We want to look up the current batch if it exists, but not create a
    * new one if not (unless we need a fence)
    */
   fd_batch_reference(&batch, ctx->batch);

   DBG("%p: flush: flags=%x, fencep=%p", batch, flags, fencep);

   if (fencep && !batch) {
      batch = fd_context_batch(ctx);
   } else if (!batch) {
      if (ctx->screen->reorder)
         fd_bc_flush(ctx, flags & PIPE_FLUSH_DEFERRED);
      fd_bc_dump(ctx, "%p: NULL batch, remaining:\n", ctx);
      return;
   }

   /* With TC_FLUSH_ASYNC, the fence will have been pre-created from
    * the front-end thread. But not yet associated with a batch,
    * because we cannot safely access ctx->batch outside of the driver
    * thread. So instead, replace the existing batch->fence with the
    * one created earlier
    */
   if ((flags & TC_FLUSH_ASYNC) && fencep) {
      /* We don't currently expect async+flush in the fence-fd
       * case.. for that to work properly we'd need TC to tell
       * us in the create_fence callback that it needs an fd.
       */
      assert(!(flags & PIPE_FLUSH_FENCE_FD));

      fd_fence_set_batch(*fencep, batch);
      fd_fence_ref(&batch->fence, *fencep);

      /* If we have nothing to flush, update the pre-created unflushed
       * fence with the current state of the last-fence:
       */
      if (ctx->last_fence) {
         fd_fence_repopulate(*fencep, ctx->last_fence);
         fd_fence_ref(&fence, *fencep);
         fd_bc_dump(ctx, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
         goto out;
      }

      /* async flush is not compatible with deferred flush, since
       * nothing triggers the batch flush which fence_flush() would
       * be waiting for
       */
      flags &= ~PIPE_FLUSH_DEFERRED;
   } else if (!batch->fence) {
      batch->fence = fd_fence_create(batch);
   }

   /* In some sequence of events, we can end up with a last_fence that is
    * not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
    * errors.
    */
   if ((flags & PIPE_FLUSH_FENCE_FD) && ctx->last_fence &&
       !fd_fence_is_fd(ctx->last_fence))
      fd_fence_ref(&ctx->last_fence, NULL);

   /* if no rendering since last flush, i.e. app just decided it needed
    * a fence, re-use the last one:
    */
   if (ctx->last_fence) {
      fd_fence_ref(&fence, ctx->last_fence);
      fd_bc_dump(ctx, "%p: reuse last_fence, remaining:\n", ctx);
      goto out;
   }

   /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
   fd_fence_ref(&fence, batch->fence);

   if (flags & PIPE_FLUSH_FENCE_FD)
      fence->submit_fence.use_fence_fd = true;

   fd_bc_dump(ctx, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
              batch, batch->seqno, flags);

   /* If we get here, we need to flush for a fence, even if there is
    * no rendering yet:
    */
   batch->needs_flush = true;

   if (!ctx->screen->reorder) {
      fd_batch_flush(batch);
   } else {
      fd_bc_flush(ctx, flags & PIPE_FLUSH_DEFERRED);
   }

   fd_bc_dump(ctx, "%p: remaining:\n", ctx);

out:
   if (fencep)
      fd_fence_ref(fencep, fence);

   fd_fence_ref(&ctx->last_fence, fence);

   fd_fence_ref(&fence, NULL);

   fd_batch_reference(&batch, NULL);

   u_trace_context_process(&ctx->trace_context,
                           !!(flags & PIPE_FLUSH_END_OF_FRAME));
}

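/**
 * pipe_context::texture_barrier() hook: for framebuffer barriers, prefer
 * the generation-specific framebuffer_barrier() if the backend provides
 * one, otherwise fall back to a full flush (see comment below).
 */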
static void
fd_texture_barrier(struct pipe_context *pctx, unsigned flags) in_dt
{
   if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
      struct fd_context *ctx = fd_context(pctx);

      if (ctx->framebuffer_barrier) {
         ctx->framebuffer_barrier(ctx);
         return;
      }
   }

   /* On devices that could sample from GMEM we could possibly do better.
    * Or if we knew that we were doing GMEM bypass we could just emit a
    * cache flush, perhaps? But we don't know if future draws would cause
    * us to use GMEM, and a flush in bypass isn't the end of the world.
    */
   fd_context_flush(pctx, NULL, 0);
}

static void
fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   fd_context_flush(pctx, NULL, 0);

   /* TODO do we need to check for persistently mapped buffers and
    * fd_bo_cpu_prep()??
    */
}

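/**
 * Copy the marker string payload into the ring a dword at a time,
 * zero-padding the final partial dword. Shared by the pre-a5xx and
 * a5xx+ variants below.
 */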
static void
emit_string_tail(struct fd_ringbuffer *ring, const char *string, int len)
{
   const uint32_t *buf = (const void *)string;

   while (len >= 4) {
      OUT_RING(ring, *buf);
      buf++;
      len -= 4;
   }

   /* copy remainder bytes without reading past end of input string: */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      OUT_RING(ring, w);
   }
}

/* for prior to a5xx: */
void
fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len)
{
   /* max packet size is 0x3fff+1 dwords: */
   len = MIN2(len, 0x4000 * 4);

   OUT_PKT3(ring, CP_NOP, align(len, 4) / 4);
   emit_string_tail(ring, string, len);
}

/* for a5xx+ */
void
fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len)
{
   /* max packet size is 0x3fff dwords: */
   len = MIN2(len, 0x3fff * 4);

   OUT_PKT7(ring, CP_NOP, align(len, 4) / 4);
   emit_string_tail(ring, string, len);
}

/**
 * emit marker string as payload of a no-op packet, which can be
 * decoded by cffdump.
 */
static void
fd_emit_string_marker(struct pipe_context *pctx, const char *string,
                      int len) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   DBG("%.*s", len, string);

   if (!ctx->batch)
      return;

   struct fd_batch *batch = fd_context_batch_locked(ctx);

   fd_batch_needs_flush(batch);

   if (ctx->screen->gen >= 5) {
      fd_emit_string5(batch->draw, string, len);
   } else {
      fd_emit_string(batch->draw, string, len);
   }

   fd_batch_unlock_submit(batch);
   fd_batch_reference(&batch, NULL);
}

/**
 * If we have a pending fence_server_sync() (GPU side sync), flush now.
 * The alternative of trying to track this with batch dependencies gets
 * hairy quickly.
 *
 * Call this before switching to a different batch, to handle this case.
 */
void
fd_context_switch_from(struct fd_context *ctx)
{
   if (ctx->batch && (ctx->batch->in_fence_fd != -1))
      fd_batch_flush(ctx->batch);
}

/**
 * If there is a pending fence-fd that we need to sync on, this will
 * transfer the reference to the next batch we are going to render
 * to.
 */
void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
   if (ctx->in_fence_fd != -1) {
      sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
      close(ctx->in_fence_fd);
      ctx->in_fence_fd = -1;
   }
}

/**
 * Return a reference to the current batch, caller must unref.
 */
struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
   struct fd_batch *batch = NULL;

   tc_assert_driver_thread(ctx->tc);

   fd_batch_reference(&batch, ctx->batch);

   if (unlikely(!batch)) {
      batch = fd_batch_from_fb(ctx, &ctx->framebuffer);
      util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
      fd_batch_reference(&ctx->batch, batch);
      fd_context_all_dirty(ctx);
   }
   fd_context_switch_to(ctx, batch);

   return batch;
}

/**
 * Return a locked reference to the current batch. A batch with emit
 * lock held is protected against flushing while the lock is held.
 * The emit-lock should be acquired before screen-lock. The emit-lock
 * should be held while emitting cmdstream.
 */
struct fd_batch *
fd_context_batch_locked(struct fd_context *ctx)
{
   struct fd_batch *batch = NULL;

   while (!batch) {
      batch = fd_context_batch(ctx);
      if (!fd_batch_lock_submit(batch)) {
         fd_batch_reference(&batch, NULL);
      }
   }

   return batch;
}

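/*
 * Typical usage of the locked variant (see fd_emit_string_marker() above
 * for an in-file example):
 *
 *    struct fd_batch *batch = fd_context_batch_locked(ctx);
 *    fd_batch_needs_flush(batch);
 *    ... emit cmdstream to batch->draw ...
 *    fd_batch_unlock_submit(batch);
 *    fd_batch_reference(&batch, NULL);
 */
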
void
fd_context_destroy(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   unsigned i;

   DBG("");

   fd_screen_lock(ctx->screen);
   list_del(&ctx->node);
   fd_screen_unlock(ctx->screen);

   fd_fence_ref(&ctx->last_fence, NULL);

   if (ctx->in_fence_fd != -1)
      close(ctx->in_fence_fd);

   for (i = 0; i < ARRAY_SIZE(ctx->pvtmem); i++) {
      if (ctx->pvtmem[i].bo)
         fd_bo_del(ctx->pvtmem[i].bo);
   }

   util_copy_framebuffer_state(&ctx->framebuffer, NULL);
   fd_batch_reference(&ctx->batch, NULL); /* unref current batch */

   /* Make sure nothing in the batch cache references our context any more. */
   fd_bc_flush(ctx, false);

   fd_prog_fini(pctx);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   for (i = 0; i < ARRAY_SIZE(ctx->clear_rs_state); i++)
      if (ctx->clear_rs_state[i])
         pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state[i]);

   slab_destroy_child(&ctx->transfer_pool);
   slab_destroy_child(&ctx->transfer_pool_unsync);

   for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe_bo); i++) {
      if (!ctx->vsc_pipe_bo[i])
         break;
      fd_bo_del(ctx->vsc_pipe_bo[i]);
   }

   fd_device_del(ctx->dev);
   fd_pipe_purge(ctx->pipe);
   fd_pipe_del(ctx->pipe);

   simple_mtx_destroy(&ctx->gmem_lock);

   u_trace_context_fini(&ctx->trace_context);

   fd_autotune_fini(&ctx->autotune);

   ir3_cache_destroy(ctx->shader_cache);

   if (FD_DBG(BSTAT) || FD_DBG(MSGS)) {
      mesa_logi(
         "batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, "
         "batch_restore=%u\n",
         (uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem,
         (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
         (uint32_t)ctx->stats.batch_restore);
   }
}

static void
fd_set_debug_callback(struct pipe_context *pctx,
                      const struct util_debug_callback *cb)
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_screen *screen = ctx->screen;

   util_queue_finish(&screen->compile_queue);

   if (cb)
      ctx->debug = *cb;
   else
      memset(&ctx->debug, 0, sizeof(ctx->debug));
}

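/**
 * Query the per-context or global GPU fault count from the kernel. The
 * values sampled at context creation are compared against fresh counts
 * in fd_get_device_reset_status() to classify a reset as guilty vs.
 * innocent.
 */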
static uint32_t
fd_get_reset_count(struct fd_context *ctx, bool per_context)
{
   uint64_t val;
   enum fd_param_id param = per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
   int ret = fd_pipe_get_param(ctx->pipe, param, &val);
   assert(!ret);
   return val;
}

static enum pipe_reset_status
fd_get_device_reset_status(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   int context_faults = fd_get_reset_count(ctx, true);
   int global_faults = fd_get_reset_count(ctx, false);
   enum pipe_reset_status status;

   if (context_faults != ctx->context_reset_count) {
      status = PIPE_GUILTY_CONTEXT_RESET;
   } else if (global_faults != ctx->global_reset_count) {
      status = PIPE_INNOCENT_CONTEXT_RESET;
   } else {
      status = PIPE_NO_RESET;
   }

   ctx->context_reset_count = context_faults;
   ctx->global_reset_count = global_faults;

   return status;
}

static void
fd_trace_record_ts(struct u_trace *ut, void *cs, void *timestamps,
                   unsigned idx, bool end_of_pipe)
{
   struct fd_batch *batch = container_of(ut, struct fd_batch, trace);
   struct fd_ringbuffer *ring = cs;
   struct pipe_resource *buffer = timestamps;

   if (ring->cur == batch->last_timestamp_cmd) {
      uint64_t *ts = fd_bo_map(fd_resource(buffer)->bo);
      ts[idx] = U_TRACE_NO_TIMESTAMP;
      return;
   }

   unsigned ts_offset = idx * sizeof(uint64_t);
   batch->ctx->record_timestamp(ring, fd_resource(buffer)->bo, ts_offset);
   batch->last_timestamp_cmd = ring->cur;
}

static uint64_t
fd_trace_read_ts(struct u_trace_context *utctx,
                 void *timestamps, unsigned idx, void *flush_data)
{
   struct fd_context *ctx =
      container_of(utctx, struct fd_context, trace_context);
   struct pipe_resource *buffer = timestamps;
   struct fd_bo *ts_bo = fd_resource(buffer)->bo;

   /* Only need to stall on results for the first entry: */
   if (idx == 0) {
      /* Avoid triggering deferred submits from flushing, since that
       * changes the behavior of what we are trying to measure:
       */
      while (fd_bo_cpu_prep(ts_bo, ctx->pipe, FD_BO_PREP_NOSYNC))
         usleep(10000);
      int ret = fd_bo_cpu_prep(ts_bo, ctx->pipe, FD_BO_PREP_READ);
      if (ret)
         return U_TRACE_NO_TIMESTAMP;
   }

   uint64_t *ts = fd_bo_map(ts_bo);

   /* Don't translate the no-timestamp marker: */
   if (ts[idx] == U_TRACE_NO_TIMESTAMP)
      return U_TRACE_NO_TIMESTAMP;

   return ctx->ts_to_ns(ts[idx]);
}

static void
fd_trace_delete_flush_data(struct u_trace_context *utctx, void *flush_data)
{
   /* We don't use flush_data at the moment. */
}

/* TODO we could combine a few of these small buffers (solid_vbuf,
 * blit_texcoord_vbuf, and vsc_size_mem) into a single buffer and
 * save a tiny bit of memory
 */

static struct pipe_resource *
create_solid_vertexbuf(struct pipe_context *pctx)
{
   static const float init_shader_const[] = {
      -1.000000f, +1.000000f, +1.000000f, +1.000000f, -1.000000f, +1.000000f,
   };
   struct pipe_resource *prsc =
      pipe_buffer_create(pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                         sizeof(init_shader_const));
   pipe_buffer_write(pctx, prsc, 0, sizeof(init_shader_const),
                     init_shader_const);
   return prsc;
}

static struct pipe_resource *
create_blit_texcoord_vertexbuf(struct pipe_context *pctx)
{
   struct pipe_resource *prsc = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_DYNAMIC, 16);
   return prsc;
}

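/**
 * Create the small shared vertex buffers and vertex-element state
 * (solid_vbuf_state / blit_vbuf_state) used by the driver-internal
 * solid-fill and blit paths.
 */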
void
fd_context_setup_common_vbos(struct fd_context *ctx)
{
   struct pipe_context *pctx = &ctx->base;

   ctx->solid_vbuf = create_solid_vertexbuf(pctx);
   ctx->blit_texcoord_vbuf = create_blit_texcoord_vertexbuf(pctx);

   /* setup solid_vbuf_state: */
   ctx->solid_vbuf_state.vtx = pctx->create_vertex_elements_state(
      pctx, 1,
      (struct pipe_vertex_element[]){{
         .vertex_buffer_index = 0,
         .src_offset = 0,
         .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
      }});
   ctx->solid_vbuf_state.vertexbuf.count = 1;
   ctx->solid_vbuf_state.vertexbuf.vb[0].stride = 12;
   ctx->solid_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->solid_vbuf;

   /* setup blit_vbuf_state: */
   ctx->blit_vbuf_state.vtx = pctx->create_vertex_elements_state(
      pctx, 2,
      (struct pipe_vertex_element[]){
         {
            .vertex_buffer_index = 0,
            .src_offset = 0,
            .src_format = PIPE_FORMAT_R32G32_FLOAT,
         },
         {
            .vertex_buffer_index = 1,
            .src_offset = 0,
            .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
         }});
   ctx->blit_vbuf_state.vertexbuf.count = 2;
   ctx->blit_vbuf_state.vertexbuf.vb[0].stride = 8;
   ctx->blit_vbuf_state.vertexbuf.vb[0].buffer.resource =
      ctx->blit_texcoord_vbuf;
   ctx->blit_vbuf_state.vertexbuf.vb[1].stride = 12;
   ctx->blit_vbuf_state.vertexbuf.vb[1].buffer.resource = ctx->solid_vbuf;
}

void
fd_context_cleanup_common_vbos(struct fd_context *ctx)
{
   struct pipe_context *pctx = &ctx->base;

   pctx->delete_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
   pctx->delete_vertex_elements_state(pctx, ctx->blit_vbuf_state.vtx);

   pipe_resource_reference(&ctx->solid_vbuf, NULL);
   pipe_resource_reference(&ctx->blit_texcoord_vbuf, NULL);
}

struct pipe_context *
fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
                void *priv, unsigned flags)
   disable_thread_safety_analysis
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct pipe_context *pctx;
   unsigned prio = 1;

   /* lower numerical value == higher priority: */
   if (FD_DBG(HIPRIO))
      prio = 0;
   else if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      prio = 0;
   else if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      prio = 2;

   /* Some of the stats will get printed out at context destroy, so
    * make sure they are collected:
    */
   if (FD_DBG(BSTAT) || FD_DBG(MSGS))
      ctx->stats_users++;

   ctx->flags = flags;
   ctx->screen = screen;
   ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);

   ctx->in_fence_fd = -1;

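   /* Sample the initial fault counts, so that fd_get_device_reset_status()
    * only reports faults that happened after this context was created:
    */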
   if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
      ctx->context_reset_count = fd_get_reset_count(ctx, true);
      ctx->global_reset_count = fd_get_reset_count(ctx, false);
   }

   simple_mtx_init(&ctx->gmem_lock, mtx_plain);

   /* need some sane default in case gallium frontends don't
    * set some state:
    */
   ctx->sample_mask = 0xffff;
   ctx->active_queries = true;

   pctx = &ctx->base;
   pctx->screen = pscreen;
   pctx->priv = priv;
   pctx->flush = fd_context_flush;
   pctx->emit_string_marker = fd_emit_string_marker;
   pctx->set_debug_callback = fd_set_debug_callback;
   pctx->get_device_reset_status = fd_get_device_reset_status;
   pctx->create_fence_fd = fd_create_fence_fd;
   pctx->fence_server_sync = fd_fence_server_sync;
   pctx->fence_server_signal = fd_fence_server_signal;
   pctx->texture_barrier = fd_texture_barrier;
   pctx->memory_barrier = fd_memory_barrier;

   pctx->stream_uploader = u_upload_create_default(pctx);
   if (!pctx->stream_uploader)
      goto fail;
   pctx->const_uploader = pctx->stream_uploader;

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ctx->transfer_pool_unsync, &screen->transfer_pool);

   fd_draw_init(pctx);
   fd_resource_context_init(pctx);
   fd_query_context_init(pctx);
   fd_texture_init(pctx);
   fd_state_init(pctx);

   ctx->blitter = util_blitter_create(pctx);
   if (!ctx->blitter)
      goto fail;

   list_inithead(&ctx->hw_active_queries);
   list_inithead(&ctx->acc_active_queries);

   fd_screen_lock(ctx->screen);
   ctx->seqno = ++screen->ctx_seqno;
   list_add(&ctx->node, &ctx->screen->context_list);
   fd_screen_unlock(ctx->screen);

   ctx->current_scissor = &ctx->disabled_scissor;

   fd_gpu_tracepoint_config_variable();
   u_trace_pipe_context_init(&ctx->trace_context, pctx,
                             fd_trace_record_ts,
                             fd_trace_read_ts,
                             fd_trace_delete_flush_data);

   fd_autotune_init(&ctx->autotune, screen->dev);

   return pctx;

fail:
   pctx->destroy(pctx);
   return NULL;
}

struct pipe_context *
fd_context_init_tc(struct pipe_context *pctx, unsigned flags)
{
   struct fd_context *ctx = fd_context(pctx);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return pctx;

   /* Clover (compute-only) is unsupported. */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return pctx;

   struct pipe_context *tc = threaded_context_create(
      pctx, &ctx->screen->transfer_pool,
      fd_replace_buffer_storage,
      &(struct threaded_context_options){
         .create_fence = fd_fence_create_unflushed,
         .is_resource_busy = fd_resource_busy,
         .unsynchronized_get_device_reset_status = true,
      },
      &ctx->tc);

   if (tc && tc != pctx)
      threaded_context_init_bytes_mapped_limit((struct threaded_context *)tc, 16);

   return tc;
}