/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_blorp.c
 *
 * ============================= GENXML CODE =============================
 *             [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See iris_blit.c, iris_clear.c, and so on.
 */

#include <assert.h>

#include "iris_batch.h"
#include "iris_resource.h"
#include "iris_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/intel_l3_config.h"

#include "blorp/blorp_genX_exec.h"

static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct iris_bo **out_bo)
{
   struct pipe_resource *res = NULL;
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);

   struct iris_bo *bo = iris_resource_bo(res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->address + *out_offset, size);

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->address (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = bo;
   else
      *out_offset += iris_bo_offset_from_base_address(bo);

   pipe_resource_reference(&res, NULL);

   return ptr;
}

static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return iris_get_command_space(batch, n * sizeof(uint32_t));
}

static uint64_t
combine_and_pin_address(struct blorp_batch *blorp_batch,
                        struct blorp_address addr)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo = addr.buffer;

   iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE,
                      IRIS_DOMAIN_NONE);

   /* Assume this is a general address, not relative to a base. */
   return bo->address + addr.offset;
}
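
/* iris softpins all of its BOs, so a "relocation" here is just the addition
 * above: e.g. (hypothetical numbers) a blorp_address with .offset = 0x100
 * into a BO pinned at 0x0000800000000000 resolves to 0x0000800000000100,
 * and no relocation entry is ever emitted.
 */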

static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   return combine_and_pin_address(blorp_batch, addr) + delta;
}

static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   /* Let blorp_get_surface_address do the pinning. */
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   return combine_and_pin_address(blorp_batch, addr);
}

UNUSED static struct blorp_address
blorp_get_surface_base_address(UNUSED struct blorp_batch *blorp_batch)
{
   return (struct blorp_address) { .offset = IRIS_MEMZONE_BINDER_START };
}

static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, ice->state.dynamic_uploader,
                       size, alignment, offset, NULL);
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   /* Use dynamic state range for general state on iris. */
   return blorp_alloc_dynamic_state(blorp_batch, size, alignment, offset);
}

static void
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *out_bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_batch *batch = blorp_batch->driver_batch;

   unsigned bt_offset =
      iris_binder_reserve(ice, num_entries * sizeof(uint32_t));
   uint32_t *bt_map = binder->map + bt_offset;

   uint32_t surf_base_offset = GFX_VER < 11 ? binder->bo->address : 0;

   *out_bt_offset = bt_offset;

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
                                     state_size, state_alignment,
                                     &surface_offsets[i], NULL);
      bt_map[i] = surface_offsets[i] - surf_base_offset;
   }

   iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);

   batch->screen->vtbl.update_binder_address(batch, binder);
}

static uint32_t
blorp_binding_table_offset_to_pointer(struct blorp_batch *batch,
                                      uint32_t offset)
{
   /* See IRIS_BT_OFFSET_SHIFT in iris_state.c */
   return offset >> ((GFX_VER >= 11 && GFX_VERx10 < 125) ? 3 : 0);
}
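
/* Worked example (hypothetical offset): on Gfx11 through Gfx12.0, a binding
 * table at byte offset 0x180 within the binder becomes the hardware pointer
 * 0x180 >> 3 = 0x30; on other generations the byte offset is used as-is.
 */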

static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, ice->ctx.const_uploader, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .mocs = iris_mocs(bo, &batch->screen->isl_dev,
                        ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
      .local_hint = iris_bo_likely_local(bo),
   };

   return map;
}

/**
 * See iris_upload_render_state's IRIS_DIRTY_VERTEX_BUFFERS handling for
 * a comment about why these VF invalidations are needed.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           unsigned num_vbs)
{
#if GFX_VER < 11
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct iris_bo *bo = addrs[i].buffer;
      uint16_t high_bits = bo->address >> 32u;

      if (high_bits != ice->state.last_vbo_high_bits[i]) {
         need_invalidate = true;
         ice->state.last_vbo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: VF cache 32-bit key [blorp]",
                                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CS_STALL);
   }
#endif
}

static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) {
      .buffer = batch->screen->workaround_address.bo,
      .offset = batch->screen->workaround_address.offset,
      .local_hint =
         iris_bo_likely_local(batch->screen->workaround_address.bo),
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return batch->screen->l3_config_3d;
}
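
/* iris_blorp_exec_render() runs a BLORP operation on the 3D engine.  In
 * addition to calling blorp_exec(), it wraps the operation in the cache
 * flushes and hardware workarounds the render pipeline requires, and
 * afterwards re-flags the driver state BLORP clobbered so it is re-emitted
 * on the next regular draw.
 */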
296 */ 297 iris_emit_pipe_control_flush(batch, 298 "workaround: RT BTI change [blorp]", 299 PIPE_CONTROL_RENDER_TARGET_FLUSH | 300 PIPE_CONTROL_STALL_AT_SCOREBOARD); 301#endif 302 303 if (params->depth.enabled && 304 !(blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)) 305 genX(emit_depth_state_workarounds)(ice, batch, ¶ms->depth.surf); 306 307 /* Flush the render cache in cases where the same surface is used with 308 * different aux modes, which can lead to GPU hangs. Invalidation of 309 * sampler caches and flushing of any caches which had previously written 310 * the source surfaces should already have been handled by the caller. 311 */ 312 if (params->dst.enabled) { 313 iris_cache_flush_for_render(batch, params->dst.addr.buffer, 314 params->dst.aux_usage); 315 } 316 317 iris_require_command_space(batch, 1400); 318 319#if GFX_VER == 8 320 genX(update_pma_fix)(ice, batch, false); 321#endif 322 323 const unsigned scale = params->fast_clear_op ? UINT_MAX : 1; 324 if (ice->state.current_hash_scale != scale) { 325 genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0, 326 params->y1 - params->y0, scale); 327 } 328 329#if GFX_VERx10 == 125 330 iris_use_pinned_bo(batch, iris_resource_bo(ice->state.pixel_hashing_tables), 331 false, IRIS_DOMAIN_NONE); 332#else 333 assert(!ice->state.pixel_hashing_tables); 334#endif 335 336#if GFX_VER >= 12 337 genX(invalidate_aux_map_state)(batch); 338#endif 339 340 iris_handle_always_flush_cache(batch); 341 342 blorp_exec(blorp_batch, params); 343 344 iris_handle_always_flush_cache(batch); 345 346 /* We've smashed all state compared to what the normal 3D pipeline 347 * rendering tracks for GL. 348 */ 349 350 uint64_t skip_bits = (IRIS_DIRTY_POLYGON_STIPPLE | 351 IRIS_DIRTY_SO_BUFFERS | 352 IRIS_DIRTY_SO_DECL_LIST | 353 IRIS_DIRTY_LINE_STIPPLE | 354 IRIS_ALL_DIRTY_FOR_COMPUTE | 355 IRIS_DIRTY_SCISSOR_RECT | 356 IRIS_DIRTY_VF); 357 /* Wa_14016820455 358 * On Gfx 12.5 platforms, the SF_CL_VIEWPORT pointer can be invalidated 359 * likely by a read cache invalidation when clipping is disabled, so we 360 * don't skip its dirty bit here, in order to reprogram it. 361 */ 362 if (GFX_VERx10 != 125) 363 skip_bits |= IRIS_DIRTY_SF_CL_VIEWPORT; 364 365 uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE | 366 IRIS_STAGE_DIRTY_UNCOMPILED_VS | 367 IRIS_STAGE_DIRTY_UNCOMPILED_TCS | 368 IRIS_STAGE_DIRTY_UNCOMPILED_TES | 369 IRIS_STAGE_DIRTY_UNCOMPILED_GS | 370 IRIS_STAGE_DIRTY_UNCOMPILED_FS | 371 IRIS_STAGE_DIRTY_SAMPLER_STATES_VS | 372 IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS | 373 IRIS_STAGE_DIRTY_SAMPLER_STATES_TES | 374 IRIS_STAGE_DIRTY_SAMPLER_STATES_GS); 375 376 if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) { 377 /* BLORP disabled tessellation, that's fine for the next draw */ 378 skip_stage_bits |= IRIS_STAGE_DIRTY_TCS | 379 IRIS_STAGE_DIRTY_TES | 380 IRIS_STAGE_DIRTY_CONSTANTS_TCS | 381 IRIS_STAGE_DIRTY_CONSTANTS_TES | 382 IRIS_STAGE_DIRTY_BINDINGS_TCS | 383 IRIS_STAGE_DIRTY_BINDINGS_TES; 384 } 385 386 if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) { 387 /* BLORP disabled geometry shaders, that's fine for the next draw */ 388 skip_stage_bits |= IRIS_STAGE_DIRTY_GS | 389 IRIS_STAGE_DIRTY_CONSTANTS_GS | 390 IRIS_STAGE_DIRTY_BINDINGS_GS; 391 } 392 393 /* we can skip flagging IRIS_DIRTY_DEPTH_BUFFER, if 394 * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set. 
395 */ 396 if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL) 397 skip_bits |= IRIS_DIRTY_DEPTH_BUFFER; 398 399 if (!params->wm_prog_data) 400 skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND; 401 402 ice->state.dirty |= ~skip_bits; 403 ice->state.stage_dirty |= ~skip_stage_bits; 404 405 for (int i = 0; i < ARRAY_SIZE(ice->shaders.urb.size); i++) 406 ice->shaders.urb.size[i] = 0; 407 408 if (params->src.enabled) 409 iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno, 410 IRIS_DOMAIN_SAMPLER_READ); 411 if (params->dst.enabled) 412 iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno, 413 IRIS_DOMAIN_RENDER_WRITE); 414 if (params->depth.enabled) 415 iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno, 416 IRIS_DOMAIN_DEPTH_WRITE); 417 if (params->stencil.enabled) 418 iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno, 419 IRIS_DOMAIN_DEPTH_WRITE); 420} 421 422static void 423iris_blorp_exec_blitter(struct blorp_batch *blorp_batch, 424 const struct blorp_params *params) 425{ 426 struct iris_batch *batch = blorp_batch->driver_batch; 427 428 /* Around the length of a XY_BLOCK_COPY_BLT and MI_FLUSH_DW */ 429 iris_require_command_space(batch, 108); 430 431 iris_handle_always_flush_cache(batch); 432 433 blorp_exec(blorp_batch, params); 434 435 iris_handle_always_flush_cache(batch); 436 437 if (params->src.enabled) { 438 iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno, 439 IRIS_DOMAIN_OTHER_READ); 440 } 441 442 iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno, 443 IRIS_DOMAIN_OTHER_WRITE); 444} 445 446static void 447iris_blorp_exec(struct blorp_batch *blorp_batch, 448 const struct blorp_params *params) 449{ 450 if (blorp_batch->flags & BLORP_BATCH_USE_BLITTER) 451 iris_blorp_exec_blitter(blorp_batch, params); 452 else 453 iris_blorp_exec_render(blorp_batch, params); 454} 455 456static void 457blorp_measure_start(struct blorp_batch *blorp_batch, 458 const struct blorp_params *params) 459{ 460 struct iris_context *ice = blorp_batch->blorp->driver_ctx; 461 struct iris_batch *batch = blorp_batch->driver_batch; 462 463 trace_intel_begin_blorp(&batch->trace); 464 465 if (batch->measure == NULL) 466 return; 467 468 iris_measure_snapshot(ice, batch, params->snapshot_type, NULL, NULL, NULL); 469} 470 471 472static void 473blorp_measure_end(struct blorp_batch *blorp_batch, 474 const struct blorp_params *params) 475{ 476 struct iris_batch *batch = blorp_batch->driver_batch; 477 478 trace_intel_end_blorp(&batch->trace, 479 params->x1 - params->x0, 480 params->y1 - params->y0, 481 params->hiz_op, 482 params->fast_clear_op, 483 params->shader_type, 484 params->shader_pipeline); 485} 486 487void 488genX(init_blorp)(struct iris_context *ice) 489{ 490 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen; 491 492 blorp_init(&ice->blorp, ice, &screen->isl_dev, NULL); 493 ice->blorp.compiler = screen->compiler; 494 ice->blorp.lookup_shader = iris_blorp_lookup_shader; 495 ice->blorp.upload_shader = iris_blorp_upload_shader; 496 ice->blorp.exec = iris_blorp_exec; 497} 498