1/* 2 * Copyright © 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "vk_format.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

/* Map the currently bound depth attachment's Vulkan format to the
 * hardware DepthBufferSurfaceFormat enum used by 3DSTATE_SF.
 *
 * Falls back to D16_UNORM for any other format, including
 * VK_FORMAT_UNDEFINED (no depth attachment bound) — the value is only
 * meaningful when a depth buffer is actually in use.
 */
static uint32_t
get_depth_format(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;

   switch (gfx->depth_att.vk_format) {
   case VK_FORMAT_D16_UNORM:
   case VK_FORMAT_D16_UNORM_S8_UINT:
      return D16_UNORM;

   case VK_FORMAT_X8_D24_UNORM_PACK32:
   case VK_FORMAT_D24_UNORM_S8_UINT:
      return D24_UNORM_X8_UINT;

   case VK_FORMAT_D32_SFLOAT:
   case VK_FORMAT_D32_SFLOAT_S8_UINT:
      return D32_FLOAT;

   default:
      return D16_UNORM;
   }
}

/* Emit all gfx7-era dynamic graphics state into the command buffer's batch.
 *
 * Each section below is gated on the relevant dirty bits — either the
 * driver-level gfx dirty flags (cmd_buffer->state.gfx.dirty) or the
 * per-state Vulkan dynamic-state dirty bitset (dyn->dirty) — so that only
 * state that actually changed since the last draw is re-emitted.  All
 * dirty tracking is cleared at the end.
 */
void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   const struct vk_dynamic_graphics_state *dyn =
      &cmd_buffer->vk.dynamic_graphics_state;

   /* 3DSTATE_SF: on gfx7 this packet carries line width, cull/winding,
    * depth bias and multisample rasterization mode, so it depends on the
    * pipeline, the render targets (depth format) and several dynamic
    * rasterization states.  The dynamic fields are packed here and merged
    * (bitwise OR) with the pipeline's baked 3DSTATE_SF dwords.
    */
   if ((cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                       ANV_CMD_DIRTY_RENDER_TARGETS)) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_CULL_MODE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_FRONT_FACE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_LINE_WIDTH)) {
      /* Take dynamic primitive topology in to account with
       * 3DSTATE_SF::MultisampleRasterizationMode
       */
      VkPolygonMode dynamic_raster_mode =
         genX(raster_polygon_mode)(cmd_buffer->state.gfx.pipeline,
                                   dyn->ia.primitive_topology);
      uint32_t ms_rast_mode =
         genX(ms_rasterization_mode)(pipeline, dynamic_raster_mode);

      bool aa_enable = anv_rasterization_aa_mode(dynamic_raster_mode,
                                                 pipeline->line_mode);

      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
      struct GENX(3DSTATE_SF) sf = {
         GENX(3DSTATE_SF_header),
         .DepthBufferSurfaceFormat = get_depth_format(cmd_buffer),
         .LineWidth = dyn->rs.line.width,
         .AntialiasingEnable = aa_enable,
         .CullMode = genX(vk_to_intel_cullmode)[dyn->rs.cull_mode],
         .FrontWinding = genX(vk_to_intel_front_face)[dyn->rs.front_face],
         .MultisampleRasterizationMode = ms_rast_mode,
         /* Depth bias enable is replicated across all three fill modes so
          * the single Vulkan depthBiasEnable flag applies regardless of
          * polygon mode.
          */
         .GlobalDepthOffsetEnableSolid = dyn->rs.depth_bias.enable,
         .GlobalDepthOffsetEnableWireframe = dyn->rs.depth_bias.enable,
         .GlobalDepthOffsetEnablePoint = dyn->rs.depth_bias.enable,
         .GlobalDepthOffsetConstant = dyn->rs.depth_bias.constant,
         .GlobalDepthOffsetScale = dyn->rs.depth_bias.slope,
         .GlobalDepthOffsetClamp = dyn->rs.depth_bias.clamp,
      };
      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gfx7.sf);
   }

   /* COLOR_CALC_STATE: holds blend constants and the front/back stencil
    * reference values.  Allocated from dynamic state and pointed to via
    * 3DSTATE_CC_STATE_POINTERS.
    */
   if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = dyn->cb.blend_constants[0],
         .BlendConstantColorGreen = dyn->cb.blend_constants[1],
         .BlendConstantColorBlue = dyn->cb.blend_constants[2],
         .BlendConstantColorAlpha = dyn->cb.blend_constants[3],
         /* Hardware stencil reference fields are 8 bits wide. */
         .StencilReferenceValue = dyn->ds.stencil.front.reference & 0xff,
         .BackfaceStencilReferenceValue = dyn->ds.stencil.back.reference & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
         ccp.ColorCalcStatePointer = cc_state.offset;
      }
   }

   /* 3DSTATE_LINE_STIPPLE: pattern plus repeat count and its precomputed
    * reciprocal.  The MAX2 guards against a zero stipple factor, which
    * would otherwise divide by zero.
    */
   if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_LINE_STIPPLE)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_LINE_STIPPLE), ls) {
         ls.LineStipplePattern = dyn->rs.line.stipple.pattern;
         ls.LineStippleInverseRepeatCount =
            1.0f / MAX2(1, dyn->rs.line.stipple.factor);
         ls.LineStippleRepeatCount = dyn->rs.line.stipple.factor;
      }
   }

   /* DEPTH_STENCIL_STATE: depth/stencil test state is first reduced by
    * vk_optimize_depth_stencil_state() according to which aspects are
    * actually attached, then packed into an indirect state object and
    * pointed to via 3DSTATE_DEPTH_STENCIL_STATE_POINTERS.
    */
   if ((cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                       ANV_CMD_DIRTY_RENDER_TARGETS)) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_OP) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];

      VkImageAspectFlags ds_aspects = 0;
      if (cmd_buffer->state.gfx.depth_att.vk_format != VK_FORMAT_UNDEFINED)
         ds_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (cmd_buffer->state.gfx.stencil_att.vk_format != VK_FORMAT_UNDEFINED)
         ds_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;

      struct vk_depth_stencil_state opt_ds = dyn->ds;
      vk_optimize_depth_stencil_state(&opt_ds, ds_aspects, true);

      struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
         .DoubleSidedStencilEnable = true,

         /* Mask fields are 8 bits wide in hardware. */
         .StencilTestMask = opt_ds.stencil.front.compare_mask & 0xff,
         .StencilWriteMask = opt_ds.stencil.front.write_mask & 0xff,

         .BackfaceStencilTestMask = opt_ds.stencil.back.compare_mask & 0xff,
         .BackfaceStencilWriteMask = opt_ds.stencil.back.write_mask & 0xff,

         .DepthTestEnable = opt_ds.depth.test_enable,
         .DepthBufferWriteEnable = opt_ds.depth.write_enable,
         .DepthTestFunction = genX(vk_to_intel_compare_op)[opt_ds.depth.compare_op],
         .StencilTestEnable = opt_ds.stencil.test_enable,
         .StencilBufferWriteEnable = opt_ds.stencil.write_enable,
         .StencilFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.fail],
         .StencilPassDepthPassOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.pass],
         .StencilPassDepthFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.depth_fail],
         .StencilTestFunction = genX(vk_to_intel_compare_op)[opt_ds.stencil.front.op.compare],
         .BackfaceStencilFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.fail],
         .BackfaceStencilPassDepthPassOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.pass],
         .BackfaceStencilPassDepthFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.depth_fail],
         .BackfaceStencilTestFunction = genX(vk_to_intel_compare_op)[opt_ds.stencil.back.op.compare],
      };
      GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_emit_dynamic(cmd_buffer, depth_stencil_dw,
                                     sizeof(depth_stencil_dw), 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
         dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
      }
   }

   /* 3DSTATE_INDEX_BUFFER (and, on Haswell, 3DSTATE_VF): only emitted
    * when an index buffer is bound.  Haswell moved the primitive-restart
    * cut index out of 3DSTATE_INDEX_BUFFER into the separate 3DSTATE_VF
    * packet, hence the GFX_VERx10 == 75 split below.
    */
   if (cmd_buffer->state.gfx.index_buffer &&
       ((cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                        ANV_CMD_DIRTY_INDEX_BUFFER)) ||
        BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE))) {
      struct anv_buffer *buffer = cmd_buffer->state.gfx.index_buffer;
      uint32_t offset = cmd_buffer->state.gfx.index_offset;

#if GFX_VERx10 == 75
      anv_batch_emit(&cmd_buffer->batch, GFX75_3DSTATE_VF, vf) {
         vf.IndexedDrawCutIndexEnable = dyn->ia.primitive_restart_enable;
         vf.CutIndex = cmd_buffer->state.gfx.restart_index;
      }
#endif

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if GFX_VERx10 != 75
         ib.CutIndexEnable = dyn->ia.primitive_restart_enable;
#endif
         ib.IndexFormat = cmd_buffer->state.gfx.index_type;
         ib.MOCS = anv_mocs(cmd_buffer->device,
                            buffer->address.bo,
                            ISL_SURF_USAGE_INDEX_BUFFER_BIT);

         ib.BufferStartingAddress = anv_address_add(buffer->address, offset);
         ib.BufferEndingAddress = anv_address_add(buffer->address,
                                                  buffer->vk.size);
      }
   }

   /* 3DSTATE_WM in the hope we can avoid spawning fragment shaders
    * threads or if we have dirty dynamic primitive topology state and
    * need to toggle 3DSTATE_WM::MultisampleRasterizationMode dynamically.
    */
   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES)) {
      VkPolygonMode dynamic_raster_mode =
         genX(raster_polygon_mode)(cmd_buffer->state.gfx.pipeline,
                                   dyn->ia.primitive_topology);

      uint32_t dwords[GENX(3DSTATE_WM_length)];
      struct GENX(3DSTATE_WM) wm = {
         GENX(3DSTATE_WM_header),

         /* Skip fragment thread dispatch entirely when there is no
          * fragment stage, unless the pipeline forces it or some color
          * write is still enabled.
          */
         .ThreadDispatchEnable = anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT) &&
                                 (pipeline->force_fragment_thread_dispatch ||
                                  !anv_cmd_buffer_all_color_write_masked(cmd_buffer)),
         .MultisampleRasterizationMode =
            genX(ms_rasterization_mode)(pipeline,
                                        dynamic_raster_mode),
      };
      GENX(3DSTATE_WM_pack)(NULL, dwords, &wm);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx7.wm);
   }

   /* 3DSTATE_MULTISAMPLE: custom sample locations are only passed through
    * when their per-pixel count matches the current sample count;
    * otherwise the default positions are used (NULL).
    */
   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_RENDER_TARGETS) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS)) {
      const uint32_t samples = MAX2(1, cmd_buffer->state.gfx.samples);
      const struct vk_sample_locations_state *sl = dyn->ms.sample_locations;
      genX(emit_multisample)(&cmd_buffer->batch, samples,
                             sl->per_pixel == samples ? sl : NULL);
   }

   /* BLEND_STATE: per-RT write masks combine the pipeline's static
    * component write mask with the dynamic color-write-enable bits.  The
    * dynamic entries are packed here and merged with the pipeline's baked
    * blend state dwords via anv_cmd_buffer_merge_dynamic().
    */
   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_LOGIC_OP) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES)) {
      const uint8_t color_writes = dyn->cb.color_write_enables;

      /* Blend states of each RT */
      uint32_t blend_dws[GENX(BLEND_STATE_length) +
                         MAX_RTS * GENX(BLEND_STATE_ENTRY_length)];
      uint32_t *dws = blend_dws;
      memset(blend_dws, 0, sizeof(blend_dws));

      /* Skip this part */
      dws += GENX(BLEND_STATE_length);

      for (uint32_t i = 0; i < MAX_RTS; i++) {
         /* Disable anything above the current number of color attachments. */
         bool write_disabled = i >= cmd_buffer->state.gfx.color_att_count ||
                               (color_writes & BITFIELD_BIT(i)) == 0;
         struct GENX(BLEND_STATE_ENTRY) entry = {
            .WriteDisableAlpha = write_disabled ||
                                 (pipeline->color_comp_writes[i] &
                                  VK_COLOR_COMPONENT_A_BIT) == 0,
            .WriteDisableRed = write_disabled ||
                               (pipeline->color_comp_writes[i] &
                                VK_COLOR_COMPONENT_R_BIT) == 0,
            .WriteDisableGreen = write_disabled ||
                                 (pipeline->color_comp_writes[i] &
                                  VK_COLOR_COMPONENT_G_BIT) == 0,
            .WriteDisableBlue = write_disabled ||
                                (pipeline->color_comp_writes[i] &
                                 VK_COLOR_COMPONENT_B_BIT) == 0,
            .LogicOpFunction = genX(vk_to_intel_logic_op)[dyn->cb.logic_op],
         };
         GENX(BLEND_STATE_ENTRY_pack)(NULL, dws, &entry);
         dws += GENX(BLEND_STATE_ENTRY_length);
      }

      uint32_t num_dwords = GENX(BLEND_STATE_length) +
                            GENX(BLEND_STATE_ENTRY_length) * MAX_RTS;

      struct anv_state blend_states =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, blend_dws,
                                      pipeline->gfx7.blend_state, num_dwords, 64);
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
         bsp.BlendStatePointer = blend_states.offset;
      }
   }

   /* When we're done, there is no more dirty gfx state. */
   vk_dynamic_graphics_state_clear_dirty(&cmd_buffer->vk.dynamic_graphics_state);
   cmd_buffer->state.gfx.dirty = 0;
}

/* Gfx8+ implement a depth-cache PMA stall workaround; this stub exists so
 * gen-independent code can call it unconditionally.
 */
void
genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer,
                                bool enable)
{
   /* The NP PMA fix doesn't exist on gfx7 */
}