/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"
#include "util/debug.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.
 *
 * When possible, Primitive Replication is used: the shader is modified to
 * make gl_Position an array and to fill it with a value for each view.
 *
 * Otherwise we implement multiview using instanced rendering.  The number of
 * instances in each draw call is multiplied by the number of views in the
 * subpass.  Then, in the shader, gl_InstanceId / view_count gives the
 * application-visible instance id, and gl_InstanceId % view_count gives a
 * compacted view index that is remapped to the actual ViewIndex using the
 * subpass view mask.
 */
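
/* Worked example (illustrative): with view_mask 0x5 (views 0 and 2),
 * view_count is 2, so a draw of 3 instances is issued as 6 instances.
 * For gl_InstanceId == 5, the lowered shader computes the application's
 * instance id as 5 / 2 == 2 and the compacted view index as 5 % 2 == 1,
 * which the remap table built below translates to the real ViewIndex 2.
 */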

struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   nir_ssa_def *instance_id;
   nir_ssa_def *view_index;
};

static nir_ssa_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_block(nir_start_block(b->impl));

      /* We use instancing for implementing multiview.  The actual instance
       * id is given by dividing instance_id by the number of views in this
       * subpass.
       */
      state->instance_id =
         nir_idiv(b, nir_load_instance_id(b),
                     nir_imm_int(b, util_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_ssa_def *
build_view_index(struct lower_multiview_state *state)
{
   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_block(nir_start_block(b->impl));

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 views */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview.  The compacted view
          * id is given by instance_id % view_count.  We then have to convert
          * that to an actual view id.
          */
         nir_ssa_def *compacted =
            nir_umod(b, nir_load_instance_id(b),
                        nir_imm_int(b, util_bitcount(state->view_mask)));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask, then compacted is what we want */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask.  The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
            uint64_t remap = 0;
            uint32_t i = 0;
            u_foreach_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }
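
            /* Illustrative example: view_mask 0xb (views 0, 1 and 3) gives
             * remap == 0x310.  A compacted index of 2 then selects nibble 2:
             * (0x310 >> 8) & 0xf == 3, the actual view index.  With more
             * than 8 views set, remap no longer fits in 32 bits, hence the
             * split shift below.
             */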
            nir_ssa_def *shift = nir_imul(b, compacted, nir_imm_int(b, 4));

            /* One of these days, when we have int64 everywhere, this will be
             * easier.
             */
            nir_ssa_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               nir_ssa_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_ssa_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                              nir_isub(b, shift, nir_imm_int(b, 32)));
               shifted = nir_bcsel(b, nir_ilt(b, shift, nir_imm_int(b, 32)),
                                      shifted_low, shifted_high);
            }
            state->view_index = nir_iand(b, shifted, nir_imm_int(b, 0xf));
         }
      } else {
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}

static bool
is_load_view_index(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_view_index;
}

static nir_ssa_def *
replace_load_view_index_with_zero(struct nir_builder *b,
                                  nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_imm_zero(b, 1, 32);
}

bool
anv_nir_lower_multiview(nir_shader *shader,
                        struct anv_graphics_pipeline *pipeline)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);
   uint32_t view_mask = pipeline->view_mask;

   /* If multiview isn't enabled, just lower the ViewIndex builtin to zero. */
   if (view_mask == 0) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_zero,
                                           NULL);
   }

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   /* Primitive Replication allows a shader to write different positions for
    * each view in the same execution.  If only the position depends on the
    * view, then it is possible to use the feature instead of instancing to
    * implement multiview.
    */
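   /* Conceptually (illustrative GLSL, not the literal NIR produced), the
    * replication lowering turns
    *
    *    gl_Position = pos;
    *
    * into
    *
    *    gl_Position[0] = pos_for_view_0;
    *    gl_Position[1] = pos_for_view_1;
    *
    * with one array element per bit set in the view mask.
    */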
   if (pipeline->use_primitive_replication) {
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         return false;

      bool progress = nir_lower_multiview(shader, pipeline->view_mask);

      if (progress) {
         nir_builder b;
         nir_builder_init(&b, entrypoint);
         b.cursor = nir_before_cf_list(&entrypoint->body);

         /* Fill Layer ID with zero.  Replication will use that as the base
          * to apply the RTAI offsets.
          */
         nir_variable *layer_id_out =
            nir_variable_create(shader, nir_var_shader_out,
                                glsl_int_type(), "layer ID");
         layer_id_out->data.location = VARYING_SLOT_LAYER;
         nir_store_var(&b, layer_id_out, nir_imm_zero(&b, 1, 32), 0x1);
      }

      return progress;
   }

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   nir_builder_init(&state.builder, entrypoint);

   bool progress = false;
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         if (load->intrinsic != nir_intrinsic_load_instance_id &&
             load->intrinsic != nir_intrinsic_load_view_index)
            continue;

         assert(load->dest.is_ssa);

         nir_ssa_def *value;
         if (load->intrinsic == nir_intrinsic_load_instance_id) {
            value = build_instance_id(&state);
         } else {
            assert(load->intrinsic == nir_intrinsic_load_view_index);
            value = build_view_index(&state);
         }

         nir_ssa_def_rewrite_uses(&load->dest.ssa, value);

         nir_instr_remove(&load->instr);
         progress = true;
      }
   }

   /* The view index is available in all stages, but the instance id is only
    * available in the VS.  If this isn't the fragment shader, we need to
    * pass the view index on to the next stage.
    */
   if (shader->info.stage != MESA_SHADER_FRAGMENT) {
      nir_ssa_def *view_index = build_view_index(&state);

      nir_builder *b = &state.builder;

      assert(view_index->parent_instr->block == nir_start_block(entrypoint));
      b->cursor = nir_after_instr(view_index->parent_instr);

      /* Unless there is only one possible view index (in which case it is
       * set directly as a constant), pass it on to the next stage.
       */
      if (util_bitcount(state.view_mask) != 1) {
         nir_variable *view_index_out =
            nir_variable_create(shader, nir_var_shader_out,
                                glsl_int_type(), "view index");
         view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
         nir_store_var(b, view_index_out, view_index, 0x1);
      }

      nir_variable *layer_id_out =
         nir_variable_create(shader, nir_var_shader_out,
                             glsl_int_type(), "layer ID");
      layer_id_out->data.location = VARYING_SLOT_LAYER;
      nir_store_var(b, layer_id_out, view_index, 0x1);

      progress = true;
   }

   if (progress) {
      nir_metadata_preserve(entrypoint, nir_metadata_block_index |
                                        nir_metadata_dominance);
   } else {
      nir_metadata_preserve(entrypoint, nir_metadata_all);
   }

   return progress;
}

bool
anv_check_for_primitive_replication(nir_shader **shaders,
                                    struct anv_graphics_pipeline *pipeline)
{
   assert(pipeline->base.device->info.ver >= 12);

   static int primitive_replication_max_views = -1;
   if (primitive_replication_max_views < 0) {
      /* TODO: Figure out why we are not getting the same benefits for more
       * than 2 views.  For now, use Primitive Replication just for the
       * 2-view case by default.
       */
      const unsigned default_max_views = 2;

      primitive_replication_max_views =
         MIN2(MAX_VIEWS_FOR_PRIMITIVE_REPLICATION,
              env_var_as_unsigned("ANV_PRIMITIVE_REPLICATION_MAX_VIEWS",
                                  default_max_views));
   }

   /* TODO: We should be able to support replication at 'geometry' stages
    * later than Vertex.  In that case only the last stage can refer to
    * gl_ViewIndex.
    */
   if (pipeline->active_stages != (VK_SHADER_STAGE_VERTEX_BIT |
                                   VK_SHADER_STAGE_FRAGMENT_BIT)) {
      return false;
   }

   uint32_t view_mask = pipeline->view_mask;
   int view_count = util_bitcount(view_mask);
   if (view_count == 1 || view_count > primitive_replication_max_views)
      return false;

   /* We can't access the view index in the fragment shader. */
   if (nir_shader_uses_view_index(shaders[MESA_SHADER_FRAGMENT]))
      return false;

   return nir_can_lower_multiview(shaders[MESA_SHADER_VERTEX]);
}