/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)

#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      mesa_logd("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__);         \
   } while (0)

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   struct ir3_compiler *compiler;
   const struct ir3_context_funcs *funcs;

   struct nir_shader *s;

   struct nir_instr *cur_instr; /* current instruction, just for debug */

   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs.  Because of the way varying packing
    * works, we could have inputs w/ fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block;    /* the current block */
   struct ir3_block *in_block; /* block created for shader inputs */

   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs,
    * instead the hw passes an ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that, it still declares varyings as
    * inputs.  So we do all the input tracking normally and fix
    * things up after compile_instructions().
    */
   struct ir3_instruction *ij[IJ_COUNT];

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system value sources: */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance,
      *draw_id, *view_index;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;
   struct ir3_instruction *rel_patch_id;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   unsigned num_arrays;

   /* Tracking for max level of flowcontrol (branchstack) needed
    * by a5xx+:
    */
   unsigned stack, max_stack;

   unsigned loop_id;
   unsigned loop_depth;

   /* a common pattern for indirect addressing is to request the
    * same address register multiple times.  To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done at the NIR stage)
    * we cache the address value generated for a given src value:
    *
    * Note that we have to cache these per alignment, since the same
    * src used for an array of vec1 cannot also be used for an
    * array of vec4.
    */
   struct hash_table *addr0_ht[4];

   /* The same for a1.x.  We only support immediate values for a1.x, as this
    * is the only use so far.
    */
   struct hash_table_u64 *addr1_ht;

   struct hash_table *sel_cond_conversions;

   /* last dst array, for indirect we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purposes of
    * figuring out the block's successors
    */
   struct hash_table *block_ht;

   /* maps nir_block at the top of a loop to ir3_block collecting continue
    * edges.
    */
   struct hash_table *continue_block_ht;

   /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
   unsigned astc_srgb;

   /* on a4xx, per-sampler per-component swizzles, for tg4: */
   uint16_t sampler_swizzles[16];

   unsigned samples; /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   unsigned prefetch_limit;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fall back to the TGSI compiler f/e
    */
   bool error;
};

struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
                                    nir_intrinsic_instr *intr,
                                    struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_ssbo)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_load_image)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
                                      nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_image)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_image_size)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_load_global_ir3)(struct ir3_context *ctx,
                                          nir_intrinsic_instr *intr,
                                          struct ir3_instruction **dst);
   void (*emit_intrinsic_store_global_ir3)(struct ir3_context *ctx,
                                           nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_global)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;
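
/*
 * Illustrative note (not a declaration from this header): the funcs table
 * above is how per-generation backends supply their own lowering for SSBO,
 * image, and global intrinsics.  The expectation is that the NIR->ir3
 * compile loop dispatches through ctx->funcs, roughly like:
 *
 *    ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 *
 * with ctx->funcs pointing at ir3_a4xx_funcs or ir3_a6xx_funcs depending
 * on the target generation.
 */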

struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
                                     struct ir3_shader *shader,
                                     struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);

struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
                                         nir_ssa_def *dst, unsigned n);
struct ir3_instruction **ir3_get_dst(struct ir3_context *ctx, nir_dest *dst,
                                     unsigned n);
struct ir3_instruction *const *ir3_get_src(struct ir3_context *ctx,
                                           nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
struct ir3_instruction *ir3_create_collect(struct ir3_block *block,
                                           struct ir3_instruction *const *arr,
                                           unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
                    struct ir3_instruction *src, unsigned base, unsigned n);
void ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc);
void ir3_handle_nonuniform(struct ir3_instruction *instr,
                           nir_intrinsic_instr *intrin);
void emit_intrinsic_image_size_tex(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);
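
/*
 * Illustrative sketch of the usual per-component emit pattern built on the
 * helpers above (the specific ALU op and component count are hypothetical,
 * the get/put pairing is the important part):
 *
 *    struct ir3_instruction *const *src = ir3_get_src(ctx, &intr->src[0]);
 *    struct ir3_instruction **dst = ir3_get_dst(ctx, &intr->dest, ncomp);
 *
 *    for (unsigned i = 0; i < ncomp; i++)
 *       dst[i] = ir3_MOV(ctx->block, src[i], TYPE_U32);
 *
 *    ir3_put_dst(ctx, &intr->dest);
 */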

#define ir3_collect(block, ...)                                                \
   ({                                                                          \
      struct ir3_instruction *__arr[] = {__VA_ARGS__};                         \
      ir3_create_collect(block, __arr, ARRAY_SIZE(__arr));                     \
   })
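
/*
 * Example (illustrative): gathering two already-emitted scalar components
 * into a single vec2 collect, e.g. for a texture coordinate:
 *
 *    struct ir3_instruction *coord = ir3_collect(ctx->block, x, y);
 */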

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format,
                                ...);

#define compile_assert(ctx, cond)                                              \
   do {                                                                        \
      if (!(cond))                                                             \
         ir3_context_error((ctx), "failed assert: " #cond "\n");               \
   } while (0)

struct ir3_instruction *ir3_get_addr0(struct ir3_context *ctx,
                                      struct ir3_instruction *src, int align);
struct ir3_instruction *ir3_get_addr1(struct ir3_context *ctx,
                                      unsigned const_val);
struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                          struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                              struct ir3_array *arr, int n,
                                              struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr,
                            int n, struct ir3_instruction *src,
                            struct ir3_instruction *address);

static inline type_t
utype_for_size(unsigned bit_size)
{
   switch (bit_size) {
   case 32:
      return TYPE_U32;
   case 16:
      return TYPE_U16;
   case 8:
      return TYPE_U8;
   default:
      unreachable("bad bitsize");
      return ~0;
   }
}

static inline type_t
utype_src(nir_src src)
{
   return utype_for_size(nir_src_bit_size(src));
}

static inline type_t
utype_dst(nir_dest dst)
{
   return utype_for_size(nir_dest_bit_size(dst));
}

/**
 * Convert nir bitsize to ir3 bitsize, handling the special case of 1b bools
 * which can be 16b or 32b depending on gen.
 */
static inline unsigned
ir3_bitsize(struct ir3_context *ctx, unsigned nir_bitsize)
{
   if (nir_bitsize == 1)
      return type_size(ctx->compiler->bool_type);
   return nir_bitsize;
}

#endif /* IR3_CONTEXT_H_ */