/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_SHADER_H
#define BRW_SHADER_H

#include <stdint.h>
#include "brw_cfg.h"
#include "brw_compiler.h"
#include "compiler/nir/nir.h"

#ifdef __cplusplus
#include "brw_ir_analysis.h"
#include "brw_ir_allocator.h"

/* Heuristic selected for an instruction-scheduling pass; SCHEDULE_NONE
 * disables scheduling.  NOTE(review): presumably the PRE variants run before
 * register allocation and POST after — confirm in the scheduler itself.
 */
enum instruction_scheduler_mode {
   SCHEDULE_PRE,
   SCHEDULE_PRE_NON_LIFO,
   SCHEDULE_PRE_LIFO,
   SCHEDULE_POST,
   SCHEDULE_NONE,
};

/* (1 << 16) - 4.  NOTE(review): presumably the base index at which UBO
 * surfaces start in a 16-bit surface index space — confirm against the
 * users of UBO_START.
 */
#define UBO_START ((1 << 16) - 4)

/**
 * Abstract base class for a shader being compiled.  Holds the per-compile
 * state shared by the back-ends: the NIR source, the prog_data being filled
 * in, the instruction list (fs_inst or vec4_instruction) and its CFG, plus
 * the register allocator state.  Only visible to C++ translation units;
 * C callers see an opaque forward declaration (see the #else branch below).
 */
struct backend_shader {
protected:

   /* Constructed only by derived back-end classes. */
   backend_shader(const struct brw_compiler *compiler,
                  void *log_data,
                  void *mem_ctx,
                  const nir_shader *shader,
                  struct brw_stage_prog_data *stage_prog_data,
                  bool debug_enabled);

public:
   virtual ~backend_shader();

   const struct brw_compiler *compiler;
   void *log_data; /* Passed to compiler->*_log functions */

   const struct intel_device_info * const devinfo;
   const nir_shader *nir;
   struct brw_stage_prog_data * const stage_prog_data;

   /** ralloc context for temporary data used during compile */
   void *mem_ctx;

   /**
    * List of either fs_inst or vec4_instruction (inheriting from
    * backend_instruction)
    */
   exec_list instructions;

   /* Control-flow graph over \c instructions; see calculate_cfg(). */
   cfg_t *cfg;
   /* Cached immediate-dominator-tree analysis over this shader. */
   brw_analysis<brw::idom_tree, backend_shader> idom_analysis;

   gl_shader_stage stage;
   bool debug_enabled;
   const char *stage_name;    /* Human-readable stage name. */
   const char *stage_abbrev;  /* Short stage tag. */

   brw::simple_allocator alloc;

   /* Print a single instruction to stderr / the given file (implemented by
    * each back-end for its own instruction type).
    */
   virtual void dump_instruction(const backend_instruction *inst) const = 0;
   virtual void dump_instruction(const backend_instruction *inst, FILE *file) const = 0;
   virtual void dump_instructions() const;
   virtual void dump_instructions(const char *name) const;

   /* Build \c cfg from the current instruction list. */
   void calculate_cfg();

   /* Drop cached analyses (e.g. idom_analysis) whose dependency classes
    * intersect \p c, after a transformation has changed the IR.
    */
   virtual void invalidate_analysis(brw::analysis_dependency_class c);
};

#else
/* C translation units only ever pass this around as an opaque pointer. */
struct backend_shader;
#endif /* __cplusplus */

enum brw_reg_type brw_type_for_base_type(const struct glsl_type *type);
enum brw_conditional_mod brw_conditional_for_comparison(unsigned int op);
101uint32_t brw_math_function(enum opcode op); 102const char *brw_instruction_name(const struct brw_isa_info *isa, 103 enum opcode op); 104bool brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg); 105bool brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg); 106bool brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg); 107 108bool opt_predicated_break(struct backend_shader *s); 109 110#ifdef __cplusplus 111extern "C" { 112#endif 113 114/* brw_fs_reg_allocate.cpp */ 115void brw_fs_alloc_reg_sets(struct brw_compiler *compiler); 116 117/* brw_vec4_reg_allocate.cpp */ 118void brw_vec4_alloc_reg_set(struct brw_compiler *compiler); 119 120/* brw_disasm.c */ 121extern const char *const conditional_modifier[16]; 122extern const char *const pred_ctrl_align16[16]; 123 124/* Per-thread scratch space is a power-of-two multiple of 1KB. */ 125static inline unsigned 126brw_get_scratch_size(int size) 127{ 128 return MAX2(1024, util_next_power_of_two(size)); 129} 130 131 132static inline nir_variable_mode 133brw_nir_no_indirect_mask(const struct brw_compiler *compiler, 134 gl_shader_stage stage) 135{ 136 const struct intel_device_info *devinfo = compiler->devinfo; 137 const bool is_scalar = compiler->scalar_stage[stage]; 138 nir_variable_mode indirect_mask = (nir_variable_mode) 0; 139 140 switch (stage) { 141 case MESA_SHADER_VERTEX: 142 case MESA_SHADER_FRAGMENT: 143 indirect_mask |= nir_var_shader_in; 144 break; 145 146 case MESA_SHADER_GEOMETRY: 147 if (!is_scalar) 148 indirect_mask |= nir_var_shader_in; 149 break; 150 151 default: 152 /* Everything else can handle indirect inputs */ 153 break; 154 } 155 156 if (is_scalar && stage != MESA_SHADER_TESS_CTRL && 157 stage != MESA_SHADER_TASK && 158 stage != MESA_SHADER_MESH) 159 indirect_mask |= nir_var_shader_out; 160 161 /* On HSW+, we allow indirects in scalar shaders. They get implemented 162 * using nir_lower_vars_to_explicit_types and nir_lower_explicit_io in 163 * brw_postprocess_nir. 
164 * 165 * We haven't plumbed through the indirect scratch messages on gfx6 or 166 * earlier so doing indirects via scratch doesn't work there. On gfx7 and 167 * earlier the scratch space size is limited to 12kB. If we allowed 168 * indirects as scratch all the time, we may easily exceed this limit 169 * without having any fallback. 170 */ 171 if (is_scalar && devinfo->verx10 <= 70) 172 indirect_mask |= nir_var_function_temp; 173 174 return indirect_mask; 175} 176 177bool brw_texture_offset(const nir_tex_instr *tex, unsigned src, 178 uint32_t *offset_bits); 179 180/** 181 * Scratch data used when compiling a GLSL geometry shader. 182 */ 183struct brw_gs_compile 184{ 185 struct brw_gs_prog_key key; 186 struct brw_vue_map input_vue_map; 187 188 unsigned control_data_bits_per_vertex; 189 unsigned control_data_header_size_bits; 190}; 191 192#ifdef __cplusplus 193} 194#endif 195 196#endif /* BRW_SHADER_H */ 197