/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ac_nir_to_llvm.h"
#include "ac_gpu_info.h"
#include "ac_binary.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "sid.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include <llvm/Config/llvm-config.h>

struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;
   shader_info *info;

   LLVMValueRef *ssa_defs;

   LLVMValueRef scratch;
   LLVMValueRef constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *vars;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;

   LLVMValueRef vertex_id_replaced;
   LLVMValueRef instance_id_replaced;
   LLVMValueRef tes_u_replaced;
   LLVMValueRef tes_v_replaced;
   LLVMValueRef tes_rel_patch_id_replaced;
   LLVMValueRef tes_patch_id_replaced;
};

static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx, nir_deref_instr *deref_instr,
                                           const nir_instr *instr, bool image);

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx, nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type, const nir_instr *instr,
                                     LLVMValueRef index, bool image, bool write);
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_ssa_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}

static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
   assert(src.is_ssa);
   return nir->ssa_defs[src.ssa->index];
}

static LLVMValueRef get_memory_ptr_t(struct ac_nir_context *ctx, nir_src src, LLVMTypeRef elem_type, unsigned c_off)
{
   LLVMValueRef ptr = get_src(ctx, src);
   LLVMValueRef lds_i8 = ctx->ac.lds;
   if (ctx->stage != MESA_SHADER_COMPUTE)
      lds_i8 = LLVMBuildBitCast(ctx->ac.builder, ctx->ac.lds, LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS), "");

   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, LLVMConstInt(ctx->ac.i32, c_off, 0), "");
   ptr = LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, lds_i8, &ptr, 1, "");
   int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

   return LLVMBuildBitCast(ctx->ac.builder, ptr, LLVMPointerType(elem_type, addr_space), "");
}

static LLVMValueRef get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size, unsigned c_off)
{
   return get_memory_ptr_t(ctx, src, LLVMIntTypeInContext(ctx->ac.context, bit_size), c_off);
}

static LLVMBasicBlockRef get_block(struct ac_nir_context *nir, const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}

static LLVMValueRef get_alu_src(struct ac_nir_context *ctx, nir_alu_src src,
                                unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
                              LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value, masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value, swizzle, "");
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}

static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx, LLVMIntPredicate pred,
                                 LLVMValueRef src0, LLVMValueRef src1)
{
   src0 = ac_to_integer(ctx, src0);
   src1 = ac_to_integer(ctx, src1);
   return LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
}

static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx, LLVMRealPredicate pred,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   return LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
}

static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_1f_param_scalar(struct ac_llvm_context *ctx, const char *intrin,
                                                LLVMTypeRef result_type, LLVMValueRef src0)
{
   if (LLVMGetTypeKind(result_type) != LLVMVectorTypeKind)
      return emit_intrin_1f_param(ctx, intrin, result_type, src0);

   LLVMTypeRef elem_type = LLVMGetElementType(result_type);
   LLVMValueRef ret = LLVMGetUndef(result_type);

   /* Scalarize the intrinsic, because vectors are not supported. */
   for (unsigned i = 0; i < LLVMGetVectorSize(result_type); i++) {
      char name[64], type[64];
      LLVMValueRef params[] = {
         ac_to_float(ctx, ac_llvm_extract_elem(ctx, src0, i)),
      };

      ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
      ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
      assert(length < sizeof(name));
      ret = LLVMBuildInsertElement(
         ctx->builder, ret,
         ac_build_intrinsic(ctx, name, elem_type, params, 1, AC_FUNC_ATTR_READNONE),
         LLVMConstInt(ctx->i32, i, 0), "");
   }
   return ret;
}

static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0,
                                         LLVMValueRef src1)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx, const char *intrin,
                                         LLVMTypeRef result_type, LLVMValueRef src0,
                                         LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx, LLVMValueRef src0, LLVMValueRef src1,
                               LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   return LLVMBuildSelect(ctx->builder, src0, ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}

static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}

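/* Return the carry/borrow bit of an unsigned add/sub as a zero-extended i32.
 * "intrin" is an LLVM overflow intrinsic such as llvm.uadd.with.overflow.i32
 * (see the nir_op_uadd_carry/nir_op_usub_borrow cases in visit_alu).
 */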
static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx, const char *intrin,
                                    LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = {ctx->i32, ctx->i1};
   LLVMValueRef res;
   LLVMValueRef params[] = {src0, src1};
   ret_type = LLVMStructTypeInContext(ctx->context, types, 2, true);

   res = ac_build_intrinsic(ctx, intrin, ret_type, params, 2, AC_FUNC_ATTR_READNONE);

   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}

static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
{
   assert(ac_get_elem_bits(ctx, LLVMTypeOf(src0)) == 1);

   switch (bitsize) {
   case 16:
      if (LLVMGetTypeKind(LLVMTypeOf(src0)) == LLVMVectorTypeKind) {
         assert(LLVMGetVectorSize(LLVMTypeOf(src0)) == 2);
         LLVMValueRef f[] = {
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 0),
                            ctx->f16_1, ctx->f16_0, ""),
            LLVMBuildSelect(ctx->builder, ac_llvm_extract_elem(ctx, src0, 1),
                            ctx->f16_1, ctx->f16_0, ""),
         };
         return ac_build_gather_values(ctx, f, 2);
      }
      return LLVMBuildSelect(ctx->builder, src0, ctx->f16_1, ctx->f16_0, "");
   case 32:
      return LLVMBuildSelect(ctx->builder, src0, ctx->f32_1, ctx->f32_0, "");
   case 64:
      return LLVMBuildSelect(ctx->builder, src0, ctx->f64_1, ctx->f64_0, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   src0 = ac_to_float(ctx, src0);
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, "");
}

static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
{
   switch (bitsize) {
   case 8:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i8_1, ctx->i8_0, "");
   case 16:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i16_1, ctx->i16_0, "");
   case 32:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i32_1, ctx->i32_0, "");
   case 64:
      return LLVMBuildSelect(ctx->builder, src0, ctx->i64_1, ctx->i64_0, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, "");
}

static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->gfx_level >= GFX8) {
      LLVMValueRef args[2];
      /* Check if the result is a denormal and flush to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond =
         ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
   }

   /* need to convert back up to f32 */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->gfx_level >= GFX8)
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   else {
      /* for GFX6-GFX7 */
      /* 0x38800000 is the smallest normal half-float value (2^-14) as a 32-bit
       * float, so compare the result and flush to 0 if it's smaller.
       */
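      /* (2^-14 as an IEEE-754 float has biased exponent 127 - 14 = 113 = 0x71,
       * so the bit pattern is 0x71 << 23 == 0x38800000.)
       */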
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(
         ctx->builder, LLVMRealOGT,
         LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
         temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE, temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}

static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx, LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
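   /* e.g. bits = 5, offset = 8 -> ((1 << 5) - 1) << 8 = 0x1f00 */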
   return LLVMBuildShl(
      ctx->builder,
      LLVMBuildSub(ctx->builder, LLVMBuildShl(ctx->builder, ctx->i32_1, bits, ""), ctx->i32_1, ""),
      offset, "");
}

static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx, LLVMValueRef mask,
                                         LLVMValueRef insert, LLVMValueRef base)
{
   /* Calculate:
    * (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
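    * (check per bit: a mask bit of 1 yields base ^ (insert ^ base) = insert,
    *  a mask bit of 0 yields base)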
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
   return LLVMBuildXor(
      ctx->builder, base,
      LLVMBuildAnd(ctx->builder, mask, LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}

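/* Pack the first two components of src0 into a single i32 with the given
 * conversion helper, e.g. ac_build_cvt_pkrtz_f16 or ac_build_cvt_pknorm_i16
 * (see the pack_*_2x16 cases in visit_alu).
 */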
static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0,
                                   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                                        LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}

static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx, LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}

static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx, nir_op op, LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_op_fddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_op_fddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* For DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_op_fddx_fine || op == nir_op_fddx_coarse || op == nir_op_fddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}

struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the begin and end of the loop, leaving the caller
 * to implement the body.
 *
 * params:
 * - ctx is the usual nir context
 * - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 * - value is the possibly divergent value for which we build the loop
 * - divergent is whether value is actually divergent. If false we just pass
 *   things through.
 */
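/* A minimal usage sketch (the loop body is caller-specific):
 *
 *    struct waterfall_context wctx;
 *    LLVMValueRef uniform_value = enter_waterfall(ctx, &wctx, value, divergent);
 *    LLVMValueRef result = ...;   // emit the body using uniform_value
 *    result = exit_waterfall(ctx, &wctx, result);
 */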
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
                                    LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef active = LLVMConstInt(ctx->ac.i1, 1, false);
   LLVMValueRef scalar_value[NIR_MAX_VEC_COMPONENTS];

   for (unsigned i = 0; i < ac_get_llvm_num_components(value); i++) {
      LLVMValueRef comp = ac_llvm_extract_elem(&ctx->ac, value, i);
      scalar_value[i] = ac_build_readlane(&ctx->ac, comp, NULL);
      active = LLVMBuildAnd(ctx->ac.builder, active,
                            LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, comp, scalar_value[i], ""), "");
   }

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return ac_build_gather_values(&ctx->ac, scalar_value, ac_get_llvm_num_components(value));
}

static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx, struct waterfall_context *wctx,
                                   LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      LLVMConstInt(ctx->ac.i32, 0, false),
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc, false);

   LLVMValueRef active =
      LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}

static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[16], result = NULL;
   unsigned num_components = instr->dest.dest.ssa.num_components;
   unsigned src_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec5:
   case nir_op_vec8:
   case nir_op_vec16:
   case nir_op_unpack_32_2x16:
   case nir_op_unpack_64_2x32:
   case nir_op_unpack_64_4x16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_uint_2x16:
   case nir_op_pack_sint_2x16:
   case nir_op_pack_32_2x16:
   case nir_op_pack_64_2x32:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord_amd:
   case nir_op_cube_face_index_amd:
      src_components = 3;
      break;
   case nir_op_pack_32_4x8:
   case nir_op_pack_64_4x16:
      src_components = 4;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], src_components);

   switch (instr->op) {
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fneg will be optimized by the backend compiler with the sign
          * bit removed via XOR. This is probably an LLVM bug.
          */
         result = ac_build_canonicalize(&ctx->ac, result, instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ineg:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWNeg(ctx->ac.builder, src[0], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWNeg(ctx->ac.builder, src[0], "");
      else
         result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWAdd(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWAdd(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_uadd_sat:
   case nir_op_iadd_sat: {
      char name[64], type[64];
      ac_build_type_name_for_intr(def_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.%cadd.sat.%s",
               instr->op == nir_op_uadd_sat ? 'u' : 's', type);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, AC_FUNC_ATTR_READNONE);
      break;
   }
   case nir_op_usub_sat:
   case nir_op_isub_sat: {
      char name[64], type[64];
      ac_build_type_name_for_intr(def_type, type, sizeof(type));
      snprintf(name, sizeof(name), "llvm.%csub.sat.%s",
               instr->op == nir_op_usub_sat ? 'u' : 's', type);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 2, AC_FUNC_ATTR_READNONE);
      break;
   }
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWSub(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWSub(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      if (instr->no_unsigned_wrap)
         result = LLVMBuildNUWMul(ctx->ac.builder, src[0], src[1], "");
      else if (instr->no_signed_wrap)
         result = LLVMBuildNSWMul(ctx->ac.builder, src[0], src[1], "");
      else
         result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imod:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_umod:
      result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_irem:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_idiv:
      result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_udiv:
      result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmulz:
      assert(LLVM_VERSION_MAJOR >= 12);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fmul.legacy", ctx->ac.f32,
                                  src, 2, AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_frcp:
      /* For doubles, we need precise division to pass GLCTS. */
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DEFAULT_OPENGL && ac_get_type_size(def_type) == 8) {
         result = LLVMBuildFDiv(ctx->ac.builder, ctx->ac.f64_1, ac_to_float(&ctx->ac, src[0]), "");
      } else {
         result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rcp",
                                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      }
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) <
          ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) >
               ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) <
          ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) >
               ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ushr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) <
          ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) >
               ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1], LLVMTypeOf(src[0]), "");
      result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ilt:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_flt:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fabs:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.fabs", ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fabs will be optimized by the backend compiler with the sign
          * bit removed via AND.
          */
         result = ac_build_canonicalize(&ctx->ac, result, instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_fsat:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsat(&ctx->ac, src[0],
                             ac_to_float_type(&ctx->ac, def_type));
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0]);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0]);
      break;
   case nir_op_ffloor:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.floor", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.trunc", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.ceil", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.rint", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsin:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.sin", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fcos:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.cos", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsin_amd:
   case nir_op_fcos_amd:
      /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
      if (ctx->ac.gfx_level < GFX9)
         src[0] = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.fract",
                                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      result =
         emit_intrin_1f_param(&ctx->ac, instr->op == nir_op_fsin_amd ? "llvm.amdgcn.sin" : "llvm.amdgcn.cos",
                              ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.sqrt", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fexp2:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.exp2", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result =
         emit_intrin_1f_param(&ctx->ac, "llvm.log2", ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param_scalar(&ctx->ac, "llvm.amdgcn.rsq",
                                           ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0], ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result, ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fpow:
      if (instr->dest.dest.ssa.bit_size != 32) {
         /* 16 and 64 bits */
         result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
                                       ac_to_float_type(&ctx->ac, def_type), src[0]);
         result = LLVMBuildFMul(ctx->ac.builder, result, ac_to_float(&ctx->ac, src[1]), "");
         result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
                                       ac_to_float_type(&ctx->ac, def_type), result);
         break;
      }
      if (LLVM_VERSION_MAJOR >= 12) {
         result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
                                       ac_to_float_type(&ctx->ac, def_type), src[0]);
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fmul.legacy", ctx->ac.f32,
                                     (LLVMValueRef[]){result, ac_to_float(&ctx->ac, src[1])},
                                     2, AC_FUNC_ATTR_READNONE);
         result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
                                       ac_to_float_type(&ctx->ac, def_type), result);
         break;
      }
      /* Older LLVM doesn't have fmul.legacy. */
      result = emit_intrin_2f_param(&ctx->ac, "llvm.pow", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      if (ctx->ac.gfx_level < GFX9 && instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result, instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1]);
      if (ctx->ac.gfx_level < GFX9 && instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result, instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is slow on gfx6-8, so it shouldn't be used. */
      assert(instr->dest.dest.ssa.bit_size != 32 || ctx->ac.gfx_level >= GFX9);
      result = emit_intrin_3f_param(&ctx->ac, "llvm.fma", ac_to_float_type(&ctx->ac, def_type),
                                    src[0], src[1], src[2]);
      break;
   case nir_op_ffmaz:
      assert(LLVM_VERSION_MAJOR >= 12 && ctx->ac.gfx_level >= GFX10_3);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      src[2] = ac_to_float(&ctx->ac, src[2]);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.fma.legacy", ctx->ac.f32,
                                  src, 3, AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2,
                                     AC_FUNC_ATTR_READNONE);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2,
                                     AC_FUNC_ATTR_READNONE);
      else
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2,
                                     AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec5:
   case nir_op_vec8:
   case nir_op_vec16:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2imp:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2ump:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2fmp:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2fmp:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz:
   case nir_op_f2f16:
   case nir_op_f2fmp:
      src[0] = ac_to_float(&ctx->ac, src[0]);

      /* For OpenGL, we want fast packing with v_cvt_pkrtz_f16, but if we use it,
       * all f32->f16 conversions have to round towards zero, because both scalar
       * and vec2 down-conversions have to round equally.
       */
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DEFAULT_OPENGL || instr->op == nir_op_f2f16_rtz) {
         src[0] = ac_to_float(&ctx->ac, src[0]);

         if (LLVMTypeOf(src[0]) == ctx->ac.f64)
            src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");

         /* Fast path conversion. This only works if NIR is vectorized
          * to vec2 of 16-bit values.
          */
         if (LLVMTypeOf(src[0]) == ctx->ac.v2f32) {
            LLVMValueRef args[] = {
               ac_llvm_extract_elem(&ctx->ac, src[0], 0),
               ac_llvm_extract_elem(&ctx->ac, src[0], 1),
            };
            result = ac_build_cvt_pkrtz_f16(&ctx->ac, args);
            break;
         }

         assert(ac_get_llvm_num_components(src[0]) == 1);
         LLVMValueRef param[2] = {src[0], LLVMGetUndef(ctx->ac.f32)};
         result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
         result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      } else {
         if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
            result =
               LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
         else
            result =
               LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      }
      break;
   case nir_op_f2f16_rtne:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result =
            LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2imp:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_bcsel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_uclz: {
      LLVMValueRef params[2] = {
         src[0],
         ctx->ac.i1false,
      };
      result = ac_build_intrinsic(&ctx->ac, "llvm.ctlz.i32", ctx->ac.i32, params, 2, AC_FUNC_ATTR_READNONE);
      break;
   }
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_f2b1:
      result = emit_f2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_i2b1:
   case nir_op_b2b1: /* after loads */
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2b16: /* before stores */
      result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i16, "");
      break;
   case nir_op_b2b32: /* before stores */
      result = LLVMBuildZExt(ctx->ac.builder, src[0], ctx->ac.i32, "");
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
      break;
   case nir_op_pack_half_2x16_split:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildBitCast(ctx->ac.builder,
                                ac_build_cvt_pkrtz_f16(&ctx->ac, src),
                                ctx->ac.i32, "");
      break;
   case nir_op_pack_snorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
      break;
   case nir_op_pack_unorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
      break;
   case nir_op_pack_uint_2x16: {
      LLVMValueRef comp[2];

      comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
      comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");

      result = ac_build_cvt_pk_u16(&ctx->ac, comp, 16, false);
      break;
   }
   case nir_op_pack_sint_2x16: {
      LLVMValueRef comp[2];

      comp[0] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_0, "");
      comp[1] = LLVMBuildExtractElement(ctx->ac.builder, src[0], ctx->ac.i32_1, "");

      result = ac_build_cvt_pk_i16(&ctx->ac, comp, 16, false);
      break;
   }
   case nir_op_unpack_half_2x16:
      result = emit_unpack_half_2x16(&ctx->ac, src[0]);
      break;
   case nir_op_unpack_half_2x16_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_half_2x16_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = emit_unpack_half_2x16(&ctx->ac, src[0]);
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }
   case nir_op_fddx:
   case nir_op_fddy:
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      result = emit_ddxy(ctx, instr->op, src[0]);
      break;

   case nir_op_unpack_64_4x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v4i16, "");
      break;
   }
   case nir_op_pack_64_4x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.i64, "");
      break;
   }

   case nir_op_unpack_64_2x32: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.v2i32, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }

   case nir_op_pack_64_2x32: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.i64, "");
      break;
   }
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }

   case nir_op_pack_32_4x8:
   case nir_op_pack_32_2x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.i32, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }

   case nir_op_unpack_32_2x16: {
      result = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                ctx->ac.v2i16, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
      break;
   }

   case nir_op_cube_face_coord_amd: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[2];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc", ctx->ac.f32, in, 3,
                                      AC_FUNC_ATTR_READNONE);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc", ctx->ac.f32, in, 3,
                                      AC_FUNC_ATTR_READNONE);
      LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema", ctx->ac.f32, in, 3,
                                           AC_FUNC_ATTR_READNONE);
      results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
      results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
      LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
      results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
      results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
      result = ac_build_gather_values(&ctx->ac, results, 2);
      break;
   }

   case nir_op_cube_face_index_amd: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid", ctx->ac.f32, in, 3,
                                  AC_FUNC_ATTR_READNONE);
      break;
   }

   case nir_op_extract_u8:
   case nir_op_extract_i8:
   case nir_op_extract_u16:
   case nir_op_extract_i16: {
      bool is_signed = instr->op == nir_op_extract_i16 || instr->op == nir_op_extract_i8;
      unsigned size = instr->op == nir_op_extract_u8 || instr->op == nir_op_extract_i8 ? 8 : 16;
      LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
      result = LLVMBuildLShr(ctx->ac.builder, src[0], offset, "");
      result = LLVMBuildTrunc(ctx->ac.builder, result, LLVMIntTypeInContext(ctx->ac.context, size), "");
      if (is_signed)
         result = LLVMBuildSExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
      else
         result = LLVMBuildZExt(ctx->ac.builder, result, LLVMTypeOf(src[0]), "");
      break;
   }

   case nir_op_insert_u8:
   case nir_op_insert_u16: {
      unsigned size = instr->op == nir_op_insert_u8 ? 8 : 16;
      LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
      LLVMValueRef mask = LLVMConstInt(LLVMTypeOf(src[0]), u_bit_consecutive(0, size), false);
      result = LLVMBuildShl(ctx->ac.builder, LLVMBuildAnd(ctx->ac.builder, src[0], mask, ""), offset, "");
      break;
   }

   case nir_op_sdot_4x8_iadd:
   case nir_op_udot_4x8_uadd:
   case nir_op_sdot_4x8_iadd_sat:
   case nir_op_udot_4x8_uadd_sat: {
      const char *name = instr->op == nir_op_sdot_4x8_iadd ||
                         instr->op == nir_op_sdot_4x8_iadd_sat
                         ? "llvm.amdgcn.sdot4" : "llvm.amdgcn.udot4";
      src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_4x8_iadd_sat ||
                            instr->op == nir_op_udot_4x8_uadd_sat, false);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, AC_FUNC_ATTR_READNONE);
      break;
   }

   case nir_op_sdot_2x16_iadd:
   case nir_op_udot_2x16_uadd:
   case nir_op_sdot_2x16_iadd_sat:
   case nir_op_udot_2x16_uadd_sat: {
      const char *name = instr->op == nir_op_sdot_2x16_iadd ||
                         instr->op == nir_op_sdot_2x16_iadd_sat
                         ? "llvm.amdgcn.sdot2" : "llvm.amdgcn.udot2";
      src[0] = LLVMBuildBitCast(ctx->ac.builder, src[0], ctx->ac.v2i16, "");
      src[1] = LLVMBuildBitCast(ctx->ac.builder, src[1], ctx->ac.v2i16, "");
      src[3] = LLVMConstInt(ctx->ac.i1, instr->op == nir_op_sdot_2x16_iadd_sat ||
                            instr->op == nir_op_udot_2x16_uadd_sat, false);
      result = ac_build_intrinsic(&ctx->ac, name, def_type, src, 4, AC_FUNC_ATTR_READNONE);
      break;
   }

   case nir_op_sad_u8x4:
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.sad.u8", ctx->ac.i32,
                                  (LLVMValueRef[]){src[0], src[1], src[2]}, 3,
                                  AC_FUNC_ATTR_READNONE);
      break;

   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }

   if (result) {
      assert(instr->dest.dest.is_ssa);
      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
   }
}

static void visit_load_const(struct ac_nir_context *ctx, const nir_load_const_instr *instr)
{
   LLVMValueRef values[4], value = NULL;
   LLVMTypeRef element_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 1:
         values[i] = LLVMConstInt(element_type, instr->value[i].b, false);
         break;
      case 8:
         values[i] = LLVMConstInt(element_type, instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type, instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type, instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type, instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr, "unsupported nir load_const bit_size: %d\n", instr->def.bit_size);
         abort();
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
}

static LLVMValueRef get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor,
                                    bool in_elements)
{
   LLVMValueRef size =
      LLVMBuildExtractElement(ctx->ac.builder, descriptor, LLVMConstInt(ctx->ac.i32, 2, false), "");

   /* GFX8 only */
   if (ctx->ac.gfx_level == GFX8 && in_elements) {
      /* On GFX8, the descriptor contains the size in bytes,
       * but TXQ must return the size in elements.
       * The stride is always non-zero for resources using TXQ.
       */
      LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, descriptor, ctx->ac.i32_1, "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, false), "");
      stride = LLVMBuildAnd(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

      size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
   }
   return size;
}

/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
lower_gather4_integer(struct ac_llvm_context *ctx, struct ac_image_args *args, const nir_tex_instr *instr)1445 static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx, struct ac_image_args *args,
1446 const nir_tex_instr *instr)
1447 {
1448 nir_alu_type stype = nir_alu_type_get_base_type(instr->dest_type);
1449 LLVMValueRef wa_8888 = NULL;
1450 LLVMValueRef half_texel[2];
1451 LLVMValueRef result;
1452
1453 assert(stype == nir_type_int || stype == nir_type_uint);
1454
1455 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1456 LLVMValueRef formats;
1457 LLVMValueRef data_format;
1458 LLVMValueRef wa_formats;
1459
1460 formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");
1461
1462 data_format = LLVMBuildLShr(ctx->builder, formats, LLVMConstInt(ctx->i32, 20, false), "");
1463 data_format =
1464 LLVMBuildAnd(ctx->builder, data_format, LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
1465 wa_8888 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, data_format,
1466 LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false), "");
1467
1468 uint32_t wa_num_format = stype == nir_type_uint
1469 ? S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED)
1470 : S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
1471 wa_formats = LLVMBuildAnd(ctx->builder, formats,
1472 LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false), "");
1473 wa_formats =
1474 LLVMBuildOr(ctx->builder, wa_formats, LLVMConstInt(ctx->i32, wa_num_format, false), "");
1475
1476 formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
1477 args->resource =
1478 LLVMBuildInsertElement(ctx->builder, args->resource, formats, ctx->i32_1, "");
1479 }
1480
1481 if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
1482 assert(!wa_8888);
1483 half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
1484 } else {
1485 struct ac_image_args resinfo = {0};
1486 LLVMBasicBlockRef bbs[2];
1487
1488 LLVMValueRef unnorm = NULL;
1489 LLVMValueRef default_offset = ctx->f32_0;
1490 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D && !instr->is_array) {
1491 /* In Vulkan, whether the sampler uses unnormalized
1492 * coordinates or not is a dynamic property of the
1493 * sampler. Hence, to figure out whether or not we
1494 * need to divide by the texture size, we need to test
1495 * the sampler at runtime. This tests the bit set by
1496 * radv_init_sampler().
1497 */
1498 LLVMValueRef sampler0 =
1499 LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
1500 sampler0 = LLVMBuildLShr(ctx->builder, sampler0, LLVMConstInt(ctx->i32, 15, false), "");
1501 sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
1502 unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
1503 default_offset = LLVMConstReal(ctx->f32, -0.5);
1504 }
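/* Note (derived from the shifts above): radv_init_sampler() is assumed to
 * stash the "unnormalized coordinates" flag in bit 15 of sampler dword 0,
 * so the computation reduces to unnorm = (sampler0 >> 15) & 1.
 */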
1505
1506 bbs[0] = LLVMGetInsertBlock(ctx->builder);
1507 if (wa_8888 || unnorm) {
1508 assert(!(wa_8888 && unnorm));
1509 LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
1510 /* Skip the texture size query entirely if we don't need it. */
1511 ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
1512 bbs[1] = LLVMGetInsertBlock(ctx->builder);
1513 }
1514
1515 /* Query the texture size. */
1516 resinfo.dim = ac_get_sampler_dim(ctx->gfx_level, instr->sampler_dim, instr->is_array);
1517 resinfo.opcode = ac_image_get_resinfo;
1518 resinfo.dmask = 0xf;
1519 resinfo.lod = ctx->i32_0;
1520 resinfo.resource = args->resource;
1521 resinfo.attributes = AC_FUNC_ATTR_READNONE;
1522 LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);
1523
1524 /* Compute -0.5 / size. */
1525 for (unsigned c = 0; c < 2; c++) {
1526 half_texel[c] =
1527 LLVMBuildExtractElement(ctx->builder, size, LLVMConstInt(ctx->i32, c, 0), "");
1528 half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
1529 half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
1530 half_texel[c] =
1531 LLVMBuildFMul(ctx->builder, half_texel[c], LLVMConstReal(ctx->f32, -0.5), "");
1532 }
1533
1534 if (wa_8888 || unnorm) {
1535 ac_build_endif(ctx, 2000);
1536
1537 for (unsigned c = 0; c < 2; c++) {
1538 LLVMValueRef values[2] = {default_offset, half_texel[c]};
1539 half_texel[c] = ac_build_phi(ctx, ctx->f32, 2, values, bbs);
1540 }
1541 }
1542 }
1543
1544 for (unsigned c = 0; c < 2; c++) {
1545 LLVMValueRef tmp;
1546 tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
1547 args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
1548 }
1549
1550 args->attributes = AC_FUNC_ATTR_READNONE;
1551 result = ac_build_image_opcode(ctx, args);
1552
1553 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1554 LLVMValueRef tmp, tmp2;
1555
1556 /* If the cube workaround is in place, f2i the result. */
1557 for (unsigned c = 0; c < 4; c++) {
1558 tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
1559 if (stype == nir_type_uint)
1560 tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
1561 else
1562 tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
1563 tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
1564 tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
1565 tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
1566 tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
1567 result =
1568 LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
1569 }
1570 }
1571 return result;
1572 }
1573
1574 static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_tex_instr *instr,
1575 struct ac_image_args *args)
1576 {
1577 assert((!args->tfe || !args->d16) && "unsupported");
1578
1579 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
1580 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
1581
1582 assert(instr->dest.is_ssa);
1583
1584 /* Buffers don't support A16. */
1585 if (args->a16)
1586 args->coords[0] = LLVMBuildZExt(ctx->ac.builder, args->coords[0], ctx->ac.i32, "");
1587
1588 return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0,
1589 util_last_bit(mask), 0, true,
1590 instr->dest.ssa.bit_size == 16,
1591 args->tfe);
1592 }
1593
1594 args->opcode = ac_image_sample;
1595
1596 switch (instr->op) {
1597 case nir_texop_txf:
1598 case nir_texop_txf_ms:
1599 case nir_texop_samples_identical:
1600 args->opcode = args->level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS
1601 ? ac_image_load
1602 : ac_image_load_mip;
1603 args->level_zero = false;
1604 break;
1605 case nir_texop_txs:
1606 case nir_texop_query_levels:
1607 args->opcode = ac_image_get_resinfo;
1608 if (!args->lod)
1609 args->lod = ctx->ac.i32_0;
1610 args->level_zero = false;
1611 break;
1612 case nir_texop_tex:
1613 if (ctx->stage != MESA_SHADER_FRAGMENT &&
1614 (ctx->stage != MESA_SHADER_COMPUTE ||
1615 ctx->info->cs.derivative_group == DERIVATIVE_GROUP_NONE)) {
1616 assert(!args->lod);
1617 args->level_zero = true;
1618 }
1619 break;
1620 case nir_texop_tg4:
1621 args->opcode = ac_image_gather4;
1622 if (!args->lod && !args->bias)
1623 args->level_zero = true;
1624 break;
1625 case nir_texop_lod:
1626 args->opcode = ac_image_get_lod;
1627 break;
1628 case nir_texop_fragment_fetch_amd:
1629 case nir_texop_fragment_mask_fetch_amd:
1630 args->opcode = ac_image_load;
1631 args->level_zero = false;
1632 break;
1633 default:
1634 break;
1635 }
1636
1637 /* Aldebaran doesn't have image_sample_lz, but image_sample behaves like lz. */
1638 if (!ctx->ac.has_3d_cube_border_color_mipmap)
1639 args->level_zero = false;
1640
1641 if (instr->op == nir_texop_tg4 && ctx->ac.gfx_level <= GFX8 &&
1642 (instr->dest_type & (nir_type_int | nir_type_uint))) {
1643 return lower_gather4_integer(&ctx->ac, args, instr);
1644 }
1645
1646 /* Fixup for GFX9 which allocates 1D textures as 2D. */
1647 if (instr->op == nir_texop_lod && ctx->ac.gfx_level == GFX9) {
1648 if ((args->dim == ac_image_2darray || args->dim == ac_image_2d) && !args->coords[1]) {
1649 args->coords[1] = ctx->ac.i32_0;
1650 }
1651 }
1652
1653 args->attributes = AC_FUNC_ATTR_READNONE;
1654 bool cs_derivs =
1655 ctx->stage == MESA_SHADER_COMPUTE && ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
1656 if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
1657 /* Prevent texture instructions with implicit derivatives from being
1658 * sunk into branches. */
1659 switch (instr->op) {
1660 case nir_texop_tex:
1661 case nir_texop_txb:
1662 case nir_texop_lod:
1663 args->attributes |= AC_FUNC_ATTR_CONVERGENT;
1664 break;
1665 default:
1666 break;
1667 }
1668 }
1669
1670 return ac_build_image_opcode(&ctx->ac, args);
1671 }
1672
1673 static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1674 {
1675 LLVMValueRef ptr, addr;
1676 LLVMValueRef src0 = get_src(ctx, instr->src[0]);
1677 unsigned index = nir_intrinsic_base(instr);
1678
1679 addr = LLVMConstInt(ctx->ac.i32, index, 0);
1680 addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");
1681
1682 /* Load constant values from user SGPRs when possible; otherwise
1683 * fall back to the default path that loads directly from memory.
1684 */
1685 if (LLVMIsConstant(src0) && instr->dest.ssa.bit_size >= 32) {
1686 unsigned count = instr->dest.ssa.num_components;
1687 unsigned offset = index;
1688
1689 if (instr->dest.ssa.bit_size == 64)
1690 count *= 2;
1691
1692 offset += LLVMConstIntGetZExtValue(src0);
1693 offset /= 4;
1694
1695 uint64_t mask = BITFIELD64_MASK(count) << offset;
1696 if ((ctx->args->inline_push_const_mask | mask) == ctx->args->inline_push_const_mask &&
1697 offset + count <= (sizeof(ctx->args->inline_push_const_mask) * 8u)) {
1698 LLVMValueRef *const push_constants = alloca(count * sizeof(LLVMValueRef));
1699 unsigned arg_index =
1700 util_bitcount64(ctx->args->inline_push_const_mask & BITFIELD64_MASK(offset));
1701 for (unsigned i = 0; i < count; i++)
1702 push_constants[i] = ac_get_arg(&ctx->ac, ctx->args->inline_push_consts[arg_index++]);
1703 LLVMValueRef res = ac_build_gather_values(&ctx->ac, push_constants, count);
1704 return instr->dest.ssa.bit_size == 64
1705 ? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "")
1706 : res;
1707 }
1708 }
1709
1710 ptr = LLVMBuildGEP(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");
1711
1712 if (instr->dest.ssa.bit_size == 8) {
1713 unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
1714 LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
1715 ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
1716 LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
1717
1718 LLVMValueRef params[3];
1719 if (load_dwords > 1) {
1720 LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.v2i32, "");
1721 params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec,
1722 LLVMConstInt(ctx->ac.i32, 1, false), "");
1723 params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec,
1724 LLVMConstInt(ctx->ac.i32, 0, false), "");
1725 } else {
1726 res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
1727 params[0] = ctx->ac.i32_0;
1728 params[1] = res;
1729 }
1730 params[2] = addr;
1731 res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);
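/* The alignbyte intrinsic above (v_alignbyte_b32) extracts the dword that
 * starts at byte (addr & 3) of the 64-bit concatenation
 * {params[0]:params[1]}; this is how the possibly unaligned 8-bit
 * components are sliced out of the dword-aligned load.
 */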
1732
1733 res = LLVMBuildTrunc(
1734 ctx->ac.builder, res,
1735 LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
1736 if (instr->dest.ssa.num_components > 1)
1737 res = LLVMBuildBitCast(ctx->ac.builder, res,
1738 LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), "");
1739 return res;
1740 } else if (instr->dest.ssa.bit_size == 16) {
1741 unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
1742 LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
1743 ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
1744 LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
1745 res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
1746 LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
1747 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
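/* cond is bit 1 of the byte address, i.e. whether the first 16-bit element
 * lives in the high half of its dword. The two shuffles below build the
 * aligned and the shifted-by-one-element views, and the select picks the
 * correct one.
 */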
1748 LLVMValueRef mask[] = {
1749 LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
1750 LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
1751 LLVMConstInt(ctx->ac.i32, 4, false)};
1752 LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
1753 LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
1754 LLVMValueRef shuffle_aligned =
1755 LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
1756 LLVMValueRef shuffle_unaligned =
1757 LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
1758 res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
1759 return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
1760 }
1761
1762 LLVMTypeRef ptr_type = get_def_type(ctx, &instr->dest.ssa);
1763 ptr = ac_cast_ptr(&ctx->ac, ptr, ptr_type);
1764
1765 return LLVMBuildLoad2(ctx->ac.builder, ptr_type, ptr, "");
1766 }
1767
1768 static LLVMValueRef visit_get_ssbo_size(struct ac_nir_context *ctx,
1769 const nir_intrinsic_instr *instr)
1770 {
1771 bool non_uniform = nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM;
1772 LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, get_src(ctx, instr->src[0]), false, non_uniform);
1773 return get_buffer_size(ctx, rsrc, false);
1774 }
1775
1776 static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
1777 unsigned start, unsigned count)
1778 {
1779 LLVMValueRef mask[] = {ctx->i32_0, ctx->i32_1, LLVMConstInt(ctx->i32, 2, false),
1780 LLVMConstInt(ctx->i32, 3, false)};
1781
1782 unsigned src_elements = ac_get_llvm_num_components(src);
1783
1784 if (count == src_elements) {
1785 assert(start == 0);
1786 return src;
1787 } else if (count == 1) {
1788 assert(start < src_elements);
1789 return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
1790 } else {
1791 assert(start + count <= src_elements);
1792 assert(count <= 4);
1793 LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
1794 return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
1795 }
1796 }
1797
1798 static unsigned get_cache_policy(struct ac_nir_context *ctx, enum gl_access_qualifier access,
1799 bool may_store_unaligned, bool writeonly_memory)
1800 {
1801 unsigned cache_policy = 0;
1802
1803 /* GFX6 has a TC L1 bug causing corruption of 8-bit/16-bit stores. All
1804 * store opcodes not aligned to a dword are affected. The only way to
1805 * get unaligned stores is through shader images.
1806 */
1807 if (((may_store_unaligned && ctx->ac.gfx_level == GFX6) ||
1808 /* If this is write-only, don't keep data in L1 to prevent
1809 * evicting L1 cache lines that may be needed by other
1810 * instructions.
1811 */
1812 writeonly_memory || access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
1813 cache_policy |= ac_glc;
1814 }
1815
1816 if (access & ACCESS_STREAM_CACHE_POLICY)
1817 cache_policy |= ac_slc | ac_glc;
1818
1819 return cache_policy;
1820 }
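/* Usage sketch for the helper above (illustrative): a coherent SSBO store
 * yields ac_glc, and a store with ACCESS_STREAM_CACHE_POLICY yields
 * ac_glc | ac_slc.
 */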
1821
1822 static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
1823 const nir_intrinsic_instr *instr, nir_src src)
1824 {
1825 return enter_waterfall(ctx, wctx, get_src(ctx, src),
1826 nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
1827 }
1828
1829 static void visit_store_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1830 {
1831 if (ctx->ac.postponed_kill) {
1832 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
1833 ac_build_ifcc(&ctx->ac, cond, 7000);
1834 }
1835
1836 LLVMValueRef src_data = get_src(ctx, instr->src[0]);
1837 int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
1838 unsigned writemask = nir_intrinsic_write_mask(instr);
1839 enum gl_access_qualifier access = nir_intrinsic_access(instr);
1840 bool writeonly_memory = access & ACCESS_NON_READABLE;
1841 unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);
1842
1843 struct waterfall_context wctx;
1844 LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);
1845
1846 LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false);
1847 LLVMValueRef base_data = src_data;
1848 base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
1849 LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
1850
1851 while (writemask) {
1852 int start, count;
1853 LLVMValueRef data, offset;
1854 LLVMTypeRef data_type;
1855
1856 u_bit_scan_consecutive_range(&writemask, &start, &count);
1857
1858 if (count == 3 && elem_size_bytes != 4) {
1859 writemask |= 1 << (start + 2);
1860 count = 2;
1861 }
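/* e.g. a 64-bit vec3 store (start = 0, count = 3) is split here: bit 2 is
 * put back into the writemask, two elements (16 bytes) are stored now, and
 * the third is handled on the next loop iteration.
 */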
1862 int num_bytes = count * elem_size_bytes; /* count in bytes */
1863
1864 /* We can only store 4 dwords at a time; exceeding that limit
1865 * can only happen for 64-bit vectors. */
1866 if (num_bytes > 16) {
1867 writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
1868 count = 2;
1869 num_bytes = 16;
1870 }
1871
1872 /* Check the alignment of 16-bit stores. */
1873 if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
1874 writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1875 count = 1;
1876 num_bytes = 2;
1877 }
1878
1879 /* Due to alignment issues, split stores of 8-bit/16-bit
1880 * vectors.
1881 */
1882 if (ctx->ac.gfx_level == GFX6 && count > 1 && elem_size_bytes < 4) {
1883 writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
1884 count = 1;
1885 num_bytes = elem_size_bytes;
1886 }
1887
1888 data = extract_vector_range(&ctx->ac, base_data, start, count);
1889
1890 offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
1891 LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");
1892
1893 if (num_bytes == 1) {
1894 ac_build_buffer_store_byte(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, cache_policy);
1895 } else if (num_bytes == 2) {
1896 ac_build_buffer_store_short(&ctx->ac, rsrc, data, offset, ctx->ac.i32_0, cache_policy);
1897 } else {
1898 switch (num_bytes) {
1899 case 16: /* v4f32 */
1900 data_type = ctx->ac.v4f32;
1901 break;
1902 case 12: /* v3f32 */
1903 data_type = ctx->ac.v3f32;
1904 break;
1905 case 8: /* v2f32 */
1906 data_type = ctx->ac.v2f32;
1907 break;
1908 case 4: /* f32 */
1909 data_type = ctx->ac.f32;
1910 break;
1911 default:
1912 unreachable("Malformed vector store.");
1913 }
1914 data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
1915
1916 ac_build_buffer_store_dword(&ctx->ac, rsrc, data, NULL, offset,
1917 ctx->ac.i32_0, cache_policy);
1918 }
1919 }
1920
1921 exit_waterfall(ctx, &wctx, NULL);
1922
1923 if (ctx->ac.postponed_kill)
1924 ac_build_endif(&ctx->ac, 7000);
1925 }
1926
1927 static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx, LLVMValueRef descriptor,
1928 LLVMValueRef offset, LLVMValueRef compare,
1929 LLVMValueRef exchange, bool image)
1930 {
1931 LLVMBasicBlockRef start_block = NULL, then_block = NULL;
1932 if (ctx->abi->robust_buffer_access || image) {
1933 LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);
1934
1935 LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
1936 start_block = LLVMGetInsertBlock(ctx->ac.builder);
1937
1938 ac_build_ifcc(&ctx->ac, cond, -1);
1939
1940 then_block = LLVMGetInsertBlock(ctx->ac.builder);
1941 }
1942
1943 if (image)
1944 offset = LLVMBuildMul(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, 8, false), "");
1945
1946 LLVMValueRef ptr_parts[2] = {
1947 ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
1948 LLVMBuildAnd(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
1949 LLVMConstInt(ctx->ac.i32, 65535, 0), "")};
1950
1951 ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
1952 ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");
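/* dword0 holds the low 32 bits of the base address and the low 16 bits of
 * dword1 hold the high bits; the trunc + sext pair sign-extends what is
 * effectively a 48-bit GPU virtual address to 64 bits.
 */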
1953
1954 offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");
1955
1956 LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
1957 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
1958 ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
1959 ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL),
1960 "");
1961
1962 LLVMValueRef result =
1963 ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
1964 result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
1965
1966 if (ctx->abi->robust_buffer_access || image) {
1967 ac_build_endif(&ctx->ac, -1);
1968
1969 LLVMBasicBlockRef incoming_blocks[2] = {
1970 start_block,
1971 then_block,
1972 };
1973
1974 LLVMValueRef incoming_values[2] = {
1975 LLVMConstInt(ctx->ac.i64, 0, 0),
1976 result,
1977 };
1978 LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
1979 LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
1980 return ret;
1981 } else {
1982 return result;
1983 }
1984 }
1985
1986 static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
1987 {
1988 if (ctx->ac.postponed_kill) {
1989 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
1990 ac_build_ifcc(&ctx->ac, cond, 7001);
1991 }
1992
1993 LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
1994 const char *op;
1995 char name[64], type[8];
1996 LLVMValueRef params[6], descriptor;
1997 LLVMValueRef result;
1998 int arg_count = 0;
1999
2000 struct waterfall_context wctx;
2001 LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
2002
2003 switch (instr->intrinsic) {
2004 case nir_intrinsic_ssbo_atomic_add:
2005 op = "add";
2006 break;
2007 case nir_intrinsic_ssbo_atomic_imin:
2008 op = "smin";
2009 break;
2010 case nir_intrinsic_ssbo_atomic_umin:
2011 op = "umin";
2012 break;
2013 case nir_intrinsic_ssbo_atomic_imax:
2014 op = "smax";
2015 break;
2016 case nir_intrinsic_ssbo_atomic_umax:
2017 op = "umax";
2018 break;
2019 case nir_intrinsic_ssbo_atomic_and:
2020 op = "and";
2021 break;
2022 case nir_intrinsic_ssbo_atomic_or:
2023 op = "or";
2024 break;
2025 case nir_intrinsic_ssbo_atomic_xor:
2026 op = "xor";
2027 break;
2028 case nir_intrinsic_ssbo_atomic_exchange:
2029 op = "swap";
2030 break;
2031 case nir_intrinsic_ssbo_atomic_comp_swap:
2032 op = "cmpswap";
2033 break;
2034 case nir_intrinsic_ssbo_atomic_fmin:
2035 op = "fmin";
2036 break;
2037 case nir_intrinsic_ssbo_atomic_fmax:
2038 op = "fmax";
2039 break;
2040 default:
2041 abort();
2042 }
2043
2044 descriptor = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true, false);
2045
2046 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap && return_type == ctx->ac.i64) {
2047 result = emit_ssbo_comp_swap_64(ctx, descriptor, get_src(ctx, instr->src[1]),
2048 get_src(ctx, instr->src[2]), get_src(ctx, instr->src[3]), false);
2049 } else {
2050 LLVMValueRef data = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
2051
2052 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
2053 params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
2054 }
2055 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_fmin ||
2056 instr->intrinsic == nir_intrinsic_ssbo_atomic_fmax) {
2057 data = ac_to_float(&ctx->ac, data);
2058 return_type = LLVMTypeOf(data);
2059 }
2060 params[arg_count++] = data;
2061 params[arg_count++] = descriptor;
2062 params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
2063 params[arg_count++] = ctx->ac.i32_0; /* soffset */
2064 params[arg_count++] = ctx->ac.i32_0; /* slc */
2065
2066 ac_build_type_name_for_intr(return_type, type, sizeof(type));
2067 snprintf(name, sizeof(name), "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
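/* e.g. a 32-bit ssbo_atomic_add becomes
 * "llvm.amdgcn.raw.buffer.atomic.add.i32", called with the
 * (data, descriptor, voffset, soffset, slc) operands built above.
 */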
2068
2069 result = ac_build_intrinsic(&ctx->ac, name, return_type, params, arg_count, 0);
2070
2071 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_fmin ||
2072 instr->intrinsic == nir_intrinsic_ssbo_atomic_fmax) {
2073 result = ac_to_integer(&ctx->ac, result);
2074 }
2075 }
2076
2077 result = exit_waterfall(ctx, &wctx, result);
2078 if (ctx->ac.postponed_kill)
2079 ac_build_endif(&ctx->ac, 7001);
2080 return result;
2081 }
2082
2083 static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2084 {
2085 struct waterfall_context wctx;
2086 LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
2087
2088 int elem_size_bytes = instr->dest.ssa.bit_size / 8;
2089 int num_components = instr->num_components;
2090 enum gl_access_qualifier access = nir_intrinsic_access(instr);
2091 unsigned cache_policy = get_cache_policy(ctx, access, false, false);
2092
2093 LLVMValueRef offset = get_src(ctx, instr->src[1]);
2094 LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false);
2095 LLVMValueRef vindex = ctx->ac.i32_0;
2096
2097 LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
2098 LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
2099
2100 LLVMValueRef results[4];
2101 for (int i = 0; i < num_components;) {
2102 int num_elems = num_components - i;
2103 if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
2104 num_elems = 1;
2105 if (num_elems * elem_size_bytes > 16)
2106 num_elems = 16 / elem_size_bytes;
2107 int load_bytes = num_elems * elem_size_bytes;
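/* Chunking example: a dword-aligned 16-bit vec4 is fetched as one 8-byte
 * load, while the same vector at 2-byte alignment falls back to four
 * single-element loads via ac_build_buffer_load_short.
 */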
2108
2109 LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);
2110 LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, offset, immoffset, "");
2111
2112 LLVMValueRef ret;
2113
2114 if (load_bytes == 1) {
2115 ret = ac_build_buffer_load_byte(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
2116 cache_policy);
2117 } else if (load_bytes == 2) {
2118 ret = ac_build_buffer_load_short(&ctx->ac, rsrc, voffset, ctx->ac.i32_0,
2119 cache_policy);
2120 } else {
2121 int num_channels = util_next_power_of_two(load_bytes) / 4;
2122 bool can_speculate = access & ACCESS_CAN_REORDER;
2123
2124 ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels, vindex, voffset, ctx->ac.i32_0,
2125 ctx->ac.f32, cache_policy, can_speculate, false);
2126 }
2127
2128 LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
2129 ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
2130 ret = ac_trim_vector(&ctx->ac, ret, load_bytes);
2131
2132 LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
2133 ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");
2134
2135 for (unsigned j = 0; j < num_elems; j++) {
2136 results[i + j] =
2137 LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
2138 }
2139 i += num_elems;
2140 }
2141
2142 LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
2143 return exit_waterfall(ctx, &wctx, ret);
2144 }
2145
2146 static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx, struct waterfall_context *wctx,
2147 const nir_intrinsic_instr *instr)
2148 {
2149 return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
2150 nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
2151 }
2152
2153 static LLVMValueRef get_global_address(struct ac_nir_context *ctx,
2154 nir_intrinsic_instr *instr,
2155 LLVMTypeRef type)
2156 {
2157 bool is_store = instr->intrinsic == nir_intrinsic_store_global ||
2158 instr->intrinsic == nir_intrinsic_store_global_amd;
2159 LLVMValueRef addr = get_src(ctx, instr->src[is_store ? 1 : 0]);
2160
2161 LLVMTypeRef ptr_type = LLVMPointerType(type, AC_ADDR_SPACE_GLOBAL);
2162
2163 if (nir_intrinsic_has_base(instr)) {
2164 /* _amd variants */
2165 uint32_t base = nir_intrinsic_base(instr);
2166 unsigned num_src = nir_intrinsic_infos[instr->intrinsic].num_srcs;
2167 LLVMValueRef offset = get_src(ctx, instr->src[num_src - 1]);
2168 offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
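/* For the *_amd variants the address source is a base pointer and the last
 * source is a separate offset; the constant base from nir_intrinsic_base()
 * is folded into that offset here.
 */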
2169
2170 LLVMTypeRef i8_ptr_type = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_GLOBAL);
2171 addr = LLVMBuildIntToPtr(ctx->ac.builder, addr, i8_ptr_type, "");
2172 addr = LLVMBuildGEP(ctx->ac.builder, addr, &offset, 1, "");
2173 return type == ctx->ac.i8 ? addr : LLVMBuildBitCast(ctx->ac.builder, addr, ptr_type, "");
2174 } else {
2175 return LLVMBuildIntToPtr(ctx->ac.builder, addr, ptr_type, "");
2176 }
2177 }
2178
2179 static LLVMValueRef visit_load_global(struct ac_nir_context *ctx,
2180 nir_intrinsic_instr *instr)
2181 {
2182 LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
2183 LLVMValueRef val;
2184 LLVMValueRef addr = get_global_address(ctx, instr, result_type);
2185
2186 val = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
2187
2188 if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
2189 LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
2190 LLVMSetAlignment(val, ac_get_type_size(result_type));
2191 }
2192
2193 return val;
2194 }
2195
2196 static void visit_store_global(struct ac_nir_context *ctx,
2197 nir_intrinsic_instr *instr)
2198 {
2199 if (ctx->ac.postponed_kill) {
2200 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
2201 ac_build_ifcc(&ctx->ac, cond, 7002);
2202 }
2203
2204 LLVMValueRef data = get_src(ctx, instr->src[0]);
2205 LLVMTypeRef type = LLVMTypeOf(data);
2206 LLVMValueRef addr = get_global_address(ctx, instr, type);
2207 LLVMValueRef val;
2208
2209 val = LLVMBuildStore(ctx->ac.builder, data, addr);
2210
2211 if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE)) {
2212 LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
2213 LLVMSetAlignment(val, ac_get_type_size(type));
2214 }
2215
2216 if (ctx->ac.postponed_kill)
2217 ac_build_endif(&ctx->ac, 7002);
2218 }
2219
2220 static LLVMValueRef visit_global_atomic(struct ac_nir_context *ctx,
2221 nir_intrinsic_instr *instr)
2222 {
2223 if (ctx->ac.postponed_kill) {
2224 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
2225 ac_build_ifcc(&ctx->ac, cond, 7002);
2226 }
2227
2228 LLVMValueRef data = get_src(ctx, instr->src[1]);
2229 LLVMAtomicRMWBinOp op;
2230 LLVMValueRef result;
2231
2232 /* use "singlethread" sync scope to implement relaxed ordering */
2233 const char *sync_scope = "singlethread-one-as";
2234
2235 if (instr->intrinsic == nir_intrinsic_global_atomic_fmin ||
2236 instr->intrinsic == nir_intrinsic_global_atomic_fmax ||
2237 instr->intrinsic == nir_intrinsic_global_atomic_fmin_amd ||
2238 instr->intrinsic == nir_intrinsic_global_atomic_fmax_amd) {
2239 data = ac_to_float(&ctx->ac, data);
2240 }
2241
2242 LLVMTypeRef data_type = LLVMTypeOf(data);
2243
2244 LLVMValueRef addr = get_global_address(ctx, instr, data_type);
2245
2246 if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap ||
2247 instr->intrinsic == nir_intrinsic_global_atomic_comp_swap_amd) {
2248 LLVMValueRef data1 = get_src(ctx, instr->src[2]);
2249 result = ac_build_atomic_cmp_xchg(&ctx->ac, addr, data, data1, sync_scope);
2250 result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
2251 } else if (instr->intrinsic == nir_intrinsic_global_atomic_fmin ||
2252 instr->intrinsic == nir_intrinsic_global_atomic_fmax ||
2253 instr->intrinsic == nir_intrinsic_global_atomic_fmin_amd ||
2254 instr->intrinsic == nir_intrinsic_global_atomic_fmax_amd) {
2255 const char *op = instr->intrinsic == nir_intrinsic_global_atomic_fmin || instr->intrinsic == nir_intrinsic_global_atomic_fmin_amd ? "fmin" : "fmax";
2256 char name[64], type[8];
2257 LLVMValueRef params[2];
2258 int arg_count = 0;
2259
2260 params[arg_count++] = addr;
2261 params[arg_count++] = data;
2262
2263 ac_build_type_name_for_intr(data_type, type, sizeof(type));
2264 snprintf(name, sizeof(name), "llvm.amdgcn.global.atomic.%s.%s.p1%s.%s", op, type, type, type);
2265
2266 result = ac_build_intrinsic(&ctx->ac, name, data_type, params, arg_count, 0);
2267 result = ac_to_integer(&ctx->ac, result);
2268 } else {
2269 switch (instr->intrinsic) {
2270 case nir_intrinsic_global_atomic_add:
2271 case nir_intrinsic_global_atomic_add_amd:
2272 op = LLVMAtomicRMWBinOpAdd;
2273 break;
2274 case nir_intrinsic_global_atomic_umin:
2275 case nir_intrinsic_global_atomic_umin_amd:
2276 op = LLVMAtomicRMWBinOpUMin;
2277 break;
2278 case nir_intrinsic_global_atomic_umax:
2279 case nir_intrinsic_global_atomic_umax_amd:
2280 op = LLVMAtomicRMWBinOpUMax;
2281 break;
2282 case nir_intrinsic_global_atomic_imin:
2283 case nir_intrinsic_global_atomic_imin_amd:
2284 op = LLVMAtomicRMWBinOpMin;
2285 break;
2286 case nir_intrinsic_global_atomic_imax:
2287 case nir_intrinsic_global_atomic_imax_amd:
2288 op = LLVMAtomicRMWBinOpMax;
2289 break;
2290 case nir_intrinsic_global_atomic_and:
2291 case nir_intrinsic_global_atomic_and_amd:
2292 op = LLVMAtomicRMWBinOpAnd;
2293 break;
2294 case nir_intrinsic_global_atomic_or:
2295 case nir_intrinsic_global_atomic_or_amd:
2296 op = LLVMAtomicRMWBinOpOr;
2297 break;
2298 case nir_intrinsic_global_atomic_xor:
2299 case nir_intrinsic_global_atomic_xor_amd:
2300 op = LLVMAtomicRMWBinOpXor;
2301 break;
2302 case nir_intrinsic_global_atomic_exchange:
2303 case nir_intrinsic_global_atomic_exchange_amd:
2304 op = LLVMAtomicRMWBinOpXchg;
2305 break;
2306 default:
2307 unreachable("Invalid global atomic operation");
2308 }
2309
2310 result = ac_build_atomic_rmw(&ctx->ac, op, addr, ac_to_integer(&ctx->ac, data), sync_scope);
2311 }
2312
2313 if (ctx->ac.postponed_kill)
2314 ac_build_endif(&ctx->ac, 7002);
2315
2316 return result;
2317 }
2318
2319 static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2320 {
2321 struct waterfall_context wctx;
2322 LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);
2323
2324 LLVMValueRef ret;
2325 LLVMValueRef rsrc = rsrc_base;
2326 LLVMValueRef offset = get_src(ctx, instr->src[1]);
2327 int num_components = instr->num_components;
2328
2329 if (ctx->abi->load_ubo)
2330 rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);
2331
2332 /* Convert the load to 32-bit channels. */
2333 if (instr->dest.ssa.bit_size == 64)
2334 num_components *= 2;
2335 else if (instr->dest.ssa.bit_size == 16)
2336 num_components = DIV_ROUND_UP(num_components, 2);
2337 else if (instr->dest.ssa.bit_size == 8)
2338 num_components = DIV_ROUND_UP(num_components, 4);
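/* e.g. a 3-component 16-bit load becomes a 2-dword buffer load; the result
 * is bitcast to a 4 x i16 vector below and trimmed back to 3 components.
 */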
2339
2340 ret =
2341 ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL,
2342 ctx->ac.f32, 0, true, true);
2343
2344 /* Convert to the original type. */
2345 if (instr->dest.ssa.bit_size == 64) {
2346 ret = LLVMBuildBitCast(ctx->ac.builder, ret,
2347 LLVMVectorType(ctx->ac.i64, num_components / 2), "");
2348 } else if (instr->dest.ssa.bit_size == 16) {
2349 ret = LLVMBuildBitCast(ctx->ac.builder, ret,
2350 LLVMVectorType(ctx->ac.i16, num_components * 2), "");
2351 } else if (instr->dest.ssa.bit_size == 8) {
2352 ret = LLVMBuildBitCast(ctx->ac.builder, ret,
2353 LLVMVectorType(ctx->ac.i8, num_components * 4), "");
2354 }
2355
2356 ret = ac_trim_vector(&ctx->ac, ret, instr->num_components);
2357 ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
2358
2359 return exit_waterfall(ctx, &wctx, ret);
2360 }
2361
2362 static unsigned type_scalar_size_bytes(const struct glsl_type *type)
2363 {
2364 assert(glsl_type_is_vector_or_scalar(type) || glsl_type_is_matrix(type));
2365 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
2366 }
2367
2368 static void visit_store_output(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2369 {
2370 if (ctx->ac.postponed_kill) {
2371 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
2372 ac_build_ifcc(&ctx->ac, cond, 7002);
2373 }
2374
2375 unsigned base = nir_intrinsic_base(instr);
2376 unsigned writemask = nir_intrinsic_write_mask(instr);
2377 unsigned component = nir_intrinsic_component(instr);
2378 LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
2379 nir_src offset = *nir_get_io_offset_src(instr);
2380
2381 /* No indirect indexing is allowed here. */
2382 assert(nir_src_is_const(offset) && nir_src_as_uint(offset) == 0);
2383
2384 switch (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src))) {
2385 case 16:
2386 case 32:
2387 break;
2388 case 64:
2389 unreachable("64-bit IO should have been lowered to 32 bits");
2390 return;
2391 default:
2392 unreachable("unhandled store_output bit size");
2393 return;
2394 }
2395
2396 writemask <<= component;
2397
2398 for (unsigned chan = 0; chan < 8; chan++) {
2399 if (!(writemask & (1 << chan)))
2400 continue;
2401
2402 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
2403 LLVMValueRef output_addr = ctx->abi->outputs[base * 4 + chan];
2404
2405 if (!ctx->abi->is_16bit[base * 4 + chan] &&
2406 LLVMTypeOf(value) == ctx->ac.f16) {
2407 LLVMValueRef output, index;
2408
2409 /* Insert the 16-bit value into the low or high bits of the 32-bit output
2410 * using read-modify-write.
2411 */
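/* (high_16bits selects element 0 or 1 of the v2f16 view, i.e. the low or
 * the high half of the 32-bit output slot.)
 */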
2412 index = LLVMConstInt(ctx->ac.i32, nir_intrinsic_io_semantics(instr).high_16bits, 0);
2413 output = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.v2f16, output_addr, "");
2414 output = LLVMBuildInsertElement(ctx->ac.builder, output, value, index, "");
2415 value = LLVMBuildBitCast(ctx->ac.builder, output, ctx->ac.f32, "");
2416 }
2417 LLVMBuildStore(ctx->ac.builder, value, output_addr);
2418 }
2419
2420 if (ctx->ac.postponed_kill)
2421 ac_build_endif(&ctx->ac, 7002);
2422 }
2423
2424 static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
2425 {
2426 switch (dim) {
2427 case GLSL_SAMPLER_DIM_BUF:
2428 return 1;
2429 case GLSL_SAMPLER_DIM_1D:
2430 return array ? 2 : 1;
2431 case GLSL_SAMPLER_DIM_2D:
2432 return array ? 3 : 2;
2433 case GLSL_SAMPLER_DIM_MS:
2434 return array ? 4 : 3;
2435 case GLSL_SAMPLER_DIM_3D:
2436 case GLSL_SAMPLER_DIM_CUBE:
2437 return 3;
2438 case GLSL_SAMPLER_DIM_RECT:
2439 case GLSL_SAMPLER_DIM_SUBPASS:
2440 return 2;
2441 case GLSL_SAMPLER_DIM_SUBPASS_MS:
2442 return 3;
2443 default:
2444 break;
2445 }
2446 return 0;
2447 }
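/* e.g. a 2D array image is addressed as (x, y, layer) -> 3 components, and
 * a multisampled 2D array as (x, y, layer, sample) -> 4.
 */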
2448
2449 static LLVMValueRef adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
2450 LLVMValueRef coord_x, LLVMValueRef coord_y,
2451 LLVMValueRef coord_z, LLVMValueRef sample_index,
2452 LLVMValueRef fmask_desc_ptr)
2453 {
2454 if (!fmask_desc_ptr)
2455 return sample_index;
2456
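/* FMASK is the MSAA compression metadata: per pixel it maps each sample
 * index to the physical fragment slot that actually holds the data, so the
 * sample index has to be remapped before loading from a compressed
 * multisampled image.
 */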
2457 unsigned sample_chan = coord_z ? 3 : 2;
2458 LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
2459 addr[sample_chan] = sample_index;
2460
2461 ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
2462 return addr[sample_chan];
2463 }
2464
2465 static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
2466 {
2467 assert(instr->src[0].is_ssa);
2468 return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
2469 }
2470
2471 static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
2472 const nir_intrinsic_instr *instr,
2473 LLVMValueRef dynamic_index,
2474 enum ac_descriptor_type desc_type, bool write)
2475 {
2476 nir_deref_instr *deref_instr = instr->src[0].ssa->parent_instr->type == nir_instr_type_deref
2477 ? nir_instr_as_deref(instr->src[0].ssa->parent_instr)
2478 : NULL;
2479
2480 return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, dynamic_index, true, write);
2481 }
2482
2483 static void get_image_coords(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2484 LLVMValueRef dynamic_desc_index, struct ac_image_args *args,
2485 enum glsl_sampler_dim dim, bool is_array)
2486 {
2487 LLVMValueRef src0 = get_src(ctx, instr->src[1]);
2488 LLVMValueRef masks[] = {
2489 LLVMConstInt(ctx->ac.i32, 0, false),
2490 LLVMConstInt(ctx->ac.i32, 1, false),
2491 LLVMConstInt(ctx->ac.i32, 2, false),
2492 LLVMConstInt(ctx->ac.i32, 3, false),
2493 };
2494 LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
2495
2496 int count;
2497 ASSERTED bool add_frag_pos =
2498 (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2499 bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
2500 bool gfx9_1d = ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
2501 assert(!add_frag_pos && "Input attachments should be lowered by this point.");
2502 count = image_type_to_components_count(dim, is_array);
2503
2504 if (ctx->ac.gfx_level < GFX11 &&
2505 is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
2506 instr->intrinsic == nir_intrinsic_bindless_image_load ||
2507 instr->intrinsic == nir_intrinsic_image_deref_sparse_load ||
2508 instr->intrinsic == nir_intrinsic_bindless_image_sparse_load)) {
2509 LLVMValueRef fmask_load_address[3];
2510
2511 fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
2512 fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
2513 if (is_array)
2514 fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
2515 else
2516 fmask_load_address[2] = NULL;
2517
2518 sample_index = adjust_sample_index_using_fmask(
2519 &ctx->ac, fmask_load_address[0], fmask_load_address[1], fmask_load_address[2],
2520 sample_index, get_image_descriptor(ctx, instr, dynamic_desc_index, AC_DESC_FMASK, false));
2521 }
2522 if (count == 1 && !gfx9_1d) {
2523 if (instr->src[1].ssa->num_components)
2524 args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
2525 else
2526 args->coords[0] = src0;
2527 } else {
2528 int chan;
2529 if (is_ms)
2530 count--;
2531 for (chan = 0; chan < count; ++chan) {
2532 args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
2533 }
2534
2535 if (gfx9_1d) {
2536 if (is_array) {
2537 args->coords[2] = args->coords[1];
2538 args->coords[1] = ctx->ac.i32_0;
2539 } else
2540 args->coords[1] = ctx->ac.i32_0;
2541 count++;
2542 }
2543 if (ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_2D && !is_array) {
2544 /* The hw can't bind a slice of a 3D image as a 2D
2545 * image, because it ignores BASE_ARRAY if the target
2546 * is 3D. The workaround is to read BASE_ARRAY and set
2547 * it as the 3rd address operand for all 2D images.
2548 */
2549 LLVMValueRef first_layer, const5, mask;
2550
2551 const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
2552 mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
2553 first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
2554 first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");
2555
2556 args->coords[count] = first_layer;
2557 count++;
2558 }
2559
2560 if (is_ms) {
2561 args->coords[count] = sample_index;
2562 count++;
2563 }
2564 }
2565 }
2566
2567 static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
2568 struct waterfall_context *wctx,
2569 const nir_intrinsic_instr *instr)
2570 {
2571 nir_deref_instr *deref_instr = NULL;
2572
2573 if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
2574 deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
2575
2576 LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
2577 return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
2578 }
2579
2580 static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2581 bool bindless)
2582 {
2583 LLVMValueRef res;
2584
2585 enum glsl_sampler_dim dim;
2586 enum gl_access_qualifier access = nir_intrinsic_access(instr);
2587 bool is_array;
2588 if (bindless) {
2589 dim = nir_intrinsic_image_dim(instr);
2590 is_array = nir_intrinsic_image_array(instr);
2591 } else {
2592 const nir_deref_instr *image_deref = get_image_deref(instr);
2593 const struct glsl_type *type = image_deref->type;
2594 const nir_variable *var = nir_deref_instr_get_variable(image_deref);
2595 dim = glsl_get_sampler_dim(type);
2596 access |= var->data.access;
2597 is_array = glsl_sampler_type_is_array(type);
2598 }
2599
2600 struct waterfall_context wctx;
2601 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2602
2603 struct ac_image_args args = {0};
2604
2605 args.cache_policy = get_cache_policy(ctx, access, false, false);
2606 args.tfe = instr->intrinsic == nir_intrinsic_image_deref_sparse_load ||
2607 instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
2608
2609 if (dim == GLSL_SAMPLER_DIM_BUF) {
2610 unsigned num_channels = util_last_bit(nir_ssa_def_components_read(&instr->dest.ssa));
2611 if (instr->dest.ssa.bit_size == 64)
2612 num_channels = num_channels < 4 ? 2 : 4;
2613 LLVMValueRef rsrc, vindex;
2614
2615 rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false);
2616 vindex =
2617 LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2618
2619 assert(instr->dest.is_ssa);
2620 bool can_speculate = access & ACCESS_CAN_REORDER;
2621 res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
2622 args.cache_policy, can_speculate,
2623 instr->dest.ssa.bit_size == 16,
2624 args.tfe);
2625 res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4);
2626
2627 res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
2628 res = ac_to_integer(&ctx->ac, res);
2629 } else {
2630 bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
2631
2632 args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
2633 args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
2634 get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2635 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2636 if (!level_zero)
2637 args.lod = get_src(ctx, instr->src[3]);
2638 args.dmask = 15;
2639 args.attributes = AC_FUNC_ATTR_READONLY;
2640
2641 assert(instr->dest.is_ssa);
2642 args.d16 = instr->dest.ssa.bit_size == 16;
2643
2644 res = ac_build_image_opcode(&ctx->ac, &args);
2645 }
2646
2647 if (instr->dest.ssa.bit_size == 64) {
2648 LLVMValueRef code = NULL;
2649 if (args.tfe) {
2650 code = ac_llvm_extract_elem(&ctx->ac, res, 4);
2651 res = ac_trim_vector(&ctx->ac, res, 4);
2652 }
2653
2654 res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i64, 2), "");
2655 LLVMValueRef x = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_0, "");
2656 LLVMValueRef w = LLVMBuildExtractElement(ctx->ac.builder, res, ctx->ac.i32_1, "");
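/* Rebuild the 64-bit result as (x, 0, 0, w): the two loaded 64-bit
 * channels land in .x and .w (only one-channel R64 formats exist), and
 * .y/.z are zeroed.
 */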
2657
2658 if (code)
2659 code = LLVMBuildZExt(ctx->ac.builder, code, ctx->ac.i64, "");
2660 LLVMValueRef values[5] = {x, ctx->ac.i64_0, ctx->ac.i64_0, w, code};
2661 res = ac_build_gather_values(&ctx->ac, values, 4 + args.tfe);
2662 }
2663
2664 return exit_waterfall(ctx, &wctx, res);
2665 }
2666
2667 static void visit_image_store(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2668 bool bindless)
2669 {
2670 if (ctx->ac.postponed_kill) {
2671 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
2672 ac_build_ifcc(&ctx->ac, cond, 7003);
2673 }
2674
2675 enum glsl_sampler_dim dim;
2676 enum gl_access_qualifier access = nir_intrinsic_access(instr);
2677 bool is_array;
2678
2679 if (bindless) {
2680 dim = nir_intrinsic_image_dim(instr);
2681 is_array = nir_intrinsic_image_array(instr);
2682 } else {
2683 const nir_deref_instr *image_deref = get_image_deref(instr);
2684 const struct glsl_type *type = image_deref->type;
2685 const nir_variable *var = nir_deref_instr_get_variable(image_deref);
2686 dim = glsl_get_sampler_dim(type);
2687 access |= var->data.access;
2688 is_array = glsl_sampler_type_is_array(type);
2689 }
2690
2691 struct waterfall_context wctx;
2692 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2693
2694 bool writeonly_memory = access & ACCESS_NON_READABLE;
2695 struct ac_image_args args = {0};
2696
2697 args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);
2698
2699 LLVMValueRef src = get_src(ctx, instr->src[3]);
2700 if (instr->src[3].ssa->bit_size == 64) {
2701 /* only R64_UINT and R64_SINT supported */
2702 src = ac_llvm_extract_elem(&ctx->ac, src, 0);
2703 src = LLVMBuildBitCast(ctx->ac.builder, src, ctx->ac.v2f32, "");
2704 } else {
2705 src = ac_to_float(&ctx->ac, src);
2706 }
2707
2708 if (dim == GLSL_SAMPLER_DIM_BUF) {
2709 LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, true);
2710 unsigned src_channels = ac_get_llvm_num_components(src);
2711 LLVMValueRef vindex;
2712
2713 if (src_channels == 3)
2714 src = ac_build_expand_to_vec4(&ctx->ac, src, 3);
2715
2716 vindex =
2717 LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
2718
2719 ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex, ctx->ac.i32_0, args.cache_policy);
2720 } else {
2721 bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
2722
2723 args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
2724 args.data[0] = src;
2725 args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
2726 get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2727 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2728 if (!level_zero)
2729 args.lod = get_src(ctx, instr->src[4]);
2730 args.dmask = 15;
2731 args.d16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.data[0])) == 16;
2732
2733 ac_build_image_opcode(&ctx->ac, &args);
2734 }
2735
2736 exit_waterfall(ctx, &wctx, NULL);
2737 if (ctx->ac.postponed_kill)
2738 ac_build_endif(&ctx->ac, 7003);
2739 }
2740
2741 static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2742 bool bindless)
2743 {
2744 if (ctx->ac.postponed_kill) {
2745 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
2746 ac_build_ifcc(&ctx->ac, cond, 7004);
2747 }
2748
2749 LLVMValueRef params[7];
2750 int param_count = 0;
2751
2752 bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
2753 instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
2754 const char *atomic_name;
2755 char intrinsic_name[64];
2756 enum ac_atomic_op atomic_subop;
2757 ASSERTED int length;
2758
2759 enum glsl_sampler_dim dim;
2760 bool is_array;
2761 if (bindless) {
2762 dim = nir_intrinsic_image_dim(instr);
2763 is_array = nir_intrinsic_image_array(instr);
2764 } else {
2765 const struct glsl_type *type = get_image_deref(instr)->type;
2766 dim = glsl_get_sampler_dim(type);
2767 is_array = glsl_sampler_type_is_array(type);
2768 }
2769
2770 struct waterfall_context wctx;
2771 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2772
2773 switch (instr->intrinsic) {
2774 case nir_intrinsic_bindless_image_atomic_add:
2775 case nir_intrinsic_image_deref_atomic_add:
2776 atomic_name = "add";
2777 atomic_subop = ac_atomic_add;
2778 break;
2779 case nir_intrinsic_bindless_image_atomic_imin:
2780 case nir_intrinsic_image_deref_atomic_imin:
2781 atomic_name = "smin";
2782 atomic_subop = ac_atomic_smin;
2783 break;
2784 case nir_intrinsic_bindless_image_atomic_umin:
2785 case nir_intrinsic_image_deref_atomic_umin:
2786 atomic_name = "umin";
2787 atomic_subop = ac_atomic_umin;
2788 break;
2789 case nir_intrinsic_bindless_image_atomic_imax:
2790 case nir_intrinsic_image_deref_atomic_imax:
2791 atomic_name = "smax";
2792 atomic_subop = ac_atomic_smax;
2793 break;
2794 case nir_intrinsic_bindless_image_atomic_umax:
2795 case nir_intrinsic_image_deref_atomic_umax:
2796 atomic_name = "umax";
2797 atomic_subop = ac_atomic_umax;
2798 break;
2799 case nir_intrinsic_bindless_image_atomic_and:
2800 case nir_intrinsic_image_deref_atomic_and:
2801 atomic_name = "and";
2802 atomic_subop = ac_atomic_and;
2803 break;
2804 case nir_intrinsic_bindless_image_atomic_or:
2805 case nir_intrinsic_image_deref_atomic_or:
2806 atomic_name = "or";
2807 atomic_subop = ac_atomic_or;
2808 break;
2809 case nir_intrinsic_bindless_image_atomic_xor:
2810 case nir_intrinsic_image_deref_atomic_xor:
2811 atomic_name = "xor";
2812 atomic_subop = ac_atomic_xor;
2813 break;
2814 case nir_intrinsic_bindless_image_atomic_exchange:
2815 case nir_intrinsic_image_deref_atomic_exchange:
2816 atomic_name = "swap";
2817 atomic_subop = ac_atomic_swap;
2818 break;
2819 case nir_intrinsic_bindless_image_atomic_comp_swap:
2820 case nir_intrinsic_image_deref_atomic_comp_swap:
2821 atomic_name = "cmpswap";
2822 atomic_subop = 0; /* not used */
2823 break;
2824 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2825 case nir_intrinsic_image_deref_atomic_inc_wrap: {
2826 atomic_name = "inc";
2827 atomic_subop = ac_atomic_inc_wrap;
2828 break;
2829 }
2830 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2831 case nir_intrinsic_image_deref_atomic_dec_wrap:
2832 atomic_name = "dec";
2833 atomic_subop = ac_atomic_dec_wrap;
2834 break;
2835 case nir_intrinsic_image_deref_atomic_fmin:
2836 atomic_name = "fmin";
2837 atomic_subop = ac_atomic_fmin;
2838 break;
2839 case nir_intrinsic_image_deref_atomic_fmax:
2840 atomic_name = "fmax";
2841 atomic_subop = ac_atomic_fmax;
2842 break;
2843 default:
2844 abort();
2845 }
2846
2847 if (cmpswap)
2848 params[param_count++] = get_src(ctx, instr->src[4]);
2849 params[param_count++] = get_src(ctx, instr->src[3]);
2850
2851 if (atomic_subop == ac_atomic_fmin || atomic_subop == ac_atomic_fmax)
2852 params[0] = ac_to_float(&ctx->ac, params[0]);
2853
2854 LLVMValueRef result;
2855 if (dim == GLSL_SAMPLER_DIM_BUF) {
2856 params[param_count++] = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, true);
2857 params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
2858 ctx->ac.i32_0, ""); /* vindex */
2859 params[param_count++] = ctx->ac.i32_0; /* voffset */
2860 if (cmpswap && instr->dest.ssa.bit_size == 64) {
2861 result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
2862 } else {
2863 LLVMTypeRef data_type = LLVMTypeOf(params[0]);
2864 char type[8];
2865
2866 params[param_count++] = ctx->ac.i32_0; /* soffset */
2867 params[param_count++] = ctx->ac.i32_0; /* slc */
2868
2869 ac_build_type_name_for_intr(data_type, type, sizeof(type));
2870 length = snprintf(intrinsic_name, sizeof(intrinsic_name),
2871 "llvm.amdgcn.struct.buffer.atomic.%s.%s",
2872 atomic_name, type);
2873
2874 assert(length < sizeof(intrinsic_name));
2875 result = ac_build_intrinsic(&ctx->ac, intrinsic_name, LLVMTypeOf(params[0]), params, param_count, 0);
2876 }
2877 } else {
2878 struct ac_image_args args = {0};
2879 args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
2880 args.atomic = atomic_subop;
2881 args.data[0] = params[0];
2882 if (cmpswap)
2883 args.data[1] = params[1];
2884 args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
2885 get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
2886 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2887
2888 result = ac_build_image_opcode(&ctx->ac, &args);
2889 }
2890
2891 result = exit_waterfall(ctx, &wctx, result);
2892 if (ctx->ac.postponed_kill)
2893 ac_build_endif(&ctx->ac, 7004);
2894 return result;
2895 }
2896
2897 static LLVMValueRef visit_image_samples(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
2898 {
2899 struct waterfall_context wctx;
2900 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2901 LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
2902
2903 LLVMValueRef ret = ac_build_image_get_sample_count(&ctx->ac, rsrc);
2904 if (ctx->abi->robust_buffer_access) {
2905 LLVMValueRef dword1, is_null_descriptor;
2906
2907 /* Extract the second dword of the descriptor; if it's
2908 * all zero, then it's a null descriptor.
2909 */
2910 dword1 =
2911 LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, false), "");
2912 is_null_descriptor = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, dword1,
2913 LLVMConstInt(ctx->ac.i32, 0, false), "");
2914 ret = LLVMBuildSelect(ctx->ac.builder, is_null_descriptor, ctx->ac.i32_0, ret, "");
2915 }
2916
2917 return exit_waterfall(ctx, &wctx, ret);
2918 }
2919
2920 static LLVMValueRef visit_image_size(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
2921 bool bindless)
2922 {
2923 LLVMValueRef res;
2924
2925 enum glsl_sampler_dim dim;
2926 bool is_array;
2927 if (bindless) {
2928 dim = nir_intrinsic_image_dim(instr);
2929 is_array = nir_intrinsic_image_array(instr);
2930 } else {
2931 const struct glsl_type *type = get_image_deref(instr)->type;
2932 dim = glsl_get_sampler_dim(type);
2933 is_array = glsl_sampler_type_is_array(type);
2934 }
2935
2936 struct waterfall_context wctx;
2937 LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
2938
2939 if (dim == GLSL_SAMPLER_DIM_BUF) {
2940 res = get_buffer_size(
2941 ctx, get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false), true);
2942 } else {
2943
2944 struct ac_image_args args = {0};
2945
2946 args.dim = ac_get_image_dim(ctx->ac.gfx_level, dim, is_array);
2947 args.dmask = 0xf;
2948 args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
2949 args.opcode = ac_image_get_resinfo;
2950 assert(nir_src_as_uint(instr->src[1]) == 0);
2951 args.lod = ctx->ac.i32_0;
2952 args.attributes = AC_FUNC_ATTR_READNONE;
2953
2954 res = ac_build_image_opcode(&ctx->ac, &args);
2955
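/* On GFX9 the hw has no true 1D images; 1D arrays are lowered to 2D, so
 * resinfo reports the layer count in component 2. Move it into component 1,
 * where a 1D-array size query expects it. */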
2956 if (ctx->ac.gfx_level == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
2957 LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
2958 LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
2959 res = LLVMBuildInsertElement(ctx->ac.builder, res, layers, ctx->ac.i32_1, "");
2960 }
2961 }
2962 return exit_waterfall(ctx, &wctx, res);
2963 }
2964
2965 static void emit_discard(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2966 {
2967 LLVMValueRef cond;
2968
2969 if (instr->intrinsic == nir_intrinsic_discard_if ||
2970 instr->intrinsic == nir_intrinsic_terminate_if) {
2971 cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2972 } else {
2973 assert(instr->intrinsic == nir_intrinsic_discard ||
2974 instr->intrinsic == nir_intrinsic_terminate);
2975 cond = ctx->ac.i1false;
2976 }
2977
2978 ac_build_kill_if_false(&ctx->ac, cond);
2979 }
2980
2981 static void emit_demote(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
2982 {
2983 LLVMValueRef cond;
2984
2985 if (instr->intrinsic == nir_intrinsic_demote_if) {
2986 cond = LLVMBuildNot(ctx->ac.builder, get_src(ctx, instr->src[0]), "");
2987 } else {
2988 assert(instr->intrinsic == nir_intrinsic_demote);
2989 cond = ctx->ac.i1false;
2990 }
2991
2992 if (LLVM_VERSION_MAJOR >= 13) {
2993 /* This demotes the pixel if the condition is false. */
2994 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wqm.demote", ctx->ac.voidt, &cond, 1, 0);
2995 return;
2996 }
2997
2998 LLVMValueRef mask = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
2999 mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
3000 LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
3001
3002 if (!ctx->info->fs.needs_all_helper_invocations) {
3003 /* This is an optional optimization that only kills whole inactive quads.
3004 * It's not used when subgroup operations can possibly use all helper
3005 * invocations.
3006 */
3007 if (ctx->ac.flow->depth == 0) {
3008 ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));
3009 } else {
3010 /* amdgcn.wqm.vote doesn't work inside conditional blocks. Here's why.
3011 *
3012 * The problem is that kill(wqm.vote(0)) kills all active threads within
3013 * the block, which breaks the whole quad mode outside the block if
3014 * the conditional block has partially active quads (2x2 pixel blocks).
3015 * E.g. threads 0-3 are active outside the block, but only thread 0 is
3016 * active inside the block. Thread 0 shouldn't be killed by demote,
3017 * because threads 1-3 are still active outside the block.
3018 *
3019 * The fix for amdgcn.wqm.vote would be to return S_WQM((live & ~exec) | cond)
3020 * instead of S_WQM(cond).
3021 *
3022 * The less efficient workaround we do here is to save the kill condition
3023 * to a temporary (postponed_kill) and do kill(wqm.vote(cond)) after we
3024 * exit the conditional block.
3025 */
3026 ctx->ac.conditional_demote_seen = true;
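/* A minimal sketch of the workaround's second half, assuming the caller
 * applies it once control flow returns to the outermost block (that code
 * lives elsewhere, not in this function):
 *
 *    cond = LLVMBuildLoad2(builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
 *    ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));
 */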
3027 }
3028 }
3029 }
3030
3031 static LLVMValueRef visit_load_local_invocation_index(struct ac_nir_context *ctx)
3032 {
3033 if (ctx->args->tcs_wave_id.used) {
3034 return ac_build_imad(&ctx->ac,
3035 ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_wave_id), 0, 3),
3036 LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0),
3037 ac_get_thread_id(&ctx->ac));
3038 } else if (ctx->args->vs_rel_patch_id.used) {
3039 return ac_get_arg(&ctx->ac, ctx->args->vs_rel_patch_id);
3040 } else if (ctx->args->merged_wave_info.used) {
3041 /* Thread ID within the threadgroup for merged ESGS shaders. */
3042 LLVMValueRef wave_id = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 24, 4);
3043 LLVMValueRef wave_size = LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false);
3044 LLVMValueRef threads_before = LLVMBuildMul(ctx->ac.builder, wave_id, wave_size, "");
3045 return LLVMBuildAdd(ctx->ac.builder, threads_before, ac_get_thread_id(&ctx->ac), "");
3046 }
3047
3048 LLVMValueRef result;
3049 LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
3050 result = LLVMBuildAnd(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->tg_size),
3051 LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
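/* tg_size packs this wave's index in bits [11:6] (the same layout assumed by
 * visit_load_subgroup_id below), so masking with 0xfc0 yields wave_index * 64
 * directly; for wave32 the shift below halves that into wave_index * 32
 * before the thread ID is added. */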
3052
3053 if (ctx->ac.wave_size == 32)
3054 result = LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 1, false), "");
3055
3056 return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
3057 }
3058
3059 static LLVMValueRef visit_load_subgroup_id(struct ac_nir_context *ctx)
3060 {
3061 if (ctx->stage == MESA_SHADER_COMPUTE) {
3062 LLVMValueRef result;
3063 result = LLVMBuildAnd(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->tg_size),
3064 LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
3065 return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
3066 } else if (ctx->args->merged_wave_info.used) {
3067 return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 24, 4);
3068 } else {
3069 return LLVMConstInt(ctx->ac.i32, 0, false);
3070 }
3071 }
3072
3073 static LLVMValueRef visit_load_num_subgroups(struct ac_nir_context *ctx)
3074 {
3075 if (ctx->stage == MESA_SHADER_COMPUTE) {
3076 return LLVMBuildAnd(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->tg_size),
3077 LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
3078 } else if (ctx->args->merged_wave_info.used) {
3079 return ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 28, 4);
3080 } else {
3081 return LLVMConstInt(ctx->ac.i32, 1, false);
3082 }
3083 }
3084
3085 static LLVMValueRef visit_first_invocation(struct ac_nir_context *ctx)
3086 {
3087 LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
3088 const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";
3089
3090 /* The second argument is whether cttz(0) is poison; the ballot always includes the current lane and thus can't be zero, so we do not care. */
3091 LLVMValueRef args[] = {active_set, ctx->ac.i1false};
3092 LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr, ctx->ac.iN_wavemask, args, 2,
3093 AC_FUNC_ATTR_NOUNWIND | AC_FUNC_ATTR_READNONE);
3094
3095 return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
3096 }
3097
3098 static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
3099 {
3100 LLVMValueRef values[4], derived_ptr, index, ret;
3101 unsigned const_off = nir_intrinsic_base(instr);
3102
3103 LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
3104 LLVMValueRef ptr = get_memory_ptr_t(ctx, instr->src[0], elem_type, const_off);
3105
3106 for (int chan = 0; chan < instr->num_components; chan++) {
3107 index = LLVMConstInt(ctx->ac.i32, chan, 0);
3108 derived_ptr = LLVMBuildGEP2(ctx->ac.builder, elem_type, ptr, &index, 1, "");
3109 values[chan] = LLVMBuildLoad2(ctx->ac.builder, elem_type, derived_ptr, "");
3110 }
3111
3112 ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
3113
3114 return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
3115 }
3116
3117 static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
3118 {
3119 LLVMValueRef derived_ptr, data, index;
3120 LLVMBuilderRef builder = ctx->ac.builder;
3121
3122 unsigned const_off = nir_intrinsic_base(instr);
3123 LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
3124 LLVMValueRef ptr = get_memory_ptr_t(ctx, instr->src[1], elem_type, const_off);
3125 LLVMValueRef src = get_src(ctx, instr->src[0]);
3126
3127 int writemask = nir_intrinsic_write_mask(instr);
3128 for (int chan = 0; chan < 4; chan++) {
3129 if (!(writemask & (1 << chan))) {
3130 continue;
3131 }
3132 data = ac_llvm_extract_elem(&ctx->ac, src, chan);
3133 index = LLVMConstInt(ctx->ac.i32, chan, 0);
3134 derived_ptr = LLVMBuildGEP2(builder, elem_type, ptr, &index, 1, "");
3135 LLVMBuildStore(builder, data, derived_ptr);
3136 }
3137 }
3138
3139 static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
3140 const nir_intrinsic_instr *instr)
3141 {
3142 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], instr->dest.ssa.bit_size, 0);
3143
3144 LLVMValueRef values[2];
3145 uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
3146 unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
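/* For example, with offsets {0, 1} and st64 set, the two loads below read
 * lds[base] and lds[base + 64], i.e. ds_read2st64-style addressing (assuming
 * the backend merges the pair into a single instruction). */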
3147 for (unsigned i = 0; i < 2; i++) {
3148 LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
3149 LLVMValueRef derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
3150 values[i] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
3151 }
3152
3153 LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2);
3154 return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
3155 }
3156
3157 static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
3158 {
3159 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1], instr->src[0].ssa->bit_size, 0);
3160 LLVMValueRef src = get_src(ctx, instr->src[0]);
3161
3162 uint8_t offsets[] = {nir_intrinsic_offset0(instr), nir_intrinsic_offset1(instr)};
3163 unsigned stride = nir_intrinsic_st64(instr) ? 64 : 1;
3164 for (unsigned i = 0; i < 2; i++) {
3165 LLVMValueRef index = LLVMConstInt(ctx->ac.i32, offsets[i] * stride, 0);
3166 LLVMValueRef derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
3167 LLVMBuildStore(ctx->ac.builder, ac_llvm_extract_elem(&ctx->ac, src, i), derived_ptr);
3168 }
3169 }
3170
3171 static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr,
3172 LLVMValueRef ptr, int src_idx)
3173 {
3174 if (ctx->ac.postponed_kill) {
3175 LLVMValueRef cond = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.i1, ctx->ac.postponed_kill, "");
3176 ac_build_ifcc(&ctx->ac, cond, 7005);
3177 }
3178
3179 LLVMValueRef result;
3180 LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
3181
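/* "workgroup-one-as" is the workgroup sync scope restricted to a single
 * address space (the LDS accessed here), so the atomic does not have to be
 * ordered against global memory. */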
3182 const char *sync_scope = "workgroup-one-as";
3183
3184 if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap) {
3185 LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
3186 result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
3187 result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
3188 } else if (instr->intrinsic == nir_intrinsic_shared_atomic_fmin ||
3189 instr->intrinsic == nir_intrinsic_shared_atomic_fmax) {
3190 const char *op = instr->intrinsic == nir_intrinsic_shared_atomic_fmin ? "fmin" : "fmax";
3191 char name[64], type[8];
3192 LLVMValueRef params[5];
3193 LLVMTypeRef src_type;
3194 int arg_count = 0;
3195
3196 src = ac_to_float(&ctx->ac, src);
3197 src_type = LLVMTypeOf(src);
3198
3199 LLVMTypeRef ptr_type =
3200 LLVMPointerType(src_type, LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
3201 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
3202
3203 params[arg_count++] = ptr;
3204 params[arg_count++] = src;
3205 params[arg_count++] = ctx->ac.i32_0;
3206 params[arg_count++] = ctx->ac.i32_0;
3207 params[arg_count++] = ctx->ac.i1false;
3208
3209 ac_build_type_name_for_intr(src_type, type, sizeof(type));
3210 snprintf(name, sizeof(name), "llvm.amdgcn.ds.%s.%s", op, type);
3211
3212 result = ac_build_intrinsic(&ctx->ac, name, src_type, params, arg_count, 0);
3213 result = ac_to_integer(&ctx->ac, result);
3214 } else {
3215 LLVMAtomicRMWBinOp op;
3216 switch (instr->intrinsic) {
3217 case nir_intrinsic_shared_atomic_add:
3218 op = LLVMAtomicRMWBinOpAdd;
3219 break;
3220 case nir_intrinsic_shared_atomic_umin:
3221 op = LLVMAtomicRMWBinOpUMin;
3222 break;
3223 case nir_intrinsic_shared_atomic_umax:
3224 op = LLVMAtomicRMWBinOpUMax;
3225 break;
3226 case nir_intrinsic_shared_atomic_imin:
3227 op = LLVMAtomicRMWBinOpMin;
3228 break;
3229 case nir_intrinsic_shared_atomic_imax:
3230 op = LLVMAtomicRMWBinOpMax;
3231 break;
3232 case nir_intrinsic_shared_atomic_and:
3233 op = LLVMAtomicRMWBinOpAnd;
3234 break;
3235 case nir_intrinsic_shared_atomic_or:
3236 op = LLVMAtomicRMWBinOpOr;
3237 break;
3238 case nir_intrinsic_shared_atomic_xor:
3239 op = LLVMAtomicRMWBinOpXor;
3240 break;
3241 case nir_intrinsic_shared_atomic_exchange:
3242 op = LLVMAtomicRMWBinOpXchg;
3243 break;
3244 case nir_intrinsic_shared_atomic_fadd:
3245 op = LLVMAtomicRMWBinOpFAdd;
3246 break;
3247 default:
3248 return NULL;
3249 }
3250
3251 LLVMValueRef val;
3252
3253 if (instr->intrinsic == nir_intrinsic_shared_atomic_fadd) {
3254 val = ac_to_float(&ctx->ac, src);
3255
3256 LLVMTypeRef ptr_type =
3257 LLVMPointerType(LLVMTypeOf(val), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
3258 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
3259 } else {
3260 val = ac_to_integer(&ctx->ac, src);
3261 }
3262
3263 result = ac_build_atomic_rmw(&ctx->ac, op, ptr, val, sync_scope);
3264
3265 if (instr->intrinsic == nir_intrinsic_shared_atomic_fadd ||
3266 instr->intrinsic == nir_intrinsic_deref_atomic_fadd) {
3267 result = ac_to_integer(&ctx->ac, result);
3268 }
3269 }
3270
3271 if (ctx->ac.postponed_kill)
3272 ac_build_endif(&ctx->ac, 7005);
3273 return result;
3274 }
3275
3276 static LLVMValueRef load_sample_pos(struct ac_nir_context *ctx)
3277 {
3278 LLVMValueRef values[2];
3279 LLVMValueRef pos[2];
3280
3281 pos[0] = ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
3282 pos[1] = ac_to_float(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));
3283
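/* When the shader runs per-sample, frag_pos.xy points at the sample
 * location, so its fractional part is the sample position within the pixel
 * in [0, 1). */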
3284 values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
3285 values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
3286 return ac_build_gather_values(&ctx->ac, values, 2);
3287 }
3288
3289 static LLVMValueRef lookup_interp_param(struct ac_nir_context *ctx, enum glsl_interp_mode interp,
3290 unsigned location)
3291 {
3292 switch (interp) {
3293 case INTERP_MODE_FLAT:
3294 default:
3295 return NULL;
3296 case INTERP_MODE_SMOOTH:
3297 case INTERP_MODE_NONE:
3298 if (location == INTERP_CENTER)
3299 return ac_get_arg(&ctx->ac, ctx->args->persp_center);
3300 else if (location == INTERP_CENTROID)
3301 return ctx->abi->persp_centroid;
3302 else if (location == INTERP_SAMPLE)
3303 return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
3304 break;
3305 case INTERP_MODE_NOPERSPECTIVE:
3306 if (location == INTERP_CENTER)
3307 return ac_get_arg(&ctx->ac, ctx->args->linear_center);
3308 else if (location == INTERP_CENTROID)
3309 return ctx->abi->linear_centroid;
3310 else if (location == INTERP_SAMPLE)
3311 return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
3312 break;
3313 }
3314 return NULL;
3315 }
3316
3317 static LLVMValueRef barycentric_center(struct ac_nir_context *ctx, unsigned mode)
3318 {
3319 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
3320 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
3321 }
3322
3323 static LLVMValueRef barycentric_offset(struct ac_nir_context *ctx, unsigned mode,
3324 LLVMValueRef offset)
3325 {
3326 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
3327 LLVMValueRef src_c0 =
3328 ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
3329 LLVMValueRef src_c1 =
3330 ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));
3331
3332 LLVMValueRef ij_out[2];
3333 LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);
3334
3335 /*
3336 * take the I then J parameters, and the DDX/Y for it, and
3337 * calculate the IJ inputs for the interpolator.
3338 * temp1 = ddx * offset/sample.x + I;
3339 * interp_param.I = ddy * offset/sample.y + temp1;
3340 * temp1 = ddx * offset/sample.x + J;
3341 * interp_param.J = ddy * offset/sample.y + temp1;
3342 */
3343 for (unsigned i = 0; i < 2; i++) {
3344 LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
3345 LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
3346 LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder, ddxy_out, ix_ll, "");
3347 LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder, ddxy_out, iy_ll, "");
3348 LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder, interp_param, ix_ll, "");
3349 LLVMValueRef temp1, temp2;
3350
3351 interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el, ctx->ac.f32, "");
3352
3353 temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
3354 temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);
3355
3356 ij_out[i] = LLVMBuildBitCast(ctx->ac.builder, temp2, ctx->ac.i32, "");
3357 }
3358 interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
3359 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
3360 }
3361
3362 static LLVMValueRef barycentric_centroid(struct ac_nir_context *ctx, unsigned mode)
3363 {
3364 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
3365 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
3366 }
3367
3368 static LLVMValueRef barycentric_at_sample(struct ac_nir_context *ctx, unsigned mode,
3369 LLVMValueRef sample_id)
3370 {
3371 if (ctx->abi->interp_at_sample_force_center)
3372 return barycentric_center(ctx, mode);
3373
3374 LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);
3375
3376 /* Fetch the sample position for this sample ID. */
3377 LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);
3378
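/* The returned position is in [0, 1); subtracting 0.5 below converts it into
 * an offset from the pixel center, which is the form barycentric_offset()
 * expects. */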
3379 LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
3380 src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
3381 LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
3382 src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
3383 LLVMValueRef coords[] = {src_c0, src_c1};
3384 LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);
3385
3386 return barycentric_offset(ctx, mode, offset);
3387 }
3388
3389 static LLVMValueRef barycentric_sample(struct ac_nir_context *ctx, unsigned mode)
3390 {
3391 LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
3392 return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
3393 }
3394
3395 static LLVMValueRef barycentric_model(struct ac_nir_context *ctx)
3396 {
3397 return LLVMBuildBitCast(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->pull_model),
3398 ctx->ac.v3i32, "");
3399 }
3400
3401 static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx, LLVMValueRef interp_param,
3402 unsigned index, unsigned comp_start,
3403 unsigned num_components, unsigned bitsize,
3404 bool high_16bits)
3405 {
3406 LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
3407 LLVMValueRef interp_param_f;
3408
3409 interp_param_f = LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2f32, "");
3410 LLVMValueRef i = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_0, "");
3411 LLVMValueRef j = LLVMBuildExtractElement(ctx->ac.builder, interp_param_f, ctx->ac.i32_1, "");
3412
3413 /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
3414 if (ctx->verified_interp && !_mesa_hash_table_search(ctx->verified_interp, interp_param)) {
3415 LLVMValueRef cond = ac_build_is_inf_or_nan(&ctx->ac, i);
3416 ac_build_kill_if_false(&ctx->ac, LLVMBuildNot(ctx->ac.builder, cond, ""));
3417 _mesa_hash_table_insert(ctx->verified_interp, interp_param, interp_param);
3418 }
3419
3420 LLVMValueRef values[4];
3421 assert(bitsize == 16 || bitsize == 32);
3422 for (unsigned comp = 0; comp < num_components; comp++) {
3423 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
3424 if (bitsize == 16) {
3425 values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
3426 ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j,
3427 high_16bits);
3428 } else {
3429 values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
3430 ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
3431 }
3432 }
3433
3434 return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
3435 }
3436
3437 static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *instr,
3438 bool is_output)
3439 {
3440 LLVMValueRef values[8];
3441 LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);
3442 LLVMTypeRef component_type;
3443 unsigned base = nir_intrinsic_base(instr);
3444 unsigned component = nir_intrinsic_component(instr);
3445 unsigned count = instr->dest.ssa.num_components;
3446 nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr);
3447 LLVMValueRef vertex_index = vertex_index_src ? get_src(ctx, *vertex_index_src) : NULL;
3448 nir_src offset = *nir_get_io_offset_src(instr);
3449 LLVMValueRef indir_index = NULL;
3450
3451 switch (instr->dest.ssa.bit_size) {
3452 case 16:
3453 case 32:
3454 break;
3455 case 64:
3456 unreachable("64-bit IO should have been lowered");
3457 return NULL;
3458 default:
3459 unreachable("unhandled load type");
3460 return NULL;
3461 }
3462
3463 if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
3464 component_type = LLVMGetElementType(dest_type);
3465 else
3466 component_type = dest_type;
3467
3468 if (nir_src_is_const(offset))
3469 assert(nir_src_as_uint(offset) == 0);
3470 else
3471 indir_index = get_src(ctx, offset);
3472
3473 if (ctx->stage == MESA_SHADER_TESS_CTRL) {
3474 LLVMValueRef result = ctx->abi->load_tess_varyings(ctx->abi, component_type,
3475 vertex_index, indir_index,
3476 base, component,
3477 count, !is_output);
3478 if (instr->dest.ssa.bit_size == 16) {
3479 result = ac_to_integer(&ctx->ac, result);
3480 result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
3481 }
3482 return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
3483 }
3484
3485 /* No indirect indexing is allowed after this point. */
3486 assert(!indir_index);
3487
3488 if (ctx->stage == MESA_SHADER_FRAGMENT && is_output &&
3489 nir_intrinsic_io_semantics(instr).fb_fetch_output)
3490 return ctx->abi->emit_fbfetch(ctx->abi);
3491
3492 if (ctx->stage == MESA_SHADER_VERTEX && !is_output)
3493 return ctx->abi->load_inputs(ctx->abi, base, component, count, 0, component_type);
3494
3495 /* Other non-fragment cases have outputs in temporaries. */
3496 if (is_output && (ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL)) {
3497 assert(is_output);
3498
3499 for (unsigned chan = component; chan < count + component; chan++)
3500 values[chan] = LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32,
3501 ctx->abi->outputs[base * 4 + chan], "");
3502
3503 LLVMValueRef result = ac_build_varying_gather_values(&ctx->ac, values, count, component);
3504 return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
3505 }
3506
3507 /* Fragment shader inputs. */
3508 assert(ctx->stage == MESA_SHADER_FRAGMENT);
3509 unsigned vertex_id = 2; /* P0 */
3510
3511 if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
3512 nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);
3513
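/* Map the API vertex index onto the hw encoding used by
 * ac_build_fs_interp_mov, where (as assumed here) 2 selects P0, 0 selects
 * P10 and 1 selects P20. */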
3514 switch (src0[0].i32) {
3515 case 0:
3516 vertex_id = 2;
3517 break;
3518 case 1:
3519 vertex_id = 0;
3520 break;
3521 case 2:
3522 vertex_id = 1;
3523 break;
3524 default:
3525 unreachable("Invalid vertex index");
3526 }
3527 }
3528
3529 LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, base, false);
3530
3531 for (unsigned chan = 0; chan < count; chan++) {
3532 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
3533 values[chan] =
3534 ac_build_fs_interp_mov(&ctx->ac, LLVMConstInt(ctx->ac.i32, vertex_id, false), llvm_chan,
3535 attr_number, ac_get_arg(&ctx->ac, ctx->args->prim_mask));
3536 values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
3537 if (instr->dest.ssa.bit_size == 16 &&
3538 nir_intrinsic_io_semantics(instr).high_16bits)
3539 values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), "");
3540 values[chan] =
3541 LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
3542 instr->dest.ssa.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
3543 }
3544
3545 LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count);
3546 return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
3547 }
3548
3549 static LLVMValueRef
3550 emit_load_frag_shading_rate(struct ac_nir_context *ctx)
3551 {
3552 LLVMValueRef x_rate, y_rate, cond;
3553
3554 /* VRS Rate X = Ancillary[2:3]
3555 * VRS Rate Y = Ancillary[4:5]
3556 */
3557 x_rate = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 2, 2);
3558 y_rate = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 4, 2);
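/* The two selects below produce SPIR-V FragmentShadingRateKHR mask bits
 * (Horizontal2Pixels = 4, Vertical2Pixels = 1), so OR-ing them yields the
 * encoding the builtin expects. */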
3559
3560 /* xRate = xRate == 0x1 ? Horizontal2Pixels : None. */
3561 cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, x_rate, ctx->ac.i32_1, "");
3562 x_rate = LLVMBuildSelect(ctx->ac.builder, cond,
3563 LLVMConstInt(ctx->ac.i32, 4, false), ctx->ac.i32_0, "");
3564
3565 /* yRate = yRate == 0x1 ? Vertical2Pixels : None. */
3566 cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, y_rate, ctx->ac.i32_1, "");
3567 y_rate = LLVMBuildSelect(ctx->ac.builder, cond,
3568 LLVMConstInt(ctx->ac.i32, 1, false), ctx->ac.i32_0, "");
3569
3570 return LLVMBuildOr(ctx->ac.builder, x_rate, y_rate, "");
3571 }
3572
3573 static LLVMValueRef
3574 emit_load_frag_coord(struct ac_nir_context *ctx)
3575 {
3576 LLVMValueRef values[4] = {
3577 ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]), ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
3578 ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
3579 ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))};
3580
3581 return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
3582 }
3583
3584 static void visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *instr)
3585 {
3586 LLVMValueRef result = NULL;
3587
3588 switch (instr->intrinsic) {
3589 case nir_intrinsic_ballot:
3590 result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
3591 if (ctx->ac.ballot_mask_bits > ctx->ac.wave_size)
3592 result = LLVMBuildZExt(ctx->ac.builder, result, ctx->ac.iN_ballotmask, "");
3593 break;
3594 case nir_intrinsic_read_invocation:
3595 result =
3596 ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
3597 break;
3598 case nir_intrinsic_read_first_invocation:
3599 result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
3600 break;
3601 case nir_intrinsic_load_subgroup_invocation:
3602 result = ac_get_thread_id(&ctx->ac);
3603 break;
3604 case nir_intrinsic_load_workgroup_id: {
3605 LLVMValueRef values[3];
3606
3607 for (int i = 0; i < 3; i++) {
3608 values[i] = ctx->args->workgroup_ids[i].used
3609 ? ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i])
3610 : ctx->ac.i32_0;
3611 }
3612
3613 result = ac_build_gather_values(&ctx->ac, values, 3);
3614 break;
3615 }
3616 case nir_intrinsic_load_base_vertex:
3617 case nir_intrinsic_load_first_vertex:
3618 case nir_intrinsic_load_workgroup_size:
3619 case nir_intrinsic_load_tess_level_outer_default:
3620 case nir_intrinsic_load_tess_level_inner_default:
3621 case nir_intrinsic_load_tess_rel_patch_id_amd:
3622 case nir_intrinsic_load_patch_vertices_in:
3623 case nir_intrinsic_load_sample_mask_in:
3624 case nir_intrinsic_load_ring_tess_factors_amd:
3625 case nir_intrinsic_load_ring_tess_offchip_amd:
3626 case nir_intrinsic_load_ring_tess_offchip_offset_amd:
3627 case nir_intrinsic_load_ring_esgs_amd:
3628 case nir_intrinsic_load_ring_es2gs_offset_amd:
3629 case nir_intrinsic_load_lshs_vertex_stride_amd:
3630 case nir_intrinsic_load_tcs_num_patches_amd:
3631 case nir_intrinsic_load_hs_out_patch_data_offset_amd:
3632 result = ctx->abi->intrinsic_load(ctx->abi, instr->intrinsic);
3633 break;
3634 case nir_intrinsic_load_vertex_id_zero_base: {
3635 result = ctx->vertex_id_replaced ? ctx->vertex_id_replaced : ctx->abi->vertex_id;
3636 break;
3637 }
3638 case nir_intrinsic_load_local_invocation_id: {
3639 LLVMValueRef ids = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
3640
3641 if (LLVMGetTypeKind(LLVMTypeOf(ids)) == LLVMIntegerTypeKind) {
3642 /* Thread IDs are packed in VGPR0, 10 bits per component. */
3643 LLVMValueRef id[3];
3644
3645 for (unsigned i = 0; i < 3; i++)
3646 id[i] = ac_unpack_param(&ctx->ac, ids, i * 10, 10);
3647
3648 result = ac_build_gather_values(&ctx->ac, id, 3);
3649 } else {
3650 result = ids;
3651 }
3652 break;
3653 }
3654 case nir_intrinsic_load_base_instance:
3655 result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
3656 break;
3657 case nir_intrinsic_load_draw_id:
3658 result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
3659 break;
3660 case nir_intrinsic_load_view_index:
3661 result = ac_get_arg(&ctx->ac, ctx->args->view_index);
3662 break;
3663 case nir_intrinsic_load_invocation_id:
3664 if (ctx->stage == MESA_SHADER_TESS_CTRL) {
3665 result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids), 8, 5);
3666 } else {
3667 if (ctx->ac.gfx_level >= GFX10) {
3668 result =
3669 LLVMBuildAnd(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id),
3670 LLVMConstInt(ctx->ac.i32, 127, 0), "");
3671 } else {
3672 result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
3673 }
3674 }
3675 break;
3676 case nir_intrinsic_load_primitive_id:
3677 if (ctx->stage == MESA_SHADER_GEOMETRY) {
3678 result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
3679 } else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
3680 result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
3681 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
3682 result = ctx->tes_patch_id_replaced ? ctx->tes_patch_id_replaced
3683 : ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
3684 } else if (ctx->stage == MESA_SHADER_VERTEX) {
3685 if (ctx->args->vs_prim_id.used)
3686 result = ac_get_arg(&ctx->ac, ctx->args->vs_prim_id); /* legacy */
3687 else
3688 result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id); /* NGG */
3689 } else
3690 fprintf(stderr, "Unknown primitive id intrinsic: %d\n", ctx->stage);
3691 break;
3692 case nir_intrinsic_load_sample_id:
3693 result = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ancillary), 8, 4);
3694 break;
3695 case nir_intrinsic_load_sample_pos:
3696 result = load_sample_pos(ctx);
3697 break;
3698 case nir_intrinsic_load_frag_coord:
3699 result = emit_load_frag_coord(ctx);
3700 break;
3701 case nir_intrinsic_load_frag_shading_rate:
3702 result = emit_load_frag_shading_rate(ctx);
3703 break;
3704 case nir_intrinsic_load_front_face:
3705 result = emit_i2b(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->front_face));
3706 break;
3707 case nir_intrinsic_load_helper_invocation:
3708 result = ac_build_load_helper_invocation(&ctx->ac);
3709 break;
3710 case nir_intrinsic_is_helper_invocation:
3711 result = ac_build_is_helper_invocation(&ctx->ac);
3712 break;
3713 case nir_intrinsic_load_color0:
3714 result = ctx->abi->color0;
3715 break;
3716 case nir_intrinsic_load_color1:
3717 result = ctx->abi->color1;
3718 break;
3719 case nir_intrinsic_load_user_data_amd:
3720 assert(LLVMTypeOf(ctx->abi->user_data) == ctx->ac.v4i32);
3721 result = ctx->abi->user_data;
3722 break;
3723 case nir_intrinsic_load_instance_id:
3724 result = ctx->instance_id_replaced ? ctx->instance_id_replaced : ctx->abi->instance_id;
3725 break;
3726 case nir_intrinsic_load_num_workgroups:
3727 if (ctx->abi->load_grid_size_from_user_sgpr) {
3728 result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
3729 } else {
3730 LLVMTypeRef ptr_type = ac_array_in_const_addr_space(ctx->ac.v3i32);
3731 LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
3732 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
3733 result = ac_build_load_invariant(&ctx->ac, ptr, ctx->ac.i32_0);
3734 }
3735 break;
3736 case nir_intrinsic_load_local_invocation_index:
3737 result = visit_load_local_invocation_index(ctx);
3738 break;
3739 case nir_intrinsic_load_subgroup_id:
3740 result = visit_load_subgroup_id(ctx);
3741 break;
3742 case nir_intrinsic_load_num_subgroups:
3743 result = visit_load_num_subgroups(ctx);
3744 break;
3745 case nir_intrinsic_first_invocation:
3746 result = visit_first_invocation(ctx);
3747 break;
3748 case nir_intrinsic_load_push_constant:
3749 result = visit_load_push_constant(ctx, instr);
3750 break;
3751 case nir_intrinsic_store_ssbo:
3752 visit_store_ssbo(ctx, instr);
3753 break;
3754 case nir_intrinsic_load_ssbo:
3755 result = visit_load_buffer(ctx, instr);
3756 break;
3757 case nir_intrinsic_load_global_constant:
3758 case nir_intrinsic_load_global:
3759 case nir_intrinsic_load_global_amd:
3760 result = visit_load_global(ctx, instr);
3761 break;
3762 case nir_intrinsic_store_global:
3763 case nir_intrinsic_store_global_amd:
3764 visit_store_global(ctx, instr);
3765 break;
3766 case nir_intrinsic_global_atomic_add:
3767 case nir_intrinsic_global_atomic_imin:
3768 case nir_intrinsic_global_atomic_umin:
3769 case nir_intrinsic_global_atomic_imax:
3770 case nir_intrinsic_global_atomic_umax:
3771 case nir_intrinsic_global_atomic_and:
3772 case nir_intrinsic_global_atomic_or:
3773 case nir_intrinsic_global_atomic_xor:
3774 case nir_intrinsic_global_atomic_exchange:
3775 case nir_intrinsic_global_atomic_comp_swap:
3776 case nir_intrinsic_global_atomic_fmin:
3777 case nir_intrinsic_global_atomic_fmax:
3778 case nir_intrinsic_global_atomic_add_amd:
3779 case nir_intrinsic_global_atomic_imin_amd:
3780 case nir_intrinsic_global_atomic_umin_amd:
3781 case nir_intrinsic_global_atomic_imax_amd:
3782 case nir_intrinsic_global_atomic_umax_amd:
3783 case nir_intrinsic_global_atomic_and_amd:
3784 case nir_intrinsic_global_atomic_or_amd:
3785 case nir_intrinsic_global_atomic_xor_amd:
3786 case nir_intrinsic_global_atomic_exchange_amd:
3787 case nir_intrinsic_global_atomic_comp_swap_amd:
3788 case nir_intrinsic_global_atomic_fmin_amd:
3789 case nir_intrinsic_global_atomic_fmax_amd:
3790 result = visit_global_atomic(ctx, instr);
3791 break;
3792 case nir_intrinsic_ssbo_atomic_add:
3793 case nir_intrinsic_ssbo_atomic_imin:
3794 case nir_intrinsic_ssbo_atomic_umin:
3795 case nir_intrinsic_ssbo_atomic_imax:
3796 case nir_intrinsic_ssbo_atomic_umax:
3797 case nir_intrinsic_ssbo_atomic_and:
3798 case nir_intrinsic_ssbo_atomic_or:
3799 case nir_intrinsic_ssbo_atomic_xor:
3800 case nir_intrinsic_ssbo_atomic_exchange:
3801 case nir_intrinsic_ssbo_atomic_comp_swap:
3802 case nir_intrinsic_ssbo_atomic_fmin:
3803 case nir_intrinsic_ssbo_atomic_fmax:
3804 result = visit_atomic_ssbo(ctx, instr);
3805 break;
3806 case nir_intrinsic_load_ubo:
3807 result = visit_load_ubo_buffer(ctx, instr);
3808 break;
3809 case nir_intrinsic_get_ssbo_size:
3810 result = visit_get_ssbo_size(ctx, instr);
3811 break;
3812 case nir_intrinsic_load_input:
3813 case nir_intrinsic_load_input_vertex:
3814 case nir_intrinsic_load_per_vertex_input:
3815 result = visit_load(ctx, instr, false);
3816 break;
3817 case nir_intrinsic_load_output:
3818 case nir_intrinsic_load_per_vertex_output:
3819 result = visit_load(ctx, instr, true);
3820 break;
3821 case nir_intrinsic_store_output:
3822 case nir_intrinsic_store_per_vertex_output:
3823 visit_store_output(ctx, instr);
3824 break;
3825 case nir_intrinsic_load_shared:
3826 result = visit_load_shared(ctx, instr);
3827 break;
3828 case nir_intrinsic_store_shared:
3829 visit_store_shared(ctx, instr);
3830 break;
3831 case nir_intrinsic_load_shared2_amd:
3832 result = visit_load_shared2_amd(ctx, instr);
3833 break;
3834 case nir_intrinsic_store_shared2_amd:
3835 visit_store_shared2_amd(ctx, instr);
3836 break;
3837 case nir_intrinsic_bindless_image_samples:
3838 case nir_intrinsic_image_deref_samples:
3839 result = visit_image_samples(ctx, instr);
3840 break;
3841 case nir_intrinsic_bindless_image_load:
3842 case nir_intrinsic_bindless_image_sparse_load:
3843 result = visit_image_load(ctx, instr, true);
3844 break;
3845 case nir_intrinsic_image_deref_load:
3846 case nir_intrinsic_image_deref_sparse_load:
3847 result = visit_image_load(ctx, instr, false);
3848 break;
3849 case nir_intrinsic_bindless_image_store:
3850 visit_image_store(ctx, instr, true);
3851 break;
3852 case nir_intrinsic_image_deref_store:
3853 visit_image_store(ctx, instr, false);
3854 break;
3855 case nir_intrinsic_bindless_image_atomic_add:
3856 case nir_intrinsic_bindless_image_atomic_imin:
3857 case nir_intrinsic_bindless_image_atomic_umin:
3858 case nir_intrinsic_bindless_image_atomic_imax:
3859 case nir_intrinsic_bindless_image_atomic_umax:
3860 case nir_intrinsic_bindless_image_atomic_and:
3861 case nir_intrinsic_bindless_image_atomic_or:
3862 case nir_intrinsic_bindless_image_atomic_xor:
3863 case nir_intrinsic_bindless_image_atomic_exchange:
3864 case nir_intrinsic_bindless_image_atomic_comp_swap:
3865 case nir_intrinsic_bindless_image_atomic_inc_wrap:
3866 case nir_intrinsic_bindless_image_atomic_dec_wrap:
3867 result = visit_image_atomic(ctx, instr, true);
3868 break;
3869 case nir_intrinsic_image_deref_atomic_add:
3870 case nir_intrinsic_image_deref_atomic_imin:
3871 case nir_intrinsic_image_deref_atomic_umin:
3872 case nir_intrinsic_image_deref_atomic_imax:
3873 case nir_intrinsic_image_deref_atomic_umax:
3874 case nir_intrinsic_image_deref_atomic_and:
3875 case nir_intrinsic_image_deref_atomic_or:
3876 case nir_intrinsic_image_deref_atomic_xor:
3877 case nir_intrinsic_image_deref_atomic_exchange:
3878 case nir_intrinsic_image_deref_atomic_comp_swap:
3879 case nir_intrinsic_image_deref_atomic_inc_wrap:
3880 case nir_intrinsic_image_deref_atomic_dec_wrap:
3881 case nir_intrinsic_image_deref_atomic_fmin:
3882 case nir_intrinsic_image_deref_atomic_fmax:
3883 result = visit_image_atomic(ctx, instr, false);
3884 break;
3885 case nir_intrinsic_bindless_image_size:
3886 result = visit_image_size(ctx, instr, true);
3887 break;
3888 case nir_intrinsic_image_deref_size:
3889 result = visit_image_size(ctx, instr, false);
3890 break;
3891 case nir_intrinsic_shader_clock:
3892 result = ac_build_shader_clock(&ctx->ac, nir_intrinsic_memory_scope(instr));
3893 break;
3894 case nir_intrinsic_discard:
3895 case nir_intrinsic_discard_if:
3896 case nir_intrinsic_terminate:
3897 case nir_intrinsic_terminate_if:
3898 emit_discard(ctx, instr);
3899 break;
3900 case nir_intrinsic_demote:
3901 case nir_intrinsic_demote_if:
3902 emit_demote(ctx, instr);
3903 break;
3904 case nir_intrinsic_memory_barrier:
3905 case nir_intrinsic_group_memory_barrier:
3906 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
3907 break;
3908 case nir_intrinsic_memory_barrier_buffer:
3909 case nir_intrinsic_memory_barrier_image:
3910 ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
3911 break;
3912 case nir_intrinsic_memory_barrier_shared:
3913 case nir_intrinsic_memory_barrier_tcs_patch:
3914 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM);
3915 break;
3916 case nir_intrinsic_scoped_barrier: {
3917 assert(!(nir_intrinsic_memory_semantics(instr) &
3918 (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
3919
3920 nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
3921
3922 unsigned wait_flags = 0;
3923 if (modes & (nir_var_mem_global | nir_var_mem_ssbo | nir_var_image))
3924 wait_flags |= AC_WAIT_VLOAD | AC_WAIT_VSTORE;
3925 if (modes & nir_var_mem_shared)
3926 wait_flags |= AC_WAIT_LGKM;
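/* AC_WAIT_VLOAD/VSTORE cover VMEM (global/SSBO/image) accesses and
 * AC_WAIT_LGKM covers LDS, making prior writes visible before the optional
 * execution barrier below. */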
3927
3928 if (wait_flags)
3929 ac_build_waitcnt(&ctx->ac, wait_flags);
3930
3931 if (nir_intrinsic_execution_scope(instr) == NIR_SCOPE_WORKGROUP)
3932 ac_build_s_barrier(&ctx->ac, ctx->stage);
3933 break;
3934 }
3935 case nir_intrinsic_control_barrier:
3936 /* If output patches are wholly in one wave, we don't need a barrier. */
3937 if (ctx->stage == MESA_SHADER_TESS_CTRL &&
3938 ctx->ac.wave_size % ctx->info->tess.tcs_vertices_out == 0)
3939 break;
3940
3941 ac_build_s_barrier(&ctx->ac, ctx->stage);
3942 break;
3943 case nir_intrinsic_shared_atomic_add:
3944 case nir_intrinsic_shared_atomic_imin:
3945 case nir_intrinsic_shared_atomic_umin:
3946 case nir_intrinsic_shared_atomic_imax:
3947 case nir_intrinsic_shared_atomic_umax:
3948 case nir_intrinsic_shared_atomic_and:
3949 case nir_intrinsic_shared_atomic_or:
3950 case nir_intrinsic_shared_atomic_xor:
3951 case nir_intrinsic_shared_atomic_exchange:
3952 case nir_intrinsic_shared_atomic_comp_swap:
3953 case nir_intrinsic_shared_atomic_fadd:
3954 case nir_intrinsic_shared_atomic_fmin:
3955 case nir_intrinsic_shared_atomic_fmax: {
3956 LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], instr->src[1].ssa->bit_size, 0);
3957 result = visit_var_atomic(ctx, instr, ptr, 1);
3958 break;
3959 }
3960 case nir_intrinsic_deref_atomic_add:
3961 case nir_intrinsic_deref_atomic_imin:
3962 case nir_intrinsic_deref_atomic_umin:
3963 case nir_intrinsic_deref_atomic_imax:
3964 case nir_intrinsic_deref_atomic_umax:
3965 case nir_intrinsic_deref_atomic_and:
3966 case nir_intrinsic_deref_atomic_or:
3967 case nir_intrinsic_deref_atomic_xor:
3968 case nir_intrinsic_deref_atomic_exchange:
3969 case nir_intrinsic_deref_atomic_comp_swap:
3970 case nir_intrinsic_deref_atomic_fadd: {
3971 LLVMValueRef ptr = get_src(ctx, instr->src[0]);
3972 result = visit_var_atomic(ctx, instr, ptr, 1);
3973 break;
3974 }
3975 case nir_intrinsic_load_barycentric_pixel:
3976 result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
3977 break;
3978 case nir_intrinsic_load_barycentric_centroid:
3979 result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
3980 break;
3981 case nir_intrinsic_load_barycentric_sample:
3982 result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
3983 break;
3984 case nir_intrinsic_load_barycentric_model:
3985 result = barycentric_model(ctx);
3986 break;
3987 case nir_intrinsic_load_barycentric_at_offset: {
3988 LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
3989 result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
3990 break;
3991 }
3992 case nir_intrinsic_load_barycentric_at_sample: {
3993 LLVMValueRef sample_id = get_src(ctx, instr->src[0]);
3994 result = barycentric_at_sample(ctx, nir_intrinsic_interp_mode(instr), sample_id);
3995 break;
3996 }
3997 case nir_intrinsic_load_interpolated_input: {
3998 /* We assume any indirect loads have been lowered away */
3999 ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
4000 assert(offset);
4001 assert(offset[0].i32 == 0);
4002
4003 LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
4004 unsigned index = nir_intrinsic_base(instr);
4005 unsigned component = nir_intrinsic_component(instr);
4006 result = load_interpolated_input(ctx, interp_param, index, component,
4007 instr->dest.ssa.num_components, instr->dest.ssa.bit_size,
4008 nir_intrinsic_io_semantics(instr).high_16bits);
4009 break;
4010 }
4011 case nir_intrinsic_load_point_coord_maybe_flipped: {
4012 LLVMValueRef interp_param = lookup_interp_param(ctx, INTERP_MODE_NONE, INTERP_CENTER);
4013 /* Load the point coordinates (x, y), which the hw writes after the interpolated inputs. */
4014 result = load_interpolated_input(ctx, interp_param, ctx->abi->num_interp, 2,
4015 instr->dest.ssa.num_components, instr->dest.ssa.bit_size,
4016 false);
4017 break;
4018 }
4019 case nir_intrinsic_emit_vertex:
4020 ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
4021 break;
4022 case nir_intrinsic_emit_vertex_with_counter: {
4023 unsigned stream = nir_intrinsic_stream_id(instr);
4024 LLVMValueRef next_vertex = get_src(ctx, instr->src[0]);
4025 ctx->abi->emit_vertex_with_counter(ctx->abi, stream, next_vertex, ctx->abi->outputs);
4026 break;
4027 }
4028 case nir_intrinsic_end_primitive:
4029 case nir_intrinsic_end_primitive_with_counter:
4030 ctx->abi->emit_primitive(ctx->abi, nir_intrinsic_stream_id(instr));
4031 break;
4032 case nir_intrinsic_load_tess_coord: {
4033 LLVMValueRef coord[] = {
4034 ctx->tes_u_replaced ? ctx->tes_u_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_u),
4035 ctx->tes_v_replaced ? ctx->tes_v_replaced : ac_get_arg(&ctx->ac, ctx->args->tes_v),
4036 ctx->ac.f32_0,
4037 };
4038
4039 /* For triangles, the vector should be (u, v, 1-u-v). */
4040 if (ctx->info->tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES) {
4041 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
4042 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
4043 }
4044 result = ac_build_gather_values(&ctx->ac, coord, 3);
4045 break;
4046 }
4047 case nir_intrinsic_load_gs_vertex_offset_amd:
4048 result = ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[nir_intrinsic_base(instr)]);
4049 break;
4050 case nir_intrinsic_vote_all: {
4051 result = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
4052 break;
4053 }
4054 case nir_intrinsic_vote_any: {
4055 result = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
4056 break;
4057 }
4058 case nir_intrinsic_shuffle:
4059 if (ctx->ac.gfx_level == GFX8 || ctx->ac.gfx_level == GFX9 ||
4060 (ctx->ac.gfx_level >= GFX10 && ctx->ac.wave_size == 32)) {
4061 result =
4062 ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
4063 } else {
4064 LLVMValueRef src = get_src(ctx, instr->src[0]);
4065 LLVMValueRef index = get_src(ctx, instr->src[1]);
4066 LLVMTypeRef type = LLVMTypeOf(src);
4067 struct waterfall_context wctx;
4068 LLVMValueRef index_val;
4069
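/* llvm.amdgcn.readlane needs a uniform lane index; the waterfall loop
 * re-executes the readlane for each distinct index value in the subgroup
 * with a matching exec mask. */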
4070 index_val = enter_waterfall(ctx, &wctx, index, true);
4071
4072 src = LLVMBuildZExt(ctx->ac.builder, src, ctx->ac.i32, "");
4073
4074 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane", ctx->ac.i32,
4075 (LLVMValueRef[]){src, index_val}, 2,
4076 AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
4077
4078 result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");
4079
4080 result = exit_waterfall(ctx, &wctx, result);
4081 }
4082 break;
4083 case nir_intrinsic_reduce:
4084 result = ac_build_reduce(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0],
4085 instr->const_index[1]);
4086 break;
4087 case nir_intrinsic_inclusive_scan:
4088 result =
4089 ac_build_inclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
4090 break;
4091 case nir_intrinsic_exclusive_scan:
4092 result =
4093 ac_build_exclusive_scan(&ctx->ac, get_src(ctx, instr->src[0]), instr->const_index[0]);
4094 break;
4095 case nir_intrinsic_quad_broadcast: {
4096 unsigned lane = nir_src_as_uint(instr->src[1]);
4097 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), lane, lane, lane, lane);
4098 break;
4099 }
4100 case nir_intrinsic_quad_swap_horizontal:
4101 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
4102 break;
4103 case nir_intrinsic_quad_swap_vertical:
4104 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
4105 break;
4106 case nir_intrinsic_quad_swap_diagonal:
4107 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
4108 break;
4109 case nir_intrinsic_quad_swizzle_amd: {
4110 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
4111 result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask & 0x3,
4112 (mask >> 2) & 0x3, (mask >> 4) & 0x3, (mask >> 6) & 0x3);
4113 break;
4114 }
4115 case nir_intrinsic_masked_swizzle_amd: {
4116 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
4117 result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
4118 break;
4119 }
4120 case nir_intrinsic_write_invocation_amd:
4121 result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
4122 get_src(ctx, instr->src[1]), get_src(ctx, instr->src[2]));
4123 break;
4124 case nir_intrinsic_mbcnt_amd:
4125 result = ac_build_mbcnt_add(&ctx->ac, get_src(ctx, instr->src[0]), get_src(ctx, instr->src[1]));
4126 break;
4127 case nir_intrinsic_load_scratch: {
4128 LLVMValueRef offset = get_src(ctx, instr->src[0]);
4129 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
4130 LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
4131 LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1
4132 ? comp_type
4133 : LLVMVectorType(comp_type, instr->dest.ssa.num_components);
4134 unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
4135 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, LLVMPointerType(vec_type, addr_space), "");
4136 result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
4137 break;
4138 }
4139 case nir_intrinsic_store_scratch: {
4140 LLVMValueRef offset = get_src(ctx, instr->src[1]);
4141 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
4142 LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
4143 unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
4144 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, LLVMPointerType(comp_type, addr_space), "");
4145 LLVMValueRef src = get_src(ctx, instr->src[0]);
4146 unsigned wrmask = nir_intrinsic_write_mask(instr);
4147 while (wrmask) {
4148 int start, count;
4149 u_bit_scan_consecutive_range(&wrmask, &start, &count);
4150
4151 LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
4152 LLVMValueRef offset_ptr = LLVMBuildGEP2(ctx->ac.builder, comp_type, ptr, &offset, 1, "");
4153 LLVMTypeRef vec_type = count == 1 ? comp_type : LLVMVectorType(comp_type, count);
4154 offset_ptr = LLVMBuildBitCast(ctx->ac.builder, offset_ptr,
4155 LLVMPointerType(vec_type, addr_space), "");
4156 LLVMValueRef offset_src = ac_extract_components(&ctx->ac, src, start, count);
4157 LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
4158 }
4159 break;
4160 }
4161 case nir_intrinsic_load_constant: {
4162 unsigned base = nir_intrinsic_base(instr);
4163 unsigned range = nir_intrinsic_range(instr);
4164
4165 LLVMValueRef offset = get_src(ctx, instr->src[0]);
4166 offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, base, false), "");
4167
4168 /* Clamp the offset to avoid out-of-bounds accesses because global
4169 * instructions can't handle them.
4170 */
4171 LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
4172 LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
4173 offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");
4174
4175 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);
4176 LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
4177 LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1
4178 ? comp_type
4179 : LLVMVectorType(comp_type, instr->dest.ssa.num_components);
4180 unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
4181 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, LLVMPointerType(vec_type, addr_space), "");
4182 result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
4183 break;
4184 }
4185 case nir_intrinsic_set_vertex_and_primitive_count:
4186 /* Currently ignored. */
4187 break;
4188 case nir_intrinsic_load_buffer_amd: {
4189 LLVMValueRef descriptor = get_src(ctx, instr->src[0]);
4190 LLVMValueRef addr_voffset = get_src(ctx, instr->src[1]);
4191 LLVMValueRef addr_soffset = get_src(ctx, instr->src[2]);
4192 unsigned num_components = instr->dest.ssa.num_components;
4193 unsigned const_offset = nir_intrinsic_base(instr);
4194 bool swizzled = nir_intrinsic_is_swizzled(instr);
4195 bool reorder = nir_intrinsic_can_reorder(instr);
4196 bool slc = nir_intrinsic_slc_amd(instr);
4197
4198 enum ac_image_cache_policy cache_policy = ac_glc;
4199 if (swizzled)
4200 cache_policy |= ac_swizzled;
4201 if (slc)
4202 cache_policy |= ac_slc;
4203
4204 LLVMTypeRef channel_type;
4205 if (instr->dest.ssa.bit_size == 8)
4206 channel_type = ctx->ac.i8;
4207 else if (instr->dest.ssa.bit_size == 16)
4208 channel_type = ctx->ac.i16;
4209 else if (instr->dest.ssa.bit_size == 32)
4210 channel_type = ctx->ac.i32;
4211 else if (instr->dest.ssa.bit_size == 64)
4212 channel_type = ctx->ac.i64;
4213 else if (instr->dest.ssa.bit_size == 128)
4214 channel_type = ctx->ac.i128;
4215 else
4216 unreachable("Unsupported channel type for load_buffer_amd");
4217
4218 LLVMValueRef voffset = LLVMBuildAdd(ctx->ac.builder, addr_voffset,
4219 LLVMConstInt(ctx->ac.i32, const_offset, 0), "");
4220 result = ac_build_buffer_load(&ctx->ac, descriptor, num_components, NULL, voffset,
4221 addr_soffset, channel_type, cache_policy, reorder, false);
4222 result = ac_to_integer(&ctx->ac, ac_trim_vector(&ctx->ac, result, num_components));
4223 break;
4224 }
4225 case nir_intrinsic_store_buffer_amd: {
4226 LLVMValueRef store_data = get_src(ctx, instr->src[0]);
4227 LLVMValueRef descriptor = get_src(ctx, instr->src[1]);
4228 LLVMValueRef addr_voffset = get_src(ctx, instr->src[2]);
4229 LLVMValueRef addr_soffset = get_src(ctx, instr->src[3]);
4230 unsigned const_offset = nir_intrinsic_base(instr);
4231 bool swizzled = nir_intrinsic_is_swizzled(instr);
4232 bool slc = nir_intrinsic_slc_amd(instr);
4233
4234 enum ac_image_cache_policy cache_policy = ac_glc;
4235 if (swizzled)
4236 cache_policy |= ac_swizzled;
4237 if (slc)
4238 cache_policy |= ac_slc;
4239
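/* Emit one dword store per consecutive run of writemask components;
 * component indices are in dwords, hence the start * 4 byte offset below.
 */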
4240 unsigned writemask = nir_intrinsic_write_mask(instr);
4241 while (writemask) {
4242 int start, count;
4243 u_bit_scan_consecutive_range(&writemask, &start, &count);
4244
4245 LLVMValueRef voffset = LLVMBuildAdd(
4246 ctx->ac.builder, addr_voffset,
4247 LLVMConstInt(ctx->ac.i32, const_offset + start * 4, 0), "");
4248
4249 LLVMValueRef data = extract_vector_range(&ctx->ac, store_data, start, count);
4250 ac_build_buffer_store_dword(&ctx->ac, descriptor, data, NULL, voffset, addr_soffset,
4251 cache_policy);
4252 }
4253 break;
4254 }
4255 case nir_intrinsic_has_input_vertex_amd: {
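/* merged_wave_info bits [7:0] hold the wave's input vertex count. */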
4256 LLVMValueRef num =
4257 ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 0, 8);
4258 result = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), num, "");
4259 break;
4260 }
4261 case nir_intrinsic_has_input_primitive_amd: {
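/* merged_wave_info bits [15:8] hold the wave's input primitive count. */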
4262 LLVMValueRef num =
4263 ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->merged_wave_info), 8, 8);
4264 result = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), num, "");
4265 break;
4266 }
4267 case nir_intrinsic_alloc_vertices_and_primitives_amd:
4268 /* The caller should only call this conditionally for wave 0, so pass NULL to disable
4269 * the wave 0 check inside this function.
4270 */
4271 ac_build_sendmsg_gs_alloc_req(&ctx->ac, NULL,
4272 get_src(ctx, instr->src[0]),
4273 get_src(ctx, instr->src[1]));
4274 break;
4275 case nir_intrinsic_overwrite_vs_arguments_amd:
4276 ctx->vertex_id_replaced = get_src(ctx, instr->src[0]);
4277 ctx->instance_id_replaced = get_src(ctx, instr->src[1]);
4278 break;
4279 case nir_intrinsic_overwrite_tes_arguments_amd:
4280 ctx->tes_u_replaced = get_src(ctx, instr->src[0]);
4281 ctx->tes_v_replaced = get_src(ctx, instr->src[1]);
4282 ctx->tes_rel_patch_id_replaced = get_src(ctx, instr->src[2]);
4283 ctx->tes_patch_id_replaced = get_src(ctx, instr->src[3]);
4284 break;
4285 case nir_intrinsic_export_primitive_amd: {
4286 struct ac_ngg_prim prim = {0};
4287 prim.passthrough = get_src(ctx, instr->src[0]);
4288 ac_build_export_prim(&ctx->ac, &prim);
4289 break;
4290 }
4291 case nir_intrinsic_gds_atomic_add_amd: {
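/* Reinterpret the byte address as a GDS pointer and perform the add at
 * workgroup ("one-as") scope.
 */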
4292 LLVMValueRef store_val = get_src(ctx, instr->src[0]);
4293 LLVMValueRef addr = get_src(ctx, instr->src[1]);
4294 LLVMTypeRef gds_ptr_type = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
4295 LLVMValueRef gds_base = LLVMBuildIntToPtr(ctx->ac.builder, addr, gds_ptr_type, "");
4296 ac_build_atomic_rmw(&ctx->ac, LLVMAtomicRMWBinOpAdd, gds_base, store_val, "workgroup-one-as");
4297 break;
4298 }
4299 case nir_intrinsic_export_vertex_amd:
4300 ctx->abi->export_vertex(ctx->abi);
4301 break;
4302 case nir_intrinsic_elect:
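/* The elected lane is the first active invocation in the wave. */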
4303 result = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, visit_first_invocation(ctx),
4304 ac_get_thread_id(&ctx->ac), "");
4305 break;
4306 case nir_intrinsic_byte_permute_amd:
4307 if (LLVM_VERSION_MAJOR < 13) {
4308 assert(!"unimplemented byte_permute, LLVM 12 doesn't have amdgcn.perm");
4309 break;
4310 }
4311 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.perm", ctx->ac.i32,
4312 (LLVMValueRef[]){get_src(ctx, instr->src[0]),
4313 get_src(ctx, instr->src[1]),
4314 get_src(ctx, instr->src[2])},
4315 3, AC_FUNC_ATTR_READNONE);
4316 break;
4317 case nir_intrinsic_lane_permute_16_amd:
4318 result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.permlane16", ctx->ac.i32,
4319 (LLVMValueRef[]){get_src(ctx, instr->src[0]),
4320 get_src(ctx, instr->src[0]),
4321 get_src(ctx, instr->src[1]),
4322 get_src(ctx, instr->src[2]),
4323 ctx->ac.i1false,
4324 ctx->ac.i1false},
4325 6, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
4326 break;
4327 case nir_intrinsic_load_force_vrs_rates_amd:
4328 result = ac_get_arg(&ctx->ac, ctx->args->force_vrs_rates);
4329 break;
4330 case nir_intrinsic_load_scalar_arg_amd:
4331 case nir_intrinsic_load_vector_arg_amd: {
4332 assert(nir_intrinsic_base(instr) < AC_MAX_ARGS);
4333 result = ac_to_integer(&ctx->ac, LLVMGetParam(ctx->main_function, nir_intrinsic_base(instr)));
4334 break;
4335 }
4336 case nir_intrinsic_load_smem_amd: {
4337 LLVMValueRef base = get_src(ctx, instr->src[0]);
4338 LLVMValueRef offset = get_src(ctx, instr->src[1]);
4339
4340 LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
4341 LLVMTypeRef ptr_type = LLVMPointerType(result_type, AC_ADDR_SPACE_CONST);
4342 LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST);
4343
4344 LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, "");
4345 addr = LLVMBuildGEP2(ctx->ac.builder, ctx->ac.i8, addr, &offset, 1, "");
4346 addr = LLVMBuildBitCast(ctx->ac.builder, addr, ptr_type, "");
4347
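/* Mark the address uniform and the load invariant so it can be lowered
 * to a scalar (SMEM) load.
 */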
4348 LLVMSetMetadata(addr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
4349 result = LLVMBuildLoad2(ctx->ac.builder, result_type, addr, "");
4350 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
4351 break;
4352 }
4353 default:
4354 fprintf(stderr, "Unknown intrinsic: ");
4355 nir_print_instr(&instr->instr, stderr);
4356 fprintf(stderr, "\n");
4357 abort();
4358 break;
4359 }
4360 if (result) {
4361 ctx->ssa_defs[instr->dest.ssa.index] = result;
4362 }
4363 }
4364
4365 static LLVMValueRef get_bindless_index_from_uniform(struct ac_nir_context *ctx, unsigned base_index,
4366 unsigned constant_index,
4367 LLVMValueRef dynamic_index)
4368 {
4369 LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
4370 LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
4371 LLVMConstInt(ctx->ac.i32, constant_index, 0), "");
4372
4373 /* Bindless uniform handles are 64-bit, so multiply the index by 8. */
4374 index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
4375 offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");
4376
4377 LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);
4378
4379 LLVMValueRef ret =
4380 ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset, NULL, ctx->ac.f32, 0, true, true);
4381
4382 return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
4383 }
4384
4385 struct sampler_desc_address {
4386 unsigned descriptor_set;
4387 unsigned base_index; /* binding in vulkan */
4388 unsigned constant_index;
4389 LLVMValueRef dynamic_index;
4390 bool image;
4391 bool bindless;
4392 };
4393
4394 static struct sampler_desc_address get_sampler_desc_internal(struct ac_nir_context *ctx,
4395 nir_deref_instr *deref_instr,
4396 const nir_instr *instr, bool image)
4397 {
4398 LLVMValueRef index = NULL;
4399 unsigned constant_index = 0;
4400 unsigned descriptor_set;
4401 unsigned base_index;
4402 bool bindless = false;
4403
4404 if (!deref_instr) {
4405 descriptor_set = 0;
4406 if (image) {
4407 nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
4408 base_index = 0;
4409 bindless = true;
4410 index = get_src(ctx, img_instr->src[0]);
4411 } else {
4412 nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
4413 int sampSrcIdx = nir_tex_instr_src_index(tex_instr, nir_tex_src_sampler_handle);
4414 if (sampSrcIdx != -1) {
4415 base_index = 0;
4416 bindless = true;
4417 index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
4418 } else {
4419 assert(tex_instr && !image);
4420 base_index = tex_instr->sampler_index;
4421 }
4422 }
4423 } else {
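/* Walk up the deref chain: fold constant array/struct indices into
 * constant_index and accumulate any dynamic parts into index.
 */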
4424 while (deref_instr->deref_type != nir_deref_type_var) {
4425 if (deref_instr->deref_type == nir_deref_type_array) {
4426 unsigned array_size = glsl_get_aoa_size(deref_instr->type);
4427 if (!array_size)
4428 array_size = 1;
4429
4430 if (nir_src_is_const(deref_instr->arr.index)) {
4431 constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
4432 } else {
4433 LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);
4434
4435 indirect = LLVMBuildMul(ctx->ac.builder, indirect,
4436 LLVMConstInt(ctx->ac.i32, array_size, false), "");
4437
4438 if (!index)
4439 index = indirect;
4440 else
4441 index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
4442 }
4443
4444 deref_instr = nir_src_as_deref(deref_instr->parent);
4445 } else if (deref_instr->deref_type == nir_deref_type_struct) {
4446 unsigned sidx = deref_instr->strct.index;
4447 deref_instr = nir_src_as_deref(deref_instr->parent);
4448 constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
4449 } else {
4450 unreachable("Unsupported deref type");
4451 }
4452 }
4453 descriptor_set = deref_instr->var->data.descriptor_set;
4454
4455 if (deref_instr->var->data.bindless) {
4456 /* For now just assert on unhandled variable types */
4457 assert(deref_instr->var->data.mode == nir_var_uniform);
4458
4459 base_index = deref_instr->var->data.driver_location;
4460 bindless = true;
4461
4462 index = index ? index : ctx->ac.i32_0;
4463 index = get_bindless_index_from_uniform(ctx, base_index, constant_index, index);
4464 } else
4465 base_index = deref_instr->var->data.binding;
4466 }
4467 return (struct sampler_desc_address){
4468 .descriptor_set = descriptor_set,
4469 .base_index = base_index,
4470 .constant_index = constant_index,
4471 .dynamic_index = index,
4472 .image = image,
4473 .bindless = bindless,
4474 };
4475 }
4476
4477 /* Extract any possibly divergent index into a separate value that can be fed
4478 * into get_sampler_desc with the same arguments. */
4479 static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx, nir_deref_instr *deref_instr,
4480 const nir_instr *instr, bool image)
4481 {
4482 struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
4483 return addr.dynamic_index;
4484 }
4485
4486 static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx, nir_deref_instr *deref_instr,
4487 enum ac_descriptor_type desc_type, const nir_instr *instr,
4488 LLVMValueRef index, bool image, bool write)
4489 {
4490 struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
4491 return ctx->abi->load_sampler_desc(ctx->abi, addr.descriptor_set, addr.base_index,
4492 addr.constant_index, index, desc_type, addr.image, write,
4493 addr.bindless);
4494 }
4495
4496 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
4497 *
4498 * GFX6-GFX7:
4499 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
4500 * filtering manually. The driver sets img7 to a mask clearing
4501 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
4502 * s_and_b32 samp0, samp0, img7
4503 *
4504 * GFX8:
4505 * The ANISO_OVERRIDE sampler field enables this fix in TA.
4506 */
4507 static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx, LLVMValueRef res,
4508 LLVMValueRef samp)
4509 {
4510 LLVMBuilderRef builder = ctx->ac.builder;
4511 LLVMValueRef img7, samp0;
4512
4513 if (ctx->ac.gfx_level >= GFX8)
4514 return samp;
4515
4516 img7 = LLVMBuildExtractElement(builder, res, LLVMConstInt(ctx->ac.i32, 7, 0), "");
4517 samp0 = LLVMBuildExtractElement(builder, samp, LLVMConstInt(ctx->ac.i32, 0, 0), "");
4518 samp0 = LLVMBuildAnd(builder, samp0, img7, "");
4519 return LLVMBuildInsertElement(builder, samp, samp0, LLVMConstInt(ctx->ac.i32, 0, 0), "");
4520 }
4521
4522 static void tex_fetch_ptrs(struct ac_nir_context *ctx, nir_tex_instr *instr,
4523 struct waterfall_context *wctx, LLVMValueRef *res_ptr,
4524 LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
4525 {
4526 LLVMValueRef texture_dynamic_handle = NULL;
4527 LLVMValueRef sampler_dynamic_handle = NULL;
4528 nir_deref_instr *texture_deref_instr = NULL;
4529 nir_deref_instr *sampler_deref_instr = NULL;
4530 int plane = -1;
4531
4532 *res_ptr = NULL;
4533 *samp_ptr = NULL;
4534 *fmask_ptr = NULL;
4535 for (unsigned i = 0; i < instr->num_srcs; i++) {
4536 switch (instr->src[i].src_type) {
4537 case nir_tex_src_texture_deref:
4538 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
4539 break;
4540 case nir_tex_src_sampler_deref:
4541 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
4542 break;
4543 case nir_tex_src_texture_handle:
4544 case nir_tex_src_sampler_handle: {
4545 LLVMValueRef val = get_src(ctx, instr->src[i].src);
4546 if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind) {
4547 if (instr->src[i].src_type == nir_tex_src_texture_handle)
4548 *res_ptr = val;
4549 else
4550 *samp_ptr = val;
4551 } else {
4552 if (instr->src[i].src_type == nir_tex_src_texture_handle)
4553 texture_dynamic_handle = val;
4554 else
4555 sampler_dynamic_handle = val;
4556 }
4557 break;
4558 }
4559 case nir_tex_src_plane:
4560 plane = nir_src_as_int(instr->src[i].src);
4561 break;
4562 default:
4563 break;
4564 }
4565 }
4566
4567 if (*res_ptr) {
4568 /* descriptors given through nir_tex_src_{texture,sampler}_handle */
4569 return;
4570 }
4571
4572 enum ac_descriptor_type main_descriptor =
4573 instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;
4574
4575 if (plane >= 0) {
4576 assert(instr->op != nir_texop_txf_ms && instr->op != nir_texop_samples_identical);
4577 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);
4578
4579 main_descriptor = AC_DESC_PLANE_0 + plane;
4580 }
4581
4582 if (instr->op == nir_texop_fragment_mask_fetch_amd || instr->op == nir_texop_samples_identical) {
4583 /* The fragment mask is fetched from the compressed
4584 * multisampled surface.
4585 */
4586 assert(ctx->ac.gfx_level < GFX11);
4587 main_descriptor = AC_DESC_FMASK;
4588 }
4589
4590 if (texture_dynamic_handle) {
4591 /* descriptor handles given through nir_tex_src_{texture,sampler}_handle */
4592 if (instr->texture_non_uniform)
4593 texture_dynamic_handle = enter_waterfall(ctx, &wctx[0], texture_dynamic_handle, true);
4594
4595 if (instr->sampler_non_uniform)
4596 sampler_dynamic_handle = enter_waterfall(ctx, &wctx[1], sampler_dynamic_handle, true);
4597
4598 *res_ptr = ctx->abi->load_sampler_desc(ctx->abi, 0, 0, 0, texture_dynamic_handle,
4599 main_descriptor, false, false, true);
4600
4601 if (samp_ptr)
4602 *samp_ptr = ctx->abi->load_sampler_desc(ctx->abi, 0, 0, 0, sampler_dynamic_handle,
4603 AC_DESC_SAMPLER, false, false, true);
4604 return;
4605 }
4606
4607 LLVMValueRef texture_dynamic_index =
4608 get_sampler_desc_index(ctx, texture_deref_instr, &instr->instr, false);
4609 if (!sampler_deref_instr)
4610 sampler_deref_instr = texture_deref_instr;
4611
4612 LLVMValueRef sampler_dynamic_index =
4613 get_sampler_desc_index(ctx, sampler_deref_instr, &instr->instr, false);
4614
4615 /* instr->sampler_non_uniform and texture_non_uniform are always false in GLSL,
4616 * but this can lead to unexpected behavior if the texture/sampler index comes
4617 * from a vertex attribute.
4618 * For instance, 2 consecutive draws using 2 different index values
4619 * could be squashed together by the hw, producing a single draw with a
4620 * non-dynamically-uniform index.
4621 * To avoid this, detect divergent indexing and use enter_waterfall.
4622 * See https://gitlab.freedesktop.org/mesa/mesa/-/issues/2253.
4623 */
4624 if (instr->texture_non_uniform ||
4625 (ctx->abi->use_waterfall_for_divergent_tex_samplers && texture_deref_instr->dest.ssa.divergent))
4626 texture_dynamic_index = enter_waterfall(ctx, wctx + 0, texture_dynamic_index, true);
4627
4628 if (instr->sampler_non_uniform ||
4629 (ctx->abi->use_waterfall_for_divergent_tex_samplers && sampler_deref_instr->dest.ssa.divergent))
4630 sampler_dynamic_index = enter_waterfall(ctx, wctx + 1, sampler_dynamic_index, true);
4631
4632 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr,
4633 texture_dynamic_index, false, false);
4634
4635 if (samp_ptr) {
4636 *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr,
4637 sampler_dynamic_index, false, false);
4638 if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
4639 *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
4640 }
4641 if (ctx->ac.gfx_level < GFX11 &&
4642 fmask_ptr && (instr->op == nir_texop_txf_ms || instr->op == nir_texop_samples_identical))
4643 *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK, &instr->instr,
4644 texture_dynamic_index, false, false);
4645 }
4646
4647 static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx, LLVMValueRef coord)
4648 {
4649 coord = ac_to_float(ctx, coord);
4650 coord = ac_build_round(ctx, coord);
4651 coord = ac_to_integer(ctx, coord);
4652 return coord;
4653 }
4654
4655 static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
4656 {
4657 LLVMValueRef result = NULL;
4658 struct ac_image_args args = {0};
4659 LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
4660 LLVMValueRef ddx = NULL, ddy = NULL;
4661 unsigned offset_src = 0;
4662 struct waterfall_context wctx[2] = {{{0}}};
4663
4664 tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler, &fmask_ptr);
4665
4666 for (unsigned i = 0; i < instr->num_srcs; i++) {
4667 switch (instr->src[i].src_type) {
4668 case nir_tex_src_coord: {
4669 LLVMValueRef coord = get_src(ctx, instr->src[i].src);
4670 args.a16 = instr->src[i].src.ssa->bit_size == 16;
4671 for (unsigned chan = 0; chan < instr->coord_components; ++chan)
4672 args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
4673 break;
4674 }
4675 case nir_tex_src_projector:
4676 break;
4677 case nir_tex_src_comparator:
4678 if (instr->is_shadow) {
4679 args.compare = get_src(ctx, instr->src[i].src);
4680 args.compare = ac_to_float(&ctx->ac, args.compare);
4681 assert(instr->src[i].src.ssa->bit_size == 32);
4682 }
4683 break;
4684 case nir_tex_src_offset:
4685 args.offset = get_src(ctx, instr->src[i].src);
4686 offset_src = i;
4687 /* We pack it with bit shifts, so we need it to be 32-bit. */
4688 assert(ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.offset)) == 32);
4689 break;
4690 case nir_tex_src_bias:
4691 args.bias = get_src(ctx, instr->src[i].src);
4692 assert(ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.bias)) == 32);
4693 break;
4694 case nir_tex_src_lod:
4695 if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
4696 args.level_zero = true;
4697 else
4698 args.lod = get_src(ctx, instr->src[i].src);
4699 break;
4700 case nir_tex_src_ms_index:
4701 sample_index = get_src(ctx, instr->src[i].src);
4702 break;
4703 case nir_tex_src_ddx:
4704 ddx = get_src(ctx, instr->src[i].src);
4705 args.g16 = instr->src[i].src.ssa->bit_size == 16;
4706 break;
4707 case nir_tex_src_ddy:
4708 ddy = get_src(ctx, instr->src[i].src);
4709 assert(LLVMTypeOf(ddy) == LLVMTypeOf(ddx));
4710 break;
4711 case nir_tex_src_min_lod:
4712 args.min_lod = get_src(ctx, instr->src[i].src);
4713 break;
4714 case nir_tex_src_texture_offset:
4715 case nir_tex_src_sampler_offset:
4716 case nir_tex_src_plane:
4717 default:
4718 break;
4719 }
4720 }
4721
4722 if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
4723 result = get_buffer_size(ctx, args.resource, true);
4724 goto write_result;
4725 }
4726
4727 if (instr->op == nir_texop_texture_samples) {
4728 LLVMValueRef res, samples, is_msaa;
4729 LLVMValueRef default_sample;
4730
4731 res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
4732 samples =
4733 LLVMBuildExtractElement(ctx->ac.builder, res, LLVMConstInt(ctx->ac.i32, 3, false), "");
4734 is_msaa = LLVMBuildLShr(ctx->ac.builder, samples, LLVMConstInt(ctx->ac.i32, 28, false), "");
4735 is_msaa = LLVMBuildAnd(ctx->ac.builder, is_msaa, LLVMConstInt(ctx->ac.i32, 0xe, false), "");
4736 is_msaa = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, is_msaa,
4737 LLVMConstInt(ctx->ac.i32, 0xe, false), "");
4738
4739 samples = LLVMBuildLShr(ctx->ac.builder, samples, LLVMConstInt(ctx->ac.i32, 16, false), "");
4740 samples = LLVMBuildAnd(ctx->ac.builder, samples, LLVMConstInt(ctx->ac.i32, 0xf, false), "");
4741 samples = LLVMBuildShl(ctx->ac.builder, ctx->ac.i32_1, samples, "");
4742
4743 if (ctx->abi->robust_buffer_access) {
4744 LLVMValueRef dword1, is_null_descriptor;
4745
4746 /* Extract the second dword of the descriptor; if it's
4747 * all zero, then it's a null descriptor.
4748 */
4749 dword1 =
4750 LLVMBuildExtractElement(ctx->ac.builder, res, LLVMConstInt(ctx->ac.i32, 1, false), "");
4751 is_null_descriptor = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, dword1,
4752 LLVMConstInt(ctx->ac.i32, 0, false), "");
4753 default_sample =
4754 LLVMBuildSelect(ctx->ac.builder, is_null_descriptor, ctx->ac.i32_0, ctx->ac.i32_1, "");
4755 } else {
4756 default_sample = ctx->ac.i32_1;
4757 }
4758
4759 samples = LLVMBuildSelect(ctx->ac.builder, is_msaa, samples, default_sample, "");
4760 result = samples;
4761 goto write_result;
4762 }
4763
4764 if (args.offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
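/* Pack up to three 6-bit offsets into one dword, one byte per channel,
 * as expected by the sample instructions.
 */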
4765 LLVMValueRef offset[3], pack;
4766 for (unsigned chan = 0; chan < 3; ++chan)
4767 offset[chan] = ctx->ac.i32_0;
4768
4769 unsigned num_components = ac_get_llvm_num_components(args.offset);
4770 for (unsigned chan = 0; chan < num_components; chan++) {
4771 offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
4772 offset[chan] =
4773 LLVMBuildAnd(ctx->ac.builder, offset[chan], LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
4774 if (chan)
4775 offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
4776 LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
4777 }
4778 pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
4779 pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
4780 args.offset = pack;
4781 }
4782
4783 /* Section 8.23.1 (Depth Texture Comparison Mode) of the
4784 * OpenGL 4.5 spec says:
4785 *
4786 * "If the texture’s internal format indicates a fixed-point
4787 * depth texture, then D_t and D_ref are clamped to the
4788 * range [0, 1]; otherwise no clamping is performed."
4789 *
4790 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
4791 * so the depth comparison value isn't clamped for Z16 and
4792 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
4793 * an explicitly clamped 32-bit float format.
4794 */
4795 if (args.compare && ctx->ac.gfx_level >= GFX8 && ctx->ac.gfx_level <= GFX9 &&
4796 ctx->abi->clamp_shadow_reference) {
4797 LLVMValueRef upgraded, clamped;
4798
4799 upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
4800 LLVMConstInt(ctx->ac.i32, 3, false), "");
4801 upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded, LLVMConstInt(ctx->ac.i32, 29, false), "");
4802 upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
4803 clamped = ac_build_clamp(&ctx->ac, args.compare);
4804 args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped, args.compare, "");
4805 }
4806
4807 /* pack derivatives */
4808 if (ddx || ddy) {
4809 int num_src_deriv_channels, num_dest_deriv_channels;
4810 switch (instr->sampler_dim) {
4811 case GLSL_SAMPLER_DIM_3D:
4812 case GLSL_SAMPLER_DIM_CUBE:
4813 num_src_deriv_channels = 3;
4814 num_dest_deriv_channels = 3;
4815 break;
4816 case GLSL_SAMPLER_DIM_2D:
4817 default:
4818 num_src_deriv_channels = 2;
4819 num_dest_deriv_channels = 2;
4820 break;
4821 case GLSL_SAMPLER_DIM_1D:
4822 num_src_deriv_channels = 1;
4823 if (ctx->ac.gfx_level == GFX9) {
4824 num_dest_deriv_channels = 2;
4825 } else {
4826 num_dest_deriv_channels = 1;
4827 }
4828 break;
4829 }
4830
4831 for (unsigned i = 0; i < num_src_deriv_channels; i++) {
4832 args.derivs[i] = ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddx, i));
4833 args.derivs[num_dest_deriv_channels + i] =
4834 ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddy, i));
4835 }
4836 for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
4837 LLVMValueRef zero = args.g16 ? ctx->ac.f16_0 : ctx->ac.f32_0;
4838 args.derivs[i] = zero;
4839 args.derivs[num_dest_deriv_channels + i] = zero;
4840 }
4841 }
4842
4843 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
4844 for (unsigned chan = 0; chan < instr->coord_components; chan++)
4845 args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
4846 if (instr->coord_components == 3)
4847 args.coords[3] = LLVMGetUndef(args.a16 ? ctx->ac.f16 : ctx->ac.f32);
4848 ac_prepare_cube_coords(&ctx->ac, instr->op == nir_texop_txd, instr->is_array,
4849 instr->op == nir_texop_lod, args.coords, args.derivs);
4850 }
4851
4852 /* Texture coordinate fixups */
4853 if (instr->coord_components > 1 && instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
4854 instr->is_array && instr->op != nir_texop_txf) {
4855 args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
4856 }
4857
4858 if (instr->coord_components > 2 &&
4859 (instr->sampler_dim == GLSL_SAMPLER_DIM_2D || instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
4860 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
4861 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
4862 instr->is_array && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms &&
4863 instr->op != nir_texop_fragment_fetch_amd && instr->op != nir_texop_fragment_mask_fetch_amd) {
4864 args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
4865 }
4866
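/* GFX9 addresses 1D textures as 2D: insert a second coordinate (0 for
 * txf, 0.5 i.e. the texel center otherwise) and move the layer to the
 * next channel for arrays.
 */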
4867 if (ctx->ac.gfx_level == GFX9 && instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
4868 instr->op != nir_texop_lod) {
4869 LLVMValueRef filler;
4870 if (instr->op == nir_texop_txf)
4871 filler = args.a16 ? ctx->ac.i16_0 : ctx->ac.i32_0;
4872 else
4873 filler = LLVMConstReal(args.a16 ? ctx->ac.f16 : ctx->ac.f32, 0.5);
4874
4875 if (instr->is_array)
4876 args.coords[2] = args.coords[1];
4877 args.coords[1] = filler;
4878 }
4879
4880 /* Pack sample index */
4881 if (sample_index && (instr->op == nir_texop_txf_ms || instr->op == nir_texop_fragment_fetch_amd))
4882 args.coords[instr->coord_components] = sample_index;
4883
4884 if (instr->op == nir_texop_samples_identical) {
4885 assert(ctx->ac.gfx_level < GFX11);
4886 struct ac_image_args txf_args = {0};
4887 memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));
4888
4889 txf_args.dmask = 0xf;
4890 txf_args.resource = args.resource;
4891 txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
4892 result = build_tex_intrinsic(ctx, instr, &txf_args);
4893
4894 result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
4895 result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
4896 goto write_result;
4897 }
4898
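/* Remap the API sample index through FMASK to the physical sample slot
 * where the data actually lives.
 */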
4899 if (ctx->ac.gfx_level < GFX11 &&
4900 (instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
4901 instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
4902 instr->op != nir_texop_txs && instr->op != nir_texop_fragment_fetch_amd &&
4903 instr->op != nir_texop_fragment_mask_fetch_amd) {
4904 unsigned sample_chan = instr->is_array ? 3 : 2;
4905 args.coords[sample_chan] = adjust_sample_index_using_fmask(
4906 &ctx->ac, args.coords[0], args.coords[1], instr->is_array ? args.coords[2] : NULL,
4907 args.coords[sample_chan], fmask_ptr);
4908 }
4909
4910 if (args.offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
4911 int num_offsets = instr->src[offset_src].src.ssa->num_components;
4912 num_offsets = MIN2(num_offsets, instr->coord_components);
4913 for (unsigned i = 0; i < num_offsets; ++i) {
4914 LLVMValueRef off = ac_llvm_extract_elem(&ctx->ac, args.offset, i);
4915 if (args.a16)
4916 off = LLVMBuildTrunc(ctx->ac.builder, off, ctx->ac.i16, "");
4917 args.coords[i] = LLVMBuildAdd(ctx->ac.builder, args.coords[i], off, "");
4918 }
4919 args.offset = NULL;
4920 }
4921
4922 /* DMASK was repurposed for GATHER4. 4 components are always
4923 * returned and DMASK works like a swizzle - it selects
4924 * the component to fetch. The only valid DMASK values are
4925 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4926 * (red,red,red,red) etc.) The ISA document doesn't mention
4927 * this.
4928 */
4929 args.dmask = 0xf;
4930 if (instr->op == nir_texop_tg4) {
4931 if (instr->is_shadow)
4932 args.dmask = 1;
4933 else
4934 args.dmask = 1 << instr->component;
4935 }
4936
4937 if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
4938 args.dim = ac_get_sampler_dim(ctx->ac.gfx_level, instr->sampler_dim, instr->is_array);
4939 args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
4940 }
4941
4942 /* Adjust the number of coordinates because we only need (x,y) for 2D
4943 * multisampled images and (x,y,layer) for 2D multisampled layered
4944 * images or for multisampled input attachments.
4945 */
4946 if (instr->op == nir_texop_fragment_mask_fetch_amd) {
4947 if (args.dim == ac_image_2dmsaa) {
4948 args.dim = ac_image_2d;
4949 } else {
4950 assert(args.dim == ac_image_2darraymsaa);
4951 args.dim = ac_image_2darray;
4952 }
4953 }
4954
4955 /* Set TRUNC_COORD=0 for textureGather(). */
4956 if (instr->op == nir_texop_tg4) {
4957 LLVMValueRef dword0 = LLVMBuildExtractElement(ctx->ac.builder, args.sampler, ctx->ac.i32_0, "");
4958 dword0 = LLVMBuildAnd(ctx->ac.builder, dword0, LLVMConstInt(ctx->ac.i32, C_008F30_TRUNC_COORD, 0), "");
4959 args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
4960 }
4961
4962 assert(instr->dest.is_ssa);
4963 args.d16 = instr->dest.ssa.bit_size == 16;
4964 args.tfe = instr->is_sparse;
4965
4966 result = build_tex_intrinsic(ctx, instr, &args);
4967
4968 LLVMValueRef code = NULL;
4969 if (instr->is_sparse) {
4970 code = ac_llvm_extract_elem(&ctx->ac, result, 4);
4971 result = ac_trim_vector(&ctx->ac, result, 4);
4972 }
4973
4974 if (instr->op == nir_texop_query_levels)
4975 result =
4976 LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
4977 else if (instr->is_shadow && instr->is_new_style_shadow && instr->op != nir_texop_txs &&
4978 instr->op != nir_texop_lod && instr->op != nir_texop_tg4)
4979 result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
4980 else if (ctx->ac.gfx_level == GFX9 && instr->op == nir_texop_txs &&
4981 instr->sampler_dim == GLSL_SAMPLER_DIM_1D && instr->is_array) {
4982 LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
4983 LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
4984 result = LLVMBuildInsertElement(ctx->ac.builder, result, layers, ctx->ac.i32_1, "");
4985 } else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
4986 /* Use 0x76543210 if the image doesn't have FMASK. */
4987 LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
4988 tmp = LLVMBuildExtractElement(ctx->ac.builder, tmp, ctx->ac.i32_1, "");
4989 tmp = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, tmp, ctx->ac.i32_0, "");
4990 result = LLVMBuildSelect(ctx->ac.builder, tmp,
4991 LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""),
4992 LLVMConstInt(ctx->ac.i32, 0x76543210, false), "");
4993 } else if (nir_tex_instr_result_size(instr) != 4)
4994 result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);
4995
4996 if (instr->is_sparse)
4997 result = ac_build_concat(&ctx->ac, result, code);
4998
4999 write_result:
5000 if (result) {
5001 assert(instr->dest.is_ssa);
5002 result = ac_to_integer(&ctx->ac, result);
5003
5004 for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
5005 result = exit_waterfall(ctx, wctx + i, result);
5006 }
5007
5008 ctx->ssa_defs[instr->dest.ssa.index] = result;
5009 }
5010 }
5011
5012 static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
5013 {
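/* Create an empty phi now; incoming values are added by visit_post_phi()
 * once all predecessor blocks have been emitted.
 */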
5014 LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
5015 LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
5016
5017 ctx->ssa_defs[instr->dest.ssa.index] = result;
5018 _mesa_hash_table_insert(ctx->phis, instr, result);
5019 }
5020
5021 static void visit_post_phi(struct ac_nir_context *ctx, nir_phi_instr *instr, LLVMValueRef llvm_phi)
5022 {
5023 nir_foreach_phi_src (src, instr) {
5024 LLVMBasicBlockRef block = get_block(ctx, src->pred);
5025 LLVMValueRef llvm_src = get_src(ctx, src->src);
5026
5027 LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
5028 }
5029 }
5030
5031 static void phi_post_pass(struct ac_nir_context *ctx)
5032 {
5033 hash_table_foreach(ctx->phis, entry)
5034 {
5035 visit_post_phi(ctx, (nir_phi_instr *)entry->key, (LLVMValueRef)entry->data);
5036 }
5037 }
5038
5039 static bool is_def_used_in_an_export(const nir_ssa_def *def)
5040 {
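/* Conservatively treat a value as exported if it reaches a store_deref,
 * possibly through a vec4; such undefs are kept as undef instead of being
 * folded to zero in visit_ssa_undef().
 */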
5041 nir_foreach_use (use_src, def) {
5042 if (use_src->parent_instr->type == nir_instr_type_intrinsic) {
5043 nir_intrinsic_instr *instr = nir_instr_as_intrinsic(use_src->parent_instr);
5044 if (instr->intrinsic == nir_intrinsic_store_deref)
5045 return true;
5046 } else if (use_src->parent_instr->type == nir_instr_type_alu) {
5047 nir_alu_instr *instr = nir_instr_as_alu(use_src->parent_instr);
5048 if (instr->op == nir_op_vec4 && is_def_used_in_an_export(&instr->dest.dest.ssa)) {
5049 return true;
5050 }
5051 }
5052 }
5053 return false;
5054 }
5055
5056 static void visit_ssa_undef(struct ac_nir_context *ctx, const nir_ssa_undef_instr *instr)
5057 {
5058 unsigned num_components = instr->def.num_components;
5059 LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
5060
5061 if (!ctx->abi->convert_undef_to_zero || is_def_used_in_an_export(&instr->def)) {
5062 LLVMValueRef undef;
5063
5064 if (num_components == 1)
5065 undef = LLVMGetUndef(type);
5066 else {
5067 undef = LLVMGetUndef(LLVMVectorType(type, num_components));
5068 }
5069 ctx->ssa_defs[instr->def.index] = undef;
5070 } else {
5071 LLVMValueRef zero = LLVMConstInt(type, 0, false);
5072 if (num_components > 1) {
5073 zero = ac_build_gather_values_extended(&ctx->ac, &zero, num_components, 0, false);
5074 }
5075 ctx->ssa_defs[instr->def.index] = zero;
5076 }
5077 }
5078
5079 static void visit_jump(struct ac_llvm_context *ctx, const nir_jump_instr *instr)
5080 {
5081 switch (instr->type) {
5082 case nir_jump_break:
5083 ac_build_break(ctx);
5084 break;
5085 case nir_jump_continue:
5086 ac_build_continue(ctx);
5087 break;
5088 default:
5089 fprintf(stderr, "Unknown NIR jump instr: ");
5090 nir_print_instr(&instr->instr, stderr);
5091 fprintf(stderr, "\n");
5092 abort();
5093 }
5094 }
5095
5096 static LLVMTypeRef glsl_base_to_llvm_type(struct ac_llvm_context *ac, enum glsl_base_type type)
5097 {
5098 switch (type) {
5099 case GLSL_TYPE_INT:
5100 case GLSL_TYPE_UINT:
5101 case GLSL_TYPE_BOOL:
5102 case GLSL_TYPE_SUBROUTINE:
5103 return ac->i32;
5104 case GLSL_TYPE_INT8:
5105 case GLSL_TYPE_UINT8:
5106 return ac->i8;
5107 case GLSL_TYPE_INT16:
5108 case GLSL_TYPE_UINT16:
5109 return ac->i16;
5110 case GLSL_TYPE_FLOAT:
5111 return ac->f32;
5112 case GLSL_TYPE_FLOAT16:
5113 return ac->f16;
5114 case GLSL_TYPE_INT64:
5115 case GLSL_TYPE_UINT64:
5116 return ac->i64;
5117 case GLSL_TYPE_DOUBLE:
5118 return ac->f64;
5119 default:
5120 unreachable("unknown GLSL type");
5121 }
5122 }
5123
5124 static LLVMTypeRef glsl_to_llvm_type(struct ac_llvm_context *ac, const struct glsl_type *type)
5125 {
5126 if (glsl_type_is_scalar(type)) {
5127 return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
5128 }
5129
5130 if (glsl_type_is_vector(type)) {
5131 return LLVMVectorType(glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
5132 glsl_get_vector_elements(type));
5133 }
5134
5135 if (glsl_type_is_matrix(type)) {
5136 return LLVMArrayType(glsl_to_llvm_type(ac, glsl_get_column_type(type)),
5137 glsl_get_matrix_columns(type));
5138 }
5139
5140 if (glsl_type_is_array(type)) {
5141 return LLVMArrayType(glsl_to_llvm_type(ac, glsl_get_array_element(type)),
5142 glsl_get_length(type));
5143 }
5144
5145 assert(glsl_type_is_struct_or_ifc(type));
5146
5147 LLVMTypeRef *const member_types = alloca(glsl_get_length(type) * sizeof(LLVMTypeRef));
5148
5149 for (unsigned i = 0; i < glsl_get_length(type); i++) {
5150 member_types[i] = glsl_to_llvm_type(ac, glsl_get_struct_field(type, i));
5151 }
5152
5153 return LLVMStructTypeInContext(ac->context, member_types, glsl_get_length(type), false);
5154 }
5155
5156 static void visit_deref(struct ac_nir_context *ctx, nir_deref_instr *instr)
5157 {
5158 if (!nir_deref_mode_is_one_of(instr, nir_var_mem_shared | nir_var_mem_global))
5159 return;
5160
5161 LLVMValueRef result = NULL;
5162 switch (instr->deref_type) {
5163 case nir_deref_type_var: {
5164 struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
5165 result = entry->data;
5166 break;
5167 }
5168 case nir_deref_type_struct:
5169 if (nir_deref_mode_is(instr, nir_var_mem_global)) {
5170 nir_deref_instr *parent = nir_deref_instr_parent(instr);
5171 uint64_t offset = glsl_get_struct_field_offset(parent->type, instr->strct.index);
5172 result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
5173 LLVMConstInt(ctx->ac.i32, offset, 0));
5174 } else {
5175 result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
5176 LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
5177 }
5178 break;
5179 case nir_deref_type_array:
5180 if (nir_deref_mode_is(instr, nir_var_mem_global)) {
5181 nir_deref_instr *parent = nir_deref_instr_parent(instr);
5182 unsigned stride = glsl_get_explicit_stride(parent->type);
5183
5184 if ((glsl_type_is_matrix(parent->type) && glsl_matrix_type_is_row_major(parent->type)) ||
5185 (glsl_type_is_vector(parent->type) && stride == 0))
5186 stride = type_scalar_size_bytes(parent->type);
5187
5188 assert(stride > 0);
5189 LLVMValueRef index = get_src(ctx, instr->arr.index);
5190 if (LLVMTypeOf(index) != ctx->ac.i64)
5191 index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");
5192
5193 LLVMValueRef offset =
5194 LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");
5195
5196 result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
5197 } else {
5198 result =
5199 ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent), get_src(ctx, instr->arr.index));
5200 }
5201 break;
5202 case nir_deref_type_ptr_as_array:
5203 if (nir_deref_mode_is(instr, nir_var_mem_global)) {
5204 unsigned stride = nir_deref_instr_array_stride(instr);
5205
5206 LLVMValueRef index = get_src(ctx, instr->arr.index);
5207 if (LLVMTypeOf(index) != ctx->ac.i64)
5208 index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");
5209
5210 LLVMValueRef offset =
5211 LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");
5212
5213 result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
5214 } else {
5215 result =
5216 ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), get_src(ctx, instr->arr.index));
5217 }
5218 break;
5219 case nir_deref_type_cast: {
5220 result = get_src(ctx, instr->parent);
5221
5222 /* We can't use the structs from LLVM because the shader
5223 * specifies its own offsets. */
5224 LLVMTypeRef pointee_type = ctx->ac.i8;
5225 if (nir_deref_mode_is(instr, nir_var_mem_shared))
5226 pointee_type = glsl_to_llvm_type(&ctx->ac, instr->type);
5227
5228 unsigned address_space;
5229
5230 switch (instr->modes) {
5231 case nir_var_mem_shared:
5232 address_space = AC_ADDR_SPACE_LDS;
5233 break;
5234 case nir_var_mem_global:
5235 address_space = AC_ADDR_SPACE_GLOBAL;
5236 break;
5237 default:
5238 unreachable("Unhandled address space");
5239 }
5240
5241 LLVMTypeRef type = LLVMPointerType(pointee_type, address_space);
5242
5243 if (LLVMTypeOf(result) != type) {
5244 if (LLVMGetTypeKind(LLVMTypeOf(result)) == LLVMVectorTypeKind) {
5245 result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
5246 } else {
5247 result = LLVMBuildIntToPtr(ctx->ac.builder, result, type, "");
5248 }
5249 }
5250 break;
5251 }
5252 default:
5253 unreachable("Unhandled deref_instr deref type");
5254 }
5255
5256 ctx->ssa_defs[instr->dest.ssa.index] = result;
5257 }
5258
5259 static void visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list);
5260
5261 static void visit_block(struct ac_nir_context *ctx, nir_block *block)
5262 {
5263 LLVMBasicBlockRef blockref = LLVMGetInsertBlock(ctx->ac.builder);
5264 LLVMValueRef first = LLVMGetFirstInstruction(blockref);
5265 if (first) {
5266 /* ac_branch_exited() might have already inserted non-phis */
5267 LLVMPositionBuilderBefore(ctx->ac.builder, LLVMGetFirstInstruction(blockref));
5268 }
5269
5270 nir_foreach_instr(instr, block) {
5271 if (instr->type != nir_instr_type_phi)
5272 break;
5273 visit_phi(ctx, nir_instr_as_phi(instr));
5274 }
5275
5276 LLVMPositionBuilderAtEnd(ctx->ac.builder, blockref);
5277
5278 nir_foreach_instr (instr, block) {
5279 switch (instr->type) {
5280 case nir_instr_type_alu:
5281 visit_alu(ctx, nir_instr_as_alu(instr));
5282 break;
5283 case nir_instr_type_load_const:
5284 visit_load_const(ctx, nir_instr_as_load_const(instr));
5285 break;
5286 case nir_instr_type_intrinsic:
5287 visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
5288 break;
5289 case nir_instr_type_tex:
5290 visit_tex(ctx, nir_instr_as_tex(instr));
5291 break;
5292 case nir_instr_type_phi:
5293 break;
5294 case nir_instr_type_ssa_undef:
5295 visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
5296 break;
5297 case nir_instr_type_jump:
5298 visit_jump(&ctx->ac, nir_instr_as_jump(instr));
5299 break;
5300 case nir_instr_type_deref:
5301 visit_deref(ctx, nir_instr_as_deref(instr));
5302 break;
5303 default:
5304 fprintf(stderr, "Unknown NIR instr type: ");
5305 nir_print_instr(instr, stderr);
5306 fprintf(stderr, "\n");
5307 abort();
5308 }
5309 }
5310
5311 _mesa_hash_table_insert(ctx->defs, block, LLVMGetInsertBlock(ctx->ac.builder));
5312 }
5313
5314 static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
5315 {
5316 LLVMValueRef value = get_src(ctx, if_stmt->condition);
5317
5318 nir_block *then_block = (nir_block *)exec_list_get_head(&if_stmt->then_list);
5319
5320 ac_build_ifcc(&ctx->ac, value, then_block->index);
5321
5322 visit_cf_list(ctx, &if_stmt->then_list);
5323
5324 if (!exec_list_is_empty(&if_stmt->else_list)) {
5325 nir_block *else_block = (nir_block *)exec_list_get_head(&if_stmt->else_list);
5326
5327 ac_build_else(&ctx->ac, else_block->index);
5328 visit_cf_list(ctx, &if_stmt->else_list);
5329 }
5330
5331 ac_build_endif(&ctx->ac, then_block->index);
5332 }
5333
5334 static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
5335 {
5336 nir_block *first_loop_block = (nir_block *)exec_list_get_head(&loop->body);
5337
5338 ac_build_bgnloop(&ctx->ac, first_loop_block->index);
5339
5340 visit_cf_list(ctx, &loop->body);
5341
5342 ac_build_endloop(&ctx->ac, first_loop_block->index);
5343 }
5344
5345 static void visit_cf_list(struct ac_nir_context *ctx, struct exec_list *list)
5346 {
5347 foreach_list_typed(nir_cf_node, node, node, list)
5348 {
5349 switch (node->type) {
5350 case nir_cf_node_block:
5351 visit_block(ctx, nir_cf_node_as_block(node));
5352 break;
5353
5354 case nir_cf_node_if:
5355 visit_if(ctx, nir_cf_node_as_if(node));
5356 break;
5357
5358 case nir_cf_node_loop:
5359 visit_loop(ctx, nir_cf_node_as_loop(node));
5360 break;
5361
5362 default:
5363 assert(0);
5364 }
5365 }
5366 }
5367
5368 void ac_handle_shader_output_decl(struct ac_llvm_context *ctx, struct ac_shader_abi *abi,
5369 struct nir_shader *nir, struct nir_variable *variable,
5370 gl_shader_stage stage)
5371 {
5372 unsigned output_loc = variable->data.driver_location;
5373 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
5374
5375 /* tess ctrl has its own load/store paths for outputs */
5376 if (stage == MESA_SHADER_TESS_CTRL)
5377 return;
5378
5379 if (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL ||
5380 stage == MESA_SHADER_GEOMETRY) {
5381 int idx = variable->data.location + variable->data.index;
5382 if (idx == VARYING_SLOT_CLIP_DIST0) {
5383 int length = nir->info.clip_distance_array_size + nir->info.cull_distance_array_size;
5384
5385 if (length > 4)
5386 attrib_count = 2;
5387 else
5388 attrib_count = 1;
5389 }
5390 }
5391
5392 bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
5393 LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
5394 for (unsigned i = 0; i < attrib_count; ++i) {
5395 for (unsigned chan = 0; chan < 4; chan++) {
5396 int idx = ac_llvm_reg_index_soa(output_loc + i, chan);
5397 abi->outputs[idx] = ac_build_alloca_undef(ctx, type, "");
5398 abi->is_16bit[idx] = is_16bit;
5399 }
5400 }
5401 }
5402
5403 static void setup_scratch(struct ac_nir_context *ctx, struct nir_shader *shader)
5404 {
5405 if (shader->scratch_size == 0)
5406 return;
5407
5408 ctx->scratch =
5409 ac_build_alloca_undef(&ctx->ac, LLVMArrayType(ctx->ac.i8, shader->scratch_size), "scratch");
5410 }
5411
5412 static void setup_constant_data(struct ac_nir_context *ctx, struct nir_shader *shader)
5413 {
5414 if (!shader->constant_data)
5415 return;
5416
5417 LLVMValueRef data = LLVMConstStringInContext(ctx->ac.context, shader->constant_data,
5418 shader->constant_data_size, true);
5419 LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);
5420 LLVMValueRef global =
5421 LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "const_data", AC_ADDR_SPACE_CONST);
5422
5423 LLVMSetInitializer(global, data);
5424 LLVMSetGlobalConstant(global, true);
5425 LLVMSetVisibility(global, LLVMHiddenVisibility);
5426 ctx->constant_data = global;
5427 }
5428
5429 static void setup_shared(struct ac_nir_context *ctx, struct nir_shader *nir)
5430 {
5431 if (ctx->ac.lds)
5432 return;
5433
5434 LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, nir->info.shared_size);
5435
5436 LLVMValueRef lds =
5437 LLVMAddGlobalInAddressSpace(ctx->ac.module, type, "compute_lds", AC_ADDR_SPACE_LDS);
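/* The large alignment should place the block at the very start of LDS. */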
5438 LLVMSetAlignment(lds, 64 * 1024);
5439
5440 ctx->ac.lds =
5441 LLVMBuildBitCast(ctx->ac.builder, lds, LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS), "");
5442 }
5443
5444 static void setup_gds(struct ac_nir_context *ctx, nir_function_impl *impl)
5445 {
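/* Scan the shader for GDS atomics and, if any are found, reserve a fixed
 * 256-byte GDS region via a function attribute.
 */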
5446 bool has_gds_atomic = false;
5447
5448 if (ctx->ac.gfx_level >= GFX10 &&
5449 (ctx->stage == MESA_SHADER_VERTEX ||
5450 ctx->stage == MESA_SHADER_TESS_EVAL ||
5451 ctx->stage == MESA_SHADER_GEOMETRY)) {
5452
5453 nir_foreach_block(block, impl) {
5454 nir_foreach_instr(instr, block) {
5455 if (instr->type != nir_instr_type_intrinsic)
5456 continue;
5457
5458 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
5459 has_gds_atomic |= intrin->intrinsic == nir_intrinsic_gds_atomic_add_amd;
5460 }
5461 }
5462 }
5463
5464 unsigned gds_size = has_gds_atomic ? 0x100 : 0;
5465
5466 if (gds_size)
5467 ac_llvm_add_target_dep_function_attr(ctx->main_function, "amdgpu-gds-size", gds_size);
5468 }
5469
5470 void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
5471 const struct ac_shader_args *args, struct nir_shader *nir)
5472 {
5473 struct ac_nir_context ctx = {0};
5474 struct nir_function *func;
5475
5476 ctx.ac = *ac;
5477 ctx.abi = abi;
5478 ctx.args = args;
5479
5480 ctx.stage = nir->info.stage;
5481 ctx.info = &nir->info;
5482
5483 ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
5484
5485 /* TODO: remove this after RADV switches to lowered IO */
5486 if (!nir->info.io_lowered) {
5487 nir_foreach_shader_out_variable(variable, nir)
5488 {
5489 ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable, ctx.stage);
5490 }
5491 }
5492
5493 ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
5494 ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
5495 ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
5496
5497 if (ctx.abi->kill_ps_if_inf_interp)
5498 ctx.verified_interp =
5499 _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
5500
5501 func = (struct nir_function *)exec_list_get_head(&nir->functions);
5502
5503 nir_index_ssa_defs(func->impl);
5504 ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
5505
5506 setup_scratch(&ctx, nir);
5507 setup_constant_data(&ctx, nir);
5508 setup_gds(&ctx, func->impl);
5509
5510 if (gl_shader_stage_is_compute(nir->info.stage))
5511 setup_shared(&ctx, nir);
5512
5513 if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote &&
5514 LLVM_VERSION_MAJOR < 13) {
5515 /* true = don't kill. */
5516 ctx.ac.postponed_kill = ac_build_alloca_init(&ctx.ac, ctx.ac.i1true, "");
5517 }
5518
5519 visit_cf_list(&ctx, &func->impl->body);
5520 phi_post_pass(&ctx);
5521
5522 if (ctx.ac.postponed_kill)
5523 ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad2(ctx.ac.builder, ctx.ac.i1, ctx.ac.postponed_kill, ""));
5524
5525 free(ctx.ssa_defs);
5526 ralloc_free(ctx.defs);
5527 ralloc_free(ctx.phis);
5528 ralloc_free(ctx.vars);
5529 if (ctx.abi->kill_ps_if_inf_interp)
5530 ralloc_free(ctx.verified_interp);
5531 }
5532