1/* 2 * Copyright © 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Jason Ekstrand (jason@jlekstrand.net) 25 * 26 */ 27 28#ifndef _VTN_PRIVATE_H_ 29#define _VTN_PRIVATE_H_ 30 31#include <setjmp.h> 32 33#include "nir/nir.h" 34#include "nir/nir_builder.h" 35#include "util/u_dynarray.h" 36#include "nir_spirv.h" 37#include "spirv.h" 38#include "vtn_generator_ids.h" 39 40struct vtn_builder; 41struct vtn_decoration; 42 43/* setjmp/longjmp is broken on MinGW: https://sourceforge.net/p/mingw-w64/bugs/406/ */ 44#if defined(__MINGW32__) && !defined(_UCRT) 45 #define vtn_setjmp __builtin_setjmp 46 #define vtn_longjmp __builtin_longjmp 47#else 48 #define vtn_setjmp setjmp 49 #define vtn_longjmp longjmp 50#endif 51 52void vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level, 53 size_t spirv_offset, const char *message); 54 55void vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level, 56 size_t spirv_offset, const char *fmt, ...) PRINTFLIKE(4, 5); 57 58#define vtn_info(...) vtn_logf(b, NIR_SPIRV_DEBUG_LEVEL_INFO, 0, __VA_ARGS__) 59 60void _vtn_warn(struct vtn_builder *b, const char *file, unsigned line, 61 const char *fmt, ...) PRINTFLIKE(4, 5); 62#define vtn_warn(...) _vtn_warn(b, __FILE__, __LINE__, __VA_ARGS__) 63 64void _vtn_err(struct vtn_builder *b, const char *file, unsigned line, 65 const char *fmt, ...) PRINTFLIKE(4, 5); 66#define vtn_err(...) _vtn_err(b, __FILE__, __LINE__, __VA_ARGS__) 67 68/** Fail SPIR-V parsing 69 * 70 * This function logs an error and then bails out of the shader compile using 71 * longjmp. This being safe relies on two things: 72 * 73 * 1) We must guarantee that setjmp is called after allocating the builder 74 * and setting up b->debug (so that logging works) but before before any 75 * errors have a chance to occur. 76 * 77 * 2) While doing the SPIR-V -> NIR conversion, we need to be careful to 78 * ensure that all heap allocations happen through ralloc and are parented 79 * to the builder. This way they will get properly cleaned up on error. 80 * 81 * 3) We must ensure that _vtn_fail is never called while a mutex lock or a 82 * reference to any other resource is held with the exception of ralloc 83 * objects which are parented to the builder. 84 * 85 * So long as these two things continue to hold, we can easily longjmp back to 86 * spirv_to_nir(), clean up the builder, and return NULL. 87 */ 88NORETURN void 89_vtn_fail(struct vtn_builder *b, const char *file, unsigned line, 90 const char *fmt, ...) PRINTFLIKE(4, 5); 91 92#define vtn_fail(...) _vtn_fail(b, __FILE__, __LINE__, __VA_ARGS__) 93 94/** Fail if the given expression evaluates to true */ 95#define vtn_fail_if(expr, ...) \ 96 do { \ 97 if (unlikely(expr)) \ 98 vtn_fail(__VA_ARGS__); \ 99 } while (0) 100 101#define _vtn_fail_with(t, msg, v) \ 102 vtn_fail("%s: %s (%u)\n", msg, spirv_ ## t ## _to_string(v), v) 103 104#define vtn_fail_with_decoration(msg, v) _vtn_fail_with(decoration, msg, v) 105#define vtn_fail_with_opcode(msg, v) _vtn_fail_with(op, msg, v) 106 107/** Assert that a condition is true and, if it isn't, vtn_fail 108 * 109 * This macro is transitional only and should not be used in new code. Use 110 * vtn_fail_if and provide a real message instead. 111 */ 112#define vtn_assert(expr) \ 113 do { \ 114 if (!likely(expr)) \ 115 vtn_fail("%s", #expr); \ 116 } while (0) 117 118enum vtn_value_type { 119 vtn_value_type_invalid = 0, 120 vtn_value_type_undef, 121 vtn_value_type_string, 122 vtn_value_type_decoration_group, 123 vtn_value_type_type, 124 vtn_value_type_constant, 125 vtn_value_type_pointer, 126 vtn_value_type_function, 127 vtn_value_type_block, 128 vtn_value_type_ssa, 129 vtn_value_type_extension, 130 vtn_value_type_image_pointer, 131}; 132 133enum vtn_branch_type { 134 vtn_branch_type_none, 135 vtn_branch_type_if_merge, 136 vtn_branch_type_switch_break, 137 vtn_branch_type_switch_fallthrough, 138 vtn_branch_type_loop_break, 139 vtn_branch_type_loop_continue, 140 vtn_branch_type_loop_back_edge, 141 vtn_branch_type_discard, 142 vtn_branch_type_terminate_invocation, 143 vtn_branch_type_ignore_intersection, 144 vtn_branch_type_terminate_ray, 145 vtn_branch_type_return, 146}; 147 148enum vtn_cf_node_type { 149 vtn_cf_node_type_block, 150 vtn_cf_node_type_if, 151 vtn_cf_node_type_loop, 152 vtn_cf_node_type_case, 153 vtn_cf_node_type_switch, 154 vtn_cf_node_type_function, 155}; 156 157struct vtn_cf_node { 158 struct list_head link; 159 struct vtn_cf_node *parent; 160 enum vtn_cf_node_type type; 161}; 162 163struct vtn_loop { 164 struct vtn_cf_node node; 165 166 /* The main body of the loop */ 167 struct list_head body; 168 169 /* The "continue" part of the loop. This gets executed after the body 170 * and is where you go when you hit a continue. 171 */ 172 struct list_head cont_body; 173 174 struct vtn_block *header_block; 175 struct vtn_block *cont_block; 176 struct vtn_block *break_block; 177 178 SpvLoopControlMask control; 179}; 180 181struct vtn_if { 182 struct vtn_cf_node node; 183 184 enum vtn_branch_type then_type; 185 struct list_head then_body; 186 187 enum vtn_branch_type else_type; 188 struct list_head else_body; 189 190 struct vtn_block *header_block; 191 struct vtn_block *merge_block; 192 193 SpvSelectionControlMask control; 194}; 195 196struct vtn_case { 197 struct vtn_cf_node node; 198 199 struct vtn_block *block; 200 201 enum vtn_branch_type type; 202 struct list_head body; 203 204 /* The fallthrough case, if any */ 205 struct vtn_case *fallthrough; 206 207 /* The uint32_t values that map to this case */ 208 struct util_dynarray values; 209 210 /* True if this is the default case */ 211 bool is_default; 212 213 /* Initialized to false; used when sorting the list of cases */ 214 bool visited; 215}; 216 217struct vtn_switch { 218 struct vtn_cf_node node; 219 220 uint32_t selector; 221 222 struct list_head cases; 223 224 struct vtn_block *break_block; 225}; 226 227struct vtn_block { 228 struct vtn_cf_node node; 229 230 /** A pointer to the label instruction */ 231 const uint32_t *label; 232 233 /** A pointer to the merge instruction (or NULL if non exists) */ 234 const uint32_t *merge; 235 236 /** A pointer to the branch instruction that ends this block */ 237 const uint32_t *branch; 238 239 enum vtn_branch_type branch_type; 240 241 /* The CF node for which this is a merge target 242 * 243 * The SPIR-V spec requires that any given block can be the merge target 244 * for at most one merge instruction. If this block is a merge target, 245 * this points back to the block containing that merge instruction. 246 */ 247 struct vtn_cf_node *merge_cf_node; 248 249 /** Points to the loop that this block starts (if it starts a loop) */ 250 struct vtn_loop *loop; 251 252 /** Points to the switch case started by this block (if any) */ 253 struct vtn_case *switch_case; 254 255 /** Every block ends in a nop intrinsic so that we can find it again */ 256 nir_intrinsic_instr *end_nop; 257 258 /** attached nir_block */ 259 struct nir_block *block; 260}; 261 262struct vtn_function { 263 struct vtn_cf_node node; 264 265 struct vtn_type *type; 266 267 bool referenced; 268 bool emitted; 269 270 nir_function *nir_func; 271 struct vtn_block *start_block; 272 273 struct list_head body; 274 275 const uint32_t *end; 276 277 SpvLinkageType linkage; 278 SpvFunctionControlMask control; 279}; 280 281#define VTN_DECL_CF_NODE_CAST(_type) \ 282static inline struct vtn_##_type * \ 283vtn_cf_node_as_##_type(struct vtn_cf_node *node) \ 284{ \ 285 assert(node->type == vtn_cf_node_type_##_type); \ 286 return (struct vtn_##_type *)node; \ 287} 288 289VTN_DECL_CF_NODE_CAST(block) 290VTN_DECL_CF_NODE_CAST(loop) 291VTN_DECL_CF_NODE_CAST(if) 292VTN_DECL_CF_NODE_CAST(case) 293VTN_DECL_CF_NODE_CAST(switch) 294VTN_DECL_CF_NODE_CAST(function) 295 296#define vtn_foreach_cf_node(node, cf_list) \ 297 list_for_each_entry(struct vtn_cf_node, node, cf_list, link) 298 299typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp, 300 const uint32_t *, unsigned); 301 302void vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, 303 const uint32_t *end); 304void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func, 305 vtn_instruction_handler instruction_handler); 306void vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode, 307 const uint32_t *w, unsigned count); 308 309const uint32_t * 310vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start, 311 const uint32_t *end, vtn_instruction_handler handler); 312 313struct vtn_ssa_value { 314 union { 315 nir_ssa_def *def; 316 struct vtn_ssa_value **elems; 317 }; 318 319 /* For matrices, if this is non-NULL, then this value is actually the 320 * transpose of some other value. The value that `transposed` points to 321 * always dominates this value. 322 */ 323 struct vtn_ssa_value *transposed; 324 325 const struct glsl_type *type; 326}; 327 328enum vtn_base_type { 329 vtn_base_type_void, 330 vtn_base_type_scalar, 331 vtn_base_type_vector, 332 vtn_base_type_matrix, 333 vtn_base_type_array, 334 vtn_base_type_struct, 335 vtn_base_type_pointer, 336 vtn_base_type_image, 337 vtn_base_type_sampler, 338 vtn_base_type_sampled_image, 339 vtn_base_type_accel_struct, 340 vtn_base_type_ray_query, 341 vtn_base_type_function, 342 vtn_base_type_event, 343}; 344 345struct vtn_type { 346 enum vtn_base_type base_type; 347 348 const struct glsl_type *type; 349 350 /* The SPIR-V id of the given type. */ 351 uint32_t id; 352 353 /* Specifies the length of complex types. 354 * 355 * For Workgroup pointers, this is the size of the referenced type. 356 */ 357 unsigned length; 358 359 /* for arrays, matrices and pointers, the array stride */ 360 unsigned stride; 361 362 /* Access qualifiers */ 363 enum gl_access_qualifier access; 364 365 union { 366 /* Members for scalar, vector, and array-like types */ 367 struct { 368 /* for arrays, the vtn_type for the elements of the array */ 369 struct vtn_type *array_element; 370 371 /* for matrices, whether the matrix is stored row-major */ 372 bool row_major:1; 373 374 /* Whether this type, or a parent type, has been decorated as a 375 * builtin 376 */ 377 bool is_builtin:1; 378 379 /* Which built-in to use */ 380 SpvBuiltIn builtin; 381 }; 382 383 /* Members for struct types */ 384 struct { 385 /* for structures, the vtn_type for each member */ 386 struct vtn_type **members; 387 388 /* for structs, the offset of each member */ 389 unsigned *offsets; 390 391 /* for structs, whether it was decorated as a "non-SSBO-like" block */ 392 bool block:1; 393 394 /* for structs, whether it was decorated as an "SSBO-like" block */ 395 bool buffer_block:1; 396 397 /* for structs with block == true, whether this is a builtin block 398 * (i.e. a block that contains only builtins). 399 */ 400 bool builtin_block:1; 401 402 /* for structs and unions it specifies the minimum alignment of the 403 * members. 0 means packed. 404 * 405 * Set by CPacked and Alignment Decorations in kernels. 406 */ 407 bool packed:1; 408 }; 409 410 /* Members for pointer types */ 411 struct { 412 /* For pointers, the vtn_type for dereferenced type */ 413 struct vtn_type *deref; 414 415 /* Storage class for pointers */ 416 SpvStorageClass storage_class; 417 418 /* Required alignment for pointers */ 419 uint32_t align; 420 }; 421 422 /* Members for image types */ 423 struct { 424 /* GLSL image type for this type. This is not to be confused with 425 * vtn_type::type which is actually going to be the GLSL type for a 426 * pointer to an image, likely a uint32_t. 427 */ 428 const struct glsl_type *glsl_image; 429 430 /* Image format for image_load_store type images */ 431 unsigned image_format; 432 433 /* Access qualifier for storage images */ 434 SpvAccessQualifier access_qualifier; 435 }; 436 437 /* Members for sampled image types */ 438 struct { 439 /* For sampled images, the image type */ 440 struct vtn_type *image; 441 }; 442 443 /* Members for function types */ 444 struct { 445 /* For functions, the vtn_type for each parameter */ 446 struct vtn_type **params; 447 448 /* Return type for functions */ 449 struct vtn_type *return_type; 450 }; 451 }; 452}; 453 454bool vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type); 455 456bool vtn_types_compatible(struct vtn_builder *b, 457 struct vtn_type *t1, struct vtn_type *t2); 458 459struct vtn_type *vtn_type_without_array(struct vtn_type *type); 460 461struct vtn_variable; 462 463enum vtn_access_mode { 464 vtn_access_mode_id, 465 vtn_access_mode_literal, 466}; 467 468struct vtn_access_link { 469 enum vtn_access_mode mode; 470 int64_t id; 471}; 472 473struct vtn_access_chain { 474 uint32_t length; 475 476 /** Whether or not to treat the base pointer as an array. This is only 477 * true if this access chain came from an OpPtrAccessChain. 478 */ 479 bool ptr_as_array; 480 481 /* Access qualifiers */ 482 enum gl_access_qualifier access; 483 484 bool in_bounds; 485 486 /** Struct elements and array offsets. 487 * 488 * This is an array of 1 so that it can conveniently be created on the 489 * stack but the real length is given by the length field. 490 */ 491 struct vtn_access_link link[1]; 492}; 493 494enum vtn_variable_mode { 495 vtn_variable_mode_function, 496 vtn_variable_mode_private, 497 vtn_variable_mode_uniform, 498 vtn_variable_mode_atomic_counter, 499 vtn_variable_mode_ubo, 500 vtn_variable_mode_ssbo, 501 vtn_variable_mode_phys_ssbo, 502 vtn_variable_mode_push_constant, 503 vtn_variable_mode_workgroup, 504 vtn_variable_mode_cross_workgroup, 505 vtn_variable_mode_task_payload, 506 vtn_variable_mode_generic, 507 vtn_variable_mode_constant, 508 vtn_variable_mode_input, 509 vtn_variable_mode_output, 510 vtn_variable_mode_image, 511 vtn_variable_mode_accel_struct, 512 vtn_variable_mode_call_data, 513 vtn_variable_mode_call_data_in, 514 vtn_variable_mode_ray_payload, 515 vtn_variable_mode_ray_payload_in, 516 vtn_variable_mode_hit_attrib, 517 vtn_variable_mode_shader_record, 518}; 519 520struct vtn_pointer { 521 /** The variable mode for the referenced data */ 522 enum vtn_variable_mode mode; 523 524 /** The dereferenced type of this pointer */ 525 struct vtn_type *type; 526 527 /** The pointer type of this pointer 528 * 529 * This may be NULL for some temporary pointers constructed as part of a 530 * large load, store, or copy. It MUST be valid for all pointers which are 531 * stored as SPIR-V SSA values. 532 */ 533 struct vtn_type *ptr_type; 534 535 /** The referenced variable, if known 536 * 537 * This field may be NULL if the pointer uses a (block_index, offset) pair 538 * instead of an access chain or if the access chain starts at a deref. 539 */ 540 struct vtn_variable *var; 541 542 /** The NIR deref corresponding to this pointer */ 543 nir_deref_instr *deref; 544 545 /** A (block_index, offset) pair representing a UBO or SSBO position. */ 546 struct nir_ssa_def *block_index; 547 struct nir_ssa_def *offset; 548 549 /* Access qualifiers */ 550 enum gl_access_qualifier access; 551}; 552 553struct vtn_variable { 554 enum vtn_variable_mode mode; 555 556 struct vtn_type *type; 557 558 unsigned descriptor_set; 559 unsigned binding; 560 bool explicit_binding; 561 unsigned offset; 562 unsigned input_attachment_index; 563 564 nir_variable *var; 565 566 /* If the variable is a struct with a location set on it then this will be 567 * stored here. This will be used to calculate locations for members that 568 * don’t have their own explicit location. 569 */ 570 int base_location; 571 572 /** 573 * In some early released versions of GLSLang, it implemented all function 574 * calls by making copies of all parameters into temporary variables and 575 * passing those variables into the function. It even did so for samplers 576 * and images which violates the SPIR-V spec. Unfortunately, two games 577 * (Talos Principle and Doom) shipped with this old version of GLSLang and 578 * also happen to pass samplers into functions. Talos Principle received 579 * an update fairly shortly after release with an updated GLSLang. Doom, 580 * on the other hand, has never received an update so we need to work 581 * around this GLSLang issue in SPIR-V -> NIR. Hopefully, we can drop this 582 * hack at some point in the future. 583 */ 584 struct vtn_pointer *copy_prop_sampler; 585 586 /* Access qualifiers. */ 587 enum gl_access_qualifier access; 588}; 589 590const struct glsl_type * 591vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type, 592 enum vtn_variable_mode mode); 593 594struct vtn_image_pointer { 595 nir_deref_instr *image; 596 nir_ssa_def *coord; 597 nir_ssa_def *sample; 598 nir_ssa_def *lod; 599}; 600 601struct vtn_value { 602 enum vtn_value_type value_type; 603 604 /* Workaround for https://gitlab.freedesktop.org/mesa/mesa/-/issues/3406 605 * Only set for OpImage / OpSampledImage. Note that this is in addition 606 * the existence of a NonUniform decoration on this value.*/ 607 uint32_t propagated_non_uniform : 1; 608 609 /* Valid for vtn_value_type_constant to indicate the value is OpConstantNull. */ 610 bool is_null_constant:1; 611 612 /* Valid when all the members of the value are undef. */ 613 bool is_undef_constant:1; 614 615 const char *name; 616 struct vtn_decoration *decoration; 617 struct vtn_type *type; 618 union { 619 const char *str; 620 nir_constant *constant; 621 struct vtn_pointer *pointer; 622 struct vtn_image_pointer *image; 623 struct vtn_function *func; 624 struct vtn_block *block; 625 struct vtn_ssa_value *ssa; 626 vtn_instruction_handler ext_handler; 627 }; 628}; 629 630#define VTN_DEC_DECORATION -1 631#define VTN_DEC_EXECUTION_MODE -2 632#define VTN_DEC_STRUCT_MEMBER_NAME0 -3 633#define VTN_DEC_STRUCT_MEMBER0 0 634 635struct vtn_decoration { 636 struct vtn_decoration *next; 637 638 /* Different kinds of decorations are stored in a value, 639 the scope defines what decoration it refers to: 640 641 - VTN_DEC_DECORATION: 642 decoration associated with the value 643 - VTN_DEC_EXECUTION_MODE: 644 an execution mode associated with an entrypoint value 645 - VTN_DEC_STRUCT_MEMBER0 + m: 646 decoration associated with member m of a struct value 647 - VTN_DEC_STRUCT_MEMBER_NAME0 - m: 648 name of m'th member of a struct value 649 */ 650 int scope; 651 652 uint32_t num_operands; 653 const uint32_t *operands; 654 struct vtn_value *group; 655 656 union { 657 SpvDecoration decoration; 658 SpvExecutionMode exec_mode; 659 const char *member_name; 660 }; 661}; 662 663struct vtn_builder { 664 nir_builder nb; 665 666 /* Used by vtn_fail to jump back to the beginning of SPIR-V compilation */ 667 jmp_buf fail_jump; 668 669 const uint32_t *spirv; 670 size_t spirv_word_count; 671 uint32_t version; 672 673 nir_shader *shader; 674 struct spirv_to_nir_options *options; 675 struct vtn_block *block; 676 677 /* Current offset, file, line, and column. Useful for debugging. Set 678 * automatically by vtn_foreach_instruction. 679 */ 680 size_t spirv_offset; 681 const char *file; 682 int line, col; 683 684 /* 685 * In SPIR-V, constants are global, whereas in NIR, the load_const 686 * instruction we use is per-function. So while we parse each function, we 687 * keep a hash table of constants we've resolved to nir_ssa_value's so 688 * far, and we lazily resolve them when we see them used in a function. 689 */ 690 struct hash_table *const_table; 691 692 /* 693 * Map from phi instructions (pointer to the start of the instruction) 694 * to the variable corresponding to it. 695 */ 696 struct hash_table *phi_table; 697 698 /* In Vulkan, when lowering some modes variable access, the derefs of the 699 * variables are replaced with a resource index intrinsics, leaving the 700 * variable hanging. This set keeps track of them so they can be filtered 701 * (and not removed) in nir_remove_dead_variables. 702 */ 703 struct set *vars_used_indirectly; 704 705 unsigned num_specializations; 706 struct nir_spirv_specialization *specializations; 707 708 unsigned value_id_bound; 709 struct vtn_value *values; 710 711 /* Information on the origin of the SPIR-V */ 712 enum vtn_generator generator_id; 713 SpvSourceLanguage source_lang; 714 715 /* True if we need to fix up CS OpControlBarrier */ 716 bool wa_glslang_cs_barrier; 717 718 /* True if we need to ignore undef initializers */ 719 bool wa_llvm_spirv_ignore_workgroup_initializer; 720 721 /* Workaround discard bugs in HLSL -> SPIR-V compilers */ 722 bool uses_demote_to_helper_invocation; 723 bool convert_discard_to_demote; 724 725 gl_shader_stage entry_point_stage; 726 const char *entry_point_name; 727 struct vtn_value *entry_point; 728 struct vtn_value *workgroup_size_builtin; 729 bool variable_pointers; 730 731 uint32_t *interface_ids; 732 size_t interface_ids_count; 733 734 struct vtn_function *func; 735 struct list_head functions; 736 737 /* Current function parameter index */ 738 unsigned func_param_idx; 739 740 /* false by default, set to true by the ContractionOff execution mode */ 741 bool exact; 742 743 /* when a physical memory model is choosen */ 744 bool physical_ptrs; 745 746 /* memory model specified by OpMemoryModel */ 747 unsigned mem_model; 748}; 749 750const char * 751vtn_string_literal(struct vtn_builder *b, const uint32_t *words, 752 unsigned word_count, unsigned *words_used); 753 754nir_ssa_def * 755vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr); 756struct vtn_pointer * 757vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa, 758 struct vtn_type *ptr_type); 759 760struct vtn_ssa_value * 761vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant, 762 const struct glsl_type *type); 763 764static inline struct vtn_value * 765vtn_untyped_value(struct vtn_builder *b, uint32_t value_id) 766{ 767 vtn_fail_if(value_id >= b->value_id_bound, 768 "SPIR-V id %u is out-of-bounds", value_id); 769 return &b->values[value_id]; 770} 771 772static inline uint32_t 773vtn_id_for_value(struct vtn_builder *b, struct vtn_value *value) 774{ 775 vtn_fail_if(value <= b->values, "vtn_value pointer outside the range of valid values"); 776 uint32_t value_id = value - b->values; 777 vtn_fail_if(value_id >= b->value_id_bound, "vtn_value pointer outside the range of valid values"); 778 return value_id; 779} 780 781/* Consider not using this function directly and instead use 782 * vtn_push_ssa/vtn_push_pointer so that appropriate applying of 783 * decorations is handled by common code. 784 */ 785static inline struct vtn_value * 786vtn_push_value(struct vtn_builder *b, uint32_t value_id, 787 enum vtn_value_type value_type) 788{ 789 struct vtn_value *val = vtn_untyped_value(b, value_id); 790 791 vtn_fail_if(value_type == vtn_value_type_ssa, 792 "Do not call vtn_push_value for value_type_ssa. Use " 793 "vtn_push_ssa_value instead."); 794 795 vtn_fail_if(val->value_type != vtn_value_type_invalid, 796 "SPIR-V id %u has already been written by another instruction", 797 value_id); 798 799 val->value_type = value_type; 800 801 return &b->values[value_id]; 802} 803 804static inline struct vtn_value * 805vtn_value(struct vtn_builder *b, uint32_t value_id, 806 enum vtn_value_type value_type) 807{ 808 struct vtn_value *val = vtn_untyped_value(b, value_id); 809 vtn_fail_if(val->value_type != value_type, 810 "SPIR-V id %u is the wrong kind of value", value_id); 811 return val; 812} 813 814static inline struct vtn_value * 815vtn_pointer_value(struct vtn_builder *b, uint32_t value_id) 816{ 817 struct vtn_value *val = vtn_untyped_value(b, value_id); 818 vtn_fail_if(val->value_type != vtn_value_type_pointer && 819 !val->is_null_constant, 820 "SPIR-V id %u is the wrong kind of value", value_id); 821 return val; 822} 823 824static inline struct vtn_pointer * 825vtn_value_to_pointer(struct vtn_builder *b, struct vtn_value *value) 826{ 827 if (value->is_null_constant) { 828 vtn_assert(glsl_type_is_vector_or_scalar(value->type->type)); 829 nir_ssa_def *const_ssa = 830 vtn_const_ssa_value(b, value->constant, value->type->type)->def; 831 return vtn_pointer_from_ssa(b, const_ssa, value->type); 832 } 833 vtn_assert(value->value_type == vtn_value_type_pointer); 834 return value->pointer; 835} 836 837static inline struct vtn_pointer * 838vtn_pointer(struct vtn_builder *b, uint32_t value_id) 839{ 840 return vtn_value_to_pointer(b, vtn_pointer_value(b, value_id)); 841} 842 843bool 844vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode, 845 const uint32_t *w, unsigned count); 846 847static inline uint64_t 848vtn_constant_uint(struct vtn_builder *b, uint32_t value_id) 849{ 850 struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant); 851 852 vtn_fail_if(val->type->base_type != vtn_base_type_scalar || 853 !glsl_type_is_integer(val->type->type), 854 "Expected id %u to be an integer constant", value_id); 855 856 switch (glsl_get_bit_size(val->type->type)) { 857 case 8: return val->constant->values[0].u8; 858 case 16: return val->constant->values[0].u16; 859 case 32: return val->constant->values[0].u32; 860 case 64: return val->constant->values[0].u64; 861 default: unreachable("Invalid bit size"); 862 } 863} 864 865static inline int64_t 866vtn_constant_int(struct vtn_builder *b, uint32_t value_id) 867{ 868 struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant); 869 870 vtn_fail_if(val->type->base_type != vtn_base_type_scalar || 871 !glsl_type_is_integer(val->type->type), 872 "Expected id %u to be an integer constant", value_id); 873 874 switch (glsl_get_bit_size(val->type->type)) { 875 case 8: return val->constant->values[0].i8; 876 case 16: return val->constant->values[0].i16; 877 case 32: return val->constant->values[0].i32; 878 case 64: return val->constant->values[0].i64; 879 default: unreachable("Invalid bit size"); 880 } 881} 882 883static inline struct vtn_type * 884vtn_get_value_type(struct vtn_builder *b, uint32_t value_id) 885{ 886 struct vtn_value *val = vtn_untyped_value(b, value_id); 887 vtn_fail_if(val->type == NULL, "Value %u does not have a type", value_id); 888 return val->type; 889} 890 891static inline struct vtn_type * 892vtn_get_type(struct vtn_builder *b, uint32_t value_id) 893{ 894 return vtn_value(b, value_id, vtn_value_type_type)->type; 895} 896 897struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id); 898struct vtn_value *vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id, 899 struct vtn_ssa_value *ssa); 900 901nir_ssa_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id); 902struct vtn_value *vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, 903 nir_ssa_def *def); 904 905struct vtn_value *vtn_push_pointer(struct vtn_builder *b, 906 uint32_t value_id, 907 struct vtn_pointer *ptr); 908 909struct vtn_sampled_image { 910 nir_deref_instr *image; 911 nir_deref_instr *sampler; 912}; 913 914nir_ssa_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b, 915 struct vtn_sampled_image si); 916 917void 918vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id, 919 uint32_t dst_value_id); 920 921struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b, 922 const struct glsl_type *type); 923 924struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b, 925 struct vtn_ssa_value *src); 926 927nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id); 928 929nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b, 930 struct vtn_pointer *ptr); 931nir_ssa_def * 932vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr, 933 nir_ssa_def **index_out); 934 935nir_deref_instr * 936vtn_get_call_payload_for_location(struct vtn_builder *b, uint32_t location_id); 937 938struct vtn_ssa_value * 939vtn_local_load(struct vtn_builder *b, nir_deref_instr *src, 940 enum gl_access_qualifier access); 941 942void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src, 943 nir_deref_instr *dest, 944 enum gl_access_qualifier access); 945 946struct vtn_ssa_value * 947vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src, 948 enum gl_access_qualifier access); 949 950void vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src, 951 struct vtn_pointer *dest, enum gl_access_qualifier access); 952 953void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, 954 const uint32_t *w, unsigned count); 955 956 957typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *, 958 struct vtn_value *, 959 int member, 960 const struct vtn_decoration *, 961 void *); 962 963void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value, 964 vtn_decoration_foreach_cb cb, void *data); 965 966typedef void (*vtn_execution_mode_foreach_cb)(struct vtn_builder *, 967 struct vtn_value *, 968 const struct vtn_decoration *, 969 void *); 970 971void vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value, 972 vtn_execution_mode_foreach_cb cb, void *data); 973 974nir_op vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b, 975 SpvOp opcode, bool *swap, bool *exact, 976 unsigned src_bit_size, unsigned dst_bit_size); 977 978void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, 979 const uint32_t *w, unsigned count); 980 981void vtn_handle_integer_dot(struct vtn_builder *b, SpvOp opcode, 982 const uint32_t *w, unsigned count); 983 984void vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w, 985 unsigned count); 986 987void vtn_handle_no_contraction(struct vtn_builder *b, struct vtn_value *val); 988 989void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode, 990 const uint32_t *w, unsigned count); 991 992bool vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode, 993 const uint32_t *words, unsigned count); 994 995bool vtn_handle_opencl_instruction(struct vtn_builder *b, SpvOp ext_opcode, 996 const uint32_t *words, unsigned count); 997bool vtn_handle_opencl_core_instruction(struct vtn_builder *b, SpvOp opcode, 998 const uint32_t *w, unsigned count); 999 1000struct vtn_builder* vtn_create_builder(const uint32_t *words, size_t word_count, 1001 gl_shader_stage stage, const char *entry_point_name, 1002 const struct spirv_to_nir_options *options); 1003 1004void vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w, 1005 unsigned count); 1006 1007void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, 1008 const uint32_t *w, unsigned count); 1009 1010enum vtn_variable_mode vtn_storage_class_to_mode(struct vtn_builder *b, 1011 SpvStorageClass class, 1012 struct vtn_type *interface_type, 1013 nir_variable_mode *nir_mode_out); 1014 1015nir_address_format vtn_mode_to_address_format(struct vtn_builder *b, 1016 enum vtn_variable_mode); 1017 1018nir_rounding_mode vtn_rounding_mode_to_nir(struct vtn_builder *b, 1019 SpvFPRoundingMode mode); 1020 1021static inline uint32_t 1022vtn_align_u32(uint32_t v, uint32_t a) 1023{ 1024 assert(a != 0 && a == (a & -((int32_t) a))); 1025 return (v + a - 1) & ~(a - 1); 1026} 1027 1028static inline uint64_t 1029vtn_u64_literal(const uint32_t *w) 1030{ 1031 return (uint64_t)w[1] << 32 | w[0]; 1032} 1033 1034bool vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode, 1035 const uint32_t *words, unsigned count); 1036 1037bool vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode, 1038 const uint32_t *w, unsigned count); 1039 1040bool vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode, 1041 const uint32_t *words, unsigned count); 1042 1043bool vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *b, 1044 SpvOp ext_opcode, 1045 const uint32_t *words, 1046 unsigned count); 1047 1048SpvMemorySemanticsMask vtn_mode_to_memory_semantics(enum vtn_variable_mode mode); 1049 1050void vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope, 1051 SpvMemorySemanticsMask semantics); 1052 1053bool vtn_value_is_relaxed_precision(struct vtn_builder *b, struct vtn_value *val); 1054nir_ssa_def * 1055vtn_mediump_downconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_ssa_def *def); 1056struct vtn_ssa_value * 1057vtn_mediump_downconvert_value(struct vtn_builder *b, struct vtn_ssa_value *src); 1058void vtn_mediump_upconvert_value(struct vtn_builder *b, struct vtn_ssa_value *value); 1059 1060static inline int 1061cmp_uint32_t(const void *pa, const void *pb) 1062{ 1063 uint32_t a = *((const uint32_t *)pa); 1064 uint32_t b = *((const uint32_t *)pb); 1065 if (a < b) 1066 return -1; 1067 if (a > b) 1068 return 1; 1069 return 0; 1070} 1071 1072#endif /* _VTN_PRIVATE_H_ */ 1073