Lines Matching defs:shaders

380     * after lower_io_to_temporaries for vertex shaders.
676 /* Only compute shaders currently support requiring a
690 * shaders IR directly, so clone it first. */
894 /* Mesh shaders run as NGG which can implement local_invocation_index from
909 /* Mesh shaders only have a 1D "vertex index" which we use
1218 /* Culling doesn't make sense for meta shaders. */
1412 * Typically, shaders are allocated and only free'd when the device is destroyed. For this pattern,
1413 * this should allocate blocks for shaders fast and with no fragmentation, while still allowing
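
The two hits at 1412-1413 describe the intended behaviour of RADV's shader memory: shaders are packed into large, long-lived blocks that are only released with the device, which keeps allocation fast and fragmentation low. Below is a minimal bump-arena sketch of that pattern; the shader_arena_* names and the malloc-backed block are illustrative stand-ins (the real driver suballocates GPU buffer objects), not RADV's allocator API.

/* Hypothetical sketch, not RADV code: a bump arena where shader binaries
 * are packed back to back and the whole block is released in one step,
 * mirroring the "allocate fast, free with the device" pattern above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct shader_arena {
   uint8_t *base;   /* backing block; a GPU BO in the real driver */
   size_t size;     /* total capacity of the block */
   size_t offset;   /* bump pointer: next free byte */
};

static bool
shader_arena_init(struct shader_arena *arena, size_t size)
{
   arena->base = malloc(size);
   arena->size = size;
   arena->offset = 0;
   return arena->base != NULL;
}

/* Allocate `size` bytes aligned to `align` (a power of two); returns NULL
 * when the block is exhausted, where a real driver would grab a new block. */
static void *
shader_arena_alloc(struct shader_arena *arena, size_t size, size_t align)
{
   size_t offset = (arena->offset + align - 1) & ~(align - 1);
   if (offset + size > arena->size)
      return NULL;
   arena->offset = offset + size;
   return arena->base + offset;
}

/* Every shader in the arena goes away together, device-destruction style. */
static void
shader_arena_destroy(struct shader_arena *arena)
{
   free(arena->base);
   arena->base = NULL;
}

Because individual shaders are never handed back to the allocator, there are no per-shader free-list holes to manage, which is the "no fragmentation" property the comment refers to.
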
2115 radv_dump_nir_shaders(struct nir_shader *const *shaders, int shader_count)
2124 nir_print_shader(shaders[i], memf);
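
The hits at 2115 and 2124 are the helper that turns the NIR of every compiled shader into a single string (later stored at line 2260). The sketch below shows the underlying pattern, assuming POSIX open_memstream() for the in-memory FILE; the toy_shader type and print_one_shader() stand in for Mesa's nir_shader and nir_print_shader(), and the error handling is an assumption rather than the driver's exact code.

/* Illustrative sketch of the radv_dump_nir_shaders() pattern: print each
 * shader into an in-memory stream and hand back the accumulated string. */
#define _GNU_SOURCE          /* open_memstream() is POSIX.1-2008 */
#include <stdio.h>
#include <stdlib.h>

struct toy_shader { const char *name; int instr_count; };

static void
print_one_shader(const struct toy_shader *s, FILE *fp)
{
   /* Stand-in for nir_print_shader(shader, fp). */
   fprintf(fp, "shader %s: %d instructions\n", s->name, s->instr_count);
}

static char *
dump_shaders(struct toy_shader *const *shaders, int shader_count)
{
   char *data = NULL;
   size_t size = 0;
   FILE *memf = open_memstream(&data, &size);

   if (!memf)
      return NULL;

   for (int i = 0; i < shader_count; i++)
      print_one_shader(shaders[i], memf);

   fclose(memf);   /* finalizes `data`; the caller owns and frees it */
   return data;
}

int
main(void)
{
   struct toy_shader vs = { "vs", 42 }, fs = { "fs", 17 };
   struct toy_shader *shaders[] = { &vs, &fs };
   char *text = dump_shaders(shaders, 2);

   if (text) {
      fputs(text, stdout);
      free(text);
   }
   return 0;
}
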
2196 shader_compile(struct radv_device *device, struct nir_shader *const *shaders, int shader_count, gl_shader_stage stage,
2213 options->dump_shader = radv_can_dump_shader(device, shaders[0], gs_copy_shader || trap_handler_shader);
2222 !is_meta_shader(shaders[0]) && options->key.ps.enable_mrt_output_nan_fixup;
2231 llvm_compile_shader(options, info, shader_count, shaders, &binary, args);
2240 aco_compile_shader(&ac_opts, &ac_info, shader_count, shaders, args, &radv_aco_build_shader_binary, (void **)&binary);
2252 fprintf(stderr, "%s", radv_get_shader_name(info, shaders[0]->info.stage));
2254 fprintf(stderr, " + %s", radv_get_shader_name(info, shaders[i]->info.stage));
2260 shader->nir_string = radv_dump_nir_shaders(shaders, shader_count);
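
Taken together, the hits from 2196 to 2260 outline shader_compile(): it receives an array of NIR shaders, picks either the LLVM or the ACO backend (lines 2231 and 2240), optionally echoes which stages were merged into one hardware shader (lines 2252 and 2254), and keeps the textual NIR when dumping is enabled (line 2260). The self-contained sketch below reproduces only that dispatch-and-echo shape; every type and function in it (fake_shader, compile_aco, and so on) is a made-up stand-in, not the RADV, ACO, or LLVM entry points.

/* Toy model of the backend dispatch seen in shader_compile(): the same
 * shader array goes to one of two compile callbacks, then the stages are
 * echoed as "first + second + ..." when dumping is requested. */
#include <stdbool.h>
#include <stdio.h>

struct fake_shader { const char *stage_name; };
struct fake_binary { int size; };

typedef void (*compile_fn)(struct fake_shader *const *shaders, int count,
                           struct fake_binary *out);

static void
compile_llvm(struct fake_shader *const *shaders, int count, struct fake_binary *out)
{
   (void)shaders;
   out->size = count * 128;   /* pretend compilation */
}

static void
compile_aco(struct fake_shader *const *shaders, int count, struct fake_binary *out)
{
   (void)shaders;
   out->size = count * 96;    /* pretend compilation */
}

static void
compile(struct fake_shader *const *shaders, int count, bool use_llvm, bool dump)
{
   struct fake_binary binary = {0};
   compile_fn backend = use_llvm ? compile_llvm : compile_aco;

   backend(shaders, count, &binary);

   if (dump) {
      /* Same shape as the fprintf calls at 2252/2254. */
      fprintf(stderr, "%s", shaders[0]->stage_name);
      for (int i = 1; i < count; i++)
         fprintf(stderr, " + %s", shaders[i]->stage_name);
      fprintf(stderr, " (%d bytes)\n", binary.size);
   }
}

int
main(void)
{
   struct fake_shader vs = { "vertex" }, fs = { "fragment" };
   struct fake_shader *shaders[] = { &vs, &fs };
   compile(shaders, 2, /*use_llvm=*/false, /*dump=*/true);
   return 0;
}
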
2272 struct nir_shader *const *shaders, int shader_count,
2276 gl_shader_stage stage = shaders[shader_count - 1]->info.stage;
2285 return shader_compile(device, shaders, shader_count, stage, &pl_stage->info,
2588 struct radv_shader *shader = pipeline->shaders[i];
2743 struct radv_shader *shader = pipeline->shaders[stage];