/*
 * Copyright © 2021 Collabora Ltd.
 *
 * Derived from tu_pipeline.c which is:
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "panvk_cs.h"
#include "panvk_private.h"

#include "pan_bo.h"

#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"

#include "panfrost/util/pan_lower_framebuffer.h"

struct panvk_pipeline_builder
{
   struct panvk_device *device;
   struct panvk_pipeline_cache *cache;
   const VkAllocationCallbacks *alloc;
   struct {
      const VkGraphicsPipelineCreateInfo *gfx;
      const VkComputePipelineCreateInfo *compute;
   } create_info;
   const struct panvk_pipeline_layout *layout;

   struct panvk_shader *shaders[MESA_SHADER_STAGES];
   struct {
      uint32_t shader_offset;
      uint32_t rsd_offset;
   } stages[MESA_SHADER_STAGES];
   uint32_t blend_shader_offsets[MAX_RTS];
   uint32_t shader_total_size;
   uint32_t static_state_size;
   uint32_t vpd_offset;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   VkSampleCountFlagBits samples;
   bool use_depth_stencil_attachment;
   uint8_t active_color_attachments;
   enum pipe_format color_attachment_formats[MAX_RTS];
};

static VkResult
panvk_pipeline_builder_create_pipeline(struct panvk_pipeline_builder *builder,
                                       struct panvk_pipeline **out_pipeline)
{
   struct panvk_device *dev = builder->device;

   struct panvk_pipeline *pipeline =
      vk_object_zalloc(&dev->vk, builder->alloc,
                       sizeof(*pipeline), VK_OBJECT_TYPE_PIPELINE);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   pipeline->layout = builder->layout;
   *out_pipeline = pipeline;
   return VK_SUCCESS;
}

static void
panvk_pipeline_builder_finish(struct panvk_pipeline_builder *builder)
{
   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;
      panvk_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
   }
}
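
/* A state is "static" (baked into the pipeline at creation time) unless it
 * was listed in VkPipelineDynamicStateCreateInfo;
 * panvk_pipeline_builder_parse_dynamic() fills dynamic_state_mask with one
 * bit per dynamic VkDynamicState.
 */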
static bool
panvk_pipeline_static_state(struct panvk_pipeline *pipeline, uint32_t id)
{
   return !(pipeline->dynamic_state_mask & (1 << id));
}

static VkResult
panvk_pipeline_builder_compile_shaders(struct panvk_pipeline_builder *builder,
                                       struct panvk_pipeline *pipeline)
{
   const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
      NULL
   };
   const VkPipelineShaderStageCreateInfo *stages =
      builder->create_info.gfx ?
      builder->create_info.gfx->pStages :
      &builder->create_info.compute->stage;
   unsigned stage_count =
      builder->create_info.gfx ? builder->create_info.gfx->stageCount : 1;

   for (uint32_t i = 0; i < stage_count; i++) {
      gl_shader_stage stage = vk_to_mesa_shader_stage(stages[i].stage);
      stage_infos[stage] = &stages[i];
   }

   /* compile shaders in reverse order */
   for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
        stage > MESA_SHADER_NONE; stage--) {
      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
      if (!stage_info)
         continue;

      struct panvk_shader *shader;

      shader = panvk_per_arch(shader_create)(builder->device, stage, stage_info,
                                             builder->layout,
                                             PANVK_SYSVAL_UBO_INDEX,
                                             &pipeline->blend.state,
                                             panvk_pipeline_static_state(pipeline,
                                                VK_DYNAMIC_STATE_BLEND_CONSTANTS),
                                             builder->alloc);
      if (!shader)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      builder->shaders[stage] = shader;
      builder->shader_total_size = ALIGN_POT(builder->shader_total_size, 128);
      builder->stages[stage].shader_offset = builder->shader_total_size;
      builder->shader_total_size +=
         util_dynarray_num_elements(&shader->binary, uint8_t);
   }

   return VK_SUCCESS;
}
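
/* All shader binaries are uploaded to a single executable BO, using the
 * 128-byte-aligned per-stage offsets assigned in
 * panvk_pipeline_builder_compile_shaders() above.
 */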
static VkResult
panvk_pipeline_builder_upload_shaders(struct panvk_pipeline_builder *builder,
                                      struct panvk_pipeline *pipeline)
{
   /* In some cases, the optimized shader is empty. Don't bother allocating
    * anything in this case.
    */
   if (builder->shader_total_size == 0)
      return VK_SUCCESS;

   struct panfrost_bo *bin_bo =
      panfrost_bo_create(&builder->device->physical_device->pdev,
                         builder->shader_total_size, PAN_BO_EXECUTE,
                         "Shader");

   pipeline->binary_bo = bin_bo;
   panfrost_bo_mmap(bin_bo);

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct panvk_shader *shader = builder->shaders[i];
      if (!shader)
         continue;

      memcpy(pipeline->binary_bo->ptr.cpu + builder->stages[i].shader_offset,
             util_dynarray_element(&shader->binary, uint8_t, 0),
             util_dynarray_num_elements(&shader->binary, uint8_t));
   }

   return VK_SUCCESS;
}

static bool
panvk_pipeline_static_sysval(struct panvk_pipeline *pipeline,
                             unsigned id)
{
   switch (id) {
   case PAN_SYSVAL_VIEWPORT_SCALE:
   case PAN_SYSVAL_VIEWPORT_OFFSET:
      return panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_VIEWPORT);
   default:
      return false;
   }
}

static void
panvk_pipeline_builder_alloc_static_state_bo(struct panvk_pipeline_builder *builder,
                                             struct panvk_pipeline *pipeline)
{
   struct panfrost_device *pdev =
      &builder->device->physical_device->pdev;
   unsigned bo_size = 0;

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct panvk_shader *shader = builder->shaders[i];
      if (!shader && i != MESA_SHADER_FRAGMENT)
         continue;

      if (pipeline->fs.dynamic_rsd && i == MESA_SHADER_FRAGMENT)
         continue;

      bo_size = ALIGN_POT(bo_size, pan_alignment(RENDERER_STATE));
      builder->stages[i].rsd_offset = bo_size;
      bo_size += pan_size(RENDERER_STATE);
      if (i == MESA_SHADER_FRAGMENT)
         bo_size += pan_size(BLEND) * MAX2(pipeline->blend.state.rt_count, 1);
   }

   if (builder->create_info.gfx &&
       panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_VIEWPORT) &&
       panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_SCISSOR)) {
      bo_size = ALIGN_POT(bo_size, pan_alignment(VIEWPORT));
      builder->vpd_offset = bo_size;
      bo_size += pan_size(VIEWPORT);
   }

   if (bo_size) {
      pipeline->state_bo =
         panfrost_bo_create(pdev, bo_size, 0, "Pipeline descriptors");
      panfrost_bo_mmap(pipeline->state_bo);
   }
}

static void
panvk_pipeline_builder_init_sysvals(struct panvk_pipeline_builder *builder,
                                    struct panvk_pipeline *pipeline,
                                    gl_shader_stage stage)
{
   const struct panvk_shader *shader = builder->shaders[stage];

   pipeline->sysvals[stage].ids = shader->info.sysvals;
   pipeline->sysvals[stage].ubo_idx = shader->sysval_ubo;
}
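
/* Emit a RENDERER_STATE descriptor for every non-fragment stage and collect
 * per-stage information (TLS/WLS sizes, sysvals, ...). The fragment RSD is
 * handled separately: when it's fully static it's pre-baked in the state BO,
 * otherwise only a template is filled here, which presumably gets patched
 * with the dynamic state at draw time.
 */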
static void
panvk_pipeline_builder_init_shaders(struct panvk_pipeline_builder *builder,
                                    struct panvk_pipeline *pipeline)
{
   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct panvk_shader *shader = builder->shaders[i];
      if (!shader)
         continue;

      pipeline->tls_size = MAX2(pipeline->tls_size, shader->info.tls_size);
      pipeline->wls_size = MAX2(pipeline->wls_size, shader->info.wls_size);

      if (shader->has_img_access)
         pipeline->img_access_mask |= BITFIELD_BIT(i);

      if (i == MESA_SHADER_VERTEX && shader->info.vs.writes_point_size) {
         VkPrimitiveTopology topology =
            builder->create_info.gfx->pInputAssemblyState->topology;
         bool points = (topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST);

         /* Even if the vertex shader writes point size, we only consider the
          * pipeline to write point size when we're actually drawing points.
          * Otherwise the point size write would conflict with wide lines.
          */
         pipeline->ia.writes_point_size = points;
      }

      mali_ptr shader_ptr = 0;

      /* Handle empty shaders gracefully */
      if (util_dynarray_num_elements(&builder->shaders[i]->binary, uint8_t)) {
         shader_ptr = pipeline->binary_bo->ptr.gpu +
                      builder->stages[i].shader_offset;
      }

      if (i != MESA_SHADER_FRAGMENT) {
         void *rsd = pipeline->state_bo->ptr.cpu + builder->stages[i].rsd_offset;
         mali_ptr gpu_rsd = pipeline->state_bo->ptr.gpu + builder->stages[i].rsd_offset;

         panvk_per_arch(emit_non_fs_rsd)(builder->device, &shader->info, shader_ptr, rsd);
         pipeline->rsds[i] = gpu_rsd;
      }

      panvk_pipeline_builder_init_sysvals(builder, pipeline, i);

      if (i == MESA_SHADER_COMPUTE)
         pipeline->cs.local_size = shader->local_size;
   }

   if (builder->create_info.gfx && !pipeline->fs.dynamic_rsd) {
      void *rsd = pipeline->state_bo->ptr.cpu + builder->stages[MESA_SHADER_FRAGMENT].rsd_offset;
      mali_ptr gpu_rsd = pipeline->state_bo->ptr.gpu + builder->stages[MESA_SHADER_FRAGMENT].rsd_offset;
      void *bd = rsd + pan_size(RENDERER_STATE);

      panvk_per_arch(emit_base_fs_rsd)(builder->device, pipeline, rsd);
      for (unsigned rt = 0; rt < pipeline->blend.state.rt_count; rt++) {
         panvk_per_arch(emit_blend)(builder->device, pipeline, rt, bd);
         bd += pan_size(BLEND);
      }

      pipeline->rsds[MESA_SHADER_FRAGMENT] = gpu_rsd;
   } else if (builder->create_info.gfx) {
      panvk_per_arch(emit_base_fs_rsd)(builder->device, pipeline, &pipeline->fs.rsd_template);
      for (unsigned rt = 0; rt < MAX2(pipeline->blend.state.rt_count, 1); rt++) {
         panvk_per_arch(emit_blend)(builder->device, pipeline, rt,
                                    &pipeline->blend.bd_template[rt]);
      }
   }

   pipeline->num_ubos = PANVK_NUM_BUILTIN_UBOS +
                        builder->layout->num_ubos +
                        builder->layout->num_dyn_ubos;
}
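
/* When both viewport and scissor are static, the VIEWPORT descriptor can be
 * pre-baked in the state BO allocated by
 * panvk_pipeline_builder_alloc_static_state_bo(); otherwise it presumably
 * has to be re-emitted whenever the dynamic viewport/scissor changes.
 */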
static void
panvk_pipeline_builder_parse_viewport(struct panvk_pipeline_builder *builder,
                                      struct panvk_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pViewportState is a pointer to an instance of the
    *    VkPipelineViewportStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled.
    */
   if (!builder->rasterizer_discard &&
       panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_VIEWPORT) &&
       panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_SCISSOR)) {
      void *vpd = pipeline->state_bo->ptr.cpu + builder->vpd_offset;
      panvk_per_arch(emit_viewport)(builder->create_info.gfx->pViewportState->pViewports,
                                    builder->create_info.gfx->pViewportState->pScissors,
                                    vpd);
      pipeline->vpd = pipeline->state_bo->ptr.gpu +
                      builder->vpd_offset;
   }

   if (panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_VIEWPORT))
      pipeline->viewport = builder->create_info.gfx->pViewportState->pViewports[0];

   if (panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_SCISSOR))
      pipeline->scissor = builder->create_info.gfx->pViewportState->pScissors[0];
}

static void
panvk_pipeline_builder_parse_dynamic(struct panvk_pipeline_builder *builder,
                                     struct panvk_pipeline *pipeline)
{
   const VkPipelineDynamicStateCreateInfo *dynamic_info =
      builder->create_info.gfx->pDynamicState;

   if (!dynamic_info)
      return;

   for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
      VkDynamicState state = dynamic_info->pDynamicStates[i];
      switch (state) {
      case VK_DYNAMIC_STATE_VIEWPORT ... VK_DYNAMIC_STATE_STENCIL_REFERENCE:
         pipeline->dynamic_state_mask |= 1 << state;
         break;
      default:
         unreachable("unsupported dynamic state");
      }
   }
}

static enum mali_draw_mode
translate_prim_topology(VkPrimitiveTopology in)
{
   switch (in) {
   case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
      return MALI_DRAW_MODE_POINTS;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
      return MALI_DRAW_MODE_LINES;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
      return MALI_DRAW_MODE_LINE_STRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
      return MALI_DRAW_MODE_TRIANGLES;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
      return MALI_DRAW_MODE_TRIANGLE_STRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
      return MALI_DRAW_MODE_TRIANGLE_FAN;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
   case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
   default:
      unreachable("Invalid primitive type");
   }
}

static void
panvk_pipeline_builder_parse_input_assembly(struct panvk_pipeline_builder *builder,
                                            struct panvk_pipeline *pipeline)
{
   pipeline->ia.primitive_restart =
      builder->create_info.gfx->pInputAssemblyState->primitiveRestartEnable;
   pipeline->ia.topology =
      translate_prim_topology(builder->create_info.gfx->pInputAssemblyState->topology);
}

static enum pipe_logicop
translate_logicop(VkLogicOp in)
{
   switch (in) {
   case VK_LOGIC_OP_CLEAR: return PIPE_LOGICOP_CLEAR;
   case VK_LOGIC_OP_AND: return PIPE_LOGICOP_AND;
   case VK_LOGIC_OP_AND_REVERSE: return PIPE_LOGICOP_AND_REVERSE;
   case VK_LOGIC_OP_COPY: return PIPE_LOGICOP_COPY;
   case VK_LOGIC_OP_AND_INVERTED: return PIPE_LOGICOP_AND_INVERTED;
   case VK_LOGIC_OP_NO_OP: return PIPE_LOGICOP_NOOP;
   case VK_LOGIC_OP_XOR: return PIPE_LOGICOP_XOR;
   case VK_LOGIC_OP_OR: return PIPE_LOGICOP_OR;
   case VK_LOGIC_OP_NOR: return PIPE_LOGICOP_NOR;
   case VK_LOGIC_OP_EQUIVALENT: return PIPE_LOGICOP_EQUIV;
   case VK_LOGIC_OP_INVERT: return PIPE_LOGICOP_INVERT;
   case VK_LOGIC_OP_OR_REVERSE: return PIPE_LOGICOP_OR_REVERSE;
   case VK_LOGIC_OP_COPY_INVERTED: return PIPE_LOGICOP_COPY_INVERTED;
   case VK_LOGIC_OP_OR_INVERTED: return PIPE_LOGICOP_OR_INVERTED;
   case VK_LOGIC_OP_NAND: return PIPE_LOGICOP_NAND;
   case VK_LOGIC_OP_SET: return PIPE_LOGICOP_SET;
   default: unreachable("Invalid logicop");
   }
}

static enum blend_func
translate_blend_op(VkBlendOp in)
{
   switch (in) {
   case VK_BLEND_OP_ADD: return BLEND_FUNC_ADD;
   case VK_BLEND_OP_SUBTRACT: return BLEND_FUNC_SUBTRACT;
   case VK_BLEND_OP_REVERSE_SUBTRACT: return BLEND_FUNC_REVERSE_SUBTRACT;
   case VK_BLEND_OP_MIN: return BLEND_FUNC_MIN;
   case VK_BLEND_OP_MAX: return BLEND_FUNC_MAX;
   default: unreachable("Invalid blend op");
   }
}
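
/* pan_blend describes each blend factor as a base factor plus an "invert"
 * flag: VK_BLEND_FACTOR_ONE_MINUS_x maps to the same base factor as
 * VK_BLEND_FACTOR_x with the invert flag set (see inverted_blend_factor()
 * below), and ONE is encoded as inverted ZERO. When the destination format
 * has no alpha channel, reads of destination alpha return 1.0, so DST_ALPHA
 * collapses to ONE (inverted ZERO) and ONE_MINUS_DST_ALPHA to ZERO.
 */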
static enum blend_factor
translate_blend_factor(VkBlendFactor in, bool dest_has_alpha)
{
   switch (in) {
   case VK_BLEND_FACTOR_ZERO:
   case VK_BLEND_FACTOR_ONE:
      return BLEND_FACTOR_ZERO;
   case VK_BLEND_FACTOR_SRC_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
      return BLEND_FACTOR_SRC_COLOR;
   case VK_BLEND_FACTOR_DST_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
      return BLEND_FACTOR_DST_COLOR;
   case VK_BLEND_FACTOR_SRC_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
      return BLEND_FACTOR_SRC_ALPHA;
   case VK_BLEND_FACTOR_DST_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return dest_has_alpha ? BLEND_FACTOR_DST_ALPHA : BLEND_FACTOR_ZERO;
   case VK_BLEND_FACTOR_CONSTANT_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
      return BLEND_FACTOR_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_CONSTANT_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
      return BLEND_FACTOR_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_SRC1_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
      return BLEND_FACTOR_SRC1_COLOR;
   case VK_BLEND_FACTOR_SRC1_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return BLEND_FACTOR_SRC1_ALPHA;
   case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
      return BLEND_FACTOR_SRC_ALPHA_SATURATE;
   default: unreachable("Invalid blend factor");
   }
}

static bool
inverted_blend_factor(VkBlendFactor in, bool dest_has_alpha)
{
   switch (in) {
   case VK_BLEND_FACTOR_ONE:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return true;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return dest_has_alpha;
   case VK_BLEND_FACTOR_DST_ALPHA:
      return !dest_has_alpha;
   default:
      return false;
   }
}
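
/* Returns true when the fixed-function blend unit can't implement the
 * requested equation, in which case blending has to be lowered to shader
 * code; this is why the blend state is handed to shader_create() in
 * panvk_pipeline_builder_compile_shaders().
 */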
bool
panvk_per_arch(blend_needs_lowering)(const struct panfrost_device *dev,
                                     const struct pan_blend_state *state,
                                     unsigned rt)
{
   /* LogicOp requires a blend shader */
   if (state->logicop_enable)
      return true;

   /* Not all formats can be blended by fixed-function hardware */
   if (!panfrost_blendable_formats_v7[state->rts[rt].format].internal)
      return true;

   unsigned constant_mask = pan_blend_constant_mask(state->rts[rt].equation);

   /* v6 doesn't support blend constants in FF blend equations.
    * v7 only uses the constant from RT 0 (TODO: what if it's the same
    * constant? or a constant is shared?)
    */
   if (constant_mask && (PAN_ARCH == 6 || (PAN_ARCH == 7 && rt > 0)))
      return true;

   if (!pan_blend_is_homogenous_constant(constant_mask, state->constants))
      return true;

   bool supports_2src = pan_blend_supports_2src(dev->arch);
   return !pan_blend_can_fixed_function(state->rts[rt].equation, supports_2src);
}

static void
panvk_pipeline_builder_parse_color_blend(struct panvk_pipeline_builder *builder,
                                         struct panvk_pipeline *pipeline)
{
   struct panfrost_device *pdev = &builder->device->physical_device->pdev;

   pipeline->blend.state.logicop_enable =
      builder->create_info.gfx->pColorBlendState->logicOpEnable;
   pipeline->blend.state.logicop_func =
      translate_logicop(builder->create_info.gfx->pColorBlendState->logicOp);
   pipeline->blend.state.rt_count = util_last_bit(builder->active_color_attachments);
   memcpy(pipeline->blend.state.constants,
          builder->create_info.gfx->pColorBlendState->blendConstants,
          sizeof(pipeline->blend.state.constants));

   for (unsigned i = 0; i < pipeline->blend.state.rt_count; i++) {
      const VkPipelineColorBlendAttachmentState *in =
         &builder->create_info.gfx->pColorBlendState->pAttachments[i];
      struct pan_blend_rt_state *out = &pipeline->blend.state.rts[i];

      out->format = builder->color_attachment_formats[i];

      bool dest_has_alpha = util_format_has_alpha(out->format);

      out->nr_samples = builder->create_info.gfx->pMultisampleState->rasterizationSamples;
      out->equation.blend_enable = in->blendEnable;
      out->equation.color_mask = in->colorWriteMask;
      out->equation.rgb_func = translate_blend_op(in->colorBlendOp);
      out->equation.rgb_src_factor = translate_blend_factor(in->srcColorBlendFactor, dest_has_alpha);
      out->equation.rgb_invert_src_factor = inverted_blend_factor(in->srcColorBlendFactor, dest_has_alpha);
      out->equation.rgb_dst_factor = translate_blend_factor(in->dstColorBlendFactor, dest_has_alpha);
      out->equation.rgb_invert_dst_factor = inverted_blend_factor(in->dstColorBlendFactor, dest_has_alpha);
      out->equation.alpha_func = translate_blend_op(in->alphaBlendOp);
      out->equation.alpha_src_factor = translate_blend_factor(in->srcAlphaBlendFactor, dest_has_alpha);
      out->equation.alpha_invert_src_factor = inverted_blend_factor(in->srcAlphaBlendFactor, dest_has_alpha);
      out->equation.alpha_dst_factor = translate_blend_factor(in->dstAlphaBlendFactor, dest_has_alpha);
      out->equation.alpha_invert_dst_factor = inverted_blend_factor(in->dstAlphaBlendFactor, dest_has_alpha);

      pipeline->blend.reads_dest |= pan_blend_reads_dest(out->equation);

      unsigned constant_mask =
         panvk_per_arch(blend_needs_lowering)(pdev, &pipeline->blend.state, i) ?
         0 : pan_blend_constant_mask(out->equation);
      pipeline->blend.constant[i].index = ffs(constant_mask) - 1;

      if (constant_mask) {
         /* On Bifrost, the blend constant is expressed with a UNORM of the
          * size of the target format. The value is then shifted such that
          * used bits are in the MSB. Here we calculate the factor at pipeline
          * creation time so we only have to do a
          *    hw_constant = float_constant * factor;
          * at descriptor emission time.
          */
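         /* Worked example: for an 8-bit UNORM channel (chan_size = 8), the
          * factor computed below is ((1 << 8) - 1) << (16 - 8) = 0xff00, so
          * a blend constant of 1.0 becomes 0xff00, i.e. the 8 used bits end
          * up in the MSBs of the 16-bit value.
          */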
         const struct util_format_description *format_desc =
            util_format_description(out->format);
         unsigned chan_size = 0;
         for (unsigned c = 0; c < format_desc->nr_channels; c++)
            chan_size = MAX2(format_desc->channel[c].size, chan_size);
         pipeline->blend.constant[i].bifrost_factor =
            ((1 << chan_size) - 1) << (16 - chan_size);
      }
   }
}
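
/* minSampleShading is a normalized fraction of the rasterization sample
 * count, while the hardware expects an absolute sample count, hence the
 * multiplication below (clamped to at least one sample).
 */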
static void
panvk_pipeline_builder_parse_multisample(struct panvk_pipeline_builder *builder,
                                         struct panvk_pipeline *pipeline)
{
   unsigned nr_samples =
      MAX2(builder->create_info.gfx->pMultisampleState->rasterizationSamples, 1);

   pipeline->ms.rast_samples =
      builder->create_info.gfx->pMultisampleState->rasterizationSamples;
   pipeline->ms.sample_mask =
      builder->create_info.gfx->pMultisampleState->pSampleMask ?
      builder->create_info.gfx->pMultisampleState->pSampleMask[0] : UINT16_MAX;
   pipeline->ms.min_samples =
      MAX2(builder->create_info.gfx->pMultisampleState->minSampleShading * nr_samples, 1);
}

static enum mali_stencil_op
translate_stencil_op(VkStencilOp in)
{
   switch (in) {
   case VK_STENCIL_OP_KEEP: return MALI_STENCIL_OP_KEEP;
   case VK_STENCIL_OP_ZERO: return MALI_STENCIL_OP_ZERO;
   case VK_STENCIL_OP_REPLACE: return MALI_STENCIL_OP_REPLACE;
   case VK_STENCIL_OP_INCREMENT_AND_CLAMP: return MALI_STENCIL_OP_INCR_SAT;
   case VK_STENCIL_OP_DECREMENT_AND_CLAMP: return MALI_STENCIL_OP_DECR_SAT;
   case VK_STENCIL_OP_INCREMENT_AND_WRAP: return MALI_STENCIL_OP_INCR_WRAP;
   case VK_STENCIL_OP_DECREMENT_AND_WRAP: return MALI_STENCIL_OP_DECR_WRAP;
   case VK_STENCIL_OP_INVERT: return MALI_STENCIL_OP_INVERT;
   default: unreachable("Invalid stencil op");
   }
}

static void
panvk_pipeline_builder_parse_zs(struct panvk_pipeline_builder *builder,
                                struct panvk_pipeline *pipeline)
{
   if (!builder->use_depth_stencil_attachment)
      return;

   pipeline->zs.z_test = builder->create_info.gfx->pDepthStencilState->depthTestEnable;

   /* The Vulkan spec says:
    *
    *    depthWriteEnable controls whether depth writes are enabled when
    *    depthTestEnable is VK_TRUE. Depth writes are always disabled when
    *    depthTestEnable is VK_FALSE.
    *
    * The hardware does not make this distinction, though, so we AND in the
    * condition ourselves.
    */
   pipeline->zs.z_write = pipeline->zs.z_test &&
                          builder->create_info.gfx->pDepthStencilState->depthWriteEnable;

   pipeline->zs.z_compare_func =
      panvk_per_arch(translate_compare_func)(builder->create_info.gfx->pDepthStencilState->depthCompareOp);
   pipeline->zs.s_test = builder->create_info.gfx->pDepthStencilState->stencilTestEnable;
   pipeline->zs.s_front.fail_op =
      translate_stencil_op(builder->create_info.gfx->pDepthStencilState->front.failOp);
   pipeline->zs.s_front.pass_op =
      translate_stencil_op(builder->create_info.gfx->pDepthStencilState->front.passOp);
   pipeline->zs.s_front.z_fail_op =
      translate_stencil_op(builder->create_info.gfx->pDepthStencilState->front.depthFailOp);
   pipeline->zs.s_front.compare_func =
      panvk_per_arch(translate_compare_func)(builder->create_info.gfx->pDepthStencilState->front.compareOp);
   pipeline->zs.s_front.compare_mask =
      builder->create_info.gfx->pDepthStencilState->front.compareMask;
   pipeline->zs.s_front.write_mask =
      builder->create_info.gfx->pDepthStencilState->front.writeMask;
   pipeline->zs.s_front.ref =
      builder->create_info.gfx->pDepthStencilState->front.reference;
   pipeline->zs.s_back.fail_op =
      translate_stencil_op(builder->create_info.gfx->pDepthStencilState->back.failOp);
   pipeline->zs.s_back.pass_op =
      translate_stencil_op(builder->create_info.gfx->pDepthStencilState->back.passOp);
   pipeline->zs.s_back.z_fail_op =
      translate_stencil_op(builder->create_info.gfx->pDepthStencilState->back.depthFailOp);
   pipeline->zs.s_back.compare_func =
      panvk_per_arch(translate_compare_func)(builder->create_info.gfx->pDepthStencilState->back.compareOp);
   pipeline->zs.s_back.compare_mask =
      builder->create_info.gfx->pDepthStencilState->back.compareMask;
   pipeline->zs.s_back.write_mask =
      builder->create_info.gfx->pDepthStencilState->back.writeMask;
   pipeline->zs.s_back.ref =
      builder->create_info.gfx->pDepthStencilState->back.reference;
}

static void
panvk_pipeline_builder_parse_rast(struct panvk_pipeline_builder *builder,
                                  struct panvk_pipeline *pipeline)
{
   pipeline->rast.clamp_depth = builder->create_info.gfx->pRasterizationState->depthClampEnable;
   pipeline->rast.depth_bias.enable = builder->create_info.gfx->pRasterizationState->depthBiasEnable;
   pipeline->rast.depth_bias.constant_factor =
      builder->create_info.gfx->pRasterizationState->depthBiasConstantFactor;
   pipeline->rast.depth_bias.clamp = builder->create_info.gfx->pRasterizationState->depthBiasClamp;
   pipeline->rast.depth_bias.slope_factor = builder->create_info.gfx->pRasterizationState->depthBiasSlopeFactor;
   pipeline->rast.front_ccw = builder->create_info.gfx->pRasterizationState->frontFace == VK_FRONT_FACE_COUNTER_CLOCKWISE;
   pipeline->rast.cull_front_face = builder->create_info.gfx->pRasterizationState->cullMode & VK_CULL_MODE_FRONT_BIT;
   pipeline->rast.cull_back_face = builder->create_info.gfx->pRasterizationState->cullMode & VK_CULL_MODE_BACK_BIT;
   pipeline->rast.line_width = builder->create_info.gfx->pRasterizationState->lineWidth;
   pipeline->rast.enable = !builder->create_info.gfx->pRasterizationState->rasterizerDiscardEnable;
}
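
/* A fragment shader with no observable effect (no side effects, no color
 * writes, no depth/stencil writes) doesn't need to run at all. The result
 * is cached in pipeline->fs.required so this decision can presumably be
 * reused at draw time.
 */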
static bool
panvk_fs_required(struct panvk_pipeline *pipeline)
{
   const struct pan_shader_info *info = &pipeline->fs.info;

   /* If we generally have side effects */
   if (info->fs.sidefx)
      return true;

   /* If colour is written we need to execute */
   const struct pan_blend_state *blend = &pipeline->blend.state;
   for (unsigned i = 0; i < blend->rt_count; ++i) {
      if (blend->rts[i].equation.color_mask)
         return true;
   }

   /* If depth is written and not implied we need to execute.
    * TODO: Predicate on Z/S writes being enabled */
   return (info->fs.writes_depth || info->fs.writes_stencil);
}

#define PANVK_DYNAMIC_FS_RSD_MASK \
        ((1 << VK_DYNAMIC_STATE_DEPTH_BIAS) | \
         (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS) | \
         (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK) | \
         (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK) | \
         (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))

static void
panvk_pipeline_builder_init_fs_state(struct panvk_pipeline_builder *builder,
                                     struct panvk_pipeline *pipeline)
{
   if (!builder->shaders[MESA_SHADER_FRAGMENT])
      return;

   pipeline->fs.dynamic_rsd =
      pipeline->dynamic_state_mask & PANVK_DYNAMIC_FS_RSD_MASK;
   pipeline->fs.address = pipeline->binary_bo->ptr.gpu +
                          builder->stages[MESA_SHADER_FRAGMENT].shader_offset;
   pipeline->fs.info = builder->shaders[MESA_SHADER_FRAGMENT]->info;
   pipeline->fs.rt_mask = builder->active_color_attachments;
   pipeline->fs.required = panvk_fs_required(pipeline);
}
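
/* Varying locations are shared between stages: each active location gets
 * the widest format seen across the stages referencing it (see the
 * blocksize comparison below), and panvk_pipeline_builder_collect_varyings()
 * then assigns it an offset in one of the varying buffers.
 */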
static void
panvk_pipeline_update_varying_slot(struct panvk_varyings_info *varyings,
                                   gl_shader_stage stage,
                                   const struct pan_shader_varying *varying,
                                   bool input)
{
   gl_varying_slot loc = varying->location;
   enum panvk_varying_buf_id buf_id = panvk_varying_buf_id(loc);

   varyings->stage[stage].loc[varyings->stage[stage].count++] = loc;

   assert(loc < ARRAY_SIZE(varyings->varying));

   enum pipe_format new_fmt = varying->format;
   enum pipe_format old_fmt = varyings->varying[loc].format;

   BITSET_SET(varyings->active, loc);

   /* We expect inputs to either be written by a previous stage or be
    * built-in. Skip the entry if that's not the case: we'll emit a constant
    * varying returning zero for those entries.
    */
   if (input && old_fmt == PIPE_FORMAT_NONE)
      return;

   unsigned new_size = util_format_get_blocksize(new_fmt);
   unsigned old_size = util_format_get_blocksize(old_fmt);

   if (old_size < new_size)
      varyings->varying[loc].format = new_fmt;

   varyings->buf_mask |= 1 << buf_id;
}

static void
panvk_pipeline_builder_collect_varyings(struct panvk_pipeline_builder *builder,
                                        struct panvk_pipeline *pipeline)
{
   for (uint32_t s = 0; s < MESA_SHADER_STAGES; s++) {
      if (!builder->shaders[s])
         continue;

      const struct pan_shader_info *info = &builder->shaders[s]->info;

      for (unsigned i = 0; i < info->varyings.input_count; i++) {
         panvk_pipeline_update_varying_slot(&pipeline->varyings, s,
                                            &info->varyings.input[i],
                                            true);
      }

      for (unsigned i = 0; i < info->varyings.output_count; i++) {
         panvk_pipeline_update_varying_slot(&pipeline->varyings, s,
                                            &info->varyings.output[i],
                                            false);
      }
   }

   /* TODO: Xfb */
   gl_varying_slot loc;
   BITSET_FOREACH_SET(loc, pipeline->varyings.active, VARYING_SLOT_MAX) {
      if (pipeline->varyings.varying[loc].format == PIPE_FORMAT_NONE)
         continue;

      enum panvk_varying_buf_id buf_id = panvk_varying_buf_id(loc);
      unsigned buf_idx = panvk_varying_buf_index(&pipeline->varyings, buf_id);
      unsigned varying_sz = panvk_varying_size(&pipeline->varyings, loc);

      pipeline->varyings.varying[loc].buf = buf_idx;
      pipeline->varyings.varying[loc].offset =
         pipeline->varyings.buf[buf_idx].stride;
      pipeline->varyings.buf[buf_idx].stride += varying_sz;
   }
}

static void
panvk_pipeline_builder_parse_vertex_input(struct panvk_pipeline_builder *builder,
                                          struct panvk_pipeline *pipeline)
{
   struct panvk_attribs_info *attribs = &pipeline->attribs;
   const VkPipelineVertexInputStateCreateInfo *info =
      builder->create_info.gfx->pVertexInputState;

   const VkPipelineVertexInputDivisorStateCreateInfoEXT *div_info =
      vk_find_struct_const(info->pNext,
                           PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);

   for (unsigned i = 0; i < info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &info->pVertexBindingDescriptions[i];
      attribs->buf_count = MAX2(desc->binding + 1, attribs->buf_count);
      attribs->buf[desc->binding].stride = desc->stride;
      attribs->buf[desc->binding].per_instance =
         desc->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE;
      attribs->buf[desc->binding].instance_divisor = 1;
      attribs->buf[desc->binding].special = false;
   }

   if (div_info) {
      for (unsigned i = 0; i < div_info->vertexBindingDivisorCount; i++) {
         const VkVertexInputBindingDivisorDescriptionEXT *div =
            &div_info->pVertexBindingDivisors[i];
         attribs->buf[div->binding].instance_divisor = div->divisor;
      }
   }

   const struct pan_shader_info *vs =
      &builder->shaders[MESA_SHADER_VERTEX]->info;

   for (unsigned i = 0; i < info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &info->pVertexAttributeDescriptions[i];

      /* Attribute slots are compacted to the locations the vertex shader
       * actually reads: the slot index is the number of read locations
       * below this one.
       */
      unsigned attrib = desc->location + VERT_ATTRIB_GENERIC0;
      unsigned slot = util_bitcount64(vs->attributes_read &
                                      BITFIELD64_MASK(attrib));

      attribs->attrib[slot].buf = desc->binding;
      attribs->attrib[slot].format =
         vk_format_to_pipe_format(desc->format);
      attribs->attrib[slot].offset = desc->offset;
   }
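
   /* gl_VertexID/gl_InstanceID are sourced from "special" attribute buffers
    * appended after the user-provided ones, at the fixed
    * PAN_VERTEX_ID/PAN_INSTANCE_ID attribute slots.
    */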
   if (vs->attribute_count >= PAN_VERTEX_ID) {
      attribs->buf[attribs->buf_count].special = true;
      attribs->buf[attribs->buf_count].special_id = PAN_VERTEX_ID;
      attribs->attrib[PAN_VERTEX_ID].buf = attribs->buf_count++;
      attribs->attrib[PAN_VERTEX_ID].format = PIPE_FORMAT_R32_UINT;
   }

   if (vs->attribute_count >= PAN_INSTANCE_ID) {
      attribs->buf[attribs->buf_count].special = true;
      attribs->buf[attribs->buf_count].special_id = PAN_INSTANCE_ID;
      attribs->attrib[PAN_INSTANCE_ID].buf = attribs->buf_count++;
      attribs->attrib[PAN_INSTANCE_ID].format = PIPE_FORMAT_R32_UINT;
   }

   attribs->attrib_count = MAX2(attribs->attrib_count, vs->attribute_count);
}

static VkResult
panvk_pipeline_builder_build(struct panvk_pipeline_builder *builder,
                             struct panvk_pipeline **pipeline)
{
   VkResult result = panvk_pipeline_builder_create_pipeline(builder, pipeline);
   if (result != VK_SUCCESS)
      return result;

   /* TODO: make those functions return a result and handle errors */
   if (builder->create_info.gfx) {
      panvk_pipeline_builder_parse_dynamic(builder, *pipeline);
      panvk_pipeline_builder_parse_color_blend(builder, *pipeline);
      panvk_pipeline_builder_compile_shaders(builder, *pipeline);
      panvk_pipeline_builder_collect_varyings(builder, *pipeline);
      panvk_pipeline_builder_parse_input_assembly(builder, *pipeline);
      panvk_pipeline_builder_parse_multisample(builder, *pipeline);
      panvk_pipeline_builder_parse_zs(builder, *pipeline);
      panvk_pipeline_builder_parse_rast(builder, *pipeline);
      panvk_pipeline_builder_parse_vertex_input(builder, *pipeline);
      panvk_pipeline_builder_upload_shaders(builder, *pipeline);
      panvk_pipeline_builder_init_fs_state(builder, *pipeline);
      panvk_pipeline_builder_alloc_static_state_bo(builder, *pipeline);
      panvk_pipeline_builder_init_shaders(builder, *pipeline);
      panvk_pipeline_builder_parse_viewport(builder, *pipeline);
   } else {
      panvk_pipeline_builder_compile_shaders(builder, *pipeline);
      panvk_pipeline_builder_upload_shaders(builder, *pipeline);
      panvk_pipeline_builder_alloc_static_state_bo(builder, *pipeline);
      panvk_pipeline_builder_init_shaders(builder, *pipeline);
   }

   return VK_SUCCESS;
}
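
/* Note that when rasterization is discarded, the multisample, depth/stencil
 * and color attachment states are ignored (these are the builder fields
 * listed after the rasterizer_discard comment in panvk_pipeline_builder),
 * which is why they are only collected in the non-discard path below.
 */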
static void
panvk_pipeline_builder_init_graphics(struct panvk_pipeline_builder *builder,
                                     struct panvk_device *dev,
                                     struct panvk_pipeline_cache *cache,
                                     const VkGraphicsPipelineCreateInfo *create_info,
                                     const VkAllocationCallbacks *alloc)
{
   VK_FROM_HANDLE(panvk_pipeline_layout, layout, create_info->layout);
   assert(layout);

   *builder = (struct panvk_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .layout = layout,
      .create_info.gfx = create_info,
      .alloc = alloc,
   };

   builder->rasterizer_discard =
      create_info->pRasterizationState->rasterizerDiscardEnable;

   if (builder->rasterizer_discard) {
      builder->samples = VK_SAMPLE_COUNT_1_BIT;
   } else {
      builder->samples = create_info->pMultisampleState->rasterizationSamples;

      const struct panvk_render_pass *pass =
         panvk_render_pass_from_handle(create_info->renderPass);
      const struct panvk_subpass *subpass =
         &pass->subpasses[create_info->subpass];

      builder->use_depth_stencil_attachment =
         subpass->zs_attachment.idx != VK_ATTACHMENT_UNUSED;

      assert(subpass->color_count <= create_info->pColorBlendState->attachmentCount);
      builder->active_color_attachments = 0;
      for (uint32_t i = 0; i < subpass->color_count; i++) {
         uint32_t idx = subpass->color_attachments[i].idx;
         if (idx == VK_ATTACHMENT_UNUSED)
            continue;

         builder->active_color_attachments |= 1 << i;
         builder->color_attachment_formats[i] = pass->attachments[idx].format;
      }
   }
}

VkResult
panvk_per_arch(CreateGraphicsPipelines)(VkDevice device,
                                        VkPipelineCache pipelineCache,
                                        uint32_t count,
                                        const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                        const VkAllocationCallbacks *pAllocator,
                                        VkPipeline *pPipelines)
{
   VK_FROM_HANDLE(panvk_device, dev, device);
   VK_FROM_HANDLE(panvk_pipeline_cache, cache, pipelineCache);

   for (uint32_t i = 0; i < count; i++) {
      struct panvk_pipeline_builder builder;
      panvk_pipeline_builder_init_graphics(&builder, dev, cache,
                                           &pCreateInfos[i], pAllocator);

      struct panvk_pipeline *pipeline;
      VkResult result = panvk_pipeline_builder_build(&builder, &pipeline);
      panvk_pipeline_builder_finish(&builder);

      if (result != VK_SUCCESS) {
         for (uint32_t j = 0; j < i; j++) {
            panvk_DestroyPipeline(device, pPipelines[j], pAllocator);
            pPipelines[j] = VK_NULL_HANDLE;
         }

         return result;
      }

      pPipelines[i] = panvk_pipeline_to_handle(pipeline);
   }

   return VK_SUCCESS;
}

static void
panvk_pipeline_builder_init_compute(struct panvk_pipeline_builder *builder,
                                    struct panvk_device *dev,
                                    struct panvk_pipeline_cache *cache,
                                    const VkComputePipelineCreateInfo *create_info,
                                    const VkAllocationCallbacks *alloc)
{
   VK_FROM_HANDLE(panvk_pipeline_layout, layout, create_info->layout);
   assert(layout);

   *builder = (struct panvk_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .layout = layout,
      .create_info.compute = create_info,
      .alloc = alloc,
   };
}

VkResult
panvk_per_arch(CreateComputePipelines)(VkDevice device,
                                       VkPipelineCache pipelineCache,
                                       uint32_t count,
                                       const VkComputePipelineCreateInfo *pCreateInfos,
                                       const VkAllocationCallbacks *pAllocator,
                                       VkPipeline *pPipelines)
{
   VK_FROM_HANDLE(panvk_device, dev, device);
   VK_FROM_HANDLE(panvk_pipeline_cache, cache, pipelineCache);

   for (uint32_t i = 0; i < count; i++) {
      struct panvk_pipeline_builder builder;
      panvk_pipeline_builder_init_compute(&builder, dev, cache,
                                          &pCreateInfos[i], pAllocator);

      struct panvk_pipeline *pipeline;
      VkResult result = panvk_pipeline_builder_build(&builder, &pipeline);
      panvk_pipeline_builder_finish(&builder);

      if (result != VK_SUCCESS) {
         for (uint32_t j = 0; j < i; j++) {
            panvk_DestroyPipeline(device, pPipelines[j], pAllocator);
            pPipelines[j] = VK_NULL_HANDLE;
         }

         return result;
      }

      pPipelines[i] = panvk_pipeline_to_handle(pipeline);
   }

   return VK_SUCCESS;
}