Lines Matching defs:pipeline
43 compute_vpm_config(struct v3dv_pipeline *pipeline);
79 /* The assembly BO is shared by all variants in the pipeline, so it can't
80 * be freed here and should be freed with the pipeline
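The comment at 79-80 sits in the shader-variant destructor. A minimal sketch of that destructor under this constraint; the function and field names are assumptions, not quotes from the file:

   void
   v3dv_shader_variant_destroy(struct v3dv_device *device,
                               struct v3dv_shader_variant *variant)
   {
      /* Deliberately no v3dv_bo_free() here: the assembly BO is owned by
       * the pipeline's shared data and is freed with the pipeline. */
      free(variant->qpu_insts);
      ralloc_free(variant->prog_data.base);
      vk_free(&device->vk.alloc, variant);
   }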
102 struct v3dv_pipeline *pipeline,
105 assert(pipeline);
110 destroy_pipeline_stage(device, pipeline->vs, pAllocator);
111 destroy_pipeline_stage(device, pipeline->vs_bin, pAllocator);
112 destroy_pipeline_stage(device, pipeline->gs, pAllocator);
113 destroy_pipeline_stage(device, pipeline->gs_bin, pAllocator);
114 destroy_pipeline_stage(device, pipeline->fs, pAllocator);
115 destroy_pipeline_stage(device, pipeline->cs, pAllocator);
117 pipeline->vs = NULL;
118 pipeline->vs_bin = NULL;
119 pipeline->gs = NULL;
120 pipeline->gs_bin = NULL;
121 pipeline->fs = NULL;
122 pipeline->cs = NULL;
126 v3dv_destroy_pipeline(struct v3dv_pipeline *pipeline,
130 if (!pipeline)
133 pipeline_free_stages(device, pipeline, pAllocator);
135 if (pipeline->shared_data) {
136 v3dv_pipeline_shared_data_unref(device, pipeline->shared_data);
137 pipeline->shared_data = NULL;
140 if (pipeline->spill.bo) {
141 assert(pipeline->spill.size_per_thread > 0);
142 v3dv_bo_free(device, pipeline->spill.bo);
145 if (pipeline->default_attribute_values) {
146 v3dv_bo_free(device, pipeline->default_attribute_values);
147 pipeline->default_attribute_values = NULL;
150 if (pipeline->executables.mem_ctx)
151 ralloc_free(pipeline->executables.mem_ctx);
153 vk_object_free(&device->vk, pAllocator, pipeline);
162 V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, _pipeline);
164 if (!pipeline)
167 v3dv_destroy_pipeline(pipeline, device, pAllocator);
461 _mesa_sha1_format(sha1buf, stage->pipeline->sha1);
537 struct v3dv_pipeline *pipeline;
552 pipeline_get_descriptor_map(struct v3dv_pipeline *pipeline,
560 assert(pipeline->shared_data &&
561 pipeline->shared_data->maps[broadcom_stage]);
565 return &pipeline->shared_data->maps[broadcom_stage]->sampler_map;
571 return &pipeline->shared_data->maps[broadcom_stage]->texture_map;
574 &pipeline->shared_data->maps[broadcom_stage]->sampler_map :
575 &pipeline->shared_data->maps[broadcom_stage]->texture_map;
579 return &pipeline->shared_data->maps[broadcom_stage]->ubo_map;
582 return &pipeline->shared_data->maps[broadcom_stage]->ssbo_map;
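Read together, the returns at 565-582 imply a switch over the descriptor type. A hedged reconstruction of pipeline_get_descriptor_map's body, with the parameter names and the exact VkDescriptorType grouping assumed:

   switch (desc_type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return &pipeline->shared_data->maps[broadcom_stage]->sampler_map;
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      return &pipeline->shared_data->maps[broadcom_stage]->texture_map;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      /* One binding feeds both maps; the caller disambiguates. */
      return is_sampler ?
         &pipeline->shared_data->maps[broadcom_stage]->sampler_map :
         &pipeline->shared_data->maps[broadcom_stage]->texture_map;
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return &pipeline->shared_data->maps[broadcom_stage]->ubo_map;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return &pipeline->shared_data->maps[broadcom_stage]->ssbo_map;
   default:
      unreachable("descriptor type with no descriptor map");
   }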
613 pipeline_get_descriptor_map(state->pipeline, binding_layout->type,
754 pipeline_get_descriptor_map(state->pipeline, binding_layout->type,
860 pipeline_get_descriptor_map(state->pipeline, binding_layout->type,
953 struct v3dv_pipeline *pipeline,
960 .pipeline = pipeline,
1038 assert(p_stage->pipeline->shared_data &&
1039 p_stage->pipeline->shared_data->maps[p_stage->stage]);
1041 /* The following values are default values used at pipeline creation. We use

1045 &p_stage->pipeline->shared_data->maps[p_stage->stage]->sampler_map;
1047 &p_stage->pipeline->shared_data->maps[p_stage->stage]->texture_map;
1072 key->is_last_geometry_stage = p_stage->pipeline->gs == NULL;
1150 const bool rba = p_stage->pipeline->device->features.robustBufferAccess;
1185 p_stage->pipeline->sample_mask != (1 << V3D_MAX_SAMPLES) - 1;
1198 const struct v3dv_subpass *subpass = p_stage->pipeline->subpass;
1215 v3dv_get_format_swizzle(p_stage->pipeline->device, fb_format),
1268 const bool rba = p_stage->pipeline->device->features.robustBufferAccess;
1271 struct v3dv_pipeline *pipeline = p_stage->pipeline;
1286 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1310 const bool rba = p_stage->pipeline->device->features.robustBufferAccess;
1313 struct v3dv_pipeline *pipeline = p_stage->pipeline;
1337 assert(pipeline->gs);
1339 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
1352 if (pipeline->gs) {
1355 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
1369 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1395 * Creates the initial form of the pipeline stage for a binning shader by
1405 struct v3dv_device *device = src->pipeline->device;
1422 p_stage->pipeline = src->pipeline;
1442 upload_assembly(struct v3dv_pipeline *pipeline)
1447 pipeline->shared_data->variants[stage];
1453 struct v3dv_bo *bo = v3dv_bo_alloc(pipeline->device, total_size,
1454 "pipeline shader assembly", true);
1460 bool ok = v3dv_bo_map(pipeline->device, bo, total_size);
1469 pipeline->shared_data->variants[stage];
1484 pipeline->shared_data->assembly_bo = bo;
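upload_assembly (1442) packs every variant's QPU code into one BO. A sketch of the copy loop, assuming per-variant qpu_insts/qpu_insts_size members and an assembly_offset field recorded for later lookup:

   uint32_t offset = 0;
   for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
      struct v3dv_shader_variant *variant =
         pipeline->shared_data->variants[stage];
      if (!variant)
         continue;
      variant->assembly_offset = offset;  /* assumed field */
      memcpy((uint8_t *)bo->map + offset,
             variant->qpu_insts, variant->qpu_insts_size);
      offset += variant->qpu_insts_size;
   }
   assert(offset <= total_size);
   pipeline->shared_data->assembly_bo = bo;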
1490 pipeline_hash_graphics(const struct v3dv_pipeline *pipeline,
1497 if (pipeline->layout) {
1498 _mesa_sha1_update(&ctx, &pipeline->layout->sha1,
1499 sizeof(pipeline->layout->sha1));
1506 _mesa_sha1_update(&ctx, pipeline->vs->shader_sha1,
1507 sizeof(pipeline->vs->shader_sha1));
1509 if (pipeline->gs) {
1510 _mesa_sha1_update(&ctx, pipeline->gs->shader_sha1,
1511 sizeof(pipeline->gs->shader_sha1));
1514 _mesa_sha1_update(&ctx, pipeline->fs->shader_sha1,
1515 sizeof(pipeline->fs->shader_sha1));
1523 pipeline_hash_compute(const struct v3dv_pipeline *pipeline,
1530 if (pipeline->layout) {
1531 _mesa_sha1_update(&ctx, &pipeline->layout->sha1,
1532 sizeof(pipeline->layout->sha1));
1535 _mesa_sha1_update(&ctx, pipeline->cs->shader_sha1,
1536 sizeof(pipeline->cs->shader_sha1));
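pipeline_hash_graphics (1490) and pipeline_hash_compute (1523) follow the same pattern with Mesa's SHA-1 helpers: fold in the layout hash, the per-stage shader SHA-1s, and the populated key. A sketch for the compute case, where key and sha1_out stand for the function's remaining parameters (names assumed):

   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);
   if (pipeline->layout) {
      _mesa_sha1_update(&ctx, &pipeline->layout->sha1,
                        sizeof(pipeline->layout->sha1));
   }
   _mesa_sha1_update(&ctx, pipeline->cs->shader_sha1,
                     sizeof(pipeline->cs->shader_sha1));
   _mesa_sha1_update(&ctx, key, sizeof(*key));
   _mesa_sha1_final(&ctx, sha1_out);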
1543 /* Checks that the pipeline has enough spill size to use for any of its
1547 pipeline_check_spill_size(struct v3dv_pipeline *pipeline)
1553 pipeline->shared_data->variants[stage];
1562 struct v3dv_device *device = pipeline->device;
1571 if (pipeline->spill.bo) {
1572 assert(pipeline->spill.size_per_thread > 0);
1573 v3dv_bo_free(device, pipeline->spill.bo);
1575 pipeline->spill.bo =
1577 pipeline->spill.size_per_thread = max_spill_size;
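From 1547-1577, the check takes the maximum spill size over all variants and reallocates the spill BO only when it grows. A sketch; the per-QPU scaling factor is an assumption about how total scratch is sized:

   uint32_t max_spill_size = 0;
   for (uint8_t stage = 0; stage < BROADCOM_SHADER_STAGES; stage++) {
      struct v3dv_shader_variant *variant =
         pipeline->shared_data->variants[stage];
      if (variant) {
         max_spill_size = MAX2(variant->prog_data.base->spill_size,
                               max_spill_size);
      }
   }

   if (max_spill_size > 0) {
      struct v3dv_device *device = pipeline->device;
      /* Assumed sizing: one spill area per thread on every QPU. */
      const uint32_t total_spill_size =
         4 * device->devinfo.qpu_count * max_spill_size;
      if (pipeline->spill.bo) {
         assert(pipeline->spill.size_per_thread > 0);
         v3dv_bo_free(device, pipeline->spill.bo);
      }
      pipeline->spill.bo =
         v3dv_bo_alloc(device, total_spill_size, "spill", true);
      pipeline->spill.size_per_thread = max_spill_size;
   }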
1647 struct v3dv_pipeline *pipeline = p_stage->pipeline;
1649 &pipeline->device->instance->physicalDevice;
1684 v3dv_shader_variant_create(pipeline->device, p_stage->stage,
1691 * all the temporary p_stage structs used during the pipeline creation when
1739 pipeline_lower_nir(struct v3dv_pipeline *pipeline,
1745 assert(pipeline->shared_data &&
1746 pipeline->shared_data->maps[p_stage->stage]);
1758 pipeline->shared_data->maps[p_stage->stage];
1767 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
1769 NIR_PASS(_, p_stage->nir, lower_pipeline_layout_info, pipeline, layout,
1805 struct v3dv_pipeline *pipeline,
1812 nir = v3dv_pipeline_cache_search_for_nir(pipeline, cache,
1819 /* A NIR cache hit doesn't avoid the large majority of pipeline stage
1820 * creation so the cache hit is not recorded in the pipeline feedback
1829 nir = shader_module_compile_to_nir(pipeline->device, p_stage);
1833 &pipeline->device->default_pipeline_cache;
1835 v3dv_pipeline_cache_upload_nir(pipeline, cache, nir,
1842 v3dv_pipeline_cache_upload_nir(pipeline, default_cache, nir,
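The matches at 1805-1842 describe a lookup-then-compile flow for stage NIR. A condensed sketch; the nir_options and sha1_key arguments are assumptions about the call shape:

   nir = v3dv_pipeline_cache_search_for_nir(pipeline, cache,
                                            nir_options, sha1_key);
   if (!nir) {
      /* Miss: compile from the shader module, then seed both the given
       * cache and the device's default cache for later pipelines. */
      nir = shader_module_compile_to_nir(pipeline->device, p_stage);
      v3dv_pipeline_cache_upload_nir(pipeline, cache, nir, sha1_key);
      v3dv_pipeline_cache_upload_nir(pipeline, default_cache, nir, sha1_key);
   }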
1856 pipeline_compile_vertex_shader(struct v3dv_pipeline *pipeline,
1860 assert(pipeline->vs_bin != NULL);
1861 if (pipeline->vs_bin->nir == NULL) {
1862 assert(pipeline->vs->nir);
1863 pipeline->vs_bin->nir = nir_shader_clone(NULL, pipeline->vs->nir);
1868 pipeline_populate_v3d_vs_key(&key, pCreateInfo, pipeline->vs);
1869 pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX] =
1870 pipeline_compile_shader_variant(pipeline->vs, &key.base, sizeof(key),
1875 pipeline_populate_v3d_vs_key(&key, pCreateInfo, pipeline->vs_bin);
1876 pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN] =
1877 pipeline_compile_shader_variant(pipeline->vs_bin, &key.base, sizeof(key),
1884 pipeline_compile_geometry_shader(struct v3dv_pipeline *pipeline,
1888 assert(pipeline->gs);
1890 assert(pipeline->gs_bin != NULL);
1891 if (pipeline->gs_bin->nir == NULL) {
1892 assert(pipeline->gs->nir);
1893 pipeline->gs_bin->nir = nir_shader_clone(NULL, pipeline->gs->nir);
1898 pipeline_populate_v3d_gs_key(&key, pCreateInfo, pipeline->gs);
1899 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY] =
1900 pipeline_compile_shader_variant(pipeline->gs, &key.base, sizeof(key),
1905 pipeline_populate_v3d_gs_key(&key, pCreateInfo, pipeline->gs_bin);
1906 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN] =
1907 pipeline_compile_shader_variant(pipeline->gs_bin, &key.base, sizeof(key),
1914 pipeline_compile_fragment_shader(struct v3dv_pipeline *pipeline,
1918 struct v3dv_pipeline_stage *p_stage = pipeline->vs;
1920 p_stage = pipeline->fs;
1925 pipeline->gs != NULL,
1926 get_ucp_enable_mask(pipeline->vs));
1929 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT] =
1937 pipeline_populate_graphics_key(struct v3dv_pipeline *pipeline,
1943 pipeline->device->features.robustBufferAccess;
1971 pipeline->sample_mask != (1 << V3D_MAX_SAMPLES) - 1;
1979 const struct v3dv_subpass *subpass = pipeline->subpass;
1996 v3dv_get_format_swizzle(pipeline->device, fb_format),
2019 assert(pipeline->subpass);
2020 key->has_multiview = pipeline->subpass->view_mask != 0;
2024 pipeline_populate_compute_key(struct v3dv_pipeline *pipeline,
2028 /* We use the same pipeline key for graphics and compute, but we don't need
2035 pipeline->device->features.robustBufferAccess;
2040 struct v3dv_pipeline *pipeline,
2044 * and unref by both the pipeline and the pipeline cache, so we can't
2045 * ensure that the cache or pipeline alloc will be available on the last
2049 vk_zalloc2(&pipeline->device->vk.alloc, NULL,
2068 if (stage == BROADCOM_SHADER_GEOMETRY && !pipeline->gs) {
2070 if (!pipeline->subpass->view_mask)
2075 vk_zalloc2(&pipeline->device->vk.alloc, NULL,
2100 vk_free(&pipeline->device->vk.alloc, new_entry->maps[stage]);
2104 vk_free(&pipeline->device->vk.alloc, new_entry);
2110 write_creation_feedback(struct v3dv_pipeline *pipeline,
2131 pipeline->vs->feedback;
2134 pipeline->vs_bin->feedback.duration;
2139 pipeline->gs->feedback;
2142 pipeline->gs_bin->feedback.duration;
2147 pipeline->fs->feedback;
2152 pipeline->cs->feedback;
2163 multiview_gs_input_primitive_from_pipeline(struct v3dv_pipeline *pipeline)
2165 switch (pipeline->topology) {
2179 unreachable("Unexpected pipeline primitive type");
2184 multiview_gs_output_primitive_from_pipeline(struct v3dv_pipeline *pipeline)
2186 switch (pipeline->topology) {
2200 unreachable("Unexpected pipeline primitive type");
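Both helpers (2163, 2184) map pipeline->topology, a PIPE_PRIM_* value per line 2942, to a GS primitive type. A sketch of the input-side mapping; the SHADER_PRIM_* spelling varies across Mesa versions and is assumed here:

   switch (pipeline->topology) {
   case PIPE_PRIM_POINTS:
      return SHADER_PRIM_POINTS;
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
      return SHADER_PRIM_LINES;
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
   case PIPE_PRIM_TRIANGLE_FAN:
      return SHADER_PRIM_TRIANGLES;
   default:
      unreachable("Unexpected pipeline primitive type");
   }

The output-side helper would return the strip equivalents (points, line strip, triangle strip).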
2205 pipeline_add_multiview_gs(struct v3dv_pipeline *pipeline,
2210 pipeline->vs->nir = pipeline_stage_get_nir(pipeline->vs, pipeline, cache);
2211 nir_shader *vs_nir = pipeline->vs->nir;
2221 uint32_t vertex_count = u_vertices_per_prim(pipeline->topology);
2223 multiview_gs_input_primitive_from_pipeline(pipeline);
2225 multiview_gs_output_primitive_from_pipeline(pipeline);
2287 /* Attach the geometry shader to the pipeline */
2288 struct v3dv_device *device = pipeline->device;
2301 p_stage->pipeline = pipeline;
2309 pipeline->has_gs = true;
2310 pipeline->gs = p_stage;
2311 pipeline->active_stages |= VK_SHADER_STAGE_GEOMETRY_BIT;
2313 pipeline->gs_bin =
2314 pipeline_stage_create_binning(pipeline->gs, pAllocator);
2315 if (pipeline->gs_bin == NULL)
2322 pipeline_check_buffer_device_address(struct v3dv_pipeline *pipeline)
2325 struct v3dv_shader_variant *variant = pipeline->shared_data->variants[i];
2327 pipeline->uses_buffer_device_address = true;
2332 pipeline->uses_buffer_device_address = false;
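The loop at 2322-2332 scans every compiled variant and latches the flag on the first user of global addressing. A sketch; the prog_data flag name is an assumption:

   for (int i = 0; i < BROADCOM_SHADER_STAGES; i++) {
      struct v3dv_shader_variant *variant =
         pipeline->shared_data->variants[i];
      if (variant && variant->prog_data.base->has_global_address) {
         pipeline->uses_buffer_device_address = true;
         return;
      }
   }
   pipeline->uses_buffer_device_address = false;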
2336 * It compiles a pipeline. Note that it also allocates internal objects, but if
2341 * the pipeline destroy method, and this would handle freeing the internal
2346 pipeline_compile_graphics(struct v3dv_pipeline *pipeline,
2356 struct v3dv_device *device = pipeline->device;
2375 * v3d. Here we are assigning one per pipeline stage, so vs and vs_bin
2383 p_stage->pipeline = pipeline;
2391 pipeline->active_stages |= sinfo->stage;
2400 pipeline->vs = p_stage;
2401 pipeline->vs_bin =
2402 pipeline_stage_create_binning(pipeline->vs, pAllocator);
2403 if (pipeline->vs_bin == NULL)
2408 pipeline->has_gs = true;
2409 pipeline->gs = p_stage;
2410 pipeline->gs_bin =
2411 pipeline_stage_create_binning(pipeline->gs, pAllocator);
2412 if (pipeline->gs_bin == NULL)
2417 pipeline->fs = p_stage;
2426 if (!pipeline->fs) {
2438 p_stage->pipeline = pipeline;
2447 pipeline->fs = p_stage;
2448 pipeline->active_stages |= VK_SHADER_STAGE_FRAGMENT_BIT;
2454 assert(!pipeline->subpass->view_mask || (!pipeline->has_gs && !pipeline->gs));
2455 if (pipeline->subpass->view_mask) {
2456 if (!pipeline_add_multiview_gs(pipeline, cache, pAllocator))
2460 /* First we try to get the variants from the pipeline cache (unless we are
2468 pipeline_populate_graphics_key(pipeline, &pipeline_key, pCreateInfo);
2469 pipeline_hash_graphics(pipeline, &pipeline_key, pipeline->sha1);
2473 pipeline->shared_data =
2475 pipeline->sha1,
2478 if (pipeline->shared_data != NULL) {
2479 /* A correct pipeline must have at least a VS and FS */
2480 assert(pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX]);
2481 assert(pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN]);
2482 assert(pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]);
2483 assert(!pipeline->gs ||
2484 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY]);
2485 assert(!pipeline->gs ||
2486 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN]);
2488 if (cache_hit && cache != &pipeline->device->default_pipeline_cache)
2500 * shader or the pipeline cache) and compile.
2502 pipeline->shared_data =
2503 v3dv_pipeline_shared_data_new_empty(pipeline->sha1, pipeline, true);
2504 if (!pipeline->shared_data)
2507 pipeline->vs->feedback.flags |=
2509 if (pipeline->gs)
2510 pipeline->gs->feedback.flags |=
2512 pipeline->fs->feedback.flags |=
2515 if (!pipeline->vs->nir)
2516 pipeline->vs->nir = pipeline_stage_get_nir(pipeline->vs, pipeline, cache);
2517 if (pipeline->gs && !pipeline->gs->nir)
2518 pipeline->gs->nir = pipeline_stage_get_nir(pipeline->gs, pipeline, cache);
2519 if (!pipeline->fs->nir)
2520 pipeline->fs->nir = pipeline_stage_get_nir(pipeline->fs, pipeline, cache);
2522 /* Linking + pipeline lowerings */
2523 if (pipeline->gs) {
2524 link_shaders(pipeline->gs->nir, pipeline->fs->nir);
2525 link_shaders(pipeline->vs->nir, pipeline->gs->nir);
2527 link_shaders(pipeline->vs->nir, pipeline->fs->nir);
2530 pipeline_lower_nir(pipeline, pipeline->fs, pipeline->layout);
2531 lower_fs_io(pipeline->fs->nir);
2533 if (pipeline->gs) {
2534 pipeline_lower_nir(pipeline, pipeline->gs, pipeline->layout);
2535 lower_gs_io(pipeline->gs->nir);
2538 pipeline_lower_nir(pipeline, pipeline->vs, pipeline->layout);
2539 lower_vs_io(pipeline->vs->nir);
2545 assert(!pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]);
2546 vk_result = pipeline_compile_fragment_shader(pipeline, pAllocator, pCreateInfo);
2550 assert(!pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY] &&
2551 !pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN]);
2553 if (pipeline->gs) {
2555 pipeline_compile_geometry_shader(pipeline, pAllocator, pCreateInfo);
2560 assert(!pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX] &&
2561 !pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN]);
2563 vk_result = pipeline_compile_vertex_shader(pipeline, pAllocator, pCreateInfo);
2567 if (!upload_assembly(pipeline))
2570 v3dv_pipeline_cache_upload_pipeline(pipeline, cache);
2574 pipeline_check_buffer_device_address(pipeline);
2577 write_creation_feedback(pipeline,
2583 /* Since we have the variants in the pipeline shared data, we can now free
2584 * the pipeline stages.
2587 pipeline_free_stages(device, pipeline, pAllocator);
2589 pipeline_check_spill_size(pipeline);
2591 return compute_vpm_config(pipeline);
2595 compute_vpm_config(struct v3dv_pipeline *pipeline)
2598 pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
2600 pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
2606 if (pipeline->has_gs) {
2608 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
2610 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
2615 if (!v3d_compute_vpm_config(&pipeline->device->devinfo,
2617 &pipeline->vpm_cfg_bin,
2618 &pipeline->vpm_cfg)) {
2649 * ignoring this dynamic state. We are already asserting at pipeline creation
2662 struct v3dv_pipeline *pipeline,
2671 struct v3dv_dynamic_state *dynamic = &pipeline->dynamic_state;
2690 /* For any pipeline states that are not dynamic, set the dynamic state
2691 * from the static pipeline state.
2757 pipeline->dynamic_state.mask = dynamic_states;
2768 enable_depth_bias(struct v3dv_pipeline *pipeline,
2771 pipeline->depth_bias.enabled = false;
2772 pipeline->depth_bias.is_z16 = false;
2778 * this pipeline.
2780 assert(pipeline->pass && pipeline->subpass);
2781 struct v3dv_render_pass *pass = pipeline->pass;
2782 struct v3dv_subpass *subpass = pipeline->subpass;
2792 pipeline->depth_bias.is_z16 = true;
2794 pipeline->depth_bias.enabled = true;
2798 pipeline_set_ez_state(struct v3dv_pipeline *pipeline,
2802 pipeline->ez_state = V3D_EZ_DISABLED;
2809 pipeline->ez_state = V3D_EZ_LT_LE;
2813 pipeline->ez_state = V3D_EZ_GT_GE;
2817 pipeline->ez_state = V3D_EZ_UNDECIDED;
2820 pipeline->ez_state = V3D_EZ_DISABLED;
2821 pipeline->incompatible_ez_test = true;
2829 pipeline->ez_state = V3D_EZ_DISABLED;
2834 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
2837 pipeline->ez_state = V3D_EZ_DISABLED;
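The assignments at 2798-2837 suggest the early-Z choice keys off the depth compare op. A hedged sketch of that switch; the VkCompareOp grouping is inferred from the V3D_EZ_* values above:

   switch (ds_info->depthCompareOp) {
   case VK_COMPARE_OP_LESS:
   case VK_COMPARE_OP_LESS_OR_EQUAL:
      pipeline->ez_state = V3D_EZ_LT_LE;
      break;
   case VK_COMPARE_OP_GREATER:
   case VK_COMPARE_OP_GREATER_OR_EQUAL:
      pipeline->ez_state = V3D_EZ_GT_GE;
      break;
   case VK_COMPARE_OP_NEVER:
   case VK_COMPARE_OP_EQUAL:
      pipeline->ez_state = V3D_EZ_UNDECIDED;
      break;
   default:
      /* Tests like NOT_EQUAL can change results under early-Z. */
      pipeline->ez_state = V3D_EZ_DISABLED;
      pipeline->incompatible_ez_test = true;
      break;
   }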
2842 pipeline_has_integer_vertex_attrib(struct v3dv_pipeline *pipeline)
2844 for (uint8_t i = 0; i < pipeline->va_count; i++) {
2845 if (vk_format_is_int(pipeline->va[i].vk_format))
2851 /* @pipeline can be NULL. We assume in that case that all the attributes have
2854 * attribute used with the specific pipeline passed in.
2858 struct v3dv_pipeline *pipeline)
2878 uint8_t va_count = pipeline != NULL ? pipeline->va_count : 0;
2884 pipeline != NULL ? pipeline->va[i].vk_format : VK_FORMAT_UNDEFINED;
2898 pipeline_set_sample_mask(struct v3dv_pipeline *pipeline,
2901 pipeline->sample_mask = (1 << V3D_MAX_SAMPLES) - 1;
2908 pipeline->sample_mask &= ms_info->pSampleMask[0];
2913 pipeline_set_sample_rate_shading(struct v3dv_pipeline *pipeline,
2916 pipeline->sample_rate_shading =
2922 pipeline_init(struct v3dv_pipeline *pipeline,
2930 pipeline->device = device;
2933 pipeline->layout = layout;
2937 pipeline->pass = render_pass;
2938 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
2942 pipeline->topology = vk_to_pipe_prim_type[ia_info->topology];
2982 pipeline_init_dynamic_state(pipeline,
2987 * feature and it shouldn't be used by any pipeline.
2991 v3dv_X(device, pipeline_pack_state)(pipeline, cb_info, ds_info,
2995 enable_depth_bias(pipeline, rs_info);
2996 pipeline_set_sample_mask(pipeline, ms_info);
2997 pipeline_set_sample_rate_shading(pipeline, ms_info);
2999 pipeline->primitive_restart =
3002 result = pipeline_compile_graphics(pipeline, cache, pCreateInfo, pAllocator);
3005 /* The caller will destroy the pipeline, and we didn't allocate any
3018 v3dv_X(device, pipeline_pack_compile_state)(pipeline, vi_info, vd_info);
3020 if (pipeline_has_integer_vertex_attrib(pipeline)) {
3021 pipeline->default_attribute_values =
3022 v3dv_pipeline_create_default_attribute_values(pipeline->device, pipeline);
3023 if (!pipeline->default_attribute_values)
3026 pipeline->default_attribute_values = NULL;
3029 /* This must be done after the pipeline has been compiled */
3030 pipeline_set_ez_state(pipeline, ds_info);
3045 struct v3dv_pipeline *pipeline;
3048 /* Use the default pipeline cache if none is specified */
3052 pipeline = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline),
3055 if (pipeline == NULL)
3058 result = pipeline_init(pipeline, device, cache,
3063 v3dv_destroy_pipeline(pipeline, device, pAllocator);
3069 *pPipeline = v3dv_pipeline_to_handle(pipeline);
3139 pipeline_compile_compute(struct v3dv_pipeline *pipeline,
3149 struct v3dv_device *device = pipeline->device;
3163 p_stage->pipeline = pipeline;
3174 pipeline->cs = p_stage;
3175 pipeline->active_stages |= sinfo->stage;
3177 /* First we try to get the variants from the pipeline cache (unless we are
3185 pipeline_populate_compute_key(pipeline, &pipeline_key, info);
3186 pipeline_hash_compute(pipeline, &pipeline_key, pipeline->sha1);
3189 pipeline->shared_data =
3190 v3dv_pipeline_cache_search_for_pipeline(cache, pipeline->sha1, &cache_hit);
3192 if (pipeline->shared_data != NULL) {
3193 assert(pipeline->shared_data->variants[BROADCOM_SHADER_COMPUTE]);
3194 if (cache_hit && cache != &pipeline->device->default_pipeline_cache)
3205 pipeline->shared_data = v3dv_pipeline_shared_data_new_empty(pipeline->sha1,
3206 pipeline,
3208 if (!pipeline->shared_data)
3214 p_stage->nir = pipeline_stage_get_nir(p_stage, pipeline, cache);
3218 pipeline_lower_nir(pipeline, p_stage, pipeline->layout);
3226 pipeline->device->features.robustBufferAccess);
3227 pipeline->shared_data->variants[BROADCOM_SHADER_COMPUTE] =
3234 if (!upload_assembly(pipeline))
3237 v3dv_pipeline_cache_upload_pipeline(pipeline, cache);
3241 pipeline_check_buffer_device_address(pipeline);
3244 write_creation_feedback(pipeline,
3250 /* Since the variants now live in pipeline->shared_data, after compiling we
3254 pipeline_free_stages(device, pipeline, alloc);
3256 pipeline_check_spill_size(pipeline);
3262 compute_pipeline_init(struct v3dv_pipeline *pipeline,
3270 pipeline->device = device;
3271 pipeline->layout = layout;
3273 VkResult result = pipeline_compile_compute(pipeline, cache, info, alloc);
3288 struct v3dv_pipeline *pipeline;
3291 /* Use the default pipeline cache if none is specified */
3295 pipeline = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline),
3297 if (pipeline == NULL)
3300 result = compute_pipeline_init(pipeline, device, cache,
3303 v3dv_destroy_pipeline(pipeline, device, pAllocator);
3309 *pPipeline = v3dv_pipeline_to_handle(pipeline);
3357 pipeline_get_nir(struct v3dv_pipeline *pipeline,
3362 if (pipeline->vs)
3363 return pipeline->vs->nir;
3366 if (pipeline->vs_bin)
3367 return pipeline->vs_bin->nir;
3370 if (pipeline->gs)
3371 return pipeline->gs->nir;
3374 if (pipeline->gs_bin)
3375 return pipeline->gs_bin->nir;
3378 if (pipeline->fs)
3379 return pipeline->fs->nir;
3382 if (pipeline->cs)
3383 return pipeline->cs->nir;
3393 pipeline_get_prog_data(struct v3dv_pipeline *pipeline,
3396 if (pipeline->shared_data->variants[stage])
3397 return pipeline->shared_data->variants[stage]->prog_data.base;
3402 pipeline_get_qpu(struct v3dv_pipeline *pipeline,
3407 pipeline->shared_data->variants[stage];
3414 struct v3dv_bo *qpu_bo = pipeline->shared_data->assembly_bo;
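pipeline_get_qpu (3402) resolves a variant's code inside the shared assembly BO. A sketch assuming assembly_offset and qpu_insts_size variant fields:

   struct v3dv_shader_variant *variant =
      pipeline->shared_data->variants[stage];
   if (!variant) {
      *qpu_size = 0;
      return NULL;
   }
   struct v3dv_bo *qpu_bo = pipeline->shared_data->assembly_bo;
   *qpu_size = variant->qpu_insts_size;
   return (uint64_t *)((uint8_t *)qpu_bo->map + variant->assembly_offset);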
3464 pipeline_collect_executable_data(struct v3dv_pipeline *pipeline)
3466 if (pipeline->executables.mem_ctx)
3469 pipeline->executables.mem_ctx = ralloc_context(NULL);
3470 util_dynarray_init(&pipeline->executables.data,
3471 pipeline->executables.mem_ctx);
3474 if (!pipeline->shared_data || !pipeline->shared_data->assembly_bo)
3477 /* Map the assembly BO so we can read the pipeline's QPU code */
3478 struct v3dv_bo *qpu_bo = pipeline->shared_data->assembly_bo;
3480 if (!v3dv_bo_map(pipeline->device, qpu_bo, qpu_bo->size)) {
3488 if (!(vk_stage & pipeline->active_stages))
3491 nir_shader *nir = pipeline_get_nir(pipeline, s);
3493 nir_shader_as_str(nir, pipeline->executables.mem_ctx) : NULL;
3497 uint64_t *qpu = pipeline_get_qpu(pipeline, s, &qpu_size);
3500 qpu_str = rzalloc_size(pipeline->executables.mem_ctx,
3504 const char *str = v3d_qpu_disasm(&pipeline->device->devinfo, qpu[i]);
3515 util_dynarray_append(&pipeline->executables.data,
3519 v3dv_bo_unmap(pipeline->device, qpu_bo);
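The matches at 3497-3504 imply per-instruction disassembly into one string. A sketch; the buffer sizing constant is an assumption (rzalloc_size zero-fills, so strcat is safe):

   uint32_t qpu_inst_count = qpu_size / sizeof(uint64_t);
   qpu_str = rzalloc_size(pipeline->executables.mem_ctx,
                          qpu_inst_count * 96);
   for (uint32_t i = 0; i < qpu_inst_count; i++) {
      const char *str = v3d_qpu_disasm(&pipeline->device->devinfo, qpu[i]);
      strcat(qpu_str, str);
      strcat(qpu_str, "\n");
      ralloc_free((void *)str);  /* v3d_qpu_disasm ralloc's its result */
   }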
3523 pipeline_get_executable(struct v3dv_pipeline *pipeline, uint32_t index)
3525 assert(index < util_dynarray_num_elements(&pipeline->executables.data,
3527 return util_dynarray_element(&pipeline->executables.data,
3539 V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, pExecutableInfo->pipeline);
3541 pipeline_collect_executable_data(pipeline);
3548 pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
3580 V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, pPipelineInfo->pipeline);
3582 pipeline_collect_executable_data(pipeline);
3587 util_dynarray_foreach(&pipeline->executables.data,
3615 V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, pExecutableInfo->pipeline);
3617 pipeline_collect_executable_data(pipeline);
3620 pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
3623 pipeline_get_prog_data(pipeline, exe->stage);
3626 pipeline->shared_data->variants[exe->stage];