Lines Matching refs:pipeline

48 lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline)
50 if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
51 device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
52 if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
53 device->queue.ctx->delete_fs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
54 if (pipeline->shader_cso[PIPE_SHADER_GEOMETRY])
55 device->queue.ctx->delete_gs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
56 if (pipeline->shader_cso[PIPE_SHADER_TESS_CTRL])
57 device->queue.ctx->delete_tcs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
58 if (pipeline->shader_cso[PIPE_SHADER_TESS_EVAL])
59 device->queue.ctx->delete_tes_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
60 if (pipeline->shader_cso[PIPE_SHADER_COMPUTE])
61 device->queue.ctx->delete_compute_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
64 ralloc_free(pipeline->pipeline_nir[i]);
66 if (pipeline->layout)
67 vk_pipeline_layout_unref(&device->vk, &pipeline->layout->vk);
69 ralloc_free(pipeline->mem_ctx);
70 vk_free(&device->vk.alloc, pipeline->state_data);
71 vk_object_base_finish(&pipeline->base);
72 vk_free(&device->vk.alloc, pipeline);
81 LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
87 util_dynarray_append(&device->queue.pipeline_destroys, struct lvp_pipeline*, pipeline);
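
Lines 48-72 above free every per-stage gallium CSO through the queue's context, drop the layout reference, and release the NIR ralloc context before freeing the pipeline object itself, while line 87 shows that the destroy entry point only appends the pipeline to queue.pipeline_destroys so the real teardown can run once the queue has gone idle. A minimal standalone sketch of that deferred-destroy pattern, using hypothetical stub types instead of the real lavapipe and util_dynarray structures:

   #include <stdlib.h>

   /* hypothetical stand-ins for lvp_pipeline and the destroy queue */
   struct pipeline { void *shader_cso[6]; };

   struct destroy_queue {
      struct pipeline **items;
      size_t count, cap;
   };

   /* destroy entry point analogue: just remember the pipeline (cf. line 87) */
   static void queue_pipeline_destroy(struct destroy_queue *q, struct pipeline *p)
   {
      if (q->count == q->cap) {
         q->cap = q->cap ? q->cap * 2 : 8;
         q->items = realloc(q->items, q->cap * sizeof(*q->items));
      }
      q->items[q->count++] = p;
   }

   /* drained once the queue is idle: free the CSOs, then the object
    * (the real code calls ctx->delete_*_state per stage, cf. lines 50-61) */
   static void drain_pipeline_destroys(struct destroy_queue *q)
   {
      for (size_t i = 0; i < q->count; i++) {
         struct pipeline *p = q->items[i];
         for (unsigned s = 0; s < 6; s++)
            free(p->shader_cso[s]);
         free(p);
      }
      q->count = 0;
   }
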
128 set_image_access(struct lvp_pipeline *pipeline, nir_shader *nir,
136 get_binding_layout(pipeline->layout, var->data.descriptor_set, var->data.binding);
138 if (pipeline->layout->vk.set_layouts[s])
139 value += get_set_layout(pipeline->layout, s)->stage[nir->info.stage].image_count;
146 pipeline->access[nir->info.stage].images_read |= mask;
148 pipeline->access[nir->info.stage].images_written |= mask;
152 set_buffer_access(struct lvp_pipeline *pipeline, nir_shader *nir,
170 get_binding_layout(pipeline->layout, var->data.descriptor_set, var->data.binding);
172 if (pipeline->layout->vk.set_layouts[s])
173 value += get_set_layout(pipeline->layout, s)->stage[nir->info.stage].shader_buffer_count;
179 pipeline->access[nir->info.stage].buffers_written |= mask;
183 scan_intrinsic(struct lvp_pipeline *pipeline, nir_shader *nir, nir_intrinsic_instr *instr)
190 set_image_access(pipeline, nir, instr, true, false);
193 set_image_access(pipeline, nir, instr, false, true);
206 set_image_access(pipeline, nir, instr, true, true);
223 set_buffer_access(pipeline, nir, instr);
230 scan_pipeline_info(struct lvp_pipeline *pipeline, nir_shader *nir)
237 scan_intrinsic(pipeline, nir, nir_instr_as_intrinsic(instr));
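
scan_pipeline_info walks each shader's intrinsics and, via scan_intrinsic, classifies image and buffer accesses (lines 183-237). set_image_access and set_buffer_access (lines 128-179) flatten a variable's (descriptor set, binding) pair into a per-stage slot by summing the image or shader-buffer counts of all earlier set layouts, then OR that slot's bits into the stage's read/written mask. A self-contained sketch of the flattening arithmetic, with made-up types in place of the real layout structures (the array-size-wide mask is an assumption; the listing only shows the final |= of a mask):

   #include <stdbool.h>
   #include <stdint.h>

   /* hypothetical per-stage count carried by each descriptor set layout */
   struct set_counts { unsigned image_count; };

   struct stage_access { uint64_t images_read, images_written; };

   /* flatten (set, index within set) into a bit range and record the access
    * (cf. lines 136-148: the image_count of every earlier set is summed first) */
   static void record_image_access(const struct set_counts *sets, unsigned set,
                                   unsigned index_in_set, unsigned array_size,
                                   bool reads, bool writes,
                                   struct stage_access *access)
   {
      unsigned slot = index_in_set;
      for (unsigned s = 0; s < set; s++)
         slot += sets[s].image_count;

      uint64_t mask = (array_size >= 64 ? ~0ull : (1ull << array_size) - 1) << slot;
      if (reads)
         access->images_read |= mask;
      if (writes)
         access->images_written |= mask;
   }
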
383 lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
386 struct lvp_device *pdevice = pipeline->device;
388 const nir_shader_compiler_options *drv_options = pdevice->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
469 scan_pipeline_info(pipeline, nir);
472 lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
528 pipeline->inlines[stage].must_inline = lvp_find_inlinable_uniforms(pipeline, nir);
529 pipeline->pipeline_nir[stage] = nir;
574 lvp_pipeline_xfb_init(struct lvp_pipeline *pipeline)
577 if (pipeline->pipeline_nir[MESA_SHADER_GEOMETRY])
579 else if (pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL])
581 pipeline->last_vertex = stage;
583 nir_xfb_info *xfb_info = pipeline->pipeline_nir[stage]->xfb_info;
588 nir_foreach_shader_out_variable(var, pipeline->pipeline_nir[stage]) {
595 pipeline->stream_output.num_outputs = xfb_info->output_count;
598 pipeline->stream_output.stride[i] = xfb_info->buffers[i].stride / 4;
602 pipeline->stream_output.output[i].output_buffer = xfb_info->outputs[i].buffer;
603 pipeline->stream_output.output[i].dst_offset = xfb_info->outputs[i].offset / 4;
604 pipeline->stream_output.output[i].register_index = output_mapping[xfb_info->outputs[i].location];
605 pipeline->stream_output.output[i].num_components = util_bitcount(xfb_info->outputs[i].component_mask);
606 pipeline->stream_output.output[i].start_component = ffs(xfb_info->outputs[i].component_mask) - 1;
607 pipeline->stream_output.output[i].stream = xfb_info->buffer_to_stream[xfb_info->outputs[i].buffer];
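
lvp_pipeline_xfb_init (lines 574-607) picks the last vertex-processing stage (geometry, else tessellation evaluation, else vertex), builds an output_mapping from that stage's output variables, and rewrites the NIR nir_xfb_info into the gallium stream-output description: byte strides and offsets become dword units, and each output's component_mask yields a start component plus a component count. A self-contained sketch of the per-output conversion (the struct names are simplified stand-ins, not the real nir/pipe types):

   #include <stdint.h>
   #include <strings.h>   /* ffs() */

   /* simplified stand-ins for nir_xfb_output_info and pipe_stream_output */
   struct xfb_output { unsigned buffer, offset, location; uint8_t component_mask; };
   struct so_output  { unsigned output_buffer, dst_offset, register_index,
                       num_components, start_component; };

   static unsigned bitcount8(uint8_t v)
   {
      unsigned n = 0;
      for (; v; v >>= 1)
         n += v & 1;
      return n;
   }

   /* cf. lines 602-606: byte offsets become dwords, the first set bit of the
    * component mask is the start component, its popcount is the width */
   static void convert_xfb_output(const struct xfb_output *in,
                                  const unsigned *output_mapping, /* location -> register */
                                  struct so_output *out)
   {
      out->output_buffer   = in->buffer;
      out->dst_offset      = in->offset / 4;
      out->register_index  = output_mapping[in->location];
      out->num_components  = bitcount8(in->component_mask);
      out->start_component = ffs(in->component_mask) - 1;
   }
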
614 lvp_pipeline_compile_stage(struct lvp_pipeline *pipeline, nir_shader *nir)
616 struct lvp_device *device = pipeline->device;
627 if (nir->info.stage == pipeline->last_vertex)
628 memcpy(&shstate.stream_output, &pipeline->stream_output, sizeof(shstate.stream_output));
650 lvp_pipeline_compile(struct lvp_pipeline *pipeline, nir_shader *nir)
652 struct lvp_device *device = pipeline->device;
654 return lvp_pipeline_compile_stage(pipeline, nir);
740 lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
754 pipeline->stages = libinfo->flags;
756 pipeline->stages = VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT |
760 pipeline->mem_ctx = ralloc_context(NULL);
763 pipeline->library = true;
770 /* this is a regular pipeline with no partials: directly reuse */
771 pipeline->layout = layout;
772 else if (pipeline->stages & layout_stages) {
773 if ((pipeline->stages & layout_stages) == layout_stages)
775 pipeline->layout = layout;
778 merge_layouts(pipeline, layout);
785 vk_graphics_pipeline_state_merge(&pipeline->graphics_state,
788 pipeline->line_smooth = p->line_smooth;
789 pipeline->disable_multisample = p->disable_multisample;
790 pipeline->line_rectangular = p->line_rectangular;
791 pipeline->last_vertex = p->last_vertex;
792 memcpy(&pipeline->stream_output, &p->stream_output, sizeof(p->stream_output));
793 memcpy(&pipeline->access, &p->access, sizeof(p->access));
796 pipeline->force_min_sample = p->force_min_sample;
799 merge_layouts(pipeline, p->layout);
801 pipeline->stages |= p->stages;
806 &pipeline->graphics_state,
809 &pipeline->state_data);
813 assert(pipeline->library || pipeline->stages == (VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT |
818 pipeline->device = device;
824 if (!(pipeline->stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT))
827 if (!(pipeline->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT))
830 result = lvp_shader_compile_to_ir(pipeline, sinfo);
836 pipeline->gs_output_lines = pipeline->pipeline_nir[MESA_SHADER_GEOMETRY] &&
837 pipeline->pipeline_nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive == SHADER_PRIM_LINES;
840 if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_shading)
841 pipeline->force_min_sample = true;
846 if (pCreateInfo->stageCount && pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]) {
847 nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
848 merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
849 if (pipeline->graphics_state.ts->domain_origin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
850 pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
857 pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = nir_shader_clone(pipeline->mem_ctx, p->pipeline_nir[MESA_SHADER_FRAGMENT]);
862 pipeline->pipeline_nir[j] = nir_shader_clone(pipeline->mem_ctx, p->pipeline_nir[j]);
866 } else if (pipeline->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
867 const struct vk_rasterization_state *rs = pipeline->graphics_state.rs;
870 pipeline->line_smooth = rs->line.mode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
871 pipeline->disable_multisample = rs->line.mode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT ||
873 pipeline->line_rectangular = rs->line.mode != VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
875 pipeline->line_rectangular = true;
876 lvp_pipeline_xfb_init(pipeline);
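
For a pre-rasterization library or pipeline (lines 866-876), three line-rasterization flags are cached from the rasterization state's line mode: whether lines are smooth, whether multisampling must be disabled, and whether lines are rectangular rather than Bresenham; with no rasterization state, lines default to rectangular, and transform feedback state is then initialized. A hedged sketch of that mapping with a stand-in enum (the second operand of the disable_multisample condition is cut off at line 872 in the listing and is left elided here):

   #include <stdbool.h>

   /* stand-in for the VK_EXT_line_rasterization mode values */
   enum line_mode {
      LINE_MODE_DEFAULT,
      LINE_MODE_RECTANGULAR,
      LINE_MODE_BRESENHAM,
      LINE_MODE_RECTANGULAR_SMOOTH,
   };

   struct line_flags { bool smooth, disable_multisample, rectangular; };

   /* cf. lines 870-875: derived from rs->line.mode when rasterization state is
    * present; otherwise lines are treated as rectangular */
   static struct line_flags classify_line_mode(bool have_rs, enum line_mode mode)
   {
      struct line_flags f = { .smooth = false, .disable_multisample = false,
                              .rectangular = true };
      if (!have_rs)
         return f;
      f.smooth = mode == LINE_MODE_RECTANGULAR_SMOOTH;
      f.disable_multisample = mode == LINE_MODE_BRESENHAM /* || ... (line 872 elided) */;
      f.rectangular = mode != LINE_MODE_BRESENHAM;
      return f;
   }
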
879 if (!pipeline->library) {
881 for (uint32_t i = 0; i < ARRAY_SIZE(pipeline->pipeline_nir); i++) {
882 if (!pipeline->pipeline_nir[i])
886 assert(stage == pipeline->pipeline_nir[i]->info.stage);
888 if (!pipeline->inlines[stage].can_inline)
889 pipeline->shader_cso[pstage] = lvp_pipeline_compile(pipeline,
890 nir_shader_clone(NULL, pipeline->pipeline_nir[stage]));
896 /* create a dummy fragment shader for this pipeline. */
900 pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
903 shstate.ir.nir = nir_shader_clone(NULL, pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]);
904 pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
910 for (unsigned i = 0; i < ARRAY_SIZE(pipeline->pipeline_nir); i++) {
911 if (pipeline->pipeline_nir[i])
912 ralloc_free(pipeline->pipeline_nir[i]);
914 vk_free(&device->vk.alloc, pipeline->state_data);
928 struct lvp_pipeline *pipeline;
933 pipeline = vk_zalloc(&device->vk.alloc, sizeof(*pipeline), 8,
935 if (pipeline == NULL)
938 vk_object_base_init(&device->vk, &pipeline->base,
941 result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo);
943 vk_free(&device->vk.alloc, pipeline);
954 *pPipeline = lvp_pipeline_to_handle(pipeline);
993 lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
998 pipeline->device = device;
999 pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
1000 vk_pipeline_layout_ref(&pipeline->layout->vk);
1001 pipeline->force_min_sample = false;
1003 pipeline->mem_ctx = ralloc_context(NULL);
1004 pipeline->is_compute_pipeline = true;
1006 VkResult result = lvp_shader_compile_to_ir(pipeline, &pCreateInfo->stage);
1010 if (!pipeline->inlines[MESA_SHADER_COMPUTE].can_inline)
1011 pipeline->shader_cso[PIPE_SHADER_COMPUTE] = lvp_pipeline_compile(pipeline, nir_shader_clone(NULL, pipeline->pipeline_nir[MESA_SHADER_COMPUTE]));
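
Both the per-stage graphics loop (lines 888-890) and the compute path (lines 1010-1011) create a gallium CSO up front only when lvp_find_inlinable_uniforms (line 528) found nothing to inline; a shader flagged can_inline keeps its NIR so that, presumably, a specialized CSO can be built later once concrete uniform values are known, which is also why a clone of pipeline_nir is handed to lvp_pipeline_compile rather than the stored copy. A small sketch of that eager-versus-deferred decision, with stub helpers standing in for nir_shader_clone and lvp_pipeline_compile:

   #include <stdbool.h>
   #include <stddef.h>

   struct shader_ir;                     /* stands in for nir_shader */

   struct stage_state {
      bool can_inline;                   /* set while lowering (cf. line 528) */
      struct shader_ir *ir;              /* retained for later specialization */
      void *cso;                         /* gallium shader object, NULL until built */
   };

   /* stub helpers for the sketch only */
   static struct shader_ir *clone_ir(struct shader_ir *ir) { return ir; }
   static void *compile_cso(struct shader_ir *ir) { return (void *)ir; /* dummy handle */ }

   static void create_stage_cso(struct stage_state *s)
   {
      /* eager path: nothing to inline, build the CSO now from a clone so the
       * pipeline keeps an untouched copy of its IR (cf. lines 889-890, 1011) */
      if (!s->can_inline)
         s->cso = compile_cso(clone_ir(s->ir));
      /* deferred path: cso stays NULL; compilation is assumed to happen later,
       * once the uniform values to inline are available */
   }
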
1024 struct lvp_pipeline *pipeline;
1029 pipeline = vk_zalloc(&device->vk.alloc, sizeof(*pipeline), 8,
1031 if (pipeline == NULL)
1034 vk_object_base_init(&device->vk, &pipeline->base,
1037 result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo);
1039 vk_free(&device->vk.alloc, pipeline);
1050 *pPipeline = lvp_pipeline_to_handle(pipeline);