Lines Matching defs:shader
103 * shader images on Valhall.
406 /* The blend shader's address needs to share
407 * the same top 32 bits as the fragment shader.
416 cfg.shader.pc = (u32) blend_shaders[i];
422 cfg.shader.return_value = ret_offset ?
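
The constraint quoted at lines 406-416 (only the low 32 bits of the blend shader address are written into the descriptor, so the blend shader must share the fragment shader's upper 32 bits) can be illustrated with a minimal sketch. pack_blend_shader_pc and its parameters are hypothetical names, not driver API:

#include <assert.h>
#include <stdint.h>

static inline uint32_t
pack_blend_shader_pc(uint64_t fs_address, uint64_t blend_shader_address)
{
   /* Only the low 32 bits fit in the descriptor, so both shaders must
    * live in the same 4 GB region, i.e. share the same top 32 bits. */
   assert((fs_address >> 32) == (blend_shader_address >> 32));

   return (uint32_t)blend_shader_address;
}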
484 /* Construct a partial RSD corresponding to no executed fragment shader, and
503 cfg.shader.shader = 0x1;
611 * need per-sample shading for the blend shader, accomplished
803 * CSO create time. However, the stencil reference values and shader
928 * Emit Valhall descriptors for shader images. Unlike previous generations,
1233 * shader, to keep the code as unified as possible. */
1464 struct panfrost_shader_variants *all = ctx->shader[stage];
1477 /* Upload sysvals requested by the shader */
1481 struct panfrost_shader_state *shader = panfrost_get_shader_state(ctx, stage);
1482 unsigned ubo_count = shader->info.ubo_count - (sys_size ? 1 : 0);
1524 /* Copy push constants required by the shader */
1590 struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
1845 emit_image_attribs(struct panfrost_context *ctx, enum pipe_shader_type shader,
1849 unsigned last_bit = util_last_bit(ctx->image_mask[shader]);
1852 enum pipe_format format = ctx->images[shader][i].format;
1877 emit_image_bufs(struct panfrost_batch *batch, enum pipe_shader_type shader,
1882 unsigned last_bit = util_last_bit(ctx->image_mask[shader]);
1885 struct pipe_image_view *image = &ctx->images[shader][i];
1887 if (!(ctx->image_mask[shader] & (1 << i)) ||
1909 panfrost_track_image_access(batch, shader, image);
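
Lines 1845-1909 walk the per-stage image bindings by scanning ctx->image_mask[shader] up to util_last_bit() and skipping slots whose bit is clear. Below is a minimal sketch of that mask-walk pattern, using Mesa's util_last_bit() from util/u_math.h; walk_bound_images and the emit callback are illustrative stand-ins, not driver API:

#include <stdint.h>
#include "util/u_math.h" /* util_last_bit() */

static void
walk_bound_images(uint32_t image_mask, void (*emit)(unsigned slot))
{
   unsigned last_bit = util_last_bit(image_mask);

   for (unsigned i = 0; i < last_bit; ++i) {
      /* Skip unbound image slots */
      if (!(image_mask & (1u << i)))
         continue;

      emit(i);
   }
}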
1955 struct panfrost_shader_state *shader = panfrost_get_shader_state(ctx, type);
1957 if (!shader->info.attribute_count) {
1963 unsigned attr_count = shader->info.attribute_count;
2074 * compute shader do the rest.
2084 * slot so the compute shader can retrieve it.
2270 /* Enable special buffers as required by the shader info */
2459 /* Allocate enough descriptors for both shader stages */
2630 * from the vertex shader, these are handled ahead-of-time with a compute
2631 * shader. This function should not be called if rasterization is skipped.
2917 * 3D state. In particular, it groups the fragment shader descriptor with
2952 /* On Bifrost and older, the fragment shader descriptor is fused
2954 * descriptor is emitted below. Otherwise, the shader descriptor is
2972 /* On Bifrost and older, if the fragment shader changes OR any renderer
2973 * state specified with the fragment shader, the whole renderer state
3191 cfg->shader = shader_ptr;
3243 * blend shader is used with multisampling, as this is handled
3244 * by a single ST_TILE in the blend shader with the current
3276 * target may be written if the fragment shader writes
3281 * Only set when there is a fragment shader, since
3288 /* Also use per-sample shading if required by the shader
3301 * fragment shader is omitted, we may also emit the
3311 panfrost_emit_shader(batch, &cfg.shader, PIPE_SHADER_FRAGMENT,
3321 /* No shader and no blend => no shader or blend
3328 /* No shader => no shader side effects */
3380 /* Varying shaders only feed data to the fragment shader, so if we omit
3381 * the fragment shader, we should omit the varying shader too.
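
Lines 3380-3381 explain that varying (secondary) shaders exist only to feed data to the fragment shader, so they are skipped whenever the fragment shader is skipped. A minimal sketch of that gating, with hypothetical flags fs_required and vs_writes_varyings:

#include <stdbool.h>

static inline bool
needs_varying_shader(bool fs_required, bool vs_writes_varyings)
{
   /* A varying shader without a fragment shader to consume its outputs
    * would do no useful work, so omit it. */
   return fs_required && vs_writes_varyings;
}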
3429 /* IDVS/points vertex shader */
3432 /* IDVS/triangle vertex shader */
3441 /* If a varying shader is used, we configure it with the same
3442 * state as the position shader for backwards compatible
3519 struct panfrost_shader_variants *saved_vs = ctx->shader[PIPE_SHADER_VERTEX];
3524 ctx->shader[PIPE_SHADER_VERTEX] = &v;
3572 ctx->shader[PIPE_SHADER_VERTEX] = saved_vs;
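
Lines 3519-3572 temporarily repoint ctx->shader[PIPE_SHADER_VERTEX] at a local variant and restore the saved pointer afterwards. A sketch of that save/override/restore pattern, assuming the driver's context definitions are in scope; run_with_temporary_vs and the launch_xfb_job callback are illustrative, not driver functions:

#include "pan_context.h" /* struct panfrost_context (assumed) */

static void
run_with_temporary_vs(struct panfrost_context *ctx,
                      struct panfrost_shader_variants *xfb_vs,
                      void (*launch_xfb_job)(struct panfrost_context *))
{
   /* Save the application's vertex shader (cf. line 3519) */
   struct panfrost_shader_variants *saved_vs =
      ctx->shader[PIPE_SHADER_VERTEX];

   /* Point the slot at the temporary variant (cf. line 3524) */
   ctx->shader[PIPE_SHADER_VERTEX] = xfb_vs;

   launch_xfb_job(ctx);

   /* Restore the saved vertex shader (cf. line 3572) */
   ctx->shader[PIPE_SHADER_VERTEX] = saved_vs;
}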
3714 /* Any side effects must be handled by the XFB shader, so we only need
3812 * vertex shader uses gl_VertexID or gl_BaseVertex.
4046 &ctx->shader[PIPE_SHADER_COMPUTE]->variants[0];
4082 .buffer_size = ctx->shader[PIPE_SHADER_COMPUTE]->req_input_mem,
4545 /* The address in the shader program descriptor must be non-null, but
4546 * the entire shader program descriptor may be omitted.
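
Lines 484-503 and 4545-4546 describe the same trick from two angles: even when no fragment shader executes, the shader program descriptor's address field must be non-null, so a dummy value (0x1 in the quoted code) is substituted. A minimal sketch; select_shader_address is a hypothetical helper:

#include <stdint.h>

static inline uint64_t
select_shader_address(uint64_t real_shader_address)
{
   /* Per the comment at lines 4545-4546, the address must be non-null
    * even if the shader never runs, so use a harmless placeholder. */
   return real_shader_address ? real_shader_address : 0x1;
}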