/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"

#include "freedreno_context.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"

#define get_safe(ptr, field) ((ptr) ? (ptr)->field : 0)
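
/* For example, get_safe(ctx->rasterizer, rasterizer_discard) evaluates
 * to 0 when no rasterizer CSO is bound, sparing callers a NULL check.
 */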

/* All the generic state handling.  CSOs that are specific to the GPU
 * version can also go in here when their bind and delete callbacks are
 * common across generations.
 */

static void
update_draw_cost(struct fd_context *ctx) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

   ctx->draw_cost = pfb->nr_cbufs;
   for (unsigned i = 0; i < pfb->nr_cbufs; i++)
      if (fd_blend_enabled(ctx, i))
         ctx->draw_cost++;
   if (fd_depth_enabled(ctx))
      ctx->draw_cost++;
   if (fd_depth_write_enabled(ctx))
      ctx->draw_cost++;
}
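
/* E.g. a pfb with two cbufs, blending enabled on cbuf 0, and depth test
 * plus depth write enabled gives draw_cost = 2 + 1 + 1 + 1 = 5.
 */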

static void
fd_set_blend_color(struct pipe_context *pctx,
                   const struct pipe_blend_color *blend_color) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->blend_color = *blend_color;
   fd_context_dirty(ctx, FD_DIRTY_BLEND_COLOR);
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
                   const struct pipe_stencil_ref stencil_ref) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stencil_ref = stencil_ref;
   fd_context_dirty(ctx, FD_DIRTY_STENCIL_REF);
}

static void
fd_set_clip_state(struct pipe_context *pctx,
                  const struct pipe_clip_state *clip) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->ucp = *clip;
   fd_context_dirty(ctx, FD_DIRTY_UCP);
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->sample_mask = (uint16_t)sample_mask;
   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_MASK);
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->min_samples = min_samples;
   fd_context_dirty(ctx, FD_DIRTY_MIN_SAMPLES);
}
/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, enum pipe_shader_type shader,
                       uint index, bool take_ownership,
                       const struct pipe_constant_buffer *cb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb, take_ownership);

   /* Note that gallium frontends can unbind constant buffers by
    * passing NULL here.
    */
   if (unlikely(!cb)) {
      so->enabled_mask &= ~(1 << index);
      return;
   }

   so->enabled_mask |= 1 << index;

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_CONST);
   fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);

   if (index > 0) {
      assert(!cb->user_buffer);
      ctx->dirty |= FD_DIRTY_RESOURCE;
   }
}
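
/* A minimal sketch of how a gallium frontend drives this hook (the `data`
 * and `size` names here are illustrative, not part of this file):
 *
 *    struct pipe_constant_buffer cb = {
 *       .buffer_size = size,
 *       .user_buffer = data,    // slot 0: packed non-UBO uniforms
 *    };
 *    pctx->set_constant_buffer(pctx, PIPE_SHADER_VERTEX, 0, false, &cb);
 *    ...
 *    pctx->set_constant_buffer(pctx, PIPE_SHADER_VERTEX, 0, false, NULL);
 */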

static void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
   const unsigned modified_bits = u_bit_consecutive(start, count);

   so->enabled_mask &= ~modified_bits;
   so->writable_mask &= ~modified_bits;
   so->writable_mask |= writable_bitmask << start;

   for (unsigned i = 0; i < count; i++) {
      unsigned n = i + start;
      struct pipe_shader_buffer *buf = &so->sb[n];

      if (buffers && buffers[i].buffer) {
         if ((buf->buffer == buffers[i].buffer) &&
             (buf->buffer_offset == buffers[i].buffer_offset) &&
             (buf->buffer_size == buffers[i].buffer_size))
            continue;

         buf->buffer_offset = buffers[i].buffer_offset;
         buf->buffer_size = buffers[i].buffer_size;
         pipe_resource_reference(&buf->buffer, buffers[i].buffer);

         fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);

         so->enabled_mask |= BIT(n);

         if (writable_bitmask & BIT(i)) {
            struct fd_resource *rsc = fd_resource(buf->buffer);
            util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                           buf->buffer_offset,
                           buf->buffer_offset + buf->buffer_size);
         }
      } else {
         pipe_resource_reference(&buf->buffer, NULL);
      }
   }

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_SSBO);
}

void
fd_set_shader_images(struct pipe_context *pctx, enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     unsigned unbind_num_trailing_slots,
                     const struct pipe_image_view *images) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

   unsigned mask = 0;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *buf = &so->si[n];

         if ((buf->resource == images[i].resource) &&
             (buf->format == images[i].format) &&
             (buf->access == images[i].access) &&
             !memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
            continue;

         mask |= BIT(n);
         util_copy_image_view(buf, &images[i]);

         if (buf->resource) {
            fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
            so->enabled_mask |= BIT(n);

            if ((buf->access & PIPE_IMAGE_ACCESS_WRITE) &&
                (buf->resource->target == PIPE_BUFFER)) {

               struct fd_resource *rsc = fd_resource(buf->resource);
               util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                              buf->u.buf.offset,
                              buf->u.buf.offset + buf->u.buf.size);
            }
         } else {
            so->enabled_mask &= ~BIT(n);
         }
      }
   } else {
      mask = (BIT(count) - 1) << start;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *img = &so->si[n];

         pipe_resource_reference(&img->resource, NULL);
      }

      so->enabled_mask &= ~mask;
   }

   for (unsigned i = 0; i < unbind_num_trailing_slots; i++)
      pipe_resource_reference(&so->si[i + start + count].resource, NULL);

   so->enabled_mask &=
      ~(BITFIELD_MASK(unbind_num_trailing_slots) << (start + count));

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_IMAGE);
}
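
/* Worked example of the mask bookkeeping above: with start=1, count=2 and
 * unbind_num_trailing_slots=1, slots 1..2 follow images[] and slot 3 is
 * unbound, i.e. enabled_mask bits 1-2 track the new views and bit 3 is
 * cleared.
 */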

void
fd_set_framebuffer_state(struct pipe_context *pctx,
                         const struct pipe_framebuffer_state *framebuffer)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *cso;

   DBG("%ux%u, %u layers, %u samples", framebuffer->width, framebuffer->height,
       framebuffer->layers, framebuffer->samples);

   cso = &ctx->framebuffer;

   if (util_framebuffer_state_equal(cso, framebuffer))
      return;

   /* Do this *after* checking that the framebuffer state is actually
    * changing.  In the fd_blitter_clear() path, we get a pfb update
    * to restore the current pfb state, which should not trigger us
    * to flush (as that can cause the batch to be freed at a point
    * before fd_clear() returns, but after the point where it expects
    * flushes to potentially happen).
    */
   fd_context_switch_from(ctx);

   util_copy_framebuffer_state(cso, framebuffer);

   cso->samples = util_framebuffer_get_num_samples(cso);

   if (ctx->screen->reorder) {
      struct fd_batch *old_batch = NULL;

      fd_batch_reference(&old_batch, ctx->batch);

      if (likely(old_batch))
         fd_batch_finish_queries(old_batch);

      fd_batch_reference(&ctx->batch, NULL);
      fd_context_all_dirty(ctx);
      ctx->update_active_queries = true;

      fd_batch_reference(&old_batch, NULL);
   } else if (ctx->batch) {
      DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
          framebuffer->cbufs[0], framebuffer->zsbuf);
      fd_batch_flush(ctx->batch);
   }

   fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);

   ctx->disabled_scissor.minx = 0;
   ctx->disabled_scissor.miny = 0;
   ctx->disabled_scissor.maxx = cso->width;
   ctx->disabled_scissor.maxy = cso->height;

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
   update_draw_cost(ctx);
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
                       const struct pipe_poly_stipple *stipple) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stipple = *stipple;
   fd_context_dirty(ctx, FD_DIRTY_STIPPLE);
}

static void
fd_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                      unsigned num_scissors,
                      const struct pipe_scissor_state *scissor) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->scissor = *scissor;
   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
}

static void
fd_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewport) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
   float minx, miny, maxx, maxy;

   ctx->viewport = *viewport;

   /* see si_get_scissor_from_viewport(): */

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -viewport->scale[0] + viewport->translate[0];
   miny = -viewport->scale[1] + viewport->translate[1];
   maxx = viewport->scale[0] + viewport->translate[0];
   maxy = viewport->scale[1] + viewport->translate[1];

   /* Handle inverted viewports. */
   if (minx > maxx) {
      swap(minx, maxx);
   }
   if (miny > maxy) {
      swap(miny, maxy);
   }

   const float max_dims = ctx->screen->gen >= 4 ? 16384.f : 4096.f;

   /* Clamp, convert to integer and round up the max bounds. */
   scissor->minx = CLAMP(minx, 0.f, max_dims);
   scissor->miny = CLAMP(miny, 0.f, max_dims);
   scissor->maxx = CLAMP(ceilf(maxx), 0.f, max_dims);
   scissor->maxy = CLAMP(ceilf(maxy), 0.f, max_dims);

   fd_context_dirty(ctx, FD_DIRTY_VIEWPORT);
}
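
/* Worked example: an upright viewport covering a WxH framebuffer has
 * scale = { W/2, H/2, .. } and translate = { W/2, H/2, .. }, giving
 * minx = miny = 0, maxx = W, maxy = H above.  A GL-style flipped-Y
 * viewport (negative scale[1]) is handled by the miny/maxy swap.
 */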

static void
fd_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
                      unsigned count, unsigned unbind_num_trailing_slots,
                      bool take_ownership,
                      const struct pipe_vertex_buffer *vb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
   int i;

   /* on a2xx, pitch is encoded in the vtx fetch instruction, so
    * we need to mark VTXSTATE as dirty as well to trigger patching
    * and re-emitting the vtx shader:
    */
   if (ctx->screen->gen < 3) {
      for (i = 0; i < count; i++) {
         bool new_enabled = vb && vb[i].buffer.resource;
         bool old_enabled = so->vb[start_slot + i].buffer.resource != NULL;
         uint32_t new_stride = vb ? vb[i].stride : 0;
         uint32_t old_stride = so->vb[start_slot + i].stride;
         if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
            fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
            break;
         }
      }
   }

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot,
                                count, unbind_num_trailing_slots,
                                take_ownership);
   so->count = util_last_bit(so->enabled_mask);

   if (!vb)
      return;

   fd_context_dirty(ctx, FD_DIRTY_VTXBUF);

   for (unsigned i = 0; i < count; i++) {
      assert(!vb[i].is_user_buffer);
      fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);

      /* Robust buffer access: Return undefined data (the start of the buffer)
       * instead of process termination or a GPU hang in case of overflow.
       */
      if (vb[i].buffer.resource &&
          unlikely(vb[i].buffer_offset >= vb[i].buffer.resource->width0)) {
         so->vb[start_slot + i].buffer_offset = 0;
      }
   }
}

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_blend_state *cso = hwcso;
   bool old_is_dual = ctx->blend ? ctx->blend->rt[0].blend_enable &&
                                      util_blend_state_is_dual(ctx->blend, 0)
                                 : false;
   bool new_is_dual =
      cso ? cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) : false;
   ctx->blend = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_BLEND);
   if (old_is_dual != new_is_dual)
      fd_context_dirty(ctx, FD_DIRTY_BLEND_DUAL);
   update_draw_cost(ctx);
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
   bool discard = get_safe(ctx->rasterizer, rasterizer_discard);
   unsigned clip_plane_enable = get_safe(ctx->rasterizer, clip_plane_enable);

   ctx->rasterizer = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);

   if (ctx->rasterizer && ctx->rasterizer->scissor) {
      ctx->current_scissor = &ctx->scissor;
   } else {
      ctx->current_scissor = &ctx->disabled_scissor;
   }

   /* If the scissor enable bit changed we need to mark the scissor
    * state as dirty as well.
    * NOTE: we can do a shallow compare, since we only care
    * whether it changed to/from &ctx->disabled_scissor.
    */
   if (old_scissor != fd_context_get_scissor(ctx))
      fd_context_dirty(ctx, FD_DIRTY_SCISSOR);

   if (discard != get_safe(ctx->rasterizer, rasterizer_discard))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_DISCARD);

   if (clip_plane_enable != get_safe(ctx->rasterizer, clip_plane_enable))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_CLIP_PLANE_ENABLE);
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->zsa = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_ZSA);
   update_draw_cost(ctx);
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                       const struct pipe_vertex_element *elements)
{
   struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;

   return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->vtx.vtx = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
}

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
                               struct pipe_resource *prsc,
                               unsigned buffer_offset, unsigned buffer_size)
{
   struct fd_stream_output_target *target;
   struct fd_resource *rsc = fd_resource(prsc);

   target = CALLOC_STRUCT(fd_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->base.reference, 1);
   pipe_resource_reference(&target->base.buffer, prsc);

   target->base.context = pctx;
   target->base.buffer_offset = buffer_offset;
   target->base.buffer_size = buffer_size;

   target->offset_buf = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(uint32_t));
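   /* Note: offset_buf is a single-uint32 scratch BO holding this target's
    * current streamout write offset on the GPU side, so the offset can
    * survive across binds (e.g. when a later bind appends rather than
    * resets; see fd_set_stream_output_targets()).
    */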

   assert(rsc->b.b.target == PIPE_BUFFER);
   util_range_add(&rsc->b.b, &rsc->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   return &target->base;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
                                struct pipe_stream_output_target *target)
{
   struct fd_stream_output_target *cso = fd_stream_output_target(target);

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset_buf, NULL);

   FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets,
                             struct pipe_stream_output_target **targets,
                             const unsigned *offsets) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_streamout_stateobj *so = &ctx->streamout;
   unsigned i;

   assert(num_targets <= ARRAY_SIZE(so->targets));

   /* Older GPU generations need sw stats enabled for streamout emulation
    * in the VS:
    */
   if (ctx->screen->gen < 5) {
      if (num_targets && !so->num_targets) {
         ctx->stats_users++;
      } else if (so->num_targets && !num_targets) {
         ctx->stats_users--;
      }
   }

   for (i = 0; i < num_targets; i++) {
      bool changed = targets[i] != so->targets[i];
      bool reset = (offsets[i] != (unsigned)-1);
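      /* Per the gallium contract, offsets[i] == -1 means append at the
       * target's existing offset (pause/resume), while any other value
       * (re)starts writing at that byte offset.
       */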

      so->reset |= (reset << i);

      if (!changed && !reset)
         continue;

      /* Note that all SO targets will be reset at once at a
       * BeginTransformFeedback().
       */
      if (reset) {
         so->offsets[i] = offsets[i];
         ctx->streamout.verts_written = 0;
      }

      pipe_so_target_reference(&so->targets[i], targets[i]);
   }

   for (; i < so->num_targets; i++) {
      pipe_so_target_reference(&so->targets[i], NULL);
   }

   so->num_targets = num_targets;

   fd_context_dirty(ctx, FD_DIRTY_STREAMOUT);
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->compute = state;
   /* NOTE: Don't mark FD_DIRTY_PROG for compute specific state */
   ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

/* TODO pipe_context::set_compute_resources() should DIAF and clover
 * should be updated to use pipe_context::set_constant_buffer() and
 * pipe_context::set_shader_images().  Until then just directly frob
 * the UBO/image state to avoid the rest of the driver needing to
 * know about this bastard api..
 */
static void
fd_set_compute_resources(struct pipe_context *pctx, unsigned start,
                         unsigned count, struct pipe_surface **prscs) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[PIPE_SHADER_COMPUTE];

   for (unsigned i = 0; i < count; i++) {
      const uint32_t index = i + start + 1;   /* UBOs start at index 1 */

      if (!prscs) {
         util_copy_constant_buffer(&so->cb[index], NULL, false);
         so->enabled_mask &= ~(1 << index);
      } else if (prscs[i]->format == PIPE_FORMAT_NONE) {
         struct pipe_constant_buffer cb = {
               .buffer = prscs[i]->texture,
         };
         util_copy_constant_buffer(&so->cb[index], &cb, false);
         so->enabled_mask |= (1 << index);
      } else {
         // TODO images
         unreachable("finishme");
      }
   }
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx, unsigned first, unsigned count,
                      struct pipe_resource **prscs, uint32_t **handles) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
   unsigned mask = 0;

   if (prscs) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;

         mask |= BIT(n);

         pipe_resource_reference(&so->buf[n], prscs[i]);

         if (so->buf[n]) {
            struct fd_resource *rsc = fd_resource(so->buf[n]);
            uint32_t offset = *handles[i];
            uint64_t iova = fd_bo_get_iova(rsc->bo) + offset;

            /* Yes, really, despite what the type implies: */
            memcpy(handles[i], &iova, sizeof(iova));
         }

         if (prscs[i])
            so->enabled_mask |= BIT(n);
         else
            so->enabled_mask &= ~BIT(n);
      }
   } else {
      mask = (BIT(count) - 1) << first;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;
         pipe_resource_reference(&so->buf[n], NULL);
      }

      so->enabled_mask &= ~mask;
   }
}
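
/* Usage note: despite the uint32_t* element type, clover's handles[i]
 * points at storage wide enough for a full device address; on input it
 * holds a 32-bit offset into the buffer, and the 64-bit GPU iova is
 * memcpy'd back out through the same pointer above.
 */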

void
fd_state_init(struct pipe_context *pctx)
{
   pctx->set_blend_color = fd_set_blend_color;
   pctx->set_stencil_ref = fd_set_stencil_ref;
   pctx->set_clip_state = fd_set_clip_state;
   pctx->set_sample_mask = fd_set_sample_mask;
   pctx->set_min_samples = fd_set_min_samples;
   pctx->set_constant_buffer = fd_set_constant_buffer;
   pctx->set_shader_buffers = fd_set_shader_buffers;
   pctx->set_shader_images = fd_set_shader_images;
   pctx->set_framebuffer_state = fd_set_framebuffer_state;
   pctx->set_polygon_stipple = fd_set_polygon_stipple;
   pctx->set_scissor_states = fd_set_scissor_states;
   pctx->set_viewport_states = fd_set_viewport_states;

   pctx->set_vertex_buffers = fd_set_vertex_buffers;

   pctx->bind_blend_state = fd_blend_state_bind;
   pctx->delete_blend_state = fd_blend_state_delete;

   pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
   pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

   pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
   pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

   if (!pctx->create_vertex_elements_state)
      pctx->create_vertex_elements_state = fd_vertex_state_create;
   pctx->delete_vertex_elements_state = fd_vertex_state_delete;
   pctx->bind_vertex_elements_state = fd_vertex_state_bind;

   pctx->create_stream_output_target = fd_create_stream_output_target;
   pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
   pctx->set_stream_output_targets = fd_set_stream_output_targets;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->bind_compute_state = fd_bind_compute_state;
      pctx->set_compute_resources = fd_set_compute_resources;
      pctx->set_global_binding = fd_set_global_binding;
   }
}