1/*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 * Copyright © 2018 Google, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 *    Rob Clark <robclark@freedesktop.org>
26 */
27
28#include "pipe/p_state.h"
29#include "util/u_memory.h"
30#include "util/u_prim.h"
31#include "util/u_string.h"
32
33#include "freedreno_resource.h"
34#include "freedreno_state.h"
35
36#include "fd6_context.h"
37#include "fd6_draw.h"
38#include "fd6_emit.h"
39#include "fd6_program.h"
40#include "fd6_vsc.h"
41#include "fd6_zsa.h"
42
43#include "fd6_pack.h"
44
45static void
46draw_emit_xfb(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
47              const struct pipe_draw_info *info,
48              const struct pipe_draw_indirect_info *indirect)
49{
50   struct fd_stream_output_target *target =
51      fd_stream_output_target(indirect->count_from_stream_output);
52   struct fd_resource *offset = fd_resource(target->offset_buf);
53
54   /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
55    * Plus, for the common case where the counter buffer is written by
56    * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
57    * complete which means we need a WAIT_FOR_ME anyway.
58    */
59   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
60
61   OUT_PKT7(ring, CP_DRAW_AUTO, 6);
62   OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
63   OUT_RING(ring, info->instance_count);
64   OUT_RELOC(ring, offset->bo, 0, 0, 0);
65   OUT_RING(
66      ring,
67      0); /* byte counter offset subtraced from the value read from above */
68   OUT_RING(ring, target->stride);
69}
70
71static void
72draw_emit_indirect(struct fd_ringbuffer *ring,
73                   struct CP_DRAW_INDX_OFFSET_0 *draw0,
74                   const struct pipe_draw_info *info,
75                   const struct pipe_draw_indirect_info *indirect,
76                   unsigned index_offset)
77{
78   struct fd_resource *ind = fd_resource(indirect->buffer);
79
80   if (info->index_size) {
81      struct pipe_resource *idx = info->index.resource;
82      unsigned max_indices = (idx->width0 - index_offset) / info->index_size;
83
84      OUT_PKT(ring, CP_DRAW_INDX_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
85              A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE(fd_resource(idx)->bo,
86                                                   index_offset),
87              A5XX_CP_DRAW_INDX_INDIRECT_3(.max_indices = max_indices),
88              A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT(ind->bo, indirect->offset));
89   } else {
90      OUT_PKT(ring, CP_DRAW_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
91              A5XX_CP_DRAW_INDIRECT_INDIRECT(ind->bo, indirect->offset));
92   }
93}
94
95static void
96draw_emit(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
97          const struct pipe_draw_info *info,
98          const struct pipe_draw_start_count_bias *draw, unsigned index_offset)
99{
100   if (info->index_size) {
101      assert(!info->has_user_indices);
102
103      struct pipe_resource *idx_buffer = info->index.resource;
104      unsigned max_indices =
105         (idx_buffer->width0 - index_offset) / info->index_size;
106
107      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
108              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
109              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count),
110              CP_DRAW_INDX_OFFSET_3(.first_indx = draw->start),
111              A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(fd_resource(idx_buffer)->bo,
112                                                 index_offset),
113              A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices));
114   } else {
115      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
116              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
117              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count));
118   }
119}
120
121static void
122fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit) assert_dt
123{
124   if (ctx->last.dirty ||
125       (ctx->last.primitive_restart != emit->primitive_restart)) {
126      /* rasterizer state is effected by primitive-restart: */
127      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);
128      ctx->last.primitive_restart = emit->primitive_restart;
129   }
130}
131
/*
 * Main draw entrypoint for a6xx.  Builds the shader key and fd6_emit state
 * from current context state, emits any dirty GPU state, then emits the
 * draw packet (direct, indirect, or transform-feedback driven).
 *
 * Returns false to make the caller skip the draw (missing shader stages or
 * shader compile failure), true once the draw has been emitted.
 */
static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
             unsigned drawid_offset,
             const struct pipe_draw_indirect_info *indirect,
             const struct pipe_draw_start_count_bias *draw,
             unsigned index_offset) assert_dt
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct shader_info *gs_info = ir3_get_shader_info(ctx->prog.gs);
   /* Gather everything the state-emit code needs; the shader key is derived
    * from rasterizer/framebuffer state so program variants track it.
    */
   struct fd6_emit emit = {
      .ctx = ctx,
      .vtx = &ctx->vtx,
      .info = info,
      .drawid_offset = drawid_offset,
      .indirect = indirect,
      .draw = draw,
      .key = {
         .vs = ctx->prog.vs,
         .gs = ctx->prog.gs,
         .fs = ctx->prog.fs,
         .key = {
            .rasterflat = ctx->rasterizer->flatshade,
            .ucp_enables = ctx->rasterizer->clip_plane_enable,
            .layer_zero = !gs_info || !(gs_info->outputs_written & VARYING_BIT_LAYER),
            .sample_shading = (ctx->min_samples > 1),
            .msaa = (ctx->framebuffer.samples > 1),
         },
         .clip_plane_enable = ctx->rasterizer->clip_plane_enable,
      },
      .rasterflat = ctx->rasterizer->flatshade,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
      /* restart only applies to indexed draws */
      .primitive_restart = info->primitive_restart && info->index_size,
      .patch_vertices = ctx->patch_vertices,
   };

   /* VS and FS are mandatory stages: */
   if (!(ctx->prog.vs && ctx->prog.fs))
      return false;

   if (info->mode == PIPE_PRIM_PATCHES) {
      /* Tessellation: pull in HS/DS and derive key bits from the DS info. */
      emit.key.hs = ctx->prog.hs;
      emit.key.ds = ctx->prog.ds;

      if (!(ctx->prog.hs && ctx->prog.ds))
         return false;

      struct shader_info *ds_info = ir3_get_shader_info(emit.key.ds);
      emit.key.key.tessellation = ir3_tess_mode(ds_info->tess._primitive_mode);
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);

      /* TCS must store primitive-id if any downstream stage reads it: */
      struct shader_info *fs_info = ir3_get_shader_info(emit.key.fs);
      emit.key.key.tcs_store_primid =
         BITSET_TEST(ds_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID) ||
         (gs_info && BITSET_TEST(gs_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID)) ||
         (fs_info && (fs_info->inputs_read & (1ull << VARYING_SLOT_PRIMITIVE_ID)));
   }

   if (emit.key.gs) {
      emit.key.key.has_gs = true;
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);
   }

   /* VSC (visibility stream) size heuristics only apply to plain direct
    * draws without geometry-amplifying stages:
    */
   if (!(emit.key.hs || emit.key.ds || emit.key.gs || indirect))
      fd6_vsc_update_sizes(ctx->batch, info, draw);

   ir3_fixup_shader_state(&ctx->base, &emit.key.key);

   /* Reuse the cached program state unless the PROG group is dirty.
    * NOTE(review): in the dirty branch fd6_emit_get_prog() appears to also
    * populate emit.prog as a side effect — confirm against fd6_emit.c.
    */
   if (!(ctx->gen_dirty & BIT(FD6_GROUP_PROG))) {
      emit.prog = fd6_ctx->prog;
   } else {
      fd6_ctx->prog = fd6_emit_get_prog(&emit);
   }

   /* bail if compile failed: */
   if (!fd6_ctx->prog)
      return false;

   fixup_draw_state(ctx, &emit);

   /* *after* fixup_shader_state(): */
   emit.dirty = ctx->dirty;
   emit.dirty_groups = ctx->gen_dirty;

   emit.bs = fd6_emit_get_prog(&emit)->bs;
   emit.vs = fd6_emit_get_prog(&emit)->vs;
   emit.hs = fd6_emit_get_prog(&emit)->hs;
   emit.ds = fd6_emit_get_prog(&emit)->ds;
   emit.gs = fd6_emit_get_prog(&emit)->gs;
   emit.fs = fd6_emit_get_prog(&emit)->fs;

   /* Driver-params need (re-)emitting if any active stage consumes them: */
   if (emit.vs->need_driver_params || fd6_ctx->has_dp_state)
      emit.dirty_groups |= BIT(FD6_GROUP_DRIVER_PARAMS);
   else if (emit.gs && emit.gs->need_driver_params)
      emit.dirty_groups |= BIT(FD6_GROUP_DRIVER_PARAMS);
   else if (emit.ds && emit.ds->need_driver_params)
      emit.dirty_groups |= BIT(FD6_GROUP_DRIVER_PARAMS);

   /* If we are doing xfb, we need to emit the xfb state on every draw: */
   if (emit.prog->stream_output)
      emit.dirty_groups |= BIT(FD6_GROUP_SO);

   /* Per-stage register statistics, only when someone is querying them: */
   if (unlikely(ctx->stats_users > 0)) {
      ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
      ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
      ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
      ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
      ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
   }

   struct fd_ringbuffer *ring = ctx->batch->draw;

   /* First dword of the draw packet, shared by all draw paths: */
   struct CP_DRAW_INDX_OFFSET_0 draw0 = {
      .prim_type = ctx->screen->primtypes[info->mode],
      .vis_cull = USE_VISIBILITY,
      .gs_enable = !!emit.key.gs,
   };

   /* Select where the vertex/index data comes from: */
   if (indirect && indirect->count_from_stream_output) {
      draw0.source_select = DI_SRC_SEL_AUTO_XFB;
   } else if (info->index_size) {
      draw0.source_select = DI_SRC_SEL_DMA;
      draw0.index_size = fd4_size2indextype(info->index_size);
   } else {
      draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
   }

   if (info->mode == PIPE_PRIM_PATCHES) {
      uint32_t factor_stride = ir3_tess_factor_stride(emit.key.key.tessellation);

      /* patch_type encoding relies on the ir3 enum being hw value + 1: */
      STATIC_ASSERT(IR3_TESS_ISOLINES == TESS_ISOLINES + 1);
      STATIC_ASSERT(IR3_TESS_TRIANGLES == TESS_TRIANGLES + 1);
      STATIC_ASSERT(IR3_TESS_QUADS == TESS_QUADS + 1);
      draw0.patch_type = emit.key.key.tessellation - 1;

      draw0.prim_type = DI_PT_PATCHES0 + ctx->patch_vertices;
      draw0.tess_enable = true;

      /* maximum number of patches that can fit in tess factor/param buffers */
      uint32_t subdraw_size = MIN2(FD6_TESS_FACTOR_SIZE / factor_stride,
                                   FD6_TESS_PARAM_SIZE / (emit.hs->output_size * 4));
      /* convert from # of patches to draw count */
      subdraw_size *= ctx->patch_vertices;

      OUT_PKT7(ring, CP_SET_SUBDRAW_SIZE, 1);
      OUT_RING(ring, subdraw_size);

      ctx->batch->tessellation = true;
   }

   /* The following three registers are cached in ctx->last and only
    * re-emitted when their value actually changes:
    */
   uint32_t index_start = info->index_size ? draw->index_bias : draw->start;
   if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
      OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
      ctx->last.index_start = index_start;
   }

   if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
      OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
      ctx->last.instance_start = info->start_instance;
   }

   uint32_t restart_index =
      info->primitive_restart ? info->restart_index : 0xffffffff;
   if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
      OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
      OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
      ctx->last.restart_index = restart_index;
   }

   // TODO move fd6_emit_streamout.. I think..
   if (emit.dirty_groups)
      fd6_emit_state(ring, &emit);

   /* for debug after a lock up, write a unique counter value
    * to scratch7 for each draw, to make it easier to match up
    * register dumps to cmdstream.  The combination of IB
    * (scratch6) and DRAW is enough to "triangulate" the
    * particular draw that caused lockup.
    */
   emit_marker6(ring, 7);

   /* Dispatch to the right draw-packet emitter: */
   if (indirect) {
      if (indirect->count_from_stream_output) {
         draw_emit_xfb(ring, &draw0, info, indirect);
      } else {
         draw_emit_indirect(ring, &draw0, info, indirect, index_offset);
      }
   } else {
      draw_emit(ring, &draw0, info, draw, index_offset);
   }

   emit_marker6(ring, 7);
   fd_reset_wfi(ctx->batch);

   /* Flush each active streamout buffer so counters are updated: */
   if (emit.streamout_mask) {
      struct fd_ringbuffer *ring = ctx->batch->draw;

      for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         if (emit.streamout_mask & (1 << i)) {
            fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
         }
      }
   }

   fd_context_all_clean(ctx);

   return true;
}
341
/*
 * Fill the LRZ (low-resolution Z) buffer of a depth resource with the given
 * clear depth value, using the 2D blitter in BYPASS (sysmem) mode with a
 * solid-color source.  The commands go into the batch prologue ring so the
 * clear happens before any rendering in the batch.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth) assert_dt
{
   struct fd_ringbuffer *ring;
   struct fd_screen *screen = batch->ctx->screen;

   ring = fd_batch_get_prologue(batch);

   /* Switch to direct-render (bypass) mode for the blit: */
   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_RB_CCU_CNTL(.color_offset = screen->ccu_offset_bypass));

   /* Invalidate all stage/bindless state since the prologue runs outside
    * normal state tracking:
    */
   OUT_REG(ring,
           A6XX_HLSQ_INVALIDATE_CMD(.vs_state = true, .hs_state = true,
                                    .ds_state = true, .gs_state = true,
                                    .fs_state = true, .cs_state = true,
                                    .gfx_ibo = true, .cs_ibo = true,
                                    .gfx_shared_const = true,
                                    .gfx_bindless = 0x1f, .cs_bindless = 0x1f));

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_2D_UNKNOWN_8C01, 1);
   OUT_RING(ring, 0x0);

   /* No real 2D source — the solid-color path is used instead, so the src
    * info registers are all zeroed:
    */
   OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_SP_2D_DST_FORMAT, 1);
   OUT_RING(ring, 0x0000f410);

   /* LRZ is treated as a 16-bit unorm surface; 0x4f00080 bits are magic
    * (solid-color blit config) — NOTE(review): confirm against a6xx docs.
    */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   OUT_RING(ring,
            A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd_wfi(batch, ring);

   /* Solid "color" for the blit is the depth clear value: */
   OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   OUT_RING(ring, fui(depth));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   /* Destination is the LRZ buffer itself (pitch in bytes = pixels * 2): */
   OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
   OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
                     A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                     A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
   OUT_RELOC(ring, zsbuf->lrz, 0, 0, 0);
   OUT_RING(ring, A6XX_RB_2D_DST_PITCH(zsbuf->lrz_pitch * 2).value);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_REG(ring, A6XX_GRAS_2D_SRC_TL_X(0), A6XX_GRAS_2D_SRC_BR_X(0),
           A6XX_GRAS_2D_SRC_TL_Y(0), A6XX_GRAS_2D_SRC_BR_Y(0));

   /* Blit rectangle covers the whole LRZ surface: */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
   OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) | A6XX_GRAS_2D_DST_TL_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
                     A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

   fd6_event_write(batch, ring, 0x3f, false);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, screen->info->a6xx.magic.RB_UNKNOWN_8E04_blit);

   /* Kick off the actual blit: */
   OUT_PKT7(ring, CP_BLIT, 1);
   OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x0); /* RB_UNKNOWN_8E04 */

   /* Flush/invalidate caches so subsequent rendering sees the cleared LRZ: */
   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
   fd_wfi(batch, ring);

   fd6_cache_inv(batch, ring);
}
452
453static bool
454is_z32(enum pipe_format format)
455{
456   switch (format) {
457   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
458   case PIPE_FORMAT_Z32_UNORM:
459   case PIPE_FORMAT_Z32_FLOAT:
460      return true;
461   default:
462      return false;
463   }
464}
465
466static bool
467fd6_clear(struct fd_context *ctx, unsigned buffers,
468          const union pipe_color_union *color, double depth,
469          unsigned stencil) assert_dt
470{
471   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
472   const bool has_depth = pfb->zsbuf;
473   unsigned color_buffers = buffers >> 2;
474
475   /* we need to do multisample clear on 3d pipe, so fallback to u_blitter: */
476   if (pfb->samples > 1)
477      return false;
478
479   /* If we're clearing after draws, fallback to 3D pipe clears.  We could
480    * use blitter clears in the draw batch but then we'd have to patch up the
481    * gmem offsets. This doesn't seem like a useful thing to optimize for
482    * however.*/
483   if (ctx->batch->num_draws > 0)
484      return false;
485
486   u_foreach_bit (i, color_buffers)
487      ctx->batch->clear_color[i] = *color;
488   if (buffers & PIPE_CLEAR_DEPTH)
489      ctx->batch->clear_depth = depth;
490   if (buffers & PIPE_CLEAR_STENCIL)
491      ctx->batch->clear_stencil = stencil;
492
493   ctx->batch->fast_cleared |= buffers;
494
495   if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
496      struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
497      if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
498         zsbuf->lrz_valid = true;
499         zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
500         fd6_clear_lrz(ctx->batch, zsbuf, depth);
501      }
502   }
503
504   return true;
505}
506
507void
508fd6_draw_init(struct pipe_context *pctx) disable_thread_safety_analysis
509{
510   struct fd_context *ctx = fd_context(pctx);
511   ctx->draw_vbo = fd6_draw_vbo;
512   ctx->clear = fd6_clear;
513}
514