1/*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25#include "util/u_blitter.h"
26#include "util/u_draw.h"
27#include "util/u_prim.h"
28#include "util/format/u_format.h"
29#include "util/u_pack_color.h"
30#include "util/u_split_draw.h"
31#include "util/u_upload_mgr.h"
32
33#include "vc4_context.h"
34#include "vc4_resource.h"
35
36#define VC4_HW_2116_COUNT		0x1ef0
37
38static void
39vc4_get_draw_cl_space(struct vc4_job *job, int vert_count)
40{
41        /* The SW-5891 workaround may cause us to emit multiple shader recs
42         * and draw packets.
43         */
44        int num_draws = DIV_ROUND_UP(vert_count, 65535 - 2) + 1;
45
46        /* Binner gets our packet state -- vc4_emit.c contents,
47         * and the primitive itself.
48         */
49        cl_ensure_space(&job->bcl,
50                        256 + (VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE +
51                               VC4_PACKET_GL_SHADER_STATE_SIZE) * num_draws);
52
53        /* Nothing for rcl -- that's covered by vc4_context.c */
54
55        /* shader_rec gets up to 12 dwords of reloc handles plus a maximally
56         * sized shader_rec (104 bytes base for 8 vattrs plus 32 bytes of
57         * vattr stride).
58         */
59        cl_ensure_space(&job->shader_rec,
60                        (12 * sizeof(uint32_t) + 104 + 8 * 32) * num_draws);
61
62        /* Uniforms are covered by vc4_write_uniforms(). */
63
64        /* There could be up to 16 textures per stage, plus misc other
65         * pointers.
66         */
67        cl_ensure_space(&job->bo_handles, (2 * 16 + 20) * sizeof(uint32_t));
68        cl_ensure_space(&job->bo_pointers,
69                        (2 * 16 + 20) * sizeof(struct vc4_bo *));
70}
71
72/**
73 * Does the initial bining command list setup for drawing to a given FBO.
74 */
75static void
76vc4_start_draw(struct vc4_context *vc4)
77{
78        struct vc4_job *job = vc4->job;
79
80        if (job->needs_flush)
81                return;
82
83        vc4_get_draw_cl_space(job, 0);
84
85        cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION, bin) {
86                bin.width_in_tiles = job->draw_tiles_x;
87                bin.height_in_tiles = job->draw_tiles_y;
88                bin.multisample_mode_4x = job->msaa;
89        }
90
91        /* START_TILE_BINNING resets the statechange counters in the hardware,
92         * which are what is used when a primitive is binned to a tile to
93         * figure out what new state packets need to be written to that tile's
94         * command list.
95         */
96        cl_emit(&job->bcl, START_TILE_BINNING, start);
97
98        /* Reset the current compressed primitives format.  This gets modified
99         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
100         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
101         * of every tile.
102         */
103        cl_emit(&job->bcl, PRIMITIVE_LIST_FORMAT, list) {
104                list.data_type = _16_BIT_INDEX;
105                list.primitive_type = TRIANGLES_LIST;
106        }
107
108        job->needs_flush = true;
109        job->draw_width = vc4->framebuffer.width;
110        job->draw_height = vc4->framebuffer.height;
111}
112
113static void
114vc4_predraw_check_textures(struct pipe_context *pctx,
115                           struct vc4_texture_stateobj *stage_tex)
116{
117        struct vc4_context *vc4 = vc4_context(pctx);
118
119        for (int i = 0; i < stage_tex->num_textures; i++) {
120                struct vc4_sampler_view *view =
121                        vc4_sampler_view(stage_tex->textures[i]);
122                if (!view)
123                        continue;
124
125                if (view->texture != view->base.texture)
126                        vc4_update_shadow_baselevel_texture(pctx, &view->base);
127
128                vc4_flush_jobs_writing_resource(vc4, view->texture);
129        }
130}
131
/**
 * Emits the shader record (FS/CS/VS code addresses and per-attribute fetch
 * setup), the GL_SHADER_STATE packet referencing it, and the uniform streams
 * for all three stages.
 *
 * \param extra_index_bias  Added on top of the draw's index_bias when
 *                          computing attribute base offsets; used by the
 *                          SW-5891 drawarrays split path to point the
 *                          attribute arrays farther into the VBs.
 *
 * Side effects: updates vc4->last_index_bias and vc4->max_index, and bumps
 * job->shader_rec_count.
 */
static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info,
                         const struct pipe_draw_start_count_bias *draws,
                         uint32_t extra_index_bias)
{
        struct vc4_job *job = vc4->job;
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);

        /* Emit the shader record.  3 relocs for the FS/CS/VS code BOs plus
         * one per attribute record.
         */
        cl_start_shader_reloc(&job->shader_rec, 3 + num_elements_emit);

        cl_emit(&job->shader_rec, SHADER_RECORD, rec) {
                rec.enable_clipping = true;

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_is_single_threaded =
                        !vc4->prog.fs->fs_threaded;

                /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
                rec.point_size_included_in_shaded_vertex_data =
                         (info->mode == PIPE_PRIM_POINTS &&
                          vc4->rasterizer->base.point_size_per_vertex);

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_number_of_varyings =
                        vc4->prog.fs->num_inputs;
                rec.fragment_shader_code_address =
                        cl_address(vc4->prog.fs->bo, 0);

                rec.coordinate_shader_attribute_array_select_bits =
                         vc4->prog.cs->vattrs_live;
                rec.coordinate_shader_total_attributes_size =
                         vc4->prog.cs->vattr_offsets[8];
                rec.coordinate_shader_code_address =
                        cl_address(vc4->prog.cs->bo, 0);

                rec.vertex_shader_attribute_array_select_bits =
                         vc4->prog.vs->vattrs_live;
                rec.vertex_shader_total_attributes_size =
                         vc4->prog.vs->vattr_offsets[8];
                rec.vertex_shader_code_address =
                        cl_address(vc4->prog.vs->bo, 0);
        };

        /* Emit one attribute record per vertex element, tracking the largest
         * index that still reads in-bounds from every bound vertex buffer.
         */
        uint32_t max_index = 0xffff;
        unsigned index_bias = info->index_size ? draws->index_bias : 0;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * (index_bias +
                                                 extra_index_bias));
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(rsc->bo, offset);
                        attr.number_of_bytes_minus_1 = elem_size - 1;
                        attr.stride = vb->stride;
                        attr.coordinate_shader_vpm_offset =
                                vc4->prog.cs->vattr_offsets[i];
                        attr.vertex_shader_vpm_offset =
                                vc4->prog.vs->vattr_offsets[i];
                }

                /* Zero-stride (per-instance-constant) buffers don't limit
                 * the maximum index.
                 */
                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        /* No vertex elements bound: emit a dummy attribute record pointing
         * at scratch storage so the dummy CS/VS read has something to fetch.
         */
        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(bo, 0);
                        attr.number_of_bytes_minus_1 = 16 - 1;
                        attr.stride = 0;
                        attr.coordinate_shader_vpm_offset = 0;
                        attr.vertex_shader_vpm_offset = 0;
                }

                /* The job's reloc holds its own reference now. */
                vc4_bo_unreference(&bo);
        }

        cl_emit(&job->bcl, GL_SHADER_STATE, shader_state) {
                /* Note that number of attributes == 0 in the packet means 8
                 * attributes.  This field also contains the offset into
                 * shader_rec.
                 */
                assert(vtx->num_elements <= 8);
                shader_state.number_of_attribute_arrays =
                        num_elements_emit & 0x7;
        }

        /* Write the uniform streams in the order the kernel/HW consumes
         * them: FS, then VS, then CS (VS and CS share the vertex stage's
         * constbuf and textures).
         */
        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        vc4->last_index_bias = index_bias + extra_index_bias;
        vc4->max_index = max_index;
        job->shader_rec_count++;
}
256
257/**
258 * HW-2116 workaround: Flush the batch before triggering the hardware state
259 * counter wraparound behavior.
260 *
261 * State updates are tracked by a global counter which increments at the first
262 * state update after a draw or a START_BINNING.  Tiles can then have their
263 * state updated at draw time with a set of cheap checks for whether the
264 * state's copy of the global counter matches the global counter the last time
265 * that state was written to the tile.
266 *
267 * The state counters are relatively small and wrap around quickly, so you
268 * could get false negatives for needing to update a particular state in the
269 * tile.  To avoid this, the hardware attempts to write all of the state in
270 * the tile at wraparound time.  This apparently is broken, so we just flush
271 * everything before that behavior is triggered.  A batch flush is sufficient
272 * to get our current contents drawn and reset the counters to 0.
273 *
274 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
275 * tiles with VC4_PACKET_RETURN_FROM_LIST.
276 */
277static void
278vc4_hw_2116_workaround(struct pipe_context *pctx, int vert_count)
279{
280        struct vc4_context *vc4 = vc4_context(pctx);
281        struct vc4_job *job = vc4_get_job_for_fbo(vc4);
282
283        if (job->draw_calls_queued + vert_count / 65535 >= VC4_HW_2116_COUNT) {
284                perf_debug("Flushing batch due to HW-2116 workaround "
285                           "(too many draw calls per scene\n");
286                vc4_job_submit(vc4, job);
287        }
288}
289
290/* A HW bug fails to draw 2-vert line loops.  Just draw it as two GL_LINES. */
291static bool
292vc4_draw_workaround_line_loop_2(struct pipe_context *pctx, const struct pipe_draw_info *info,
293             unsigned drawid_offset,
294             const struct pipe_draw_indirect_info *indirect,
295             const struct pipe_draw_start_count_bias *draw)
296{
297        if (draw->count != 2 || info->mode != PIPE_PRIM_LINE_LOOP)
298                return false;
299
300        struct pipe_draw_info local_info = *info;
301        local_info.mode = PIPE_PRIM_LINES;
302
303        /* Draw twice.  The vertex order will be wrong on the second prim, but
304         * that's probably not worth rewriting an index buffer over.
305         */
306        for (int i = 0; i < 2; i++)
307                pctx->draw_vbo(pctx, &local_info, drawid_offset, indirect, draw, 1);
308
309        return true;
310}
311
/**
 * The pipe_context::draw_vbo entry point: sets up the binner job, emits
 * shader state as needed, and emits the indexed or array primitive packets.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
             unsigned drawid_offset,
             const struct pipe_draw_indirect_info *indirect,
             const struct pipe_draw_start_count_bias *draws,
             unsigned num_draws)
{
        /* We only handle one draw at a time; split multi-draws into repeated
         * calls of this function.
         */
        if (num_draws > 1) {
                util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
                return;
        }

        /* Nothing to do for an empty direct draw. */
        if (!indirect && (!draws[0].count || !info->instance_count))
           return;

        struct vc4_context *vc4 = vc4_context(pctx);

	if (!indirect &&
	    !info->primitive_restart &&
	    !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
		return;

        /* HW bug workaround: 2-vert line loops get redrawn as two lines. */
        if (vc4_draw_workaround_line_loop_2(pctx, info, drawid_offset, indirect, draws))
                return;

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        /* May submit the current job if we'd wrap the state counters. */
        vc4_hw_2116_workaround(pctx, draws[0].count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        /* Make sure that the raster order flags haven't changed, which can
         * only be set at job granularity.
         */
        if (job->flags != vc4->rasterizer->tile_raster_order_flags) {
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        vc4_get_draw_cl_space(job, draws[0].count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        bool needs_drawarrays_shader_state = false;

        /* Re-emit the shader record if any state feeding it is dirty, or if
         * the index bias changed (which shifts attribute base offsets).
         */
        unsigned index_bias = info->index_size ? draws->index_bias : 0;
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != index_bias) {
                if (info->index_size)
                        vc4_emit_gl_shader_state(vc4, info, draws, 0);
                else
                        /* Defer to the drawarrays loop below, which may need
                         * extra_index_bias for the SW-5891 split.
                         */
                        needs_drawarrays_shader_state = true;
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->index_size) {
                uint32_t index_size = info->index_size;
                uint32_t offset = draws[0].start * index_size;
                struct pipe_resource *prsc;
                /* The HW only does 8/16-bit indices; 32-bit index buffers are
                 * converted to a 16-bit shadow copy.
                 */
                if (info->index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, info,
                                                           offset,
                                                           draws[0].count, &offset);
                        index_size = 2;
                } else {
                        if (info->has_user_indices) {
                                /* Upload user-space index data to a GEM BO. */
                                unsigned start_offset = draws[0].start * info->index_size;
                                prsc = NULL;
                                u_upload_data(vc4->uploader, start_offset,
                                              draws[0].count * index_size, 4,
                                              (char*)info->index.user + start_offset,
                                              &offset, &prsc);
                        } else {
                                prsc = info->index.resource;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                struct vc4_cl_out *bcl = cl_start(&job->bcl);

                /* The original design for the VC4 kernel UABI had multiple
                 * packets that used relocations in the BCL (some of which
                 * needed two BOs), but later modifications eliminated all but
                 * this one usage.  We have an arbitrary 32-bit offset value,
                 * and need to also supply an arbitrary 32-bit index buffer
                 * GEM handle, so we have this fake packet we emit in our BCL
                 * to be validated, which the kernel uses at validation time
                 * to perform the relocation in the IB packet (without
                 * emitting to the actual HW).
                 */
                uint32_t hindex = vc4_gem_hindex(job, rsc->bo);
                if (job->last_gem_handle_hindex != hindex) {
                        cl_u8(&bcl, VC4_PACKET_GEM_HANDLES);
                        cl_u32(&bcl, hindex);
                        cl_u32(&bcl, 0);
                        job->last_gem_handle_hindex = hindex;
                }

                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16:
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, draws[0].count);
                cl_u32(&bcl, offset);
                cl_u32(&bcl, vc4->max_index);

                cl_end(&job->bcl, bcl);
                job->draw_calls_queued++;

                /* Drop our reference to any temporary index buffer we made
                 * (shadow copy or user-index upload).
                 */
                if (info->index_size == 4 || info->has_user_indices)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = draws[0].count;
                uint32_t start = draws[0].start;
                uint32_t extra_index_bias = 0;
                static const uint32_t max_verts = 65535;

                /* GFXH-515 / SW-5891: The binner emits 16 bit indices for
                 * drawarrays, which means that if start + count > 64k it
                 * would truncate the top bits.  Work around this by emitting
                 * a limited number of primitives at a time and reemitting the
                 * shader state pointing farther down the vertex attribute
                 * arrays.
                 *
                 * To do this properly for line loops or trifans, we'd need to
                 * make a new VB containing the first vertex plus whatever
                 * remainder.
                 */
                if (start + count > max_verts) {
                        extra_index_bias = start;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }

                while (count) {
                        uint32_t this_count = count;
                        uint32_t step;

                        /* Each split re-points the attribute arrays via a
                         * fresh shader record with the new bias.
                         */
                        if (needs_drawarrays_shader_state) {
                                vc4_emit_gl_shader_state(vc4, info, draws,
                                                         extra_index_bias);
                        }

                        u_split_draw(info, max_verts, &this_count, &step);

                        cl_emit(&job->bcl, VERTEX_ARRAY_PRIMITIVES, array) {
                                array.primitive_mode = info->mode;
                                array.length = this_count;
                                array.index_of_first_vertex = start;
                        }
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }
        }

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        /* Mark the depth/stencil buffer contents as needing store (and now
         * initialized) if the corresponding tests are enabled.
         */
        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth_enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        /* If we've used half of the presumably 256MB CMA area, flush the job
         * so that we don't accumulate a job that will end up not being
         * executable.
         */
        if (job->bo_space > 128 * 1024 * 1024)
                vc4_flush(pctx);

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
531
532static uint32_t
533pack_rgba(enum pipe_format format, const float *rgba)
534{
535        union util_color uc;
536        util_pack_color(rgba, format, &uc);
537        if (util_format_get_blocksize(format) == 2)
538                return uc.us;
539        else
540                return uc.ui[0];
541}
542
/**
 * The pipe_context::clear entry point: records tile-based fast clears on the
 * current job, falling back to a blitter quad for partial Z/S clears.
 */
static void
vc4_clear(struct pipe_context *pctx, unsigned buffers, const struct pipe_scissor_state *scissor_state,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);
                unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;

                /* Clearing ZS will clear both Z and stencil, so if we're
                 * trying to clear just one then we need to draw a quad to do
                 * it instead.  We need to do this before setting up
                 * tile-based clears in vc4->job, because the blitter may
                 * submit the current job.
                 */
                if ((zsclear == PIPE_CLEAR_DEPTH ||
                     zsclear == PIPE_CLEAR_STENCIL) &&
                    (rsc->initialized_buffers & ~(zsclear | job->cleared)) &&
                    util_format_is_depth_and_stencil(vc4->framebuffer.zsbuf->format)) {
                        static const union pipe_color_union dummy_color = {};

                        perf_debug("Partial clear of Z+stencil buffer, "
                                   "drawing a quad instead of fast clearing\n");
                        vc4_blitter_save(vc4);
                        util_blitter_clear(vc4->blitter,
                                           vc4->framebuffer.width,
                                           vc4->framebuffer.height,
                                           1,
                                           zsclear,
                                           &dummy_color, depth, stencil,
                                           false);
                        buffers &= ~zsclear;
                        if (!buffers)
                                return;
                        /* The blitter may have submitted our job; reacquire. */
                        job = vc4_get_job_for_fbo(vc4);
                }
        }

        /* We can't flag new buffers for clearing once we've queued draws.  We
         * could avoid this by using the 3d engine to clear.
         */
        if (job->draw_calls_queued) {
                perf_debug("Flushing rendering to process new clear.\n");
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        if (buffers & PIPE_CLEAR_COLOR0) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.cbufs[0]->texture);
                uint32_t clear_color;

                if (vc4_rt_format_is_565(vc4->framebuffer.cbufs[0]->format)) {
                        /* In 565 mode, the hardware will be packing our color
                         * for us.
                         */
                        clear_color = pack_rgba(PIPE_FORMAT_R8G8B8A8_UNORM,
                                                color->f);
                } else {
                        /* Otherwise, we need to do this packing because we
                         * support multiple swizzlings of RGBA8888.
                         */
                        clear_color =
                                pack_rgba(vc4->framebuffer.cbufs[0]->format,
                                          color->f);
                }
                /* Both clear_color entries get the same packed value. */
                job->clear_color[0] = job->clear_color[1] = clear_color;
                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_COLOR0);
        }

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                /* Though the depth buffer is stored with Z in the high 24,
                 * for this field we just need to store it in the low 24.
                 */
                if (buffers & PIPE_CLEAR_DEPTH) {
                        job->clear_depth = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                                       depth);
                }
                if (buffers & PIPE_CLEAR_STENCIL)
                        job->clear_stencil = stencil;

                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_DEPTHSTENCIL);
        }

        /* Tile-based clears cover the whole framebuffer. */
        job->draw_min_x = 0;
        job->draw_min_y = 0;
        job->draw_max_x = vc4->framebuffer.width;
        job->draw_max_y = vc4->framebuffer.height;
        job->cleared |= buffers;
        job->resolve |= buffers;

        vc4_start_draw(vc4);
}
642
/* Scissored render-target clears are not implemented; just log it. */
static void
vc4_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                        const union pipe_color_union *color,
                        unsigned x, unsigned y, unsigned w, unsigned h,
			bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear RT\n");
}
651
/* Scissored depth/stencil clears are not implemented; just log it. */
static void
vc4_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                        unsigned buffers, double depth, unsigned stencil,
                        unsigned x, unsigned y, unsigned w, unsigned h,
			bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear DS\n");
}
660
/* Hooks up this file's draw and clear entry points on the context. */
void
vc4_draw_init(struct pipe_context *pctx)
{
        pctx->draw_vbo = vc4_draw_vbo;
        pctx->clear = vc4_clear;
        pctx->clear_render_target = vc4_clear_render_target;
        pctx->clear_depth_stencil = vc4_clear_depth_stencil;
}
669