Lines Matching defs:hwtnl (matches for the hwtnl symbol in Mesa's SVGA Gallium driver, src/gallium/drivers/svga/svga_draw.c; the number leading each match is its line in that file)
51 struct svga_hwtnl *hwtnl = CALLOC_STRUCT(svga_hwtnl);
52 if (!hwtnl)
55 hwtnl->svga = svga;
57 hwtnl->cmd.swc = svga->swc;
59 return hwtnl;
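
   The matches above are the body of svga_hwtnl_create(). A minimal sketch of
   the constructor, reconstructed around the matched lines (the early-return
   error path is an assumption; the source may use Mesa's goto-fail style):

      struct svga_hwtnl *
      svga_hwtnl_create(struct svga_context *svga)
      {
         /* CALLOC_STRUCT returns a zero-initialized struct svga_hwtnl */
         struct svga_hwtnl *hwtnl = CALLOC_STRUCT(svga_hwtnl);
         if (!hwtnl)
            return NULL;                 /* allocation failed */

         hwtnl->svga = svga;             /* back-pointer to the context */
         hwtnl->cmd.swc = svga->swc;     /* winsys context commands go to */

         return hwtnl;
      }
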
67 svga_hwtnl_destroy(struct svga_hwtnl *hwtnl)
73 pipe_resource_reference(&hwtnl->index_cache[i][j].buffer, NULL);
77 for (i = 0; i < hwtnl->cmd.vbuf_count; i++)
78 pipe_vertex_buffer_unreference(&hwtnl->cmd.vbufs[i]);
80 for (i = 0; i < hwtnl->cmd.prim_count; i++)
81 pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
83 FREE(hwtnl);
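
   svga_hwtnl_destroy() drops every buffer reference the hwtnl still holds
   before freeing it. A sketch, assuming the index cache is a 2-D array
   bounded by PIPE_PRIM_MAX and IDX_CACHE_MAX (those bounds are assumptions;
   the reference-release idiom itself is standard Gallium):

      void
      svga_hwtnl_destroy(struct svga_hwtnl *hwtnl)
      {
         unsigned i, j;

         /* release cached index buffers (cache shape assumed) */
         for (i = 0; i < PIPE_PRIM_MAX; i++)
            for (j = 0; j < IDX_CACHE_MAX; j++)
               pipe_resource_reference(&hwtnl->index_cache[i][j].buffer, NULL);

         /* drop vertex-buffer references */
         for (i = 0; i < hwtnl->cmd.vbuf_count; i++)
            pipe_vertex_buffer_unreference(&hwtnl->cmd.vbufs[i]);

         /* drop index buffers of still-queued primitives */
         for (i = 0; i < hwtnl->cmd.prim_count; i++)
            pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);

         FREE(hwtnl);
      }
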
88 svga_hwtnl_set_flatshade(struct svga_hwtnl *hwtnl,
91 struct svga_screen *svgascreen = svga_screen(hwtnl->svga->pipe.screen);
94 hwtnl->api_pv = (flatshade && !flatshade_first) ? PV_LAST : PV_FIRST;
99 hwtnl->hw_pv = hwtnl->api_pv;
103 hwtnl->hw_pv = PV_FIRST;
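
   These lines are the provoking-vertex setup in svga_hwtnl_set_flatshade():
   api_pv records what the state tracker asked for (the last vertex only
   provokes when flat shading is enabled and flatshade_first is off), while
   hw_pv is clamped to what the device supports. A sketch of the branch,
   assuming the capability flag is svgascreen->haveProvokingVertex:

      /* api_pv: the provoking vertex the API asked for;
       * hw_pv:  what the device will actually use.
       */
      hwtnl->api_pv = (flatshade && !flatshade_first) ? PV_LAST : PV_FIRST;

      if (svgascreen->haveProvokingVertex) {
         hwtnl->hw_pv = hwtnl->api_pv;   /* device honors the API mode */
      }
      else {
         hwtnl->hw_pv = PV_FIRST;        /* device only does first-vertex;
                                          * the mismatch is fixed up at
                                          * draw time */
      }
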
109 svga_hwtnl_set_fillmode(struct svga_hwtnl *hwtnl, unsigned mode)
111 hwtnl->api_fillmode = mode;
116 svga_hwtnl_vertex_decls(struct svga_hwtnl *hwtnl,
122 assert(hwtnl->cmd.prim_count == 0);
123 hwtnl->cmd.vdecl_count = count;
124 hwtnl->cmd.vdecl_layout_id = layout_id;
125 memcpy(hwtnl->cmd.vdecl, decls, count * sizeof(*decls));
126 memcpy(hwtnl->cmd.vdecl_buffer_index, buffer_indexes,
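
   svga_hwtnl_vertex_decls() swaps in a new vertex-declaration set; the
   assert on line 122 guarantees this never happens while VGPU9 primitives
   are still queued, since those would then be drawn with the wrong layout.
   A sketch of the copy (the size argument of the second memcpy is an
   assumption):

      assert(hwtnl->cmd.prim_count == 0);     /* no queued prims pending */

      hwtnl->cmd.vdecl_count = count;
      hwtnl->cmd.vdecl_layout_id = layout_id; /* VGPU10 element-layout id */
      memcpy(hwtnl->cmd.vdecl, decls, count * sizeof(*decls));
      memcpy(hwtnl->cmd.vdecl_buffer_index, buffer_indexes,
             count * sizeof(unsigned));       /* size assumed */
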
135 svga_hwtnl_vertex_buffers(struct svga_hwtnl *hwtnl,
138 struct pipe_vertex_buffer *dst = hwtnl->cmd.vbufs;
147 for ( ; i < hwtnl->cmd.vbuf_count; i++) {
152 hwtnl->cmd.vbuf_count = count;
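
   svga_hwtnl_vertex_buffers() uses the usual Gallium reference swap: take a
   reference on each incoming buffer, then unreference whatever is left over
   from a previously larger set. A sketch, using the u_inlines.h helpers:

      struct pipe_vertex_buffer *dst = hwtnl->cmd.vbufs;
      unsigned i;

      for (i = 0; i < count; i++)
         pipe_vertex_buffer_reference(&dst[i], &buffers[i]); /* ref new set */

      for ( ; i < hwtnl->cmd.vbuf_count; i++)
         pipe_vertex_buffer_unreference(&dst[i]);            /* drop stale tail */

      hwtnl->cmd.vbuf_count = count;
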
161 svga_hwtnl_is_buffer_referred(struct svga_hwtnl *hwtnl,
170 if (!hwtnl->cmd.prim_count) {
174 for (i = 0; i < hwtnl->cmd.vbuf_count; ++i) {
175 if (hwtnl->cmd.vbufs[i].buffer.resource == buffer) {
180 for (i = 0; i < hwtnl->cmd.prim_count; ++i) {
181 if (hwtnl->cmd.prim_ib[i] == buffer) {
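
   svga_hwtnl_is_buffer_referred() answers "would writing this buffer
   corrupt a queued draw?": with nothing queued the answer is no; otherwise
   both the bound vertex buffers and the per-primitive index buffers are
   scanned. A sketch of the two scans:

      if (!hwtnl->cmd.prim_count)
         return FALSE;                /* nothing queued, nothing at risk */

      for (i = 0; i < hwtnl->cmd.vbuf_count; ++i)
         if (hwtnl->cmd.vbufs[i].buffer.resource == buffer)
            return TRUE;

      for (i = 0; i < hwtnl->cmd.prim_count; ++i)
         if (hwtnl->cmd.prim_ib[i] == buffer)
            return TRUE;

      return FALSE;
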
191 draw_vgpu9(struct svga_hwtnl *hwtnl)
193 struct svga_winsys_context *swc = hwtnl->cmd.swc;
194 struct svga_context *svga = hwtnl->svga;
221 for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
222 unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
223 handle = svga_buffer_handle(svga, hwtnl->cmd.vbufs[j].buffer.resource,
231 for (i = 0; i < hwtnl->cmd.prim_count; i++) {
232 if (hwtnl->cmd.prim_ib[i]) {
233 handle = svga_buffer_handle(svga, hwtnl->cmd.prim_ib[i],
275 hwtnl->cmd.prim_count);
279 hwtnl->cmd.vdecl_count,
280 &prim, hwtnl->cmd.prim_count);
285 hwtnl->cmd.vdecl,
286 hwtnl->cmd.vdecl_count * sizeof hwtnl->cmd.vdecl[0]);
288 for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
297 if (hwtnl->cmd.prim_count == 1) {
298 vdecl[i].rangeHint.first = hwtnl->cmd.min_index[0];
299 vdecl[i].rangeHint.last = hwtnl->cmd.max_index[0] + 1;
312 hwtnl->cmd.prim, hwtnl->cmd.prim_count * sizeof hwtnl->cmd.prim[0]);
314 for (i = 0; i < hwtnl->cmd.prim_count; i++) {
318 pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
323 hwtnl->cmd.prim_count = 0;
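
   draw_vgpu9() turns the queued primitives into one SVGA3D_BeginDrawPrimitives
   command: resolve a winsys handle for every referenced vertex and index
   buffer, reserve command space, copy the cached vdecl and prim arrays into
   it, patch in surface relocations, then drop the queued index-buffer
   references and reset the queue. A condensed sketch of that flow (error
   handling and the relocation calls are abbreviated; vb_handle/ib_handle are
   assumed names for local handle arrays):

      SVGA3dVertexDecl *vdecl;
      SVGA3dPrimitiveRange *prim;

      /* 1. resolve winsys handles so the buffers get validated/uploaded */
      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
         vb_handle[i] = svga_buffer_handle(svga,
                                           hwtnl->cmd.vbufs[j].buffer.resource,
                                           PIPE_BIND_VERTEX_BUFFER);
      }
      for (i = 0; i < hwtnl->cmd.prim_count; i++) {
         if (hwtnl->cmd.prim_ib[i])
            ib_handle[i] = svga_buffer_handle(svga, hwtnl->cmd.prim_ib[i],
                                              PIPE_BIND_INDEX_BUFFER);
      }

      /* 2. reserve command space; vdecl/prim point into the command buffer */
      ret = SVGA3D_BeginDrawPrimitives(swc, &vdecl, hwtnl->cmd.vdecl_count,
                                       &prim, hwtnl->cmd.prim_count);

      /* 3. copy cached state; a range hint helps the host when exactly one
       *    primitive is queued
       */
      memcpy(vdecl, hwtnl->cmd.vdecl,
             hwtnl->cmd.vdecl_count * sizeof hwtnl->cmd.vdecl[0]);
      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         if (hwtnl->cmd.prim_count == 1) {
            vdecl[i].rangeHint.first = hwtnl->cmd.min_index[0];
            vdecl[i].rangeHint.last  = hwtnl->cmd.max_index[0] + 1;
         }
         /* surface relocation ties vdecl[i].array.surfaceId to vb_handle[i] */
      }
      memcpy(prim, hwtnl->cmd.prim,
             hwtnl->cmd.prim_count * sizeof hwtnl->cmd.prim[0]);

      /* 4. the queue is consumed: release index buffers and reset */
      for (i = 0; i < hwtnl->cmd.prim_count; i++)
         pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
      hwtnl->cmd.prim_count = 0;
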
666 validate_vertex_buffers(struct svga_hwtnl *hwtnl,
669 struct svga_context *svga = hwtnl->svga;
673 const unsigned vbuf_count = so_vertex_count ? 1 : hwtnl->cmd.vbuf_count;
681 if (svga->state.hw_draw.layout_id != hwtnl->cmd.vdecl_layout_id) {
684 hwtnl->cmd.vdecl_layout_id);
688 svga->state.hw_draw.layout_id = hwtnl->cmd.vdecl_layout_id;
705 vbuffer_attrs[0].stride = hwtnl->cmd.vbufs[0].stride;
706 vbuffer_attrs[0].offset = hwtnl->cmd.vbufs[0].buffer_offset;
718 svga_buffer(hwtnl->cmd.vbufs[i].buffer.resource);
720 vbuffer_attrs[i].stride = hwtnl->cmd.vbufs[i].stride;
721 vbuffer_attrs[i].offset = hwtnl->cmd.vbufs[i].buffer_offset;
756 svga_buffer(hwtnl->cmd.vbufs[i].buffer.resource);
779 if (svga->state.hw_draw.layout_id != hwtnl->cmd.vdecl_layout_id) {
782 hwtnl->cmd.vdecl_layout_id);
786 svga->state.hw_draw.layout_id = hwtnl->cmd.vdecl_layout_id;
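
   validate_vertex_buffers() (VGPU10) avoids redundant state emission: the
   element-layout id is compared against the last one programmed and
   SetInputLayout is emitted only on change; likewise per-buffer
   stride/offset attributes are gathered from hwtnl->cmd.vbufs, with only
   slot 0 used when the vertex count comes from streamout. A sketch of the
   layout check, assuming the SVGA3D_vgpu10_SetInputLayout helper:

      if (svga->state.hw_draw.layout_id != hwtnl->cmd.vdecl_layout_id) {
         ret = SVGA3D_vgpu10_SetInputLayout(svga->swc,
                                            hwtnl->cmd.vdecl_layout_id);
         if (ret != PIPE_OK)
            return ret;
         /* remember what the device now has so the next draw can skip this */
         svga->state.hw_draw.layout_id = hwtnl->cmd.vdecl_layout_id;
      }
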
928 validate_index_buffer(struct svga_hwtnl *hwtnl,
932 struct svga_context *svga = hwtnl->svga;
953 svga_sws(hwtnl->svga)->have_index_vertex_buffer_offset_cmd &&
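
   validate_index_buffer() binds the index buffer for a VGPU10 draw; the
   have_index_vertex_buffer_offset_cmd winsys flag (line 953) gates an
   optimization: hosts that support the offset-carrying bind command can
   rebind the same surface at a new offset instead of performing a full
   rebind. A hedged fragment of that gate (same_buffer_new_offset stands in
   for the real rebind test, which is not shown in the matches):

      const boolean has_offset_cmd =
         svga_sws(hwtnl->svga)->have_index_vertex_buffer_offset_cmd;

      if (has_offset_cmd && same_buffer_new_offset) {
         /* emit the offset-carrying bind, updating only the offset and
          * saving a full index-buffer state change */
      }
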
997 draw_vgpu10(struct svga_hwtnl *hwtnl,
1006 struct svga_context *svga = hwtnl->svga;
1011 assert(hwtnl->cmd.prim_count == 0);
1063 ret = validate_vertex_buffers(hwtnl, so_vertex_count);
1068 ret = validate_index_buffer(hwtnl, range, ib);
1160 hwtnl->cmd.prim_count = 0;
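
   draw_vgpu10() never queues: the assert on entry (line 1011) documents
   that the VGPU9 queue must be empty, vertex buffers are validated before
   the index buffer, and the draw is emitted immediately. A sketch of that
   ordering (intermediate steps are elided):

      assert(hwtnl->cmd.prim_count == 0);   /* VGPU10 draws immediately */

      ret = validate_vertex_buffers(hwtnl, so_vertex_count);
      if (ret != PIPE_OK)
         return ret;

      if (ib) {
         ret = validate_index_buffer(hwtnl, range, ib);
         if (ret != PIPE_OK)
            return ret;
      }

      /* ... emit the draw command ... */

      hwtnl->cmd.prim_count = 0;            /* keep the invariant */
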
1175 svga_hwtnl_flush(struct svga_hwtnl *hwtnl)
1179 SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLFLUSH);
1181 if (!svga_have_vgpu10(hwtnl->svga) && hwtnl->cmd.prim_count) {
1183 ret = draw_vgpu9(hwtnl);
1186 SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
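
   svga_hwtnl_flush() only has work to do on VGPU9, the one path that queues
   primitives; on VGPU10 every draw was emitted immediately, so the
   prim_count test makes flushing a cheap no-op. Reconstructed, close to
   verbatim from the matched lines:

      enum pipe_error
      svga_hwtnl_flush(struct svga_hwtnl *hwtnl)
      {
         enum pipe_error ret = PIPE_OK;

         SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga),
                              SVGA_STATS_TIME_HWTNLFLUSH);

         /* only VGPU9 queues primitives (see svga_hwtnl_prim) */
         if (!svga_have_vgpu10(hwtnl->svga) && hwtnl->cmd.prim_count)
            ret = draw_vgpu9(hwtnl);

         SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
         return ret;
      }
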
1192 svga_hwtnl_set_index_bias(struct svga_hwtnl *hwtnl, int index_bias)
1194 hwtnl->index_bias = index_bias;
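
   The stored bias is not applied here; it is folded into each range as it
   is queued (line 1398 adds it to the range's indexBias) and into the
   bounds checking in check_draw_params (line 1222). In effect:

      /* at queue time (n = hwtnl->cmd.prim_count): */
      hwtnl->cmd.prim[n].indexBias = range->indexBias + hwtnl->index_bias;
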
1207 check_draw_params(struct svga_hwtnl *hwtnl,
1214 assert(!svga_have_vgpu10(hwtnl->svga));
1216 for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
1217 unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
1218 const struct pipe_vertex_buffer *vb = &hwtnl->cmd.vbufs[j];
1220 unsigned offset = hwtnl->cmd.vdecl[i].array.offset;
1221 unsigned stride = hwtnl->cmd.vdecl[i].array.stride;
1222 int index_bias = (int) range->indexBias + hwtnl->index_bias;
1237 switch (hwtnl->cmd.vdecl[i].identity.type) {
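
   check_draw_params() is a debug-only sanity pass for the VGPU9 queue
   (hence the assert on line 1214): for every vertex declaration it finds
   the backing pipe_vertex_buffer, combines the declaration's offset and
   stride with the range's total index bias, then switches on the
   declaration type (line 1237) to get the element width and verify the
   furthest-addressed vertex still fits in the buffer. The per-declaration
   setup, reconstructed from the matches (the final assert's exact shape is
   an assumption):

      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
         const struct pipe_vertex_buffer *vb = &hwtnl->cmd.vbufs[j];
         unsigned offset = hwtnl->cmd.vdecl[i].array.offset;
         unsigned stride = hwtnl->cmd.vdecl[i].array.stride;
         /* bias from this range plus the globally registered bias */
         int index_bias = (int) range->indexBias + hwtnl->index_bias;
         unsigned width;   /* element size, set by the type switch */

         switch (hwtnl->cmd.vdecl[i].identity.type) {
         case SVGA3D_DECLTYPE_FLOAT1:
            width = 4;
            break;
         /* ... one case per SVGA3D_DECLTYPE_*, as in the source ... */
         default:
            assert(0);
            width = 0;
            break;
         }
         /* then, roughly: assert(offset + (index_bias + max_index) * stride
          *                       + width <= vb->buffer.resource->width0); */
      }
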
1356 svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
1367 SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLPRIM);
1369 if (svga_have_vgpu10(hwtnl->svga)) {
1371 SVGA_RETRY(hwtnl->svga, draw_vgpu10(hwtnl, range, vcount, min_index,
1380 check_draw_params(hwtnl, range, min_index, max_index, ib);
1387 if (hwtnl->cmd.prim_count + 1 >= QSZ) {
1388 ret = svga_hwtnl_flush(hwtnl);
1394 hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
1395 hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;
1397 hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
1398 hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;
1400 pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
1401 hwtnl->cmd.prim_count++;
1405 SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
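
   svga_hwtnl_prim() is the split point between the two generations: VGPU10
   draws immediately, wrapped in SVGA_RETRY so a command-buffer
   out-of-memory triggers a flush and retry; VGPU9 validates the parameters
   (debug builds only), flushes if the queue would overflow its QSZ slots,
   and appends the range with the current index bias folded in. The VGPU9
   append, reconstructed from the matches (the error label is assumed):

      if (hwtnl->cmd.prim_count + 1 >= QSZ) {
         /* queue full: emit the pending primitives first */
         ret = svga_hwtnl_flush(hwtnl);
         if (ret != PIPE_OK)
            goto done;   /* label assumed */
      }

      hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
      hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;
      hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
      hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;

      /* hold a reference on the index buffer until draw_vgpu9 consumes it */
      pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
      hwtnl->cmd.prim_count++;
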
1414 svga_hwtnl_has_pending_prim(struct svga_hwtnl *hwtnl)
1416 return hwtnl->cmd.prim_count > 0;