Lines matching refs:exec (each entry is a source line number followed by the matching line)

55 vbo_reset_all_attr(struct vbo_exec_context *exec);
64 vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
66 if (exec->vtx.prim_count == 0) {
67 exec->vtx.copied.nr = 0;
68 exec->vtx.vert_count = 0;
69 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
72 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
73 unsigned last = exec->vtx.prim_count - 1;
74 struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
75 const bool last_begin = exec->vtx.markers[last].begin;
79 last_draw->count = exec->vtx.vert_count - last_draw->start;
81 exec->vtx.markers[last].end = 0;
85 if (exec->vtx.mode[last] == GL_LINE_LOOP &&
87 !exec->vtx.markers[last].end) {
89 exec->vtx.mode[last] = GL_LINE_STRIP;
102 if (exec->vtx.vert_count)
103 vbo_exec_vtx_flush(exec);
105 exec->vtx.prim_count = 0;
106 exec->vtx.copied.nr = 0;
111 assert(exec->vtx.prim_count == 0);
114 exec->vtx.mode[0] = ctx->Driver.CurrentExecPrimitive;
115 exec->vtx.draw[0].start = 0;
116 exec->vtx.markers[0].begin = 0;
117 exec->vtx.prim_count++;
119 if (exec->vtx.copied.nr == last_count)
120 exec->vtx.markers[0].begin = last_begin;
131 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
136 * to exec->vtx.copied.
138 vbo_exec_wrap_buffers(exec);
140 if (!exec->vtx.buffer_ptr) {
147 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
149 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
150 memcpy(exec->vtx.buffer_ptr,
151 exec->vtx.copied.buffer,
153 exec->vtx.buffer_ptr += numComponents;
154 exec->vtx.vert_count += exec->vtx.copied.nr;
156 exec->vtx.copied.nr = 0;
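
These wrap-path matches (55-156) come from Mesa's immediate-mode vertex code: vbo_exec_wrap_buffers() ends the last primitive at the current vertex count, flushes the accumulated draws, and reopens a continuation primitive with begin = 0 (the 111-120 matches), while vbo_exec_vtx_wrap() then replays the vertices saved in exec->vtx.copied at the start of the fresh buffer so the open primitive continues seamlessly. A minimal self-contained sketch of that replay step follows; the names, sizes and the trivial flush are illustrative, not the real Mesa API.

    #include <assert.h>
    #include <string.h>

    #define VERTEX_SIZE 8      /* floats per vertex (illustrative)          */
    #define MAX_VERTS   1024   /* capacity of the mapped buffer             */
    #define MAX_COPIED  4      /* worst case vertices carried across a wrap */

    struct wrap_demo {
       float buffer[MAX_VERTS * VERTEX_SIZE];
       float *buffer_ptr;                       /* write cursor            */
       unsigned vert_count;                     /* vertices already stored */
       float copied[MAX_COPIED * VERTEX_SIZE];  /* vertices to replay      */
       unsigned copied_nr;
    };

    /* Flush stand-in: the real code submits the accumulated draws here. */
    static void wrap_demo_flush(struct wrap_demo *d)
    {
       d->vert_count = 0;
       d->buffer_ptr = d->buffer;
    }

    /* Rough equivalent of vbo_exec_vtx_wrap(): flush, then replay the
     * copied vertices into the now-empty buffer. */
    static void wrap_demo_wrap(struct wrap_demo *d)
    {
       wrap_demo_flush(d);

       assert(MAX_VERTS - d->vert_count > d->copied_nr);

       unsigned num_floats = d->copied_nr * VERTEX_SIZE;
       memcpy(d->buffer_ptr, d->copied, num_floats * sizeof(float));
       d->buffer_ptr += num_floats;
       d->vert_count += d->copied_nr;
       d->copied_nr = 0;
    }

The 85-89 matches are the special case in the same path: an incomplete GL_LINE_LOOP that has to wrap is drawn as a GL_LINE_STRIP for now, because the loop cannot be closed until glEnd.
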
164 vbo_exec_copy_to_current(struct vbo_exec_context *exec)
166 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
168 GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
174 /* Note: the exec->vtx.current[i] pointers point into the
181 assert(exec->vtx.attr[i].size);
187 if (exec->vtx.attr[i].type == GL_DOUBLE ||
188 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
190 memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
194 exec->vtx.attr[i].size,
195 exec->vtx.attrptr[i],
196 exec->vtx.attr[i].type);
225 if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
226 (exec->vtx.attr[i].size >> dmul_shift) != vbo->current[i].Format.Size) {
228 exec->vtx.attr[i].size >> dmul_shift,
229 exec->vtx.attr[i].type);
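
vbo_exec_copy_to_current() (164-229) propagates the last value written for every enabled attribute except the position into the "current" attribute state so later queries and vertices observe it: 64-bit types (GL_DOUBLE, GL_UNSIGNED_INT64_ARB) are copied bit-exactly, everything else is widened to a canonical vec4 with the defaults (0, 0, 0, 1), and a size or type change also updates the recorded format (the 225-229 matches, where dmul_shift accounts for 64-bit components taking two words each). A hedged sketch of just the copy step, with invented names; sizes count 32-bit words as in the matches above.

    #include <string.h>

    enum demo_attr_type { DEMO_FLOAT, DEMO_DOUBLE, DEMO_UINT64 };

    /* Copy one attribute's last-written value into an 8-word "current"
     * slot.  'size' counts 32-bit words, so e.g. a dvec3 arrives as 6. */
    static void copy_attr_to_current(float current[8], const float *attrptr,
                                     int size, enum demo_attr_type type)
    {
       if (type == DEMO_DOUBLE || type == DEMO_UINT64) {
          /* 64-bit data must not pass through float conversions:
           * copy the raw bits and leave the rest zeroed. */
          memset(current, 0, 8 * sizeof(float));
          memcpy(current, attrptr, size * sizeof(float));
       } else {
          /* Widen to a vec4 with the GL defaults (0, 0, 0, 1). */
          current[0] = 0.0f;
          current[1] = 0.0f;
          current[2] = 0.0f;
          current[3] = 1.0f;
          memcpy(current, attrptr, size * sizeof(float));
       }
    }
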
249 vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
252 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
254 const GLint lastcount = exec->vtx.vert_count;
256 const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
257 const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
258 const GLuint oldSize = exec->vtx.attr[attr].size;
263 if (unlikely(!exec->vtx.buffer_ptr)) {
265 assert(exec->vtx.bufferobj);
266 vbo_exec_vtx_map(exec);
267 assert(exec->vtx.buffer_ptr);
271 * to exec->vtx.copied.
273 vbo_exec_wrap_buffers(exec);
275 if (unlikely(exec->vtx.copied.nr)) {
280 memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
287 !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
288 vbo_exec_copy_to_current(exec);
289 vbo_reset_all_attr(exec);
294 exec->vtx.attr[attr].size = newSize;
295 exec->vtx.attr[attr].active_size = newSize;
296 exec->vtx.attr[attr].type = newType;
297 exec->vtx.vertex_size += newSize - oldSize;
298 exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
299 exec->vtx.max_vert = vbo_compute_max_verts(exec);
300 exec->vtx.vert_count = 0;
301 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
302 exec->vtx.enabled |= BITFIELD64_BIT(attr);
306 unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;
311 fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
312 fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
313 fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
314 fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;
341 GLbitfield64 enabled = exec->vtx.enabled &
347 if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
348 exec->vtx.attrptr[i] += size_diff;
353 exec->vtx.attrptr[attr] = exec->vtx.vertex +
354 exec->vtx.vertex_size_no_pos - newSize;
359 exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;
366 if (unlikely(exec->vtx.copied.nr)) {
367 fi_type *data = exec->vtx.copied.buffer;
368 fi_type *dest = exec->vtx.buffer_ptr;
370 assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);
372 for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
373 GLbitfield64 enabled = exec->vtx.enabled;
376 GLuint sz = exec->vtx.attr[j].size;
377 GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
378 GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;
387 exec->vtx.attr[j].type);
400 dest += exec->vtx.vertex_size;
403 exec->vtx.buffer_ptr = dest;
404 exec->vtx.vert_count += exec->vtx.copied.nr;
405 exec->vtx.copied.nr = 0;
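
The 366-405 matches are the tail of vbo_exec_wrap_upgrade_vertex(): when the attribute layout changes while vertices are still parked in exec->vtx.copied, each saved vertex is rewritten attribute by attribute from its offset in the old layout to its offset in the new one before being appended to the freshly reset buffer. A hedged, float-only sketch of that translation loop with invented names:

    #include <string.h>

    /* Rewrite 'count' saved vertices from an old interleaved layout to a
     * new one.  For each attribute, old_offset[j]/new_offset[j] give its
     * word offset in the respective layouts and size[j] its word count.
     * (The real code additionally fills a newly introduced attribute
     * from the current attribute values.) */
    static void replay_copied_vertices(float *dst, const float *src,
                                       unsigned count,
                                       unsigned old_vertex_size,
                                       unsigned new_vertex_size,
                                       const unsigned *old_offset,
                                       const unsigned *new_offset,
                                       const unsigned *size,
                                       unsigned num_attribs)
    {
       for (unsigned i = 0; i < count; i++) {
          for (unsigned j = 0; j < num_attribs; j++) {
             memcpy(dst + new_offset[j], src + old_offset[j],
                    size[j] * sizeof(float));
          }
          src += old_vertex_size;
          dst += new_vertex_size;
       }
    }
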
421 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
425 if (newSize > exec->vtx.attr[attr].size ||
426 newType != exec->vtx.attr[attr].type) {
430 vbo_exec_wrap_upgrade_vertex(exec, attr, newSize, newType);
432 else if (newSize < exec->vtx.attr[attr].active_size) {
435 vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);
440 for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
441 exec->vtx.attrptr[attr][i-1] = id[i-1];
443 exec->vtx.attr[attr].active_size = newSize;
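
vbo_exec_fixup_vertex() (421-443) reconciles an incoming attribute with the current vertex layout: a larger size or a different type forces a full layout upgrade through vbo_exec_wrap_upgrade_vertex() (249-405 above), which grows the interleaved vertex, recomputes every attrptr and replays the copied vertices; a smaller size keeps the layout and merely resets the trailing components to the identity values before shrinking active_size. A simplified sketch of that decision for a plain float attribute, with the upgrade reduced to a stub:

    /* One attribute slot inside the interleaved vertex (illustrative). */
    struct attr_slot {
       int size;          /* components allocated in the layout  */
       int active_size;   /* components currently being written  */
       unsigned type;     /* GL_FLOAT, GL_DOUBLE, ...            */
       float *ptr;        /* this attribute's spot in the vertex */
    };

    /* Stub for vbo_exec_wrap_upgrade_vertex(); the real function also
     * rebuilds the whole vertex layout and replays copied vertices. */
    static void upgrade_layout(struct attr_slot *a, int new_size,
                               unsigned new_type)
    {
       a->size = new_size;
       a->active_size = new_size;
       a->type = new_type;
    }

    static void fixup_attr(struct attr_slot *a, int new_size,
                           unsigned new_type)
    {
       static const float identity[4] = { 0.0f, 0.0f, 0.0f, 1.0f };

       if (new_size > a->size || new_type != a->type) {
          /* Layout too small or wrong type: rebuild it. */
          upgrade_layout(a, new_size, new_type);
       } else if (new_size < a->active_size) {
          /* Layout is big enough: keep it, but reset the now-unused
           * trailing components so stale data is not drawn. */
          for (int i = new_size; i < a->size; i++)
             a->ptr[i] = identity[i];
          a->active_size = new_size;
       }
    }
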
486 struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
490 /* store a copy of the attribute in exec except for glVertex */ \
493 if (unlikely(exec->vtx.attr[A].active_size != N * sz || \
494 exec->vtx.attr[A].type != T)) { \
498 C *dest = (C *)exec->vtx.attrptr[A]; \
503 assert(exec->vtx.attr[A].type == T); \
509 int size = exec->vtx.attr[0].size; \
513 exec->vtx.attr[0].type != T)) { \
514 vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T); \
517 uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
518 uint32_t *src = (uint32_t *)exec->vtx.vertex; \
519 unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
521 /* Copy over attributes from exec. */ \
554 exec->vtx.buffer_ptr = (fi_type*)dst; \
559 if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert)) \
560 vbo_exec_vtx_wrap(exec); \
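
The 486-560 matches sit inside the ATTR macro that every immediate-mode attribute entry point expands to: a non-position attribute is checked against its recorded size/type (493-494), fixed up if necessary, and stored into its slot of exec->vtx.vertex, while writing attribute 0 (the position, 509-519) is what completes a vertex, after which the vertex count is bumped and a full buffer triggers vbo_exec_vtx_wrap() (559-560). A compressed sketch of that "position finishes the vertex" rule; the layout and helper names are invented.

    #include <string.h>

    #define DEMO_VERTEX_WORDS 16   /* illustrative interleaved layout size */

    struct attr_demo {
       unsigned attr_offset[32];        /* word offset of each attribute  */
       unsigned vertex_size;            /* total 32-bit words per vertex  */
       unsigned vert_count, max_vert;
       float vertex[DEMO_VERTEX_WORDS]; /* the vertex being assembled     */
       float *buffer_ptr;               /* cursor into the mapped buffer  */
    };

    /* Stand-in for vbo_exec_vtx_wrap(); see the wrap sketch earlier. */
    static void attr_demo_wrap(struct attr_demo *v)
    {
       v->vert_count = 0;
    }

    /* Store one attribute; attribute 0 (the position) emits the vertex. */
    static void attr_demo_store(struct attr_demo *v, unsigned attr,
                                const float *val, unsigned size)
    {
       memcpy(v->vertex + v->attr_offset[attr], val, size * sizeof(float));

       if (attr == 0) {
          memcpy(v->buffer_ptr, v->vertex, v->vertex_size * sizeof(float));
          v->buffer_ptr += v->vertex_size;

          if (++v->vert_count >= v->max_vert)
             attr_demo_wrap(v);
       }
    }

In the real macro the non-position attributes are copied out of exec->vtx.vertex and the position components are written straight into the destination buffer (the 517-519 matches); the net effect is the same as the simplified store above.
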
679 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
681 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
684 if (exec->vtx.vert_count) {
685 vbo_exec_vtx_flush(exec);
688 if (exec->vtx.vertex_size) {
689 vbo_exec_copy_to_current(exec);
690 vbo_reset_all_attr(exec);
701 vbo_exec_copy_to_current(exec);
713 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
717 if (exec->eval.recalculate_maps)
718 vbo_exec_eval_update(exec);
721 if (exec->eval.map1[i].map)
722 if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
723 vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
727 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
728 exec->vtx.vertex_size * sizeof(GLfloat));
730 vbo_exec_do_EvalCoord1f(exec, u);
732 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
733 exec->vtx.vertex_size * sizeof(GLfloat));
741 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
745 if (exec->eval.recalculate_maps)
746 vbo_exec_eval_update(exec);
749 if (exec->eval.map2[i].map)
750 if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
751 vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
755 if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
759 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
760 exec->vtx.vertex_size * sizeof(GLfloat));
762 vbo_exec_do_EvalCoord2f(exec, u, v);
764 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
765 exec->vtx.vertex_size * sizeof(GLfloat));
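
The glEvalCoord matches (713-765) show the evaluator path: it first makes sure every attribute driven by an evaluator map has the expected size via vbo_exec_fixup_vertex (721-723 and 749-755, the 2D case also checking the normal), then saves the assembled vertex into exec->vtx.copied.buffer, runs the evaluator, and restores the vertex afterwards. A tiny sketch of that save/run/restore pattern with an abstract evaluator callback (names invented):

    #include <string.h>

    /* Save the assembled vertex, let the evaluator write attributes and
     * emit its vertex, then restore the saved values.  'scratch' plays
     * the role of exec->vtx.copied.buffer here. */
    static void eval_with_saved_vertex(float *vertex, float *scratch,
                                       unsigned vertex_size_words,
                                       void (*eval)(float *vertex, float u),
                                       float u)
    {
       memcpy(scratch, vertex, vertex_size_words * sizeof(float));
       eval(vertex, u);
       memcpy(vertex, scratch, vertex_size_words * sizeof(float));
    }
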
818 struct vbo_exec_context *exec = &vbo->exec;
841 if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
842 vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);
844 i = exec->vtx.prim_count++;
845 exec->vtx.mode[i] = mode;
846 exec->vtx.draw[i].start = exec->vtx.vert_count;
847 exec->vtx.markers[i].begin = 1;
873 try_vbo_merge(struct vbo_exec_context *exec)
875 unsigned cur = exec->vtx.prim_count - 1;
877 assert(exec->vtx.prim_count >= 1);
879 vbo_try_prim_conversion(&exec->vtx.mode[cur], &exec->vtx.draw[cur].count);
881 if (exec->vtx.prim_count >= 2) {
882 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
886 exec->vtx.mode[prev],
887 exec->vtx.mode[cur],
888 exec->vtx.draw[prev].start,
889 exec->vtx.draw[cur].start,
890 &exec->vtx.draw[prev].count,
891 exec->vtx.draw[cur].count,
893 &exec->vtx.markers[prev].end,
894 exec->vtx.markers[cur].begin,
895 exec->vtx.markers[cur].end))
896 exec->vtx.prim_count--; /* drop the last primitive */
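
try_vbo_merge() (873-896) first gives vbo_try_prim_conversion() a chance to rewrite the just-closed primitive's mode and count into a form that merges more readily, then asks vbo_merge_draws() whether the newest draw can be folded into the previous one; on success the primitive count is simply decremented (896). A sketch of that control flow with both predicates reduced to stand-ins:

    #include <stdbool.h>

    struct demo_draw { unsigned start, count; };

    struct merge_demo {
       unsigned mode[64];          /* GL_POINTS, GL_TRIANGLES, ... */
       struct demo_draw draw[64];
       unsigned prim_count;
    };

    /* Stand-in for vbo_try_prim_conversion(): may rewrite mode/count. */
    static void demo_try_prim_conversion(unsigned *mode, unsigned *count)
    {
       (void)mode;
       (void)count;
    }

    /* Very simplified notion of "mergeable"; the real vbo_merge_draws()
     * also looks at the start offsets and the begin/end markers. */
    static bool demo_modes_can_merge(unsigned prev_mode, unsigned cur_mode)
    {
       return prev_mode == cur_mode;
    }

    /* Fold the newest draw into the previous one when possible, so the
     * driver sees one larger draw instead of two small ones. */
    static void demo_try_merge(struct merge_demo *m)
    {
       unsigned cur = m->prim_count - 1;

       demo_try_prim_conversion(&m->mode[cur], &m->draw[cur].count);

       if (m->prim_count >= 2) {
          unsigned prev = cur - 1;

          if (demo_modes_can_merge(m->mode[prev], m->mode[cur]) &&
              m->draw[prev].start + m->draw[prev].count == m->draw[cur].start) {
             m->draw[prev].count += m->draw[cur].count;
             m->prim_count--;   /* drop the last primitive */
          }
       }
    }
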
908 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
928 if (exec->vtx.prim_count > 0) {
930 unsigned last = exec->vtx.prim_count - 1;
931 struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
932 unsigned count = exec->vtx.vert_count - last_draw->start;
935 exec->vtx.markers[last].end = 1;
946 if (exec->vtx.mode[last] == GL_LINE_LOOP &&
947 exec->vtx.markers[last].begin == 0) {
951 const fi_type *src = exec->vtx.buffer_map +
952 last_draw->start * exec->vtx.vertex_size;
953 fi_type *dst = exec->vtx.buffer_map +
954 exec->vtx.vert_count * exec->vtx.vertex_size;
957 memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));
961 exec->vtx.mode[last] = GL_LINE_STRIP;
966 exec->vtx.vert_count++;
967 exec->vtx.buffer_ptr += exec->vtx.vertex_size;
970 try_vbo_merge(exec);
975 if (exec->vtx.prim_count == VBO_MAX_PRIM)
976 vbo_exec_vtx_flush(exec);
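
Taken together, the glBegin matches (818-847) and the glEnd matches (908-976) are the primitive bookkeeping around the vertex stores: Begin opens a record with the mode, the current vertex index and a begin marker (after flushing stored vertices if attributes were written without a position, 841-842); End derives the vertex count, sets the end marker, rewrites a GL_LINE_LOOP whose glBegin landed in an earlier buffer into a GL_LINE_STRIP by appending a copy of the draw's first vertex (946-961), hands the draw to try_vbo_merge(), and flushes once VBO_MAX_PRIM records have accumulated (975-976). A self-contained sketch of just the open/close bookkeeping, leaving the line-loop and merge details out:

    #define DEMO_MAX_PRIM 64   /* stand-in for VBO_MAX_PRIM */

    struct demo_prim {
       unsigned mode;             /* GL_TRIANGLES, GL_LINE_LOOP, ... */
       unsigned start, count;     /* vertex range within the buffer  */
       unsigned char begin, end;  /* did glBegin/glEnd happen here?  */
    };

    struct prim_demo {
       struct demo_prim prim[DEMO_MAX_PRIM];
       unsigned prim_count;
       unsigned vert_count;       /* vertices emitted so far */
    };

    /* Flush stand-in: submit the accumulated draws, then reset. */
    static void prim_demo_flush(struct prim_demo *p)
    {
       p->prim_count = 0;
       p->vert_count = 0;
    }

    /* glBegin: open a primitive starting at the current vertex. */
    static void prim_demo_begin(struct prim_demo *p, unsigned mode)
    {
       unsigned i = p->prim_count++;

       p->prim[i].mode  = mode;
       p->prim[i].start = p->vert_count;
       p->prim[i].begin = 1;
       p->prim[i].end   = 0;
    }

    /* glEnd: close it, record the count, flush if the array is full. */
    static void prim_demo_end(struct prim_demo *p)
    {
       unsigned last = p->prim_count - 1;

       p->prim[last].count = p->vert_count - p->prim[last].start;
       p->prim[last].end   = 1;

       if (p->prim_count == DEMO_MAX_PRIM)
          prim_demo_flush(p);
    }
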
1094 vbo_reset_all_attr(struct vbo_exec_context *exec)
1096 while (exec->vtx.enabled) {
1097 const int i = u_bit_scan64(&exec->vtx.enabled);
1100 exec->vtx.attr[i].size = 0;
1101 exec->vtx.attr[i].type = GL_FLOAT;
1102 exec->vtx.attr[i].active_size = 0;
1103 exec->vtx.attrptr[i] = NULL;
1106 exec->vtx.vertex_size = 0;
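
vbo_reset_all_attr() (1094-1106) walks the enabled-attribute bitmask with u_bit_scan64(), which pops the lowest set bit on each iteration, clears each attribute it finds, and finally zeroes the vertex size. A tiny self-contained version of that bit-scan pattern (a GCC/Clang builtin stands in for the scan):

    #include <stdint.h>

    /* Pop the lowest set bit of *mask and return its index; this is
     * effectively what Mesa's u_bit_scan64() helper does. */
    static int demo_bit_scan64(uint64_t *mask)
    {
       int i = __builtin_ctzll(*mask);   /* *mask must be non-zero */
       *mask &= *mask - 1;
       return i;
    }

    struct demo_attr_state { int size, active_size; unsigned type; };

    static void demo_reset_all_attrs(uint64_t *enabled,
                                     struct demo_attr_state attr[64],
                                     unsigned *vertex_size)
    {
       while (*enabled) {
          const int i = demo_bit_scan64(enabled);

          attr[i].size = 0;
          attr[i].active_size = 0;
          attr[i].type = 0x1406;   /* GL_FLOAT */
       }

       *vertex_size = 0;
    }
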
1111 vbo_exec_vtx_init(struct vbo_exec_context *exec)
1113 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
1115 exec->vtx.bufferobj = _mesa_bufferobj_alloc(ctx, IMM_BUFFER_NAME);
1117 exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
1118 vbo_reset_all_attr(exec);
1120 exec->vtx.info.instance_count = 1;
1121 exec->vtx.info.max_index = ~0;
1126 vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
1129 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
1133 if (exec->vtx.buffer_map) {
1134 assert(!exec->vtx.bufferobj ||
1135 exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
1136 if (!exec->vtx.bufferobj) {
1137 align_free(exec->vtx.buffer_map);
1138 exec->vtx.buffer_map = NULL;
1139 exec->vtx.buffer_ptr = NULL;
1145 if (exec->vtx.bufferobj &&
1146 _mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
1147 _mesa_bufferobj_unmap(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
1149 _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
1167 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1171 exec->flush_call_depth++;
1172 assert(exec->flush_call_depth == 1);
1178 exec->flush_call_depth--;
1179 assert(exec->flush_call_depth == 0);
1185 vbo_exec_FlushVertices_internal(exec, flags);
1188 exec->flush_call_depth--;
1189 assert(exec->flush_call_depth == 0);
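
The closing matches (1167-1189) are the context's FlushVertices hook; flush_call_depth is a debug counter asserting that the hook is never re-entered, since the flush work it triggers (1185) can call back into GL paths that must not flush again. The guard pattern in isolation, with the flush itself abstracted away:

    #include <assert.h>

    struct flush_guard { int depth; };

    /* Debug-only reentrancy check around a flush: bumping the depth on
     * entry and asserting it lets a recursive call trip immediately. */
    static void flush_vertices_guarded(struct flush_guard *g,
                                       void (*do_flush)(void))
    {
       g->depth++;
       assert(g->depth == 1);

       do_flush();   /* the actual flush work */

       g->depth--;
       assert(g->depth == 0);
    }
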