Lines matching defs:mgr

200 u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
353 struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);
355 mgr->caps = *caps;
356 mgr->pipe = pipe;
365 mgr->pc = util_primconvert_create_config(pipe, &cfg);
367 mgr->translate_cache = translate_cache_create();
368 memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));
369 mgr->allowed_vb_mask = u_bit_consecutive(0, mgr->caps.max_vertex_buffers);
371 mgr->has_signed_vb_offset =
375 cso_cache_init(&mgr->cso_cache, pipe);
376 cso_cache_set_delete_cso_callback(&mgr->cso_cache,
379 return mgr;
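
The constructor matches above lean on two small idioms worth calling out: memset with ~0 marks every fallback slot as "unset" (each unsigned entry becomes ~0), and u_bit_consecutive builds the mask of the first max_vertex_buffers slots. A minimal standalone sketch, with u_bit_consecutive re-derived locally so it compiles on its own:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t bit_consecutive(unsigned start, unsigned count)
{
   /* count == 32 must not shift by 32 (undefined behavior in C) */
   if (count == 32)
      return ~0u << start;
   return ((1u << count) - 1) << start;
}

int main(void)
{
   unsigned fallback_vbs[4];
   memset(fallback_vbs, ~0, sizeof(fallback_vbs));    /* every slot "unset" */
   assert(fallback_vbs[0] == ~0u);

   uint32_t allowed_vb_mask = bit_consecutive(0, 16); /* slots 0..15 usable */
   assert(allowed_vb_mask == 0xffffu);
   return 0;
}
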
385 u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr,
388 struct pipe_context *pipe = mgr->pipe;
397 iter = cso_find_state_template(&mgr->cso_cache, hash_key, CSO_VELEMENTS,
403 cso->data = u_vbuf_create_vertex_elements(mgr, velems->count,
406 iter = cso_insert_state(&mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
414 if (ve != mgr->ve)
420 void u_vbuf_set_vertex_elements(struct u_vbuf *mgr,
423 mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, velems);
426 void u_vbuf_set_flatshade_first(struct u_vbuf *mgr, bool flatshade_first)
428 mgr->flatshade_first = flatshade_first;
431 void u_vbuf_unset_vertex_elements(struct u_vbuf *mgr)
433 mgr->ve = NULL;
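
The set_vertex_elements matches implement a classic lookup-or-create CSO cache: hash the element template, reuse the cached driver object on a hit, create and insert on a miss, and only rebind when the resulting object differs from the one already bound (the "ve != mgr->ve" check). A standalone sketch of that pattern, with a toy direct-mapped cache standing in for cso_cache; callers zero-initialize the key so hashing the whole struct is deterministic:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct velems_key { unsigned count; unsigned src_format[8]; };
struct cso_entry  { uint32_t hash; struct velems_key key; void *driver_cso; };

static struct cso_entry cache[64];
static void *bound_cso;             /* plays the role of mgr->ve */

static uint32_t fnv1a(const void *data, size_t size)
{
   const uint8_t *b = data;
   uint32_t h = 2166136261u;
   for (size_t i = 0; i < size; i++) { h ^= b[i]; h *= 16777619u; }
   return h;
}

static void set_vertex_elements(const struct velems_key *key)
{
   uint32_t hash = fnv1a(key, sizeof(*key));
   struct cso_entry *e = &cache[hash % 64];

   /* Miss: create the driver object and cache it (cso_insert_state). */
   if (e->hash != hash || memcmp(&e->key, key, sizeof(*key)) != 0) {
      if (e->driver_cso != bound_cso)
         free(e->driver_cso);       /* evict; Mesa defers this to a delete callback */
      e->hash = hash;
      e->key = *key;
      e->driver_cso = malloc(16);   /* stand-in for u_vbuf_create_vertex_elements */
   }

   /* Bind only on change, mirroring "if (ve != mgr->ve)". */
   if (e->driver_cso != bound_cso)
      bound_cso = e->driver_cso;    /* i.e. pipe->bind_vertex_elements_state(...) */
}
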
436 void u_vbuf_destroy(struct u_vbuf *mgr)
438 struct pipe_screen *screen = mgr->pipe->screen;
443 mgr->pipe->set_vertex_buffers(mgr->pipe, 0, 0, num_vb, false, NULL);
446 pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
448 pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);
450 if (mgr->pc)
451 util_primconvert_destroy(mgr->pc);
453 translate_cache_destroy(mgr->translate_cache);
454 cso_cache_delete(&mgr->cso_cache);
455 FREE(mgr);
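
Together with the constructor matches, the destructor shows the lifetime contract: unbind every slot from the driver before dropping the last buffer references, then tear down primconvert, the translate cache, and the CSO cache. A hedged pairing sketch; u_vbuf_get_caps and u_vbuf_create sit outside the matched lines, so their exact signatures here are assumptions:

#include "util/u_vbuf.h"

static struct u_vbuf *
vbuf_init(struct pipe_context *pipe, struct pipe_screen *screen)
{
   struct u_vbuf_caps caps;
   u_vbuf_get_caps(screen, &caps);   /* assumed signature */
   return u_vbuf_create(pipe, &caps);
}

static void
vbuf_fini(struct u_vbuf *mgr)
{
   /* Per the matches above: unbinds all slots, releases vertex_buffer[]
    * and real_vertex_buffer[], then frees the helper objects and the
    * manager itself. */
   u_vbuf_destroy(mgr);
}
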
459 u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
473 tr = translate_cache_find(mgr->translate_cache, key);
483 vb = &mgr->vertex_buffer[i];
529 map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
548 u_upload_alloc(mgr->pipe->stream_uploader, 0,
558 map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
576 pipe_buffer_unmap(mgr->pipe, transfer);
580 u_upload_alloc(mgr->pipe->stream_uploader,
581 mgr->has_signed_vb_offset ?
600 pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
605 mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
606 mgr->real_vertex_buffer[out_vb].stride = key->output_stride;
609 pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[out_vb]);
610 mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;
611 mgr->real_vertex_buffer[out_vb].is_user_buffer = false;
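
u_vbuf_translate_buffers maps each incompatible input buffer, runs a generated translate function to re-emit the vertices into an upload buffer, and then points real_vertex_buffer[out_vb] at the result with the key's output stride. A standalone sketch of the underlying operation; the real translate functions also convert formats (to the native_format chosen at element-create time), whereas this loop only moves bytes:

#include <stdint.h>
#include <string.h>

/* Copy one vertex attribute from an interleaved source layout into a new
 * output layout, count vertices at a time. */
static void repack_attr(const uint8_t *src, unsigned src_stride, unsigned src_offset,
                        uint8_t *dst, unsigned dst_stride, unsigned dst_offset,
                        unsigned attr_size, unsigned count)
{
   for (unsigned i = 0; i < count; i++) {
      memcpy(dst + i * dst_stride + dst_offset,
             src + i * src_stride + src_offset,
             attr_size);
   }
}
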
617 u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
624 mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask |
625 ~mgr->enabled_vb_mask;
634 mgr->fallback_vbs_mask = 0;
649 mgr->fallback_vbs_mask |= 1 << index;
661 mgr->fallback_vbs_mask = 1 << index;
669 mgr->dirty_real_vb_mask |= 1 << fallback_vbs[type];
673 memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
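
The slot-search matches compute the mask of slots the current draw cannot or does not use (incompatible, unbound), then hand them out bit by bit as fallback destinations, recording each grant in fallback_vbs_mask. A standalone sketch of the scan (Mesa's u_bit_scan does the same job):

#include <stdint.h>
#include <stdio.h>

static int take_free_slot(uint32_t *free_mask)
{
   if (!*free_mask)
      return -1;                            /* no compatible slot left */
   int index = __builtin_ctz(*free_mask);   /* lowest free bit */
   *free_mask &= *free_mask - 1;            /* claim it */
   return index;
}

int main(void)
{
   uint32_t free_mask = 0xfff0;             /* slots 4..15 available */
   printf("%d %d\n", take_free_slot(&free_mask), take_free_slot(&free_mask)); /* 4 5 */
   return 0;
}
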
678 u_vbuf_translate_begin(struct u_vbuf *mgr,
689 const unsigned incompatible_vb_mask = (misaligned | mgr->incompatible_vb_mask) &
690 mgr->ve->used_vb_mask;
709 for (i = 0; i < mgr->ve->count; i++) {
710 unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;
712 if (!mgr->vertex_buffer[vb_index].stride) {
713 if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
718 } else if (mgr->ve->ve[i].instance_divisor) {
719 if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
726 !(mgr->ve->incompatible_elem_mask & (1 << i)) &&
737 if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
743 for (i = 0; i < mgr->ve->count; i++) {
746 enum pipe_format output_format = mgr->ve->native_format[i];
747 unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
750 if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
764 if (mgr->ve->ve[i].src_format != output_format)
776 te->input_format = mgr->ve->ve[i].src_format;
777 te->input_offset = mgr->ve->ve[i].src_offset;
781 if (!mgr->caps.attrib_component_unaligned &&
782 te->output_offset % mgr->ve->component_size[i] != 0) {
783 unsigned aligned = align(te->output_offset, mgr->ve->component_size[i]);
788 k->output_stride += mgr->ve->native_format_size[i] + adjustment;
790 min_alignment[type] = MAX2(min_alignment[type], mgr->ve->component_size[i]);
797 if (!mgr->caps.attrib_component_unaligned)
799 err = u_vbuf_translate_buffers(mgr, &key[type], info, draw,
800 mask[type], mgr->fallback_vbs[type],
808 mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
814 for (i = 0; i < mgr->ve->count; i++) {
818 mgr->fallback_velems.velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
819 mgr->fallback_velems.velems[i].src_format = te->output_format;
820 mgr->fallback_velems.velems[i].src_offset = te->output_offset;
821 mgr->fallback_velems.velems[i].vertex_buffer_index = mgr->fallback_vbs[type];
831 memcpy(&mgr->fallback_velems.velems[i], &mgr->ve->ve[i],
836 mgr->fallback_velems.count = mgr->ve->count;
838 u_vbuf_set_vertex_elements_internal(mgr, &mgr->fallback_velems);
839 mgr->using_translate = TRUE;
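
Within translate_begin, when the hardware cannot fetch misaligned components (attrib_component_unaligned is false), each element's output offset is padded up to a multiple of its component size and the output stride grows by the same adjustment, exactly as the align()/adjustment lines above do. A standalone sketch of that arithmetic:

#include <assert.h>

/* Power-of-two round-up, same contract as Mesa's align(). */
static unsigned align_up(unsigned value, unsigned alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
   unsigned output_offset = 6, component_size = 4;
   unsigned aligned = align_up(output_offset, component_size);
   unsigned adjustment = aligned - output_offset;
   assert(aligned == 8 && adjustment == 2);   /* stride grows by 2 bytes */
   return 0;
}
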
843 static void u_vbuf_translate_end(struct u_vbuf *mgr)
848 mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
849 mgr->using_translate = FALSE;
853 unsigned vb = mgr->fallback_vbs[i];
855 pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL);
856 mgr->fallback_vbs[i] = ~0;
860 mgr->dirty_real_vb_mask |= mgr->fallback_vbs_mask;
861 mgr->fallback_vbs_mask = 0;
865 u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
871 struct pipe_context *pipe = mgr->pipe;
899 format = mgr->caps.format_translation[format];
915 (!mgr->caps.velem_src_offset_unaligned &&
917 (!mgr->caps.attrib_component_unaligned &&
930 if (used_buffers & ~mgr->allowed_vb_mask) {
946 if (!mgr->caps.velem_src_offset_unaligned) {
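
create_vertex_elements classifies each element up front: formats the hardware cannot fetch are remapped through caps.format_translation[], and any element whose source format, offset alignment, or component alignment the hardware rejects gets a bit in an "incompatible" mask that later draws consult. A standalone sketch of the mask construction, using plain unsigned format ids as stand-ins for enum pipe_format:

#include <stdint.h>

static uint32_t build_incompatible_elem_mask(const unsigned *src_format,
                                             const unsigned *native_format,
                                             unsigned count)
{
   uint32_t mask = 0;
   for (unsigned i = 0; i < count; i++) {
      /* Element i must be translated at draw time if the format the app
       * asked for is not what the hardware will actually fetch. */
      if (src_format[i] != native_format[i])
         mask |= 1u << i;
   }
   return mask;
}
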
975 void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
996 struct pipe_context *pipe = mgr->pipe;
999 mgr->dirty_real_vb_mask &= mask;
1002 mgr->user_vb_mask &= mask;
1003 mgr->incompatible_vb_mask &= mask;
1004 mgr->nonzero_stride_vb_mask &= mask;
1005 mgr->enabled_vb_mask &= mask;
1006 mgr->unaligned_vb_mask[0] &= mask;
1007 mgr->unaligned_vb_mask[1] &= mask;
1012 pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
1013 pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]);
1024 struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
1025 struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];
1060 if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
1061 (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
1070 if (!mgr->caps.attrib_component_unaligned) {
1077 if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
1092 pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
1093 pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]);
1098 mgr->user_vb_mask &= mask;
1099 mgr->incompatible_vb_mask &= mask;
1100 mgr->nonzero_stride_vb_mask &= mask;
1101 mgr->enabled_vb_mask &= mask;
1102 mgr->unaligned_vb_mask[0] &= mask;
1103 mgr->unaligned_vb_mask[1] &= mask;
1105 mgr->user_vb_mask |= user_vb_mask;
1106 mgr->incompatible_vb_mask |= incompatible_vb_mask;
1107 mgr->nonzero_stride_vb_mask |= nonzero_stride_vb_mask;
1108 mgr->enabled_vb_mask |= enabled_vb_mask;
1109 mgr->unaligned_vb_mask[0] |= unaligned_vb_mask[0];
1110 mgr->unaligned_vb_mask[1] |= unaligned_vb_mask[1];
1114 mgr->dirty_real_vb_mask |= ~mask;
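
The rebind path above keeps one bitmask per buffer property (user pointer, incompatible, nonzero stride, enabled, unaligned) and updates them all the same way: clear the bits for the slots being rebound, then OR in the freshly computed bits. Note that in the matches, `mask` is already the preserved set (modified slots cleared out), which is why a plain `&=` works. A standalone sketch:

#include <stdint.h>

static void update_slot_mask(uint32_t *state_mask, uint32_t preserved_slots,
                             uint32_t new_bits)
{
   *state_mask &= preserved_slots;   /* forget slots being rebound */
   *state_mask |= new_bits;          /* record their new state */
}
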
1118 get_upload_offset_size(struct u_vbuf *mgr,
1128 if ((1 << vb_index) & mgr->fallback_vbs_mask || !vb->is_user_buffer)
1161 u_vbuf_upload_buffers(struct u_vbuf *mgr,
1166 struct u_vbuf_elements *ve = mgr->ve;
1169 mgr->using_translate ? mgr->fallback_velems.velems : ve->ve;
1172 if ((ve->interleaved_vb_mask & mgr->user_vb_mask) == 0) {
1176 struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
1179 if (!get_upload_offset_size(mgr, vb, ve, velem, index, i, start_vertex,
1184 struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[index];
1185 const uint8_t *ptr = mgr->vertex_buffer[index].buffer.user;
1187 u_upload_data(mgr->pipe->stream_uploader,
1188 mgr->has_signed_vb_offset ? 0 : offset,
1208 struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
1211 if (!get_upload_offset_size(mgr, vb, ve, velem, index, i, start_vertex,
1244 real_vb = &mgr->real_vertex_buffer[i];
1245 ptr = mgr->vertex_buffer[i].buffer.user;
1247 u_upload_data(mgr->pipe->stream_uploader,
1248 mgr->has_signed_vb_offset ? 0 : start,
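
The upload matches feed user vertex arrays through the context's stream uploader. A hedged sketch of the call, using u_upload_data()'s real signature; the alignment of 4 and the offset/size math (from get_upload_offset_size) are taken as given here. The min_out_offset trick matters because pipe_vertex_buffer::buffer_offset is unsigned: when the driver cannot accept negative offsets, asking the uploader for an output offset of at least `offset` guarantees the final subtraction never wraps.

#include <stdbool.h>
#include <stdint.h>
#include "pipe/p_state.h"
#include "util/u_upload_mgr.h"

static void upload_user_vb(struct u_upload_mgr *uploader, bool has_signed_vb_offset,
                           const uint8_t *ptr, unsigned offset, unsigned size,
                           struct pipe_vertex_buffer *real_vb)
{
   u_upload_data(uploader,
                 has_signed_vb_offset ? 0 : offset,   /* min_out_offset */
                 size, 4, ptr + offset,
                 &real_vb->buffer_offset,
                 &real_vb->buffer.resource);
   /* Rebase so that (buffer_offset + offset) lands on the uploaded copy. */
   real_vb->buffer_offset -= offset;
}
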
1260 static boolean u_vbuf_need_minmax_index(const struct u_vbuf *mgr, uint32_t misaligned)
1265 return (mgr->ve->used_vb_mask &
1266 ((mgr->user_vb_mask |
1267 mgr->incompatible_vb_mask |
1269 mgr->ve->incompatible_vb_mask_any) &
1270 mgr->ve->noninstance_vb_mask_any &
1271 mgr->nonzero_stride_vb_mask)) != 0;
1274 static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr, uint32_t misaligned)
1280 return (mgr->ve->used_vb_mask &
1281 (~mgr->user_vb_mask &
1282 ~mgr->incompatible_vb_mask &
1284 mgr->ve->compatible_vb_mask_all &
1285 mgr->ve->noninstance_vb_mask_any &
1286 mgr->nonzero_stride_vb_mask)) != 0;
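
The two predicates above answer related questions with pure mask arithmetic: a CPU min/max index scan is needed only if some vertex buffer that the bound elements actually read (used_vb_mask) advances per vertex (noninstance, nonzero stride) and also needs CPU-side processing (user memory, incompatible format, or misaligned); the "mapping blocks" variant checks the complementary case, where a compatible driver-owned buffer would have to be mapped and could stall. A standalone sketch of the first predicate's shape:

#include <stdbool.h>
#include <stdint.h>

static bool need_minmax_index(uint32_t used, uint32_t needs_cpu,
                              uint32_t noninstance, uint32_t nonzero_stride)
{
   /* One bit surviving all four masks means at least one such buffer. */
   return (used & needs_cpu & noninstance & nonzero_stride) != 0;
}
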
1399 static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
1401 struct pipe_context *pipe = mgr->pipe;
1404 start_slot = ffs(mgr->dirty_real_vb_mask) - 1;
1405 count = util_last_bit(mgr->dirty_real_vb_mask >> start_slot);
1407 if (mgr->dirty_real_vb_mask == mgr->enabled_vb_mask &&
1408 mgr->dirty_real_vb_mask == mgr->user_vb_mask) {
1414 mgr->real_vertex_buffer + start_slot);
1418 assert(!mgr->real_vertex_buffer[start_slot + i].is_user_buffer);
1419 mgr->real_vertex_buffer[start_slot + i].buffer.resource = NULL;
1424 mgr->real_vertex_buffer + start_slot);
1426 mgr->dirty_real_vb_mask = 0;
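
The flush above binds only the dirty span: ffs() finds the first dirty slot and util_last_bit() on the shifted mask gives the count, so one set_vertex_buffers call covers slots [start, start + count), holes included. A standalone sketch of that range computation:

#include <assert.h>
#include <stdint.h>
#include <strings.h>

static void dirty_range(uint32_t dirty_mask, unsigned *start, unsigned *count)
{
   assert(dirty_mask);
   *start = ffs(dirty_mask) - 1;
   *count = 32 - __builtin_clz(dirty_mask >> *start);   /* util_last_bit */
}

int main(void)
{
   unsigned start, count;
   dirty_range(0x58, &start, &count);   /* bits 3, 4, 6 dirty */
   assert(start == 3 && count == 4);    /* slots 3..6, hole at 5 */
   return 0;
}
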
1430 u_vbuf_split_indexed_multidraw(struct u_vbuf *mgr, struct pipe_draw_info *info,
1453 u_vbuf_draw_vbo(mgr, info, drawid_offset, NULL, &draw, 1);
1457 void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info,
1463 struct pipe_context *pipe = mgr->pipe;
1468 const uint32_t used_vb_mask = mgr->ve->used_vb_mask;
1469 uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask;
1475 if (!mgr->caps.attrib_component_unaligned) {
1476 for (unsigned i = 0; i < ARRAY_SIZE(mgr->unaligned_vb_mask); i++) {
1477 misaligned |= mgr->ve->vb_align_mask[i] & mgr->unaligned_vb_mask[i];
1481 (mgr->incompatible_vb_mask | misaligned) & used_vb_mask;
1485 !mgr->ve->incompatible_elem_mask &&
1487 (info->index_size != 1 || !mgr->caps.rewrite_ubyte_ibs) &&
1490 !mgr->caps.rewrite_restart_index) &&
1491 (!info->primitive_restart || mgr->caps.supported_restart_modes & BITFIELD_BIT(info->mode)) &&
1492 mgr->caps.supported_prim_modes & BITFIELD_BIT(info->mode)) {
1495 if (mgr->dirty_real_vb_mask & used_vb_mask) {
1496 u_vbuf_set_driver_vertex_buffers(mgr);
1550 mgr->ve->incompatible_elem_mask) {
1551 u_vbuf_split_indexed_multidraw(mgr, &new_info, drawid_offset, data,
1567 u_vbuf_split_indexed_multidraw(mgr, &new_info, drawid_offset, data,
1679 if (u_vbuf_need_minmax_index(mgr, misaligned)) {
1686 u_vbuf_get_minmax_index(mgr->pipe, &new_info, &new_draw,
1702 !u_vbuf_mapping_vertex_buffer_blocks(mgr, misaligned)) {
1704 user_vb_mask &= ~(mgr->nonzero_stride_vb_mask &
1705 mgr->ve->noninstance_vb_mask_any);
1722 mgr->ve->incompatible_elem_mask) {
1723 if (!u_vbuf_translate_begin(mgr, &new_info, &new_draw,
1740 mgr->ve->incompatible_vb_mask_all);
1745 if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
1752 mgr->dirty_real_vb_mask |= user_vb_mask;
1764 for (i = 0; i < mgr->nr_vertex_buffers; i++) {
1766 util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
1769 for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
1771 util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
1777 if (mgr->dirty_real_vb_mask)
1778 u_vbuf_set_driver_vertex_buffers(mgr);
1780 if ((new_info.index_size == 1 && mgr->caps.rewrite_ubyte_ibs) ||
1782 ((new_info.restart_index != fixed_restart_index && mgr->caps.rewrite_restart_index) ||
1783 !(mgr->caps.supported_restart_modes & BITFIELD_BIT(new_info.mode)))) ||
1784 !(mgr->caps.supported_prim_modes & BITFIELD_BIT(new_info.mode))) {
1785 util_primconvert_save_flatshade_first(mgr->pc, mgr->flatshade_first);
1786 util_primconvert_draw_vbo(mgr->pc, &new_info, drawid_offset, indirect, &new_draw, 1);
1791 if (mgr->using_translate) {
1792 u_vbuf_translate_end(mgr);
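
One of the jobs delegated to util_primconvert above is rewriting 8-bit index buffers for hardware that lacks them (caps.rewrite_ubyte_ibs). A standalone sketch of the widening itself; primitive-restart remapping, which the real path must also handle, is omitted:

#include <stdint.h>

static void widen_ubyte_indices(const uint8_t *src, uint16_t *dst, unsigned count)
{
   for (unsigned i = 0; i < count; i++)
      dst[i] = src[i];   /* zero-extend each index to 16 bits */
}
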
1803 void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
1805 assert(!mgr->ve_saved);
1806 mgr->ve_saved = mgr->ve;
1809 void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
1811 if (mgr->ve != mgr->ve_saved) {
1812 struct pipe_context *pipe = mgr->pipe;
1814 mgr->ve = mgr->ve_saved;
1816 mgr->ve ? mgr->ve->driver_cso : NULL);
1818 mgr->ve_saved = NULL;
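
The save/restore pair exists for meta operations that temporarily bind their own vertex elements. A hedged usage sketch; the entry points are real (they appear in the matches), while blit_velems and the draw in between are illustrative:

#include "util/u_vbuf.h"

static void run_meta_blit(struct u_vbuf *mgr,
                          const struct cso_velems_state *blit_velems)
{
   u_vbuf_save_vertex_elements(mgr);              /* asserts nothing saved yet */
   u_vbuf_set_vertex_elements(mgr, blit_velems);  /* hypothetical meta-op state */
   /* ... issue the internal draw here ... */
   u_vbuf_restore_vertex_elements(mgr);           /* rebinds only if ve changed */
}
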