/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vn_cs.h"

#include "vn_instance.h"
#include "vn_renderer.h"

struct vn_cs_renderer_protocol_info _vn_cs_renderer_protocol_info = {
   .mutex = _SIMPLE_MTX_INITIALIZER_NP,
};

static void
vn_cs_renderer_protocol_info_init_once(struct vn_instance *instance)
{
   const struct vn_renderer_info *renderer_info = &instance->renderer->info;
   /* assume renderer protocol supports all extensions if bit 0 is not set */
   const bool support_all_exts =
      !vn_info_extension_mask_test(renderer_info->vk_extension_mask, 0);

   _vn_cs_renderer_protocol_info.api_version = renderer_info->vk_xml_version;

   STATIC_ASSERT(sizeof(renderer_info->vk_extension_mask) >=
                 sizeof(_vn_cs_renderer_protocol_info.extension_bitset));

   for (uint32_t i = 1; i <= VN_INFO_EXTENSION_MAX_NUMBER; i++) {
      /* use the protocol helper to ensure mask decoding matches encoding */
      if (support_all_exts ||
          vn_info_extension_mask_test(renderer_info->vk_extension_mask, i))
         BITSET_SET(_vn_cs_renderer_protocol_info.extension_bitset, i);
   }
}

/**
 * Initialize the global renderer protocol info exactly once, guarded by its
 * mutex.
 */
void
vn_cs_renderer_protocol_info_init(struct vn_instance *instance)
{
   simple_mtx_lock(&_vn_cs_renderer_protocol_info.mutex);
   if (_vn_cs_renderer_protocol_info.init_once) {
      simple_mtx_unlock(&_vn_cs_renderer_protocol_info.mutex);
      return;
   }

   vn_cs_renderer_protocol_info_init_once(instance);

   _vn_cs_renderer_protocol_info.init_once = true;
   simple_mtx_unlock(&_vn_cs_renderer_protocol_info.mutex);
}

static void
vn_cs_encoder_sanity_check(struct vn_cs_encoder *enc)
{
   assert(enc->buffer_count <= enc->buffer_max);

   size_t total_committed_size = 0;
   for (uint32_t i = 0; i < enc->buffer_count; i++)
      total_committed_size += enc->buffers[i].committed_size;
   assert(enc->total_committed_size == total_committed_size);

   if (enc->buffer_count) {
      const struct vn_cs_encoder_buffer *cur_buf =
         &enc->buffers[enc->buffer_count - 1];
      assert(cur_buf->base <= enc->cur && enc->cur <= enc->end &&
             enc->end <= cur_buf->base + enc->current_buffer_size);
      if (cur_buf->committed_size)
         assert(enc->cur == enc->end);
   } else {
      assert(!enc->current_buffer_size);
      assert(!enc->cur && !enc->end);
   }
}

static void
vn_cs_encoder_add_buffer(struct vn_cs_encoder *enc,
                         struct vn_renderer_shmem *shmem,
                         size_t offset,
                         void *base,
                         size_t size)
{
   /* add a buffer and make it current */
   assert(enc->buffer_count < enc->buffer_max);
   struct vn_cs_encoder_buffer *cur_buf = &enc->buffers[enc->buffer_count++];
   /* shmem ownership transferred */
   cur_buf->shmem = shmem;
   cur_buf->offset = offset;
   cur_buf->base = base;
   cur_buf->committed_size = 0;

   /* update the write pointer */
   enc->cur = base;
   enc->end = base + size;
}

static void
vn_cs_encoder_commit_buffer(struct vn_cs_encoder *enc)
{
   assert(enc->buffer_count);
   struct vn_cs_encoder_buffer *cur_buf =
      &enc->buffers[enc->buffer_count - 1];
   const size_t written_size = enc->cur - cur_buf->base;
   if (cur_buf->committed_size) {
      assert(cur_buf->committed_size == written_size);
   } else {
      cur_buf->committed_size = written_size;
      enc->total_committed_size += written_size;
   }
}

static void
vn_cs_encoder_gc_buffers(struct vn_cs_encoder *enc)
{
   /* when the shmem pool is used, no need to cache the shmem in cs */
   if (enc->storage_type == VN_CS_ENCODER_STORAGE_SHMEM_POOL) {
      for (uint32_t i = 0; i < enc->buffer_count; i++) {
         vn_renderer_shmem_unref(enc->instance->renderer,
                                 enc->buffers[i].shmem);
      }

      enc->buffer_count = 0;
      enc->total_committed_size = 0;
      enc->current_buffer_size = 0;

      enc->cur = NULL;
      enc->end = NULL;

      return;
   }

   /* free all but the current buffer */
   assert(enc->buffer_count);
   struct vn_cs_encoder_buffer *cur_buf =
      &enc->buffers[enc->buffer_count - 1];
   for (uint32_t i = 0; i < enc->buffer_count - 1; i++)
      vn_renderer_shmem_unref(enc->instance->renderer, enc->buffers[i].shmem);

   /* move the current buffer to the beginning, skipping the used part */
   const size_t used = cur_buf->offset + cur_buf->committed_size;
   enc->buffer_count = 0;
   vn_cs_encoder_add_buffer(enc, cur_buf->shmem, used,
                            cur_buf->base + cur_buf->committed_size,
                            enc->current_buffer_size - used);

   enc->total_committed_size = 0;
}

/**
 * Initialize a cs.  No buffer is allocated until the first reserve.
 */
void
vn_cs_encoder_init(struct vn_cs_encoder *enc,
                   struct vn_instance *instance,
                   enum vn_cs_encoder_storage_type storage_type,
                   size_t min_size)
{
   /* VN_CS_ENCODER_INITIALIZER* should be used instead */
   assert(storage_type != VN_CS_ENCODER_STORAGE_POINTER);

   memset(enc, 0, sizeof(*enc));
   enc->instance = instance;
   enc->storage_type = storage_type;
   enc->min_buffer_size = min_size;
}

/**
 * Free all buffers owned by a cs.
 */
void
vn_cs_encoder_fini(struct vn_cs_encoder *enc)
{
   if (unlikely(enc->storage_type == VN_CS_ENCODER_STORAGE_POINTER))
      return;

   for (uint32_t i = 0; i < enc->buffer_count; i++)
      vn_renderer_shmem_unref(enc->instance->renderer, enc->buffers[i].shmem);
   if (enc->buffers)
      free(enc->buffers);
}

/**
 * Reset a cs for reuse.
 */
void
vn_cs_encoder_reset(struct vn_cs_encoder *enc)
{
   /* enc->error is sticky */
   if (likely(enc->buffer_count))
      vn_cs_encoder_gc_buffers(enc);
}

static uint32_t
next_array_size(uint32_t cur_size, uint32_t min_size)
{
   const uint32_t next_size = cur_size ? cur_size * 2 : min_size;
   return next_size > cur_size ? next_size : 0;
}

static size_t
next_buffer_size(size_t cur_size, size_t min_size, size_t need)
{
   size_t next_size = cur_size ? cur_size * 2 : min_size;
   while (next_size < need) {
      next_size *= 2;
      if (!next_size)
         return 0;
   }
   return next_size;
}

static bool
vn_cs_encoder_grow_buffer_array(struct vn_cs_encoder *enc)
{
   const uint32_t buf_max = next_array_size(enc->buffer_max, 4);
   if (!buf_max)
      return false;

   void *bufs = realloc(enc->buffers, sizeof(*enc->buffers) * buf_max);
   if (!bufs)
      return false;

   enc->buffers = bufs;
   enc->buffer_max = buf_max;

   return true;
}

/**
 * Add a new vn_cs_encoder_buffer to a cs.
 */
bool
vn_cs_encoder_reserve_internal(struct vn_cs_encoder *enc, size_t size)
{
   VN_TRACE_FUNC();
   if (unlikely(enc->storage_type == VN_CS_ENCODER_STORAGE_POINTER))
      return false;

   if (enc->buffer_count >= enc->buffer_max) {
      if (!vn_cs_encoder_grow_buffer_array(enc))
         return false;
      assert(enc->buffer_count < enc->buffer_max);
   }

   size_t buf_size = 0;
   if (likely(enc->buffer_count)) {
      vn_cs_encoder_commit_buffer(enc);

      if (enc->storage_type == VN_CS_ENCODER_STORAGE_SHMEM_ARRAY) {
         /* if the current buffer is reused from the last vn_cs_encoder_reset
          * (i.e., offset != 0), do not double the size
          *
          * TODO better strategy to grow buffer size
          */
         const struct vn_cs_encoder_buffer *cur_buf =
            &enc->buffers[enc->buffer_count - 1];
         if (cur_buf->offset)
            buf_size = next_buffer_size(0, enc->current_buffer_size, size);
      }
   }

   if (!buf_size) {
      /* double the size */
      buf_size = next_buffer_size(enc->current_buffer_size,
                                  enc->min_buffer_size, size);
      if (!buf_size)
         return false;
   }

   struct vn_renderer_shmem *shmem;
   size_t buf_offset;
   if (enc->storage_type == VN_CS_ENCODER_STORAGE_SHMEM_ARRAY) {
      shmem = vn_renderer_shmem_create(enc->instance->renderer, buf_size);
      buf_offset = 0;
   } else {
      assert(enc->storage_type == VN_CS_ENCODER_STORAGE_SHMEM_POOL);
      shmem =
         vn_instance_cs_shmem_alloc(enc->instance, buf_size, &buf_offset);
   }
   if (!shmem)
      return false;

   if (unlikely(!enc->instance->renderer->info.supports_blob_id_0)) {
      uint32_t roundtrip;
      VkResult result =
         vn_instance_submit_roundtrip(enc->instance, &roundtrip);
      if (result != VK_SUCCESS) {
         vn_renderer_shmem_unref(enc->instance->renderer, shmem);
         return false;
      }

      enc->current_buffer_roundtrip = roundtrip;
   }

   vn_cs_encoder_add_buffer(enc, shmem, buf_offset,
                            shmem->mmap_ptr + buf_offset, buf_size);
   enc->current_buffer_size = buf_size;

   vn_cs_encoder_sanity_check(enc);

   return true;
}

/**
 * Commit written data.
 */
void
vn_cs_encoder_commit(struct vn_cs_encoder *enc)
{
   if (likely(enc->buffer_count)) {
      vn_cs_encoder_commit_buffer(enc);

      /* trigger the slow path on next vn_cs_encoder_reserve */
      enc->end = enc->cur;
   }

   vn_cs_encoder_sanity_check(enc);
}
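
/*
 * Typical caller-side flow, as a rough sketch.  The fast-path reserve helper
 * and the generated protocol encoders live outside this file, so the names
 * vn_cs_encoder_reserve() and vn_encode_vkSomeCommand() below are assumed
 * here only for illustration:
 *
 *    struct vn_cs_encoder enc;
 *    vn_cs_encoder_init(&enc, instance, VN_CS_ENCODER_STORAGE_SHMEM_POOL,
 *                       4 * 1024);
 *
 *    if (vn_cs_encoder_reserve(&enc, cmd_size))
 *       vn_encode_vkSomeCommand(&enc, ...);
 *    vn_cs_encoder_commit(&enc);
 *
 *    // submit the committed buffers, then recycle or destroy the encoder
 *    vn_cs_encoder_reset(&enc);
 *    vn_cs_encoder_fini(&enc);
 */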