/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vn_renderer_util.h"

VkResult
vn_renderer_submit_simple_sync(struct vn_renderer *renderer,
                               const void *cs_data,
                               size_t cs_size)
{
   struct vn_renderer_sync *sync;
   VkResult result =
      vn_renderer_sync_create(renderer, 0, VN_RENDERER_SYNC_BINARY, &sync);
   if (result != VK_SUCCESS)
      return result;

   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
            .sync_queue_cpu = true,
            .syncs = &sync,
            .sync_values = &(const uint64_t){ 1 },
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   const struct vn_renderer_wait wait = {
      .timeout = UINT64_MAX,
      .syncs = &sync,
      .sync_values = &(const uint64_t){ 1 },
      .sync_count = 1,
   };

   result = vn_renderer_submit(renderer, &submit);
   if (result == VK_SUCCESS)
      result = vn_renderer_wait(renderer, &wait);

   vn_renderer_sync_destroy(renderer, sync);

   return result;
}

void
vn_renderer_shmem_pool_init(UNUSED struct vn_renderer *renderer,
                            struct vn_renderer_shmem_pool *pool,
                            size_t min_alloc_size)
{
   *pool = (struct vn_renderer_shmem_pool){
      /* power-of-two to hit shmem cache */
      .min_alloc_size = util_next_power_of_two(min_alloc_size),
   };
}

void
vn_renderer_shmem_pool_fini(struct vn_renderer *renderer,
                            struct vn_renderer_shmem_pool *pool)
{
   if (pool->shmem)
      vn_renderer_shmem_unref(renderer, pool->shmem);
}

static bool
vn_renderer_shmem_pool_grow(struct vn_renderer *renderer,
                            struct vn_renderer_shmem_pool *pool,
                            size_t size)
{
   VN_TRACE_FUNC();
   /* power-of-two to hit shmem cache */
   size_t alloc_size = pool->min_alloc_size;
   while (alloc_size < size) {
      alloc_size <<= 1;
      if (!alloc_size)
         return false;
   }

   struct vn_renderer_shmem *shmem =
      vn_renderer_shmem_create(renderer, alloc_size);
   if (!shmem)
      return false;

   if (pool->shmem)
      vn_renderer_shmem_unref(renderer, pool->shmem);

   pool->shmem = shmem;
   pool->size = alloc_size;
   pool->used = 0;

   return true;
}

struct vn_renderer_shmem *
vn_renderer_shmem_pool_alloc(struct vn_renderer *renderer,
                             struct vn_renderer_shmem_pool *pool,
                             size_t size,
                             size_t *out_offset)
{
   if (unlikely(size > pool->size - pool->used)) {
      if (!vn_renderer_shmem_pool_grow(renderer, pool, size))
         return NULL;

      assert(size <= pool->size - pool->used);
   }

   struct vn_renderer_shmem *shmem =
      vn_renderer_shmem_ref(renderer, pool->shmem);
   *out_offset = pool->used;
   pool->used += size;

   return shmem;
}