/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_query_pool.h"

#include "venus-protocol/vn_protocol_driver_query_pool.h"

#include "vn_device.h"

/* query pool commands */

VkResult
vn_CreateQueryPool(VkDevice device,
                   const VkQueryPoolCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkQueryPool *pQueryPool)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_query_pool *pool =
      vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&pool->base, VK_OBJECT_TYPE_QUERY_POOL, &dev->base);

   pool->allocator = *alloc;

   /* result_array_size is the number of result values each query writes,
    * excluding the optional availability word
    */
   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      pool->result_array_size = 1;
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      pool->result_array_size =
         util_bitcount(pCreateInfo->pipelineStatistics);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      pool->result_array_size = 1;
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      /* primitives written and primitives needed */
      pool->result_array_size = 2;
      break;
   default:
      unreachable("bad query type");
      break;
   }

   VkQueryPool pool_handle = vn_query_pool_to_handle(pool);
   vn_async_vkCreateQueryPool(dev->instance, device, pCreateInfo, NULL,
                              &pool_handle);

   *pQueryPool = pool_handle;

   return VK_SUCCESS;
}

void
vn_DestroyQueryPool(VkDevice device,
                    VkQueryPool queryPool,
                    const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
   const VkAllocationCallbacks *alloc;

   if (!pool)
      return;

   alloc = pAllocator ? pAllocator : &pool->allocator;

   vn_async_vkDestroyQueryPool(dev->instance, device, queryPool, NULL);

   vn_object_base_fini(&pool->base);
   vk_free(alloc, pool);
}

void
vn_ResetQueryPool(VkDevice device,
                  VkQueryPool queryPool,
                  uint32_t firstQuery,
                  uint32_t queryCount)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkResetQueryPool(dev->instance, device, queryPool, firstQuery,
                             queryCount);
}
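/* Results are requested from the renderer in a tightly packed layout:
 * result_array_size values per query, plus one availability word when
 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set or implied.  When results
 * are always written (WAIT or PARTIAL) and the packed stride happens to
 * match the caller's stride, the renderer can write into pData directly;
 * otherwise the results are unpacked from a temporary allocation.
 */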
VkResult
vn_GetQueryPoolResults(VkDevice device,
                       VkQueryPool queryPool,
                       uint32_t firstQuery,
                       uint32_t queryCount,
                       size_t dataSize,
                       void *pData,
                       VkDeviceSize stride,
                       VkQueryResultFlags flags)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   const size_t result_width = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
   const size_t result_size = pool->result_array_size * result_width;
   const bool result_always_written =
      flags & (VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_PARTIAL_BIT);

   /* Without WAIT or PARTIAL, the renderer may leave not-ready results
    * unwritten.  Request availability so the copy-back below knows which
    * queries were actually written.
    */
   VkQueryResultFlags packed_flags = flags;
   size_t packed_stride = result_size;
   if (!result_always_written)
      packed_flags |= VK_QUERY_RESULT_WITH_AVAILABILITY_BIT;
   if (packed_flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
      packed_stride += result_width;

   const size_t packed_size = packed_stride * queryCount;
   void *packed_data;
   if (result_always_written && packed_stride == stride) {
      packed_data = pData;
   } else {
      packed_data = vk_alloc(alloc, packed_size, VN_DEFAULT_ALIGN,
                             VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!packed_data)
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   /* TODO the renderer should transparently vkCmdCopyQueryPoolResults to
    * coherent memory such that we can memcpy from it to avoid this
    * serialized round trip
    */
   VkResult result = vn_call_vkGetQueryPoolResults(
      dev->instance, device, queryPool, firstQuery, queryCount, packed_size,
      packed_data, packed_stride, packed_flags);

   if (packed_data == pData)
      return vn_result(dev->instance, result);

   /* per-query size to copy back, including the availability word when the
    * caller asked for it
    */
   const size_t copy_size =
      result_size +
      (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT ? result_width : 0);
   const void *src = packed_data;
   void *dst = pData;
   if (result == VK_SUCCESS) {
      for (uint32_t i = 0; i < queryCount; i++) {
         memcpy(dst, src, copy_size);
         src += packed_stride;
         dst += stride;
      }
   } else if (result == VK_NOT_READY) {
      assert(!result_always_written &&
             (packed_flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
      /* Copy back only the available results.  For unavailable queries,
       * write a zero availability word when the caller asked for one and
       * leave the result values untouched.
       */
      if (flags & VK_QUERY_RESULT_64_BIT) {
         for (uint32_t i = 0; i < queryCount; i++) {
            const bool avail = *(const uint64_t *)(src + result_size);
            if (avail)
               memcpy(dst, src, copy_size);
            else if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
               *(uint64_t *)(dst + result_size) = 0;

            src += packed_stride;
            dst += stride;
         }
      } else {
         for (uint32_t i = 0; i < queryCount; i++) {
            const bool avail = *(const uint32_t *)(src + result_size);
            if (avail)
               memcpy(dst, src, copy_size);
            else if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
               *(uint32_t *)(dst + result_size) = 0;

            src += packed_stride;
            dst += stride;
         }
      }
   }

   vk_free(alloc, packed_data);
   return vn_result(dev->instance, result);
}
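/* Illustrative example of the packed layout above: a pipeline statistics
 * pool with two enabled counters, queried with VK_QUERY_RESULT_64_BIT |
 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, is requested from the renderer
 * as, per query:
 *
 *    | stat0 (8 bytes) | stat1 (8 bytes) | availability (8 bytes) |
 *
 * i.e. result_width = 8, result_size = 16, and packed_stride = 24, while
 * pData is filled at whatever stride the application requested.
 */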