Lines Matching refs:pool

96 /* Returns the IOVA of a given uint64_t field in a given slot of a query
97  * pool. */
98 #define query_iova(type, pool, query, field) \
99 pool->bo->iova + pool->stride * (query) + offsetof(type, field)
101 #define occlusion_query_iova(pool, query, field) \
102 query_iova(struct occlusion_query_slot, pool, query, field)
104 #define pipeline_stat_query_iova(pool, query, field) \
105 pool->bo->iova + pool->stride * (query) + \
106 offsetof(struct pipeline_stat_query_slot, field)
108 #define primitive_query_iova(pool, query, field, i) \
109 query_iova(struct primitive_query_slot, pool, query, field) + \
110 offsetof(struct primitive_slot_value, values[i])
112 #define perf_query_iova(pool, query, field, i) \
113 pool->bo->iova + pool->stride * (query) + \
114 sizeof(struct query_slot) + \
115 sizeof(struct perfcntr_query_slot) * (i) + \
116 offsetof(struct perfcntr_query_slot, field)
118 #define primitives_generated_query_iova(pool, query, field) \
119 query_iova(struct primitives_generated_query_slot, pool, query, field)
121 #define query_available_iova(pool, query) \
122 query_iova(struct query_slot, pool, query, available)
124 #define query_result_iova(pool, query, type, i) \
125 pool->bo->iova + pool->stride * (query) + \
126 sizeof(struct query_slot) + sizeof(type) * (i)
128 #define query_result_addr(pool, query, type, i) \
129 pool->bo->map + pool->stride * (query) + \
130 sizeof(struct query_slot) + sizeof(type) * (i)
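
Read together, these macros compute the GPU virtual address (IOVA) of one field inside one query slot: BO base, plus query * stride to reach the slot, plus the field's offset within the slot struct; query_result_addr is the CPU-mapped twin of query_result_iova. A minimal host-side sketch of the same arithmetic, using a hypothetical occlusion-style slot layout (field order here is assumed; the real layouts are defined elsewhere in the driver):

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's slot structs, for
 * illustration only; the field order is an assumption. */
struct query_slot {
   uint64_t available;
};

struct occlusion_query_slot {
   struct query_slot common;
   uint64_t begin;
   uint64_t result;
   uint64_t end;
};

/* Same shape as query_iova(): BO base + slot offset + field offset. */
static uint64_t
slot_field_iova(uint64_t bo_iova, uint32_t stride, uint32_t query,
                size_t field_offset)
{
   return bo_iova + (uint64_t)stride * query + field_offset;
}

int
main(void)
{
   const uint64_t bo_iova = 0x100000; /* assumed BO base address */
   const uint32_t stride = sizeof(struct occlusion_query_slot);

   /* Equivalent of occlusion_query_iova(pool, 3, begin). */
   printf("begin iova of slot 3: 0x%" PRIx64 "\n",
          slot_field_iova(bo_iova, stride, 3,
                          offsetof(struct occlusion_query_slot, begin)));
   return 0;
}
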
172 * Returns a pointer to a given slot in a query pool.
174 static void* slot_address(struct tu_query_pool *pool, uint32_t query)
176 return (char*)pool->bo->map + query * pool->stride;
243 /* Size of the array pool->perf_query_data */
255 struct tu_query_pool *pool =
258 if (!pool)
262 pool->perf_group = fd_perfcntrs(&device->physical_device->dev_id,
263 &pool->perf_group_count);
265 pool->counter_index_count = perf_query_info->counterIndexCount;
275 uint32_t regs[pool->perf_group_count], pass[pool->perf_group_count];
276 memset(regs, 0x00, pool->perf_group_count * sizeof(regs[0]));
277 memset(pass, 0x00, pool->perf_group_count * sizeof(pass[0]));
279 for (uint32_t i = 0; i < pool->counter_index_count; i++) {
282 perfcntr_index(pool->perf_group, pool->perf_group_count,
285 pool->perf_query_data[i].gid = gid;
286 pool->perf_query_data[i].cid = cid;
287 pool->perf_query_data[i].app_idx = i;
292 if (regs[gid] < pool->perf_group[gid].num_counters) {
293 pool->perf_query_data[i].cntr_reg = regs[gid]++;
294 pool->perf_query_data[i].pass = pass[gid];
295 } else {
296 pool->perf_query_data[i].pass = ++pass[gid];
297 pool->perf_query_data[i].cntr_reg = regs[gid] = 0;
298 regs[gid]++;
299 }
305 qsort(pool->perf_query_data, pool->counter_index_count,
306 sizeof(pool->perf_query_data[0]),
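
The if/else above is a greedy multi-pass allocator: each requested counter takes the next free hardware counter register in its group, and once a group's registers are exhausted the group's pass index is bumped and allocation restarts at register 0, so the workload is replayed once per pass; the qsort then orders the requests by pass for submission. A standalone sketch of the allocation (group sizes and the request list are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define NUM_GROUPS 2

/* Hypothetical: group g exposes num_counters[g] hardware registers. */
static const uint32_t num_counters[NUM_GROUPS] = { 4, 2 };

struct assignment {
   uint32_t gid;      /* counter group */
   uint32_t cntr_reg; /* register within the group */
   uint32_t pass;     /* submit pass in which it is sampled */
};

int
main(void)
{
   /* Requested counters, identified here only by group id. */
   const uint32_t requests[] = { 0, 0, 1, 1, 1, 0, 0, 0 };
   const uint32_t n = sizeof(requests) / sizeof(requests[0]);

   uint32_t regs[NUM_GROUPS] = { 0 };
   uint32_t pass[NUM_GROUPS] = { 0 };
   struct assignment out[8];

   for (uint32_t i = 0; i < n; i++) {
      uint32_t gid = requests[i];
      out[i].gid = gid;
      if (regs[gid] < num_counters[gid]) {
         out[i].cntr_reg = regs[gid]++;
         out[i].pass = pass[gid];
      } else {
         /* Group exhausted: start a new pass; this counter takes reg 0. */
         out[i].pass = ++pass[gid];
         out[i].cntr_reg = 0;
         regs[gid] = 1;
      }
      printf("counter %u: group %u reg %u pass %u\n",
             i, gid, out[i].cntr_reg, out[i].pass);
   }
   return 0;
}
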
310 VkResult result = tu_bo_init_new(device, &pool->bo,
313 vk_object_free(&device->vk, pAllocator, pool);
317 result = tu_bo_map(device, pool->bo);
319 tu_bo_finish(device, pool->bo);
320 vk_object_free(&device->vk, pAllocator, pool);
325 memset(pool->bo->map, 0, pool->bo->size);
327 pool->type = pCreateInfo->queryType;
328 pool->stride = slot_size;
329 pool->size = pCreateInfo->queryCount;
330 pool->pipeline_statistics = pCreateInfo->pipelineStatistics;
331 *pQueryPool = tu_query_pool_to_handle(pool);
342 TU_FROM_HANDLE(tu_query_pool, pool, _pool);
344 if (!pool)
347 tu_bo_finish(device, pool->bo);
348 vk_object_free(&device->vk, pAllocator, pool);
352 get_result_count(struct tu_query_pool *pool)
354 switch (pool->type) {
364 return util_bitcount(pool->pipeline_statistics);
366 return pool->counter_index_count;
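
get_result_count() maps a pool's query type to the number of 64-bit values one query yields: one for occlusion and timestamp queries, two for transform feedback queries (primitives written and primitives needed), one per bit set in pipelineStatistics, and one per selected performance counter. A sketch of that mapping (the enum names are placeholders for the Vulkan query types; the driver uses util_bitcount rather than the compiler builtin):

#include <stdint.h>

enum pool_type { OCCLUSION, TIMESTAMP, XFB_STREAM, PRIM_GENERATED,
                 PIPELINE_STATS, PERF };

static uint32_t
result_count(enum pool_type type, uint32_t pipeline_statistics,
             uint32_t counter_index_count)
{
   switch (type) {
   case OCCLUSION:
   case TIMESTAMP:
   case PRIM_GENERATED:
      return 1;                 /* a single counter value */
   case XFB_STREAM:
      return 2;                 /* primitives written + primitives needed */
   case PIPELINE_STATS:
      return __builtin_popcount(pipeline_statistics);
   case PERF:
      return counter_index_count;
   }
   return 0;
}
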
437 wait_for_available(struct tu_device *device, struct tu_query_pool *pool,
443 struct query_slot *slot = slot_address(pool, query);
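
wait_for_available() is a timeout-bounded spin on the slot's available word, which the GPU writes last. A minimal host-side sketch of the pattern, assuming a coherently mapped BO whose slots start with the available flag:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Assumed slot layout: the first word of every slot is the flag. */
struct query_slot {
   volatile uint64_t available;
};

static bool
wait_for_available(struct query_slot *slot, uint64_t timeout_ns)
{
   struct timespec start, now;
   clock_gettime(CLOCK_MONOTONIC, &start);
   while (!slot->available) {
      clock_gettime(CLOCK_MONOTONIC, &now);
      uint64_t elapsed =
         (now.tv_sec - start.tv_sec) * 1000000000ull +
         (now.tv_nsec - start.tv_nsec);
      if (elapsed > timeout_ns)
         return false; /* caller maps this to VK_TIMEOUT / device loss */
   }
   return true;
}
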
469 struct tu_query_pool *pool,
483 struct query_slot *slot = slot_address(pool, query);
485 uint32_t result_count = get_result_count(pool);
486 uint32_t statistics = pool->pipeline_statistics;
489 VkResult wait_result = wait_for_available(device, pool, query);
514 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
516 result = query_result_addr(pool, query, uint64_t, stat_idx);
517 } else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
518 result = query_result_addr(pool, query, struct perfcntr_query_slot, k);
520 result = query_result_addr(pool, query, uint64_t, k);
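
get_query_pool_results() then copies result_count values per query into the caller's buffer, honoring VK_QUERY_RESULT_64_BIT and appending an availability word when VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set. A simplified sketch of the per-query copy (helper names are made up):

#include <stdbool.h>
#include <stdint.h>

/* Write one value to the app buffer in the requested width. */
static void
write_value(void *dst, uint32_t idx, bool result_64bit, uint64_t value)
{
   if (result_64bit)
      ((uint64_t *)dst)[idx] = value;
   else
      ((uint32_t *)dst)[idx] = (uint32_t)value;
}

/* Copy result_count values for one query slot, then the optional
 * availability word (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT). */
static void
copy_one_query(void *dst, const uint64_t *slot_results,
               uint32_t result_count, bool available,
               bool result_64bit, bool with_availability)
{
   for (uint32_t k = 0; k < result_count; k++)
      write_value(dst, k, result_64bit, slot_results[k]);
   if (with_availability)
      write_value(dst, result_count, result_64bit, available);
}
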
562 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
563 assert(firstQuery + queryCount <= pool->size);
568 switch (pool->type) {
575 return get_query_pool_results(device, pool, firstQuery, queryCount,
606 struct tu_query_pool *pool,
627 uint64_t available_iova = query_available_iova(pool, query);
629 uint32_t result_count = get_result_count(pool);
630 uint32_t statistics = pool->pipeline_statistics;
647 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
649 result_iova = query_result_iova(pool, query, uint64_t, stat_idx);
650 } else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
651 result_iova = query_result_iova(pool, query,
654 result_iova = query_result_iova(pool, query, uint64_t, k);
705 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
708 assert(firstQuery + queryCount <= pool->size);
710 switch (pool->type) {
716 return emit_copy_query_pool_results(cmdbuf, cs, pool, firstQuery,
727 struct tu_query_pool *pool,
735 uint32_t statistics = pool->pipeline_statistics;
738 tu_cs_emit_qw(cs, query_available_iova(pool, query));
741 for (uint32_t k = 0; k < get_result_count(pool); k++) {
744 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
746 result_iova = query_result_iova(pool, query, uint64_t, stat_idx);
747 } else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
748 result_iova = query_result_iova(pool, query,
751 result_iova = query_result_iova(pool, query, uint64_t, k);
769 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
771 switch (pool->type) {
778 emit_reset_query_pool(cmdbuf, pool, firstQuery, queryCount);
791 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
794 struct query_slot *slot = slot_address(pool, i + firstQuery);
797 for (uint32_t k = 0; k < get_result_count(pool); k++) {
800 if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
801 res = query_result_addr(pool, i + firstQuery,
804 res = query_result_addr(pool, i + firstQuery, uint64_t, k);
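
tu_ResetQueryPool is the host-side reset path (VK_EXT_host_query_reset): since the BO is CPU-mapped, it clears each slot's available flag and result values directly through slot_address()/query_result_addr(). A sketch under the same assumed slot layout as above:

#include <stdint.h>
#include <string.h>

struct query_slot {
   uint64_t available;
};

static void
host_reset(void *bo_map, uint32_t stride, uint32_t first_query,
           uint32_t query_count, uint32_t result_count)
{
   for (uint32_t i = 0; i < query_count; i++) {
      char *slot = (char *)bo_map + (uint64_t)(first_query + i) * stride;
      ((struct query_slot *)slot)->available = 0;

      /* Results live right after the common header in this sketch. */
      uint64_t *results = (uint64_t *)(slot + sizeof(struct query_slot));
      memset(results, 0, result_count * sizeof(uint64_t));
   }
}
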
814 struct tu_query_pool *pool,
832 uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
846 struct tu_query_pool *pool,
850 uint64_t begin_iova = pipeline_stat_query_iova(pool, query, begin);
852 if (is_pipeline_query_with_vertex_stage(pool->pipeline_statistics)) {
876 if (is_pipeline_query_with_fragment_stage(pool->pipeline_statistics)) {
880 if (is_pipeline_query_with_compute_stage(pool->pipeline_statistics)) {
906 struct tu_query_pool *pool,
934 for (uint32_t i = 0; i < pool->counter_index_count; i++) {
935 struct tu_perf_query_data *data = &pool->perf_query_data[i];
946 &pool->perf_group[data->gid].counters[data->cntr_reg];
948 &pool->perf_group[data->gid].countables[data->cid];
958 for (uint32_t i = 0; i < pool->counter_index_count; i++) {
959 struct tu_perf_query_data *data = &pool->perf_query_data[i];
970 &pool->perf_group[data->gid].counters[data->cntr_reg];
972 uint64_t begin_iova = perf_query_iova(pool, 0, begin, data->app_idx);
984 struct tu_query_pool *pool,
989 uint64_t begin_iova = primitive_query_iova(pool, query, begin[0], 0);
997 struct tu_query_pool *pool,
1001 uint64_t begin_iova = primitives_generated_query_iova(pool, query, begin);
1042 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
1043 assert(query < pool->size);
1045 switch (pool->type) {
1051 emit_begin_occlusion_query(cmdbuf, pool, query);
1054 emit_begin_xfb_query(cmdbuf, pool, query, 0);
1057 emit_begin_prim_generated_query(cmdbuf, pool, query);
1060 emit_begin_perf_query(cmdbuf, pool, query);
1063 emit_begin_stat_query(cmdbuf, pool, query);
1080 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
1081 assert(query < pool->size);
1083 switch (pool->type) {
1085 emit_begin_xfb_query(cmdbuf, pool, query, index);
1088 emit_begin_prim_generated_query(cmdbuf, pool, query);
1097 struct tu_query_pool *pool,
1117 uint64_t available_iova = query_available_iova(pool, query);
1118 uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
1119 uint64_t end_iova = occlusion_query_iova(pool, query, end);
1120 uint64_t result_iova = query_result_iova(pool, query, uint64_t, 0);
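
The four addresses gathered here reflect the ordering that makes the scheme sound: the command stream computes result = end - begin and only afterwards flips the available flag, so a host poller that sees available != 0 is guaranteed to read a finished result. The host-visible effect, modeled sequentially in C (the real work is CP packets operating on these iovas):

#include <stdint.h>

/* Assumed field order, for illustration only. */
struct occlusion_slot {
   uint64_t available;
   uint64_t result;
   uint64_t begin;
   uint64_t end;
};

static void
end_occlusion_query_effect(struct occlusion_slot *slot)
{
   /* 1. The end sample count must have landed first (a CP wait on
    *    memory in the real command stream; a plain read here). */
   uint64_t samples = slot->end - slot->begin;

   /* 2. Publish the result... */
   slot->result = samples;

   /* 3. ...and only then mark the query available. */
   slot->available = 1;
}
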
1217 struct tu_query_pool *pool,
1221 uint64_t end_iova = pipeline_stat_query_iova(pool, query, end);
1222 uint64_t available_iova = query_available_iova(pool, query);
1227 if (is_pipeline_query_with_vertex_stage(pool->pipeline_statistics)) {
1235 if (is_pipeline_query_with_fragment_stage(pool->pipeline_statistics)) {
1239 if (is_pipeline_query_with_compute_stage(pool->pipeline_statistics)) {
1252 result_iova = query_result_iova(pool, query, uint64_t, i);
1253 stat_start_iova = pipeline_stat_query_iova(pool, query, begin[i]);
1254 stat_stop_iova = pipeline_stat_query_iova(pool, query, end[i]);
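
For pipeline statistics the same end-minus-begin step runs once per enabled statistic, pairing begin[i]/end[i] with result slot i. A host-side model (STAT_COUNT and the layout are assumptions; Vulkan defines 11 pipeline statistic bits):

#include <stdint.h>

#define STAT_COUNT 11

struct stat_slot {
   uint64_t available;
   uint64_t results[STAT_COUNT];
   uint64_t begin[STAT_COUNT];
   uint64_t end[STAT_COUNT];
};

/* end - begin for every counter the hardware snapshotted; the GPU
 * does this with memory-to-memory subtractions in the real driver. */
static void
end_stat_query_effect(struct stat_slot *slot, uint32_t stat_count)
{
   for (uint32_t i = 0; i < stat_count; i++)
      slot->results[i] = slot->end[i] - slot->begin[i];
   slot->available = 1; /* ordered last, as with occlusion queries */
}
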
1280 struct tu_query_pool *pool,
1284 uint64_t available_iova = query_available_iova(pool, query);
1290 for (uint32_t i = 0; i < pool->counter_index_count; i++) {
1291 struct tu_perf_query_data *data = &pool->perf_query_data[i];
1302 &pool->perf_group[data->gid].counters[data->cntr_reg];
1304 end_iova = perf_query_iova(pool, 0, end, data->app_idx);
1316 for (uint32_t i = 0; i < pool->counter_index_count; i++) {
1317 struct tu_perf_query_data *data = &pool->perf_query_data[i];
1328 result_iova = query_result_iova(pool, 0, struct perfcntr_query_slot,
1330 begin_iova = perf_query_iova(pool, 0, begin, data->app_idx);
1331 end_iova = perf_query_iova(pool, 0, end, data->app_idx);
1359 struct tu_query_pool *pool,
1365 uint64_t end_iova = primitive_query_iova(pool, query, end[0], 0);
1366 uint64_t result_written_iova = query_result_iova(pool, query, uint64_t, 0);
1367 uint64_t result_generated_iova = query_result_iova(pool, query, uint64_t, 1);
1368 uint64_t begin_written_iova = primitive_query_iova(pool, query, begin[stream_id], 0);
1369 uint64_t begin_generated_iova = primitive_query_iova(pool, query, begin[stream_id], 1);
1370 uint64_t end_written_iova = primitive_query_iova(pool, query, end[stream_id], 0);
1371 uint64_t end_generated_iova = primitive_query_iova(pool, query, end[stream_id], 1);
1372 uint64_t available_iova = query_available_iova(pool, query);
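
Transform feedback queries snapshot two counters per stream, which is why each begin/end field carries both a written and a generated value; the two final results are the per-stream differences. A small sketch (struct and field names are invented):

#include <stdint.h>

struct xfb_snapshot {
   uint64_t written;   /* primitives written to the XFB buffer */
   uint64_t generated; /* primitives generated for the stream */
};

struct xfb_result {
   uint64_t primitives_written;
   uint64_t primitives_needed; /* "generated" in driver terms */
};

static struct xfb_result
end_xfb_query_effect(const struct xfb_snapshot *begin,
                     const struct xfb_snapshot *end)
{
   struct xfb_result r = {
      .primitives_written = end->written - begin->written,
      .primitives_needed = end->generated - begin->generated,
   };
   return r;
}
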
1408 struct tu_query_pool *pool,
1417 uint64_t begin_iova = primitives_generated_query_iova(pool, query, begin);
1418 uint64_t end_iova = primitives_generated_query_iova(pool, query, end);
1419 uint64_t result_iova = primitives_generated_query_iova(pool, query, result);
1420 uint64_t available_iova = query_available_iova(pool, query);
1468 * query pool (starting at query) where N is the number of bits set in the
1469 * view mask in the subpass the query is used in.
1486 struct tu_query_pool *pool,
1497 tu_cs_emit_qw(cs, query_available_iova(pool, query + i));
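
handle_multiview_queries() implements the spec rule quoted above: with multiview enabled a query consumes N consecutive pool indices, only the first of which gets real results, so the loop marks the remaining N-1 slots available (their results stay zeroed from the reset). The host-visible effect:

#include <stdint.h>

struct query_slot {
   uint64_t available;
};

/* Mark the N-1 queries after the active one available; their results
 * stay zero, which the spec permits for the extra views. */
static void
handle_multiview_effect(struct query_slot *slots, uint32_t query,
                        uint32_t view_count)
{
   for (uint32_t i = 1; i < view_count; i++)
      slots[query + i].available = 1;
}
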
1508 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
1509 assert(query < pool->size);
1511 switch (pool->type) {
1513 emit_end_occlusion_query(cmdbuf, pool, query);
1516 emit_end_xfb_query(cmdbuf, pool, query, 0);
1519 emit_end_prim_generated_query(cmdbuf, pool, query);
1522 emit_end_perf_query(cmdbuf, pool, query);
1525 emit_end_stat_query(cmdbuf, pool, query);
1533 handle_multiview_queries(cmdbuf, pool, query);
1543 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
1544 assert(query < pool->size);
1546 switch (pool->type) {
1549 emit_end_xfb_query(cmdbuf, pool, query, index);
1552 emit_end_prim_generated_query(cmdbuf, pool, query);
1566 TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
1598 tu_cs_emit_qw(cs, query_result_iova(pool, query, uint64_t, 0));
1606 tu_cs_emit_qw(cs, query_available_iova(pool, query));
1613 * query indices in the query pool (starting at query) where N is the
1614 * number of bits set in the view mask in the subpass the query is used in.
1634 handle_multiview_queries(cmd, pool, query);