Lines Matching refs:pool

54 anv_query_address(struct anv_query_pool *pool, uint32_t query)
57 .bo = pool->bo,
58 .offset = query * pool->stride,
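
The matches at lines 54-58 outline a small address helper. A minimal reconstruction, assuming the elided lines only wrap the two designated initializers in a returned struct:

static struct anv_address
anv_query_address(struct anv_query_pool *pool, uint32_t query)
{
   /* Each query occupies one 'stride'-sized slot in the pool BO. */
   return (struct anv_address) {
      .bo = pool->bo,
      .offset = query * pool->stride,
   };
}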
82 /* Query pool slots are made up of some number of 64-bit values packed
94 VK_MULTIALLOC_DECL(&ma, struct anv_query_pool, pool, 1);
174 pool->type = pCreateInfo->queryType;
175 pool->pipeline_statistics = pipeline_statistics;
176 pool->stride = uint64s_per_slot * sizeof(uint64_t);
177 pool->slots = pCreateInfo->queryCount;
179 if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
180 pool->data_offset = data_offset;
181 pool->snapshot_size = (pool->stride - data_offset) / 2;
184 else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
185 pool->pass_size = pool->stride / n_passes;
186 pool->data_offset = data_offset;
187 pool->snapshot_size = (pool->pass_size - data_offset) / 2;
188 pool->n_counters = perf_query_info->counterIndexCount;
189 pool->counter_pass = counter_pass;
193 pool->counter_pass);
194 pool->n_passes = n_passes;
195 pool->pass_query = pass_query;
199 pool->pass_query);
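
The initialization above implies the slot layout: for VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR, each slot of 'stride' bytes is split into 'n_passes' regions of 'pass_size' bytes (line 185), and each region holds 'data_offset' bytes of preamble followed by a begin and an end snapshot of 'snapshot_size' bytes each (line 187). With hypothetical numbers: if stride is 4096 and n_passes is 2, pass_size is 4096 / 2 = 2048; with a data_offset of 8, snapshot_size is (2048 - 8) / 2 = 1020 bytes.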
203 uint64_t size = pool->slots * (uint64_t)pool->stride;
204 result = anv_device_alloc_bo(device, "query-pool", size,
208 &pool->bo);
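
Note that line 203 widens 'stride' to 64-bit before the multiply, so the slots × stride product cannot overflow 32 bits on large pools. As a worked example with hypothetical values, a pool of 65536 slots with a 64-byte stride needs 65536 * 64 = 4 MiB of BO space.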
213 if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
214 for (uint32_t p = 0; p < pool->n_passes; p++) {
217 .start = pool->bo->map + khr_perf_query_preamble_offset(pool, p),
218 .end = pool->bo->map + khr_perf_query_preamble_offset(pool, p) + pool->data_offset,
224 mi_imm(p * (uint64_t)pool->pass_size));
230 *pQueryPool = anv_query_pool_to_handle(pool);
235 vk_free2(&device->vk.alloc, pAllocator, pool);
246 ANV_FROM_HANDLE(anv_query_pool, pool, _pool);
248 if (!pool)
251 anv_device_release_bo(device, pool->bo);
252 vk_object_free(&device->vk, pAllocator, pool);
301 khr_perf_query_availability_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
303 return query * (uint64_t)pool->stride + pass * (uint64_t)pool->pass_size;
307 khr_perf_query_data_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
309 return query * (uint64_t)pool->stride + pass * (uint64_t)pool->pass_size +
310 pool->data_offset + (end ? pool->snapshot_size : 0);
314 khr_perf_query_availability_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
317 (struct anv_address) { .bo = pool->bo, },
318 khr_perf_query_availability_offset(pool, query, pass));
322 khr_perf_query_data_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
325 (struct anv_address) { .bo = pool->bo, },
326 khr_perf_query_data_offset(pool, query, pass, end));
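
The four helpers at lines 301-326 reconstruct cleanly from the matched fragments. A sketch, assuming no additional logic on the elided lines:

static uint64_t
khr_perf_query_availability_offset(struct anv_query_pool *pool,
                                   uint32_t query, uint32_t pass)
{
   /* Slot base plus the per-pass region base. */
   return query * (uint64_t)pool->stride + pass * (uint64_t)pool->pass_size;
}

static uint64_t
khr_perf_query_data_offset(struct anv_query_pool *pool,
                           uint32_t query, uint32_t pass, bool end)
{
   /* Skip the preamble, then pick the begin or end snapshot. */
   return query * (uint64_t)pool->stride + pass * (uint64_t)pool->pass_size +
          pool->data_offset + (end ? pool->snapshot_size : 0);
}

static struct anv_address
khr_perf_query_availability_address(struct anv_query_pool *pool,
                                    uint32_t query, uint32_t pass)
{
   return anv_address_add(
      (struct anv_address) { .bo = pool->bo, },
      khr_perf_query_availability_offset(pool, query, pass));
}

static struct anv_address
khr_perf_query_data_address(struct anv_query_pool *pool,
                            uint32_t query, uint32_t pass, bool end)
{
   return anv_address_add(
      (struct anv_address) { .bo = pool->bo, },
      khr_perf_query_data_offset(pool, query, pass, end));
}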
342 vk_alloc(&cmd_buffer->vk.pool->alloc,
378 intel_perf_query_data_offset(struct anv_query_pool *pool, bool end)
380 return pool->data_offset + (end ? pool->snapshot_size : 0);
397 query_slot(struct anv_query_pool *pool, uint32_t query)
399 return pool->bo->map + query * pool->stride;
403 query_is_available(struct anv_query_pool *pool, uint32_t query)
406 if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
407 for (uint32_t p = 0; p < pool->n_passes; p++) {
409 pool->bo->map + khr_perf_query_availability_offset(pool, query, p);
417 return *(volatile uint64_t *)query_slot(pool, query);
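
Lines 397-417 give the CPU-side readback helpers. A sketch, with the KHR loop body (lines 408-416, not shown in the matches) assumed to bail out on the first unavailable pass:

static void *
query_slot(struct anv_query_pool *pool, uint32_t query)
{
   return pool->bo->map + query * pool->stride;
}

static bool
query_is_available(struct anv_query_pool *pool, uint32_t query)
{
   if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
      /* A KHR perf query is only available once every pass has landed. */
      for (uint32_t p = 0; p < pool->n_passes; p++) {
         volatile uint64_t *slot =
            pool->bo->map + khr_perf_query_availability_offset(pool, query, p);
         if (!slot[0])
            return false;
      }
      return true;
   }

   /* Otherwise the first qword of the slot is the availability flag. */
   return *(volatile uint64_t *)query_slot(pool, query);
}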
422 struct anv_query_pool *pool, uint32_t query)
427 if (query_is_available(pool, query))
448 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
450 assert(pool->type == VK_QUERY_TYPE_OCCLUSION ||
451 pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
452 pool->type == VK_QUERY_TYPE_TIMESTAMP ||
453 pool->type == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT ||
454 pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR ||
455 pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL ||
456 pool->type == VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT);
468 bool available = query_is_available(pool, firstQuery + i);
471 status = wait_for_available(device, pool, firstQuery + i);
492 * into a query pool of type ename:VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR"
497 switch (pool->type) {
500 uint64_t *slot = query_slot(pool, firstQuery + i);
517 uint64_t *slot = query_slot(pool, firstQuery + i);
518 uint32_t statistics = pool->pipeline_statistics;
533 assert(idx == util_bitcount(pool->pipeline_statistics));
538 uint64_t *slot = query_slot(pool, firstQuery + i);
549 uint64_t *slot = query_slot(pool, firstQuery + i);
561 for (uint32_t p = 0; p < pool->n_passes; p++) {
562 const struct intel_perf_query_info *query = pool->pass_query[p];
566 pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, false),
567 pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, true),
569 anv_perf_write_pass_results(pdevice->perf, pool, p, &result, pData);
578 const void *query_data = query_slot(pool, firstQuery + i);
583 query_data + intel_perf_query_data_offset(pool, false),
584 query_data + intel_perf_query_data_offset(pool, true),
595 unreachable("invalid pool type");
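
For reference, a hypothetical application-side call that exercises this path, reading one 64-bit occlusion result and waiting for availability (line 471):

uint64_t result;
VkResult res = vkGetQueryPoolResults(device, query_pool,
                                     0 /* firstQuery */, 1 /* queryCount */,
                                     sizeof(result), &result,
                                     sizeof(result) /* stride */,
                                     VK_QUERY_RESULT_64_BIT |
                                     VK_QUERY_RESULT_WAIT_BIT);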
655 * Goes through a series of consecutive query indices in the given pool
660 struct mi_builder *b, struct anv_query_pool *pool,
663 switch (pool->type) {
670 assert((pool->stride % 8) == 0);
673 anv_query_address(pool, first_index + i);
675 for (uint32_t qword = 1; qword < (pool->stride / 8); qword++) {
689 anv_query_address(pool, first_index + i);
690 mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
698 for (uint32_t p = 0; p < pool->n_passes; p++) {
699 mi_memset(b, khr_perf_query_data_address(pool, first_index + i, p, false),
700 0, 2 * pool->snapshot_size);
702 khr_perf_query_availability_address(pool, first_index + i, p),
713 anv_query_address(pool, first_index + i);
714 mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
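
The zeroing pattern at lines 689-690 and 713-714 skips the first 8 bytes of each slot (the availability qword) and clears the rest with an MI memset. A sketch of one loop iteration, with the availability write that presumably follows marked as an assumption:

struct anv_address slot_addr = anv_query_address(pool, first_index + i);
/* Preserve bytes 0-7 (availability), zero the result payload. */
mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
emit_query_mi_availability(b, slot_addr, true); /* assumed follow-up */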
731 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
733 switch (pool->type) {
737 anv_query_address(pool, firstQuery + i),
745 anv_query_address(pool, firstQuery + i),
766 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
776 for (uint32_t p = 0; p < pool->n_passes; p++) {
779 khr_perf_query_availability_address(pool, firstQuery + i, p),
792 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
807 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
810 if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
812 for (uint32_t p = 0; p < pool->n_passes; p++) {
813 uint64_t *pass_slot = pool->bo->map +
814 khr_perf_query_availability_offset(pool, firstQuery + i, p);
819 uint64_t *slot = query_slot(pool, firstQuery + i);
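
The host reset at lines 807-819 clears availability directly through the mapped BO. A sketch of the loop, assuming the stores on the elided lines simply write zero:

for (uint32_t i = 0; i < queryCount; i++) {
   if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
      /* One availability qword per pass. */
      for (uint32_t p = 0; p < pool->n_passes; p++) {
         uint64_t *pass_slot = pool->bo->map +
            khr_perf_query_availability_offset(pool, firstQuery + i, p);
         *pass_slot = 0;
      }
   } else {
      uint64_t *slot = query_slot(pool, firstQuery + i);
      *slot = 0;
   }
}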
864 struct anv_query_pool *pool,
872 anv_address_add(query_addr, intel_perf_query_data_offset(pool, end));
924 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
925 struct anv_address query_addr = anv_query_address(pool, query);
930 switch (pool->type) {
951 uint32_t statistics = pool->pipeline_statistics;
985 mi_imm(intel_canonical_address(pool->bo->offset +
986 khr_perf_query_data_offset(pool, query, 0, end) +
996 mi_imm(intel_canonical_address(pool->bo->offset +
997 khr_perf_query_data_offset(pool, query, 0, end) +
1010 pool->bo->offset +
1011 khr_perf_query_availability_offset(pool, query, 0 /* pass */))),
1024 cmd_buffer->perf_query_pool = pool;
1087 emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, false);
1111 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1112 struct anv_address query_addr = anv_query_address(pool, query);
1117 switch (pool->type) {
1144 uint32_t statistics = pool->pipeline_statistics;
1172 cmd_buffer->perf_query_pool = pool;
1254 emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, true);
1275 emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1288 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1289 struct anv_address query_addr = anv_query_address(pool, query);
1291 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1329 emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
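
Line 1329 zeroes the remaining num_queries - 1 indices after a timestamp write, presumably for multiview, where the consecutive query indices consumed per view must still become available. A hypothetical application-side call into this path (cmd being a recording VkCommandBuffer):

vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                    query_pool, query);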
1404 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1435 pool->type == VK_QUERY_TYPE_OCCLUSION ||
1436 pool->type == VK_QUERY_TYPE_TIMESTAMP) {
1445 struct anv_address query_addr = anv_query_address(pool, firstQuery + i);
1447 switch (pool->type) {
1466 uint32_t statistics = pool->pipeline_statistics;
1482 assert(idx == util_bitcount(pool->pipeline_statistics));
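
Lines 1466 and 1482 bracket a loop over the enabled pipeline-statistics bits, and the assert implies one copied counter per set bit. The usual Mesa bit-walk idiom for such a loop, as a sketch of what the elided body likely does:

uint32_t statistics = pool->pipeline_statistics;
uint32_t idx = 0;
while (statistics) {
   UNUSED uint32_t stat = u_bit_scan(&statistics);
   /* ... copy the counter for bit 'stat' at result index 'idx' ... */
   idx++;
}
assert(idx == util_bitcount(pool->pipeline_statistics));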