Lines matching refs:device (RADV SQTT/thread-trace code)
39 radv_se_is_disabled(struct radv_device *device, unsigned se)
42 return device->physical_device->rad_info.cu_mask[se][0] == 0;
46 gfx10_get_thread_trace_ctrl(struct radv_device *device, bool enable)
54 if (device->physical_device->rad_info.gfx_level == GFX10_3)
57 if (device->physical_device->rad_info.has_sqtt_auto_flush_mode_bug)
64 radv_emit_wait_for_idle(struct radv_device *device, struct radeon_cmdbuf *cs, int family)
68 cs, device->physical_device->rad_info.gfx_level, NULL, 0,
69 family == AMD_IP_COMPUTE && device->physical_device->rad_info.gfx_level >= GFX7,
79 radv_emit_thread_trace_start(struct radv_device *device, struct radeon_cmdbuf *cs,
82 uint32_t shifted_size = device->thread_trace.buffer_size >> SQTT_BUFFER_ALIGN_SHIFT;
83 struct radeon_info *rad_info = &device->physical_device->rad_info;
87 uint64_t va = radv_buffer_get_va(device->thread_trace.bo);
88 uint64_t data_va = ac_thread_trace_get_data_va(rad_info, &device->thread_trace, va, se);
90 int first_active_cu = ffs(device->physical_device->rad_info.cu_mask[se][0]);
92 if (radv_se_is_disabled(device, se))
100 if (device->physical_device->rad_info.gfx_level >= GFX10) {
135 gfx10_get_thread_trace_ctrl(device, true));
152 if (device->physical_device->rad_info.gfx_level < GFX9) {
171 if (device->physical_device->rad_info.gfx_level == GFX9) {
183 if (device->physical_device->rad_info.gfx_level == GFX9) {
225 radv_copy_thread_trace_info_regs(struct radv_device *device, struct radeon_cmdbuf *cs,
230 if (device->physical_device->rad_info.gfx_level >= GFX10) {
232 } else if (device->physical_device->rad_info.gfx_level == GFX9) {
235 assert(device->physical_device->rad_info.gfx_level == GFX8);
240 uint64_t va = radv_buffer_get_va(device->thread_trace.bo);
256 radv_emit_thread_trace_stop(struct radv_device *device, struct radeon_cmdbuf *cs,
259 unsigned max_se = device->physical_device->rad_info.max_se;
272 if (device->physical_device->rad_info.has_sqtt_rb_harvest_bug) {
274 radv_emit_wait_for_idle(device, cs, qf);
278 if (radv_se_is_disabled(device, se))
286 if (device->physical_device->rad_info.gfx_level >= GFX10) {
287 if (!device->physical_device->rad_info.has_sqtt_rb_harvest_bug) {
302 gfx10_get_thread_trace_ctrl(device, false));
328 radv_copy_thread_trace_info_regs(device, cs, se);
341 struct radv_device *device = cmd_buffer->device;
352 radeon_check_space(device->ws, cs, 2 + count);
356 if (device->physical_device->rad_info.gfx_level >= GFX10)
368 radv_emit_spi_config_cntl(struct radv_device *device, struct radeon_cmdbuf *cs, bool enable)
370 if (device->physical_device->rad_info.gfx_level >= GFX9) {
375 if (device->physical_device->rad_info.gfx_level >= GFX10)
388 radv_emit_inhibit_clockgating(struct radv_device *device, struct radeon_cmdbuf *cs, bool inhibit)
390 if (device->physical_device->rad_info.gfx_level >= GFX11)
393 if (device->physical_device->rad_info.gfx_level >= GFX10) {
396 } else if (device->physical_device->rad_info.gfx_level >= GFX8) {
403 radv_thread_trace_init_bo(struct radv_device *device)
405 unsigned max_se = device->physical_device->rad_info.max_se;
406 struct radeon_winsys *ws = device->ws;
413 device->thread_trace.buffer_size =
414 align64(device->thread_trace.buffer_size, 1u << SQTT_BUFFER_ALIGN_SHIFT);
418 size += device->thread_trace.buffer_size * (uint64_t)max_se;
425 device->thread_trace.bo = bo;
429 result = ws->buffer_make_resident(ws, device->thread_trace.bo, true);
433 device->thread_trace.ptr = ws->buffer_map(device->thread_trace.bo);
434 if (!device->thread_trace.ptr)
441 radv_thread_trace_finish_bo(struct radv_device *device)
443 struct radeon_winsys *ws = device->ws;
445 if (unlikely(device->thread_trace.bo)) {
446 ws->buffer_make_resident(ws, device->thread_trace.bo, false);
447 ws->buffer_destroy(ws, device->thread_trace.bo);
452 radv_thread_trace_init(struct radv_device *device)
454 struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
457 device->thread_trace.buffer_size =
459 device->thread_trace.start_frame = radv_get_int_debug_option("RADV_THREAD_TRACE", -1);
463 device->thread_trace.trigger_file = strdup(trigger_file);
465 if (!radv_thread_trace_init_bo(device))
468 if (!radv_device_acquire_performance_counters(device))
484 radv_thread_trace_finish(struct radv_device *device)
486 struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
487 struct radeon_winsys *ws = device->ws;
489 free(device->thread_trace.trigger_file);
491 radv_thread_trace_finish_bo(device);
494 if (device->thread_trace.start_cs[i])
495 ws->cs_destroy(device->thread_trace.start_cs[i]);
496 if (device->thread_trace.stop_cs[i])
497 ws->cs_destroy(device->thread_trace.stop_cs[i]);
511 radv_thread_trace_resize_bo(struct radv_device *device)
514 radv_thread_trace_finish_bo(device);
517 device->thread_trace.buffer_size *= 2;
522 device->thread_trace.buffer_size / 1024);
525 return radv_thread_trace_init_bo(device);
531 struct radv_device *device = queue->device;
533 struct radeon_winsys *ws = device->ws;
538 if (device->thread_trace.start_cs[family]) {
539 ws->cs_destroy(device->thread_trace.start_cs[family]);
540 device->thread_trace.start_cs[family] = NULL;
563 radv_emit_wait_for_idle(device, cs, family);
566 radv_emit_inhibit_clockgating(device, cs, true);
569 radv_emit_spi_config_cntl(device, cs, true);
573 if (device->spm_trace.bo) {
577 radv_emit_spm_setup(device, cs);
581 radv_emit_thread_trace_start(device, cs, family);
583 if (device->spm_trace.bo)
584 radv_perfcounter_emit_spm_start(device, cs, family);
592 device->thread_trace.start_cs[family] = cs;
600 struct radv_device *device = queue->device;
602 struct radeon_winsys *ws = device->ws;
607 if (queue->device->thread_trace.stop_cs[family]) {
608 ws->cs_destroy(device->thread_trace.stop_cs[family]);
609 device->thread_trace.stop_cs[family] = NULL;
632 radv_emit_wait_for_idle(device, cs, family);
634 if (device->spm_trace.bo)
635 radv_perfcounter_emit_spm_stop(device, cs, family);
638 radv_emit_thread_trace_stop(device, cs, family);
643 radv_emit_spi_config_cntl(device, cs, false);
646 radv_emit_inhibit_clockgating(device, cs, false);
654 device->thread_trace.stop_cs[family] = cs;
662 struct radv_device *device = queue->device;
663 struct radeon_info *rad_info = &device->physical_device->rad_info;
665 void *thread_trace_ptr = device->thread_trace.ptr;
671 uint64_t data_offset = ac_thread_trace_get_data_offset(rad_info, &device->thread_trace, se);
676 int first_active_cu = ffs(device->physical_device->rad_info.cu_mask[se][0]);
678 if (radv_se_is_disabled(device, se))
681 if (!ac_is_thread_trace_complete(&device->physical_device->rad_info, &device->thread_trace,
683 if (!radv_thread_trace_resize_bo(device)) {
696 thread_trace_se.compute_unit = device->physical_device->rad_info.gfx_level >= GFX10
704 thread_trace->data = &device->thread_trace;
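
For orientation, below is a minimal sketch of two pieces of logic visible in the matched lines: the per-SE disabled check (lines 39-42) and the thread-trace buffer sizing done when the BO is created (lines 405-418), which is retried with a doubled buffer_size on overflow (line 517). The stand-in struct, the align64() helper and the placeholder SQTT_BUFFER_ALIGN_SHIFT value are assumptions for illustration only, not the driver's actual definitions.

   #include <stdbool.h>
   #include <stdint.h>

   #define SQTT_BUFFER_ALIGN_SHIFT 12 /* placeholder; the driver defines its own value */

   /* Stripped-down stand-in for the fields of rad_info used below. */
   struct fake_rad_info {
      unsigned max_se;
      uint32_t cu_mask[8][4];
   };

   static uint64_t
   align64(uint64_t value, uint64_t alignment)
   {
      /* alignment is assumed to be a power of two */
      return (value + alignment - 1) & ~(alignment - 1);
   }

   /* Mirrors radv_se_is_disabled(): an SE whose first CU mask word is 0 has
    * no active CUs, so SQTT programming and result parsing skip it. */
   static bool
   se_is_disabled(const struct fake_rad_info *info, unsigned se)
   {
      return info->cu_mask[se][0] == 0;
   }

   /* Mirrors the sizing in radv_thread_trace_init_bo(): the per-SE buffer is
    * aligned to the SQTT granule and one slice is reserved per SE (the real
    * code adds this onto a header/info region, hence the "size +=" at line
    * 418). If a capture does not fit, the driver frees the BO, doubles
    * buffer_size and recreates it. */
   static uint64_t
   sqtt_data_size(const struct fake_rad_info *info, uint64_t buffer_size)
   {
      buffer_size = align64(buffer_size, 1ull << SQTT_BUFFER_ALIGN_SHIFT);
      return buffer_size * (uint64_t)info->max_se;
   }

The start/stop emission paths above only program per-SE SQTT registers for SEs that pass this check (the early continue at lines 92 and 278).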