Lines Matching refs:device

128 struct tu_device *device = container_of(vk_device, struct tu_device, vk);
129 struct tu_physical_device *physical_device = device->physical_device;
134 return vk_device_set_lost(&device->vk, "error getting GPU fault count: %d", ret);
137 return vk_device_set_lost(&device->vk, "GPU faulted or hung");
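A minimal sketch of the fault check these fragments perform, assuming the DRM_MSM_GET_PARAM command and MSM_PARAM_FAULTS from the msm kernel uapi; get_msm_param() and check_gpu_faults() are illustrative names, not the driver's.

#include <stdint.h>
#include <xf86drm.h>
#include "drm-uapi/msm_drm.h" /* Mesa-tree path; plain msm_drm.h elsewhere */

static int
get_msm_param(int fd, uint32_t param, uint64_t *value)
{
   struct drm_msm_param req = {
      .pipe = MSM_PIPE_3D0,
      .param = param,
   };

   int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
   if (ret)
      return ret;

   *value = req.value;
   return 0;
}

/* Non-zero means the device should be marked lost, as at line 137. */
static int
check_gpu_faults(int fd, uint64_t baseline_fault_count)
{
   uint64_t fault_count;
   int ret = get_msm_param(fd, MSM_PARAM_FAULTS, &fault_count);
   if (ret)
      return ret; /* the "error getting GPU fault count" path at line 134 */

   return fault_count != baseline_fault_count;
}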
392 /* Our BO structs are stored in a sparse array in the physical device,
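The comment at line 392 refers to the driver's BO bookkeeping; a sketch of that pattern using Mesa's util_sparse_array helpers follows (the tu_bo fields and bo_from_handle() here are illustrative).

#include <stdint.h>
#include "util/sparse_array.h"

struct tu_bo {
   uint32_t gem_handle;
   uint64_t size;
};

static void
bo_map_init(struct util_sparse_array *bo_map)
{
   /* One slot per GEM handle, allocated lazily in nodes of 512. */
   util_sparse_array_init(bo_map, sizeof(struct tu_bo), 512);
}

static struct tu_bo *
bo_from_handle(struct util_sparse_array *bo_map, uint32_t gem_handle)
{
   /* GEM handles are small, densely-allocated integers, so they index
    * the sparse array directly without a separate hash table. */
   return (struct tu_bo *)util_sparse_array_get(bo_map, gem_handle);
}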
439 struct tu_device *device = container_of(vk_device, struct tu_device, vk);
443 assert(device->fd >= 0);
445 int err = drmSyncobjCreate(device->fd, flags, &sync->syncobj);
448 return vk_error(device, VK_ERROR_DEVICE_LOST);
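The creation path at lines 443-448 boils down to one libdrm call; a minimal sketch, with create_sync() as an illustrative wrapper:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>

static int
create_sync(int fd, bool signaled, uint32_t *syncobj_out)
{
   uint32_t flags = signaled ? DRM_SYNCOBJ_CREATE_SIGNALED : 0;

   /* Returns 0 on success; the fragment above maps failure to
    * VK_ERROR_DEVICE_LOST. */
   return drmSyncobjCreate(fd, flags, syncobj_out);
}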
488 drm_syncobj_wait(struct tu_device *device,
498 int err = drmSyncobjWait(device->fd, handles,
505 return vk_errorf(device, VK_ERROR_UNKNOWN,
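drm_syncobj_wait() at line 488 wraps libdrm's drmSyncobjWait(); a sketch assuming an absolute nanosecond timeout, with wait_syncobjs() and the wait_all parameter illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>

static int
wait_syncobjs(int fd, uint32_t *handles, uint32_t count,
              int64_t abs_timeout_ns, bool wait_all)
{
   uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   if (wait_all)
      flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   uint32_t first_signaled;
   /* Returns 0 on success, a negative errno otherwise; the caller at
    * line 505 maps unexpected errors to VK_ERROR_UNKNOWN. */
   return drmSyncobjWait(fd, handles, count, abs_timeout_ns, flags,
                         &first_signaled);
}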
623 tu_drm_device_init(struct tu_physical_device *device,
637 "failed to open device %s", path);
648 "failed to query kernel driver version for device %s",
656 "device %s does not use the msm kernel driver",
663 "kernel driver for device %s has version %d.%d, "
673 device->msm_major_version = version->version_major;
674 device->msm_minor_version = version->version_minor;
679 mesa_logi("Found compatible device '%s'.", path);
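Lines 623-679 open the device node and verify it is driven by msm; a condensed sketch using libdrm's version query (probe_msm_device() is illustrative, and the minimum-version check of the real code is omitted):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

static int
probe_msm_device(const char *path)
{
   int fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      fprintf(stderr, "failed to open device %s\n", path);
      return -1;
   }

   drmVersionPtr version = drmGetVersion(fd);
   if (!version) {
      fprintf(stderr,
              "failed to query kernel driver version for device %s\n", path);
      close(fd);
      return -1;
   }

   /* Only the msm kernel driver is supported. */
   if (strcmp(version->name, "msm") != 0) {
      fprintf(stderr, "device %s does not use the msm kernel driver\n", path);
      drmFreeVersion(version);
      close(fd);
      return -1;
   }

   printf("Found compatible device '%s' (msm %d.%d).\n",
          path, version->version_major, version->version_minor);
   drmFreeVersion(version);
   return fd;
}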
681 device->instance = instance;
690 device->master_fd = master_fd;
691 device->local_fd = fd;
693 if (tu_drm_get_gpu_id(device, &device->dev_id.gpu_id)) {
699 if (tu_drm_get_param(device, MSM_PARAM_CHIP_ID, &device->dev_id.chip_id)) {
705 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
710 device->gmem_size = env_var_as_unsigned("TU_GMEM", device->gmem_size);
712 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
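The TU_GMEM override at line 710 lets the environment replace the GMEM size queried from the kernel; a sketch using plain getenv()/strtoul() in place of the driver's env_var_as_unsigned() helper:

#include <stdint.h>
#include <stdlib.h>

static uint64_t
gmem_size_from_env(uint64_t detected_gmem_size)
{
   /* TU_GMEM overrides the detected GMEM size, e.g. to test tiling
    * behavior with a smaller (or fake) on-chip memory. */
   const char *s = getenv("TU_GMEM");
   if (!s)
      return detected_gmem_size;

   return strtoul(s, NULL, 0); /* accepts decimal, hex, or octal */
}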
721 device->has_master = true;
722 device->master_major = major(st.st_rdev);
723 device->master_minor = minor(st.st_rdev);
725 device->has_master = false;
726 device->master_major = 0;
727 device->master_minor = 0;
731 device->has_local = true;
732 device->local_major = major(st.st_rdev);
733 device->local_minor = minor(st.st_rdev);
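Lines 721-733 record the major/minor numbers of the master and render nodes; a sketch of that bookkeeping with fstat() (struct node_info and fill_node_info() are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

struct node_info {
   bool has_node;
   uint32_t node_major;
   uint32_t node_minor;
};

static void
fill_node_info(int fd, struct node_info *info)
{
   struct stat st;

   if (fd >= 0 && fstat(fd, &st) == 0) {
      info->has_node = true;
      info->node_major = major(st.st_rdev);
      info->node_minor = minor(st.st_rdev);
   } else {
      /* No node (e.g. render node only, no master fd): report zeros,
       * as the fragments above do at lines 725-727. */
      info->has_node = false;
      info->node_major = 0;
      info->node_minor = 0;
   }
}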
740 int ret = tu_drm_get_param(device, MSM_PARAM_FAULTS, &device->fault_count);
747 device->syncobj_type = vk_drm_syncobj_get_type(fd);
749 if (!(device->syncobj_type.features & VK_SYNC_FEATURE_TIMELINE))
750 device->timeline_type = vk_sync_timeline_get_type(&tu_timeline_sync_type);
752 device->sync_types[0] = &device->syncobj_type;
753 device->sync_types[1] = &device->timeline_type.sync;
754 device->sync_types[2] = NULL;
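Lines 747-754 build the NULL-terminated vk_sync_type table; a sketch assuming Mesa's runtime helpers vk_drm_syncobj_get_type() and vk_sync_timeline_get_type(), with tu_timeline_sync_type being the driver's binary point type referenced at line 750 (include paths are Mesa-tree relative):

#include "vk_drm_syncobj.h"   /* src/vulkan/runtime in the Mesa tree */
#include "vk_sync_timeline.h"

extern const struct vk_sync_type tu_timeline_sync_type;

struct sync_state {
   struct vk_sync_type syncobj_type;
   struct vk_sync_timeline_type timeline_type;
   const struct vk_sync_type *sync_types[3];
};

static void
init_sync_types(struct sync_state *s, int fd)
{
   s->syncobj_type = vk_drm_syncobj_get_type(fd);

   /* If the kernel syncobj lacks timeline support, emulate timelines
    * in userspace on top of the driver's binary sync type. */
   if (!(s->syncobj_type.features & VK_SYNC_FEATURE_TIMELINE))
      s->timeline_type = vk_sync_timeline_get_type(&tu_timeline_sync_type);

   s->sync_types[0] = &s->syncobj_type;
   s->sync_types[1] = &s->timeline_type.sync;
   s->sync_types[2] = NULL; /* NULL-terminated, as at line 754 */
}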
756 device->heap.size = tu_get_system_heap_size();
757 device->heap.used = 0u;
758 device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
760 result = tu_physical_device_init(device, instance);
823 bool u_trace_enabled = u_trace_context_actively_tracing(&queue->device->trace_context);
832 tu_insert_dynamic_cmdbufs(queue->device, &new_submit->cmd_buffers,
857 new_submit->cmds = vk_zalloc(&queue->device->vk.alloc,
869 queue->device, new_submit->cmd_buffers,
879 new_submit->in_syncobjs = vk_zalloc(&queue->device->vk.alloc,
889 new_submit->out_syncobjs = vk_zalloc(&queue->device->vk.alloc,
907 vk_free(&queue->device->vk.alloc, new_submit->in_syncobjs);
910 tu_u_trace_submission_data_finish(queue->device,
913 vk_free(&queue->device->vk.alloc, new_submit->cmds);
921 vk_free(&queue->device->vk.alloc, submit->cmds);
922 vk_free(&queue->device->vk.alloc, submit->in_syncobjs);
923 vk_free(&queue->device->vk.alloc, submit->out_syncobjs);
925 vk_free(&queue->device->vk.alloc, submit->cmd_buffers);
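Lines 857-925 show the per-submit scratch arrays being vk_zalloc'ed and then freed on both the error and success paths; a condensed sketch of that pattern (struct submit_arrays is illustrative, and the real code also carries cmd_buffers and u_trace data):

#include <stdint.h>
#include "drm-uapi/msm_drm.h"
#include "vk_alloc.h" /* Mesa's VkAllocationCallbacks wrappers */

struct submit_arrays {
   struct drm_msm_gem_submit_cmd *cmds;
   struct drm_msm_gem_submit_syncobj *in_syncobjs;
   struct drm_msm_gem_submit_syncobj *out_syncobjs;
};

static VkResult
alloc_submit_arrays(const VkAllocationCallbacks *alloc,
                    struct submit_arrays *s, uint32_t nr_cmds,
                    uint32_t nr_in, uint32_t nr_out)
{
   s->cmds = vk_zalloc(alloc, nr_cmds * sizeof(*s->cmds), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   s->in_syncobjs = vk_zalloc(alloc, nr_in * sizeof(*s->in_syncobjs), 8,
                              VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   s->out_syncobjs = vk_zalloc(alloc, nr_out * sizeof(*s->out_syncobjs), 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);

   if (!s->cmds || !s->in_syncobjs || !s->out_syncobjs) {
      /* Unwind everything, mirroring the error path at lines 907-913
       * and the normal teardown at lines 921-925. */
      vk_free(alloc, s->cmds);
      vk_free(alloc, s->in_syncobjs);
      vk_free(alloc, s->out_syncobjs);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}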
947 struct tu_device *dev = queue->device;
952 struct tu_device *dev = queue->device;
988 queue->device->submit_count++;
992 autotune_cs = tu_autotune_on_submit(queue->device,
993 &queue->device->autotune,
1006 mtx_lock(&queue->device->bo_mutex);
1008 if (queue->device->implicit_sync_bo_count == 0)
1020 .bos = (uint64_t)(uintptr_t) queue->device->bo_list,
1021 .nr_bos = submit->entry_count ? queue->device->bo_count : 0,
1031 int ret = drmCommandWriteRead(queue->device->fd,
1035 mtx_unlock(&queue->device->bo_mutex);
1038 return vk_device_set_lost(&queue->device->vk, "submit failed: %m");
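The actual kernel handoff at line 1031 is a single DRM_MSM_GEM_SUBMIT ioctl; a condensed sketch (do_submit() is illustrative; the real call also passes syncobj arrays and runs under bo_mutex so the BO list stays stable):

#include <stdint.h>
#include <xf86drm.h>
#include "drm-uapi/msm_drm.h"

static int
do_submit(int fd, uint32_t queue_id,
          struct drm_msm_gem_submit_bo *bos, uint32_t nr_bos,
          struct drm_msm_gem_submit_cmd *cmds, uint32_t nr_cmds)
{
   struct drm_msm_gem_submit req = {
      .flags = MSM_PIPE_3D0,
      .queueid = queue_id,
      .bos = (uint64_t)(uintptr_t)bos,
      .nr_bos = nr_bos,
      .cmds = (uint64_t)(uintptr_t)cmds,
      .nr_cmds = nr_cmds,
   };

   /* On failure the caller marks the whole VkDevice lost
    * ("submit failed: %m"), since the kernel state is unknown. */
   return drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
}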
1041 tu_perfetto_submit(queue->device, queue->device->submit_count);
1047 submission_data->submission_id = queue->device->submit_count;
1050 vk_alloc(&queue->device->vk.alloc, sizeof(struct tu_u_trace_syncobj),
1099 pthread_cond_broadcast(&queue->device->timeline_cond);
1137 uint32_t perf_pass_index = queue->device->perfcntrs_pass_cs ?
1141 if (unlikely(queue->device->physical_device->instance->debug_flags &
1143 tu_dbg_log_gmem_load_store_skips(queue->device);
1146 pthread_mutex_lock(&queue->device->submit_mutex);
1153 pthread_mutex_unlock(&queue->device->submit_mutex);
1183 pthread_mutex_unlock(&queue->device->submit_mutex);
1189 u_trace_context_process(&queue->device->trace_context, true);
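Lines 1146-1183 serialize submissions under submit_mutex, and line 1099 broadcasts timeline_cond so waiters can re-check progress; a sketch of that shape only, with locked_submit() and its callback illustrative:

#include <pthread.h>

struct submit_sync {
   pthread_mutex_t submit_mutex;
   pthread_cond_t timeline_cond;
};

static int
locked_submit(struct submit_sync *s, int (*do_submit)(void *), void *ctx)
{
   pthread_mutex_lock(&s->submit_mutex);
   int ret = do_submit(ctx);

   /* Wake threads blocked on emulated timeline points, whether or not
    * the submit succeeded, so they can re-check device state. */
   pthread_cond_broadcast(&s->timeline_cond);
   pthread_mutex_unlock(&s->submit_mutex);
   return ret;
}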
1195 tu_syncobj_to_fd(struct tu_device *device, struct vk_sync *sync)
1199 ret = vk_sync_export_opaque_fd(&device->vk, sync, &fd);
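tu_syncobj_to_fd() at line 1195 goes through vk_sync_export_opaque_fd(); at the kernel level the export is drmSyncobjHandleToFD(), sketched here (syncobj_to_fd() is illustrative):

#include <stdint.h>
#include <xf86drm.h>

static int
syncobj_to_fd(int fd, uint32_t syncobj)
{
   int sync_fd = -1;

   /* Returns 0 on success and stores an fd that another process can
    * import with drmSyncobjFDToHandle(). */
   if (drmSyncobjHandleToFD(fd, syncobj, &sync_fd))
      return -1;

   return sync_fd;
}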