/*
 * Copyright © 2020 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

6#include "tu_drm.h"
7
8#include <errno.h>
9#include <fcntl.h>
10#include <sys/ioctl.h>
11#include <sys/mman.h>
12
13#include "msm_kgsl.h"
14#include "vk_util.h"
15
16#include "util/debug.h"
17
18#include "tu_cmd_buffer.h"
19#include "tu_cs.h"
20#include "tu_device.h"
21#include "tu_dynamic_rendering.h"
22
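/* Unlike the drm/msm path, this backend tracks fences and semaphores as KGSL
 * per-context timestamps: a syncobj simply records the timestamp of the
 * submission that signals it.
 */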
struct tu_syncobj {
   struct vk_object_base base;
   uint32_t timestamp;
   bool timestamp_valid;
};

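/* Retry ioctls that are interrupted by a signal or that transiently fail with
 * EAGAIN.
 */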
static int
safe_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

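/* Submit queues are backed by KGSL draw contexts; the returned drawctxt_id is
 * later used as the context_id for GPU commands and timestamp waits.
 */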
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct kgsl_drawctxt_create req = {
      .flags = KGSL_CONTEXT_SAVE_GMEM |
               KGSL_CONTEXT_NO_GMEM_ALLOC |
               KGSL_CONTEXT_PREAMBLE,
   };

   int ret = safe_ioctl(dev->physical_device->local_fd,
                        IOCTL_KGSL_DRAWCTXT_CREATE, &req);
   if (ret)
      return ret;

   *queue_id = req.drawctxt_id;

   return 0;
}

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   struct kgsl_drawctxt_destroy req = {
      .drawctxt_id = queue_id,
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}

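/* GPUMEM_ALLOC_ID returns a small per-process id that doubles as the BO's
 * "gem handle" and as the key for mmapping the buffer through the device fd.
 */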
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
               enum tu_bo_alloc_flags flags)
{
   struct kgsl_gpumem_alloc_id req = {
      .size = size,
   };

   if (flags & TU_BO_ALLOC_GPU_READ_ONLY)
      req.flags |= KGSL_MEMFLAGS_GPUREADONLY;

   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUMEM_ALLOC_ID, &req);
   if (ret) {
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "GPUMEM_ALLOC_ID failed (%s)", strerror(errno));
   }

   struct tu_bo *bo = tu_device_lookup_bo(dev, req.id);
   assert(bo && bo->gem_handle == 0);

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = req.mmapsize,
      .iova = req.gpuaddr,
      .refcnt = 1,
   };

   *out_bo = bo;

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **out_bo,
                  uint64_t size,
                  int fd)
{
   struct kgsl_gpuobj_import_dma_buf import_dmabuf = {
      .fd = fd,
   };
   struct kgsl_gpuobj_import req = {
      .priv = (uintptr_t)&import_dmabuf,
      .priv_len = sizeof(import_dmabuf),
      .flags = 0,
      .type = KGSL_USER_MEM_TYPE_DMABUF,
   };
   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_IMPORT, &req);
   if (ret)
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to import dma-buf (%s)\n", strerror(errno));

   struct kgsl_gpuobj_info info_req = {
      .id = req.id,
   };

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_INFO, &info_req);
   if (ret)
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to get dma-buf info (%s)\n", strerror(errno));

   struct tu_bo *bo = tu_device_lookup_bo(dev, req.id);
   assert(bo && bo->gem_handle == 0);

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = info_req.size,
      .iova = info_req.gpuaddr,
      .refcnt = 1,
   };

   *out_bo = bo;

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   tu_stub();

   return -1;
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

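   /* KGSL exposes GPU buffers through the device fd; the mmap offset encodes
    * the allocation id shifted by the page size (4 KiB).
    */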
   uint64_t offset = (uint64_t) bo->gem_handle << 12;
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;

   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (!p_atomic_dec_zero(&bo->refcnt))
      return;

   if (bo->map)
      munmap(bo->map, bo->size);

   struct kgsl_gpumem_free_id req = {
      .id = bo->gem_handle
   };

   /* Tell sparse array that entry is free */
   memset(bo, 0, sizeof(*bo));

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_GPUMEM_FREE_ID, &req);
}

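/* Thin wrapper for IOCTL_KGSL_DEVICE_GETPROPERTY: returns 0 on success and -1
 * (with errno set) on failure, like the underlying ioctl.
 */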
static int
get_kgsl_prop(int fd, unsigned int type, void *value, size_t size)
{
   struct kgsl_device_getproperty getprop = {
      .type = type,
      .value = value,
      .sizebytes = size,
   };

   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop);
}

VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   static const char path[] = "/dev/kgsl-3d0";
   int fd;

   struct tu_physical_device *device = &instance->physical_devices[0];

   if (instance->vk.enabled_extensions.KHR_display)
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "I can't KHR_display");

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      instance->physical_device_count = 0;
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   struct kgsl_devinfo info;
   if (get_kgsl_prop(fd, KGSL_PROP_DEVICE_INFO, &info, sizeof(info)))
      goto fail;

   uint64_t gmem_iova;
   if (get_kgsl_prop(fd, KGSL_PROP_UCHE_GMEM_VADDR, &gmem_iova, sizeof(gmem_iova)))
      goto fail;

   /* kgsl version check? */

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      mesa_logi("Found compatible device '%s'.", path);

   device->instance = instance;
   device->master_fd = -1;
   device->local_fd = fd;

   device->dev_id.gpu_id =
      ((info.chip_id >> 24) & 0xff) * 100 +
      ((info.chip_id >> 16) & 0xff) * 10 +
      ((info.chip_id >>  8) & 0xff);
   device->dev_id.chip_id = info.chip_id;
   device->gmem_size = env_var_as_unsigned("TU_GMEM", info.gmem_sizebytes);
   device->gmem_base = gmem_iova;

   device->heap.size = tu_get_system_heap_size();
   device->heap.used = 0u;
   device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   if (tu_physical_device_init(device, instance) != VK_SUCCESS)
      goto fail;

   instance->physical_device_count = 1;

   return VK_SUCCESS;

fail:
   close(fd);
   return VK_ERROR_INITIALIZATION_FAILED;
}

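/* Turn a KGSL timestamp on this queue's context into a sync file fd via
 * KGSL_TIMESTAMP_EVENT_FENCE, so it can be handed out as a native fence.
 */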
static int
timestamp_to_fd(struct tu_queue *queue, uint32_t timestamp)
{
   int fd;
   struct kgsl_timestamp_event event = {
      .type = KGSL_TIMESTAMP_EVENT_FENCE,
      .context_id = queue->msm_queue_id,
      .timestamp = timestamp,
      .priv = &fd,
      .len = sizeof(fd),
   };

   int ret = safe_ioctl(queue->device->fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
   if (ret)
      return -1;

   return fd;
}

/* Return true if timestamp a is greater (more recent) than b.
 * This relies on timestamps never differing by more than (1 << 31).
 */
static inline bool
timestamp_cmp(uint32_t a, uint32_t b)
{
   return (int32_t) (a - b) >= 0;
}

static uint32_t
max_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? a : b;
}

static uint32_t
min_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? b : a;
}

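/* Collapse a list of timestamp-backed syncobjs into a single wait timestamp:
 * the maximum when waiting for all, the minimum when waiting for any.
 * Syncobjs without a valid timestamp are skipped; "reset" consumes the
 * timestamps of the syncobjs that were merged.
 */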
static struct tu_syncobj
sync_merge(const VkSemaphore *syncobjs, uint32_t count, bool wait_all, bool reset)
{
   struct tu_syncobj ret;

   ret.timestamp_valid = false;

   for (uint32_t i = 0; i < count; ++i) {
      TU_FROM_HANDLE(tu_syncobj, sync, syncobjs[i]);

      /* TODO: this means the fence is unsignaled and will never become signaled */
      if (!sync->timestamp_valid)
         continue;

      if (!ret.timestamp_valid)
         ret.timestamp = sync->timestamp;
      else if (wait_all)
         ret.timestamp = max_ts(ret.timestamp, sync->timestamp);
      else
         ret.timestamp = min_ts(ret.timestamp, sync->timestamp);

      ret.timestamp_valid = true;
      if (reset)
         sync->timestamp_valid = false;
   }

   return ret;
}

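/* Submission flattens every command buffer's IB entries into an array of
 * kgsl_command_object, attaches the merged wait-semaphore timestamp as a
 * syncpoint, and propagates the timestamp returned by IOCTL_KGSL_GPU_COMMAND
 * to the signal semaphores and the fence.
 */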
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSubmit2(VkQueue _queue,
                uint32_t submitCount,
                const VkSubmitInfo2 *pSubmits,
                VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   TU_FROM_HANDLE(tu_syncobj, fence, _fence);
   VkResult result = VK_SUCCESS;

   if (unlikely(queue->device->physical_device->instance->debug_flags &
                TU_DEBUG_LOG_SKIP_GMEM_OPS)) {
      tu_dbg_log_gmem_load_store_skips(queue->device);
   }

   struct tu_cmd_buffer **submit_cmd_buffers[submitCount];
   uint32_t submit_cmd_buffer_count[submitCount];

   uint32_t max_entry_count = 0;
   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo2 *submit = pSubmits + i;

      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      struct tu_cmd_buffer *old_cmd_buffers[submit->commandBufferInfoCount];
      uint32_t cmdbuf_count = submit->commandBufferInfoCount;
      for (uint32_t j = 0; j < cmdbuf_count; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBufferInfos[j].commandBuffer);
         old_cmd_buffers[j] = cmdbuf;
      }

      struct tu_cmd_buffer **cmd_buffers = old_cmd_buffers;
      tu_insert_dynamic_cmdbufs(queue->device, &cmd_buffers, &cmdbuf_count);
      if (cmd_buffers == old_cmd_buffers) {
         cmd_buffers =
            vk_alloc(&queue->device->vk.alloc,
                     sizeof(*cmd_buffers) * cmdbuf_count, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         memcpy(cmd_buffers, old_cmd_buffers,
                sizeof(*cmd_buffers) * cmdbuf_count);
      }
      submit_cmd_buffers[i] = cmd_buffers;
      submit_cmd_buffer_count[i] = cmdbuf_count;

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < cmdbuf_count; ++j) {
         entry_count += cmd_buffers[j]->cs.entry_count;
         if (perf_info)
            entry_count++;
      }

      if (tu_autotune_submit_requires_fence(cmd_buffers, cmdbuf_count))
         entry_count++;

      max_entry_count = MAX2(max_entry_count, entry_count);
   }

   struct kgsl_command_object *cmds =
      vk_alloc(&queue->device->vk.alloc,
               sizeof(cmds[0]) * max_entry_count, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (cmds == NULL)
      return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo2 *submit = pSubmits + i;
      uint32_t entry_idx = 0;
      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      struct tu_cmd_buffer **cmd_buffers = submit_cmd_buffers[i];
      uint32_t cmdbuf_count = submit_cmd_buffer_count[i];
      for (uint32_t j = 0; j < cmdbuf_count; j++) {
         struct tu_cmd_buffer *cmdbuf = cmd_buffers[j];
         struct tu_cs *cs = &cmdbuf->cs;

         if (perf_info) {
            struct tu_cs_entry *perf_cs_entry =
               &cmdbuf->device->perfcntrs_pass_cs_entries[perf_info->counterPassIndex];

            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = perf_cs_entry->offset,
               .gpuaddr = perf_cs_entry->bo->iova,
               .size = perf_cs_entry->size,
               .flags = KGSL_CMDLIST_IB,
               .id = perf_cs_entry->bo->gem_handle,
            };
         }

         for (unsigned k = 0; k < cs->entry_count; k++) {
            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = cs->entries[k].offset,
               .gpuaddr = cs->entries[k].bo->iova,
               .size = cs->entries[k].size,
               .flags = KGSL_CMDLIST_IB,
               .id = cs->entries[k].bo->gem_handle,
            };
         }
      }

      if (tu_autotune_submit_requires_fence(cmd_buffers, cmdbuf_count)) {
         struct tu_cs *autotune_cs =
            tu_autotune_on_submit(queue->device,
                                  &queue->device->autotune,
                                  cmd_buffers,
                                  cmdbuf_count);
         cmds[entry_idx++] = (struct kgsl_command_object) {
            .offset = autotune_cs->entries[0].offset,
            .gpuaddr = autotune_cs->entries[0].bo->iova,
            .size = autotune_cs->entries[0].size,
            .flags = KGSL_CMDLIST_IB,
            .id = autotune_cs->entries[0].bo->gem_handle,
         };
      }

      VkSemaphore wait_semaphores[submit->waitSemaphoreInfoCount];
      for (uint32_t j = 0; j < submit->waitSemaphoreInfoCount; j++) {
         wait_semaphores[j] = submit->pWaitSemaphoreInfos[j].semaphore;
      }

      struct tu_syncobj s = sync_merge(wait_semaphores,
                                       submit->waitSemaphoreInfoCount,
                                       true, true);

      struct kgsl_cmd_syncpoint_timestamp ts = {
         .context_id = queue->msm_queue_id,
         .timestamp = s.timestamp,
      };
      struct kgsl_command_syncpoint sync = {
         .type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
         .size = sizeof(ts),
         .priv = (uintptr_t) &ts,
      };

      struct kgsl_gpu_command req = {
         .flags = KGSL_CMDBATCH_SUBMIT_IB_LIST,
         .context_id = queue->msm_queue_id,
         .cmdlist = (uint64_t) (uintptr_t) cmds,
         .numcmds = entry_idx,
         .cmdsize = sizeof(struct kgsl_command_object),
         .synclist = (uintptr_t) &sync,
         .syncsize = sizeof(struct kgsl_command_syncpoint),
         .numsyncs = s.timestamp_valid ? 1 : 0,
      };

      int ret = safe_ioctl(queue->device->physical_device->local_fd,
                           IOCTL_KGSL_GPU_COMMAND, &req);
      if (ret) {
         result = vk_device_set_lost(&queue->device->vk,
                                     "submit failed: %s\n", strerror(errno));
         goto fail;
      }

      for (uint32_t j = 0; j < submit->signalSemaphoreInfoCount; j++) {
         TU_FROM_HANDLE(tu_syncobj, sem, submit->pSignalSemaphoreInfos[j].semaphore);
         sem->timestamp = req.timestamp;
         sem->timestamp_valid = true;
      }

      /* no need to merge fences as queue execution is serialized */
      if (i == submitCount - 1) {
         int fd = timestamp_to_fd(queue, req.timestamp);
         if (fd < 0) {
            result = vk_device_set_lost(&queue->device->vk,
                                        "Failed to create sync file for timestamp: %s\n",
                                        strerror(errno));
            goto fail;
         }

         if (queue->fence >= 0)
            close(queue->fence);
         queue->fence = fd;

         if (fence) {
            fence->timestamp = req.timestamp;
            fence->timestamp_valid = true;
         }
      }
   }
fail:
   vk_free(&queue->device->vk.alloc, cmds);

   return result;
}

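/* VkFence and VkSemaphore share the same tu_syncobj implementation; only the
 * VK object type differs.
 */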
static VkResult
sync_create(VkDevice _device,
            bool signaled,
            bool fence,
            const VkAllocationCallbacks *pAllocator,
            void **p_sync)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_syncobj *sync =
         vk_object_alloc(&device->vk, pAllocator, sizeof(*sync),
                         fence ? VK_OBJECT_TYPE_FENCE : VK_OBJECT_TYPE_SEMAPHORE);
   if (!sync)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (signaled)
      tu_finishme("CREATE FENCE SIGNALED");

   sync->timestamp_valid = false;
   *p_sync = sync;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   tu_finishme("ImportSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   tu_finishme("GetSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   return sync_create(device, false, false, pAllocator, (void**) pSemaphore);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, semaphore);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateFence(VkDevice device,
               const VkFenceCreateInfo *info,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   return sync_create(device, info->flags & VK_FENCE_CREATE_SIGNALED_BIT, true,
                      pAllocator, (void**) pFence);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroyFence(VkDevice _device, VkFence fence, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, fence);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_WaitForFences(VkDevice _device,
                 uint32_t count,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_syncobj s = sync_merge((const VkSemaphore *) pFences, count, waitAll, false);

   if (!s.timestamp_valid)
      return VK_SUCCESS;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
      .context_id = device->queues[0]->msm_queue_id,
      .timestamp = s.timestamp,
      .timeout = timeout / 1000000,
   });
   if (ret) {
      assert(errno == ETIME);
      return VK_TIMEOUT;
   }

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetFences(VkDevice _device, uint32_t count, const VkFence *pFences)
{
   for (uint32_t i = 0; i < count; i++) {
      TU_FROM_HANDLE(tu_syncobj, sync, pFences[i]);
      sync->timestamp_valid = false;
   }
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, _fence);

   if (!sync->timestamp_valid)
      return VK_NOT_READY;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
      .context_id = device->queues[0]->msm_queue_id,
      .timestamp = sync->timestamp,
      .timeout = 0,
   });
   if (ret) {
      assert(errno == ETIME);
      return VK_NOT_READY;
   }

   return VK_SUCCESS;
}

int
tu_syncobj_to_fd(struct tu_device *device, struct vk_sync *sync)
{
   tu_finishme("tu_syncobj_to_fd");
   return -1;
}

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
   tu_finishme("tu_device_wait_u_trace");
   return VK_SUCCESS;
}

int
tu_device_get_gpu_timestamp(struct tu_device *dev, uint64_t *ts)
{
   tu_finishme("tu_device_get_gpu_timestamp");
   return 0;
}

int
tu_device_get_suspend_count(struct tu_device *dev, uint64_t *suspend_count)
{
   /* kgsl doesn't have a way to get it */
   *suspend_count = 0;
   return 0;
}

VkResult
tu_device_check_status(struct vk_device *vk_device)
{
   struct tu_device *device = container_of(vk_device, struct tu_device, vk);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         /* KGSL's KGSL_PROP_GPU_RESET_STAT takes the u32 msm_queue_id and returns a
          * KGSL_CTX_STAT_* for the worst reset that happened since the last time it
          * was queried on that queue.
          */
         uint32_t value = device->queues[i][q].msm_queue_id;
         int status = get_kgsl_prop(device->fd, KGSL_PROP_GPU_RESET_STAT,
                                    &value, sizeof(value));
         if (status)
            return vk_device_set_lost(&device->vk, "Failed to get GPU reset status");

         if (value != KGSL_CTX_STAT_NO_ERROR &&
             value != KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT) {
            return vk_device_set_lost(&device->vk, "GPU faulted or hung");
         }
      }
   }

   return VK_SUCCESS;
}

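/* On Android, the release fence handed back to the window system is a sync
 * file created from the merged wait-semaphore timestamp.
 */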
#ifdef ANDROID
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
                                  uint32_t waitSemaphoreCount,
                                  const VkSemaphore *pWaitSemaphores,
                                  VkImage image,
                                  int *pNativeFenceFd)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   if (!pNativeFenceFd)
      return VK_SUCCESS;

   struct tu_syncobj s = sync_merge(pWaitSemaphores, waitSemaphoreCount, true, true);

   if (!s.timestamp_valid) {
      *pNativeFenceFd = -1;
      return VK_SUCCESS;
   }

   *pNativeFenceFd = timestamp_to_fd(queue, s.timestamp);

   return VK_SUCCESS;
}
#endif