/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on virgl which is:
 * Copyright 2014, 2015 Red Hat.
 */

#include <errno.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/sparse_array.h"
#include "util/u_process.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
#include "vtest/vtest_protocol.h"

#include "vn_renderer_internal.h"

#define VTEST_PCI_VENDOR_ID 0x1af4
#define VTEST_PCI_DEVICE_ID 0x1050
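
/* This backend implements vn_renderer on top of the vtest protocol:
 * renderer operations are serialized into vtest commands and sent to a
 * virglrenderer vtest server over a UNIX domain socket, while the PCI IDs
 * of a virtio-gpu device are faked for the rest of the driver.  It is
 * mainly useful for development and testing.
 */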

struct vtest;

struct vtest_shmem {
   struct vn_renderer_shmem base;
};

struct vtest_bo {
   struct vn_renderer_bo base;

   uint32_t blob_flags;
   /* might be closed after mmap */
   int res_fd;
};

struct vtest_sync {
   struct vn_renderer_sync base;
};

struct vtest {
   struct vn_renderer base;

   struct vn_instance *instance;

   mtx_t sock_mutex;
   int sock_fd;

   uint32_t protocol_version;
   uint32_t max_sync_queue_count;

   struct {
      enum virgl_renderer_capset id;
      uint32_t version;
      struct virgl_renderer_capset_venus data;
   } capset;

   uint32_t shmem_blob_mem;

   struct util_sparse_array shmem_array;
   struct util_sparse_array bo_array;

   struct vn_renderer_shmem_cache shmem_cache;
};

static int
vtest_connect_socket(struct vn_instance *instance, const char *path)
{
   struct sockaddr_un un;
   int sock;

   /* reject paths that would overflow the fixed-size sun_path buffer */
   if (strlen(path) >= sizeof(un.sun_path)) {
      vn_log(instance, "socket path %s is too long", path);
      return -1;
   }

   sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
   if (sock < 0) {
      vn_log(instance, "failed to create a socket");
      return -1;
   }

   memset(&un, 0, sizeof(un));
   un.sun_family = AF_UNIX;
   memcpy(un.sun_path, path, strlen(path));

   if (connect(sock, (struct sockaddr *)&un, sizeof(un)) == -1) {
      vn_log(instance, "failed to connect to %s: %s", path, strerror(errno));
      close(sock);
      return -1;
   }

   return sock;
}

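/* Read exactly size bytes from the socket.  The protocol is synchronous
 * and there is no way to recover from a failed or short read, so a lost
 * connection is fatal.
 */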
static void
vtest_read(struct vtest *vtest, void *buf, size_t size)
{
   do {
      const ssize_t ret = read(vtest->sock_fd, buf, size);
      /* treat EOF (ret == 0) as a lost connection as well, to avoid
       * spinning forever
       */
      if (unlikely(ret <= 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu read %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

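/* Receive a file descriptor passed as SCM_RIGHTS ancillary data.  The
 * one-byte payload exists only because a message must carry some data.
 */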
static int
vtest_receive_fd(struct vtest *vtest)
{
   char cmsg_buf[CMSG_SPACE(sizeof(int))];
   char dummy;
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = &dummy,
            .iov_len = sizeof(dummy),
         },
      .msg_iovlen = 1,
      .msg_control = cmsg_buf,
      .msg_controllen = sizeof(cmsg_buf),
   };

   if (recvmsg(vtest->sock_fd, &msg, 0) < 0) {
      vn_log(vtest->instance, "recvmsg failed: %s", strerror(errno));
      abort();
   }

   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
   if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
       cmsg->cmsg_type != SCM_RIGHTS) {
      vn_log(vtest->instance, "invalid cmsghdr");
      abort();
   }

   return *((int *)CMSG_DATA(cmsg));
}

static void
vtest_write(struct vtest *vtest, const void *buf, size_t size)
{
   do {
      const ssize_t ret = write(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu write %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

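/* Every vtest command starts with a two-dword header giving the payload
 * length (in dwords for most commands) and the command id, followed by the
 * payload itself.  The vtest_vcmd_* helpers below build these messages and
 * parse the replies.
 */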
static void
vtest_vcmd_create_renderer(struct vtest *vtest, const char *name)
{
   const size_t size = strlen(name) + 1;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = size;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CREATE_RENDERER;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, name, size);
}

static bool
vtest_vcmd_ping_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* send a dummy busy wait to avoid blocking in vtest_read in case ping
    * protocol version is not supported
    */
   uint32_t vcmd_busy_wait[VCMD_BUSY_WAIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_BUSY_WAIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   vcmd_busy_wait[VCMD_BUSY_WAIT_HANDLE] = 0;
   vcmd_busy_wait[VCMD_BUSY_WAIT_FLAGS] = 0;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_busy_wait, sizeof(vcmd_busy_wait));

   uint32_t dummy;
   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   if (vtest_hdr[VTEST_CMD_ID] == VCMD_PING_PROTOCOL_VERSION) {
      /* consume the dummy busy wait result */
      vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return true;
   } else {
      /* no ping protocol version support */
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return false;
   }
}

static uint32_t
vtest_vcmd_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_protocol_version[VCMD_PROTOCOL_VERSION_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;
   vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION] =
      VTEST_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == VCMD_PROTOCOL_VERSION_SIZE);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_PROTOCOL_VERSION);
   vtest_read(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   return vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION];
}

static uint32_t
vtest_vcmd_get_param(struct vtest *vtest, enum vcmd_param param)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_param[VCMD_GET_PARAM_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_PARAM_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_PARAM;
   vcmd_get_param[VCMD_GET_PARAM_PARAM] = param;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_param, sizeof(vcmd_get_param));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_PARAM);

   uint32_t resp[2];
   vtest_read(vtest, resp, sizeof(resp));

   return resp[0] ? resp[1] : 0;
}

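/* Query a capset from the server.  The reply length is variable: the tail
 * of the caller's buffer is zero-filled when the reply is smaller, and the
 * excess is drained when the reply is larger.
 */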
static bool
vtest_vcmd_get_capset(struct vtest *vtest,
                      enum virgl_renderer_capset id,
                      uint32_t version,
                      void *capset,
                      size_t capset_size)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_capset[VCMD_GET_CAPSET_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   vcmd_get_capset[VCMD_GET_CAPSET_ID] = id;
   vcmd_get_capset[VCMD_GET_CAPSET_VERSION] = version;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_capset, sizeof(vcmd_get_capset));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_CAPSET);

   uint32_t valid;
   vtest_read(vtest, &valid, sizeof(valid));
   if (!valid)
      return false;

   size_t read_size = (vtest_hdr[VTEST_CMD_LEN] - 1) * 4;
   if (capset_size >= read_size) {
      vtest_read(vtest, capset, read_size);
      memset(capset + read_size, 0, capset_size - read_size);
   } else {
      vtest_read(vtest, capset, capset_size);

      char temp[256];
      read_size -= capset_size;
      while (read_size) {
         const size_t temp_size = MIN2(read_size, ARRAY_SIZE(temp));
         vtest_read(vtest, temp, temp_size);
         read_size -= temp_size;
      }
   }

   return true;
}

static void
vtest_vcmd_context_init(struct vtest *vtest,
                        enum virgl_renderer_capset capset_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_context_init[VCMD_CONTEXT_INIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_CONTEXT_INIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CONTEXT_INIT;
   vcmd_context_init[VCMD_CONTEXT_INIT_CAPSET_ID] = capset_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_context_init, sizeof(vcmd_context_init));
}

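/* Create a blob resource.  The reply carries the resource id, followed by
 * a file descriptor for the blob passed via SCM_RIGHTS.
 */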
static uint32_t
vtest_vcmd_resource_create_blob(struct vtest *vtest,
                                enum vcmd_blob_type type,
                                uint32_t flags,
                                VkDeviceSize size,
                                vn_object_id blob_id,
                                int *res_fd)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_CREATE_BLOB_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;

   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_TYPE] = type;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_FLAGS] = flags;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_LO] = (uint32_t)size;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_HI] =
      (uint32_t)(size >> 32);
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_LO] = (uint32_t)blob_id;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_HI] =
      (uint32_t)(blob_id >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_create_blob, sizeof(vcmd_res_create_blob));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_CREATE_BLOB);

   uint32_t res_id;
   vtest_read(vtest, &res_id, sizeof(res_id));

   *res_fd = vtest_receive_fd(vtest);

   return res_id;
}

static void
vtest_vcmd_resource_unref(struct vtest *vtest, uint32_t res_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_unref[VCMD_RES_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_UNREF;
   vcmd_res_unref[VCMD_RES_UNREF_RES_HANDLE] = res_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_unref, sizeof(vcmd_res_unref));
}

static uint32_t
vtest_vcmd_sync_create(struct vtest *vtest, uint64_t initial_val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_create[VCMD_SYNC_CREATE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_CREATE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_CREATE;

   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_LO] = (uint32_t)initial_val;
   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_HI] =
      (uint32_t)(initial_val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_create, sizeof(vcmd_sync_create));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_CREATE);

   uint32_t sync_id;
   vtest_read(vtest, &sync_id, sizeof(sync_id));

   return sync_id;
}

static void
vtest_vcmd_sync_unref(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_unref[VCMD_SYNC_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_UNREF;
   vcmd_sync_unref[VCMD_SYNC_UNREF_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_unref, sizeof(vcmd_sync_unref));
}

static uint64_t
vtest_vcmd_sync_read(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_read[VCMD_SYNC_READ_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_READ_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_READ;

   vcmd_sync_read[VCMD_SYNC_READ_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_read, sizeof(vcmd_sync_read));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_READ);

   uint64_t val;
   vtest_read(vtest, &val, sizeof(val));

   return val;
}

static void
vtest_vcmd_sync_write(struct vtest *vtest, uint32_t sync_id, uint64_t val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_write[VCMD_SYNC_WRITE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WRITE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WRITE;

   vcmd_sync_write[VCMD_SYNC_WRITE_ID] = sync_id;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_LO] = (uint32_t)val;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_HI] = (uint32_t)(val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_write, sizeof(vcmd_sync_write));
}

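/* Ask the server to wait for syncs to reach the given values.  The reply
 * carries an fd that becomes readable once the wait condition is
 * satisfied, which lets the caller poll with its own timeout.
 */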
static int
vtest_vcmd_sync_wait(struct vtest *vtest,
                     uint32_t flags,
                     int poll_timeout,
                     struct vn_renderer_sync *const *syncs,
                     const uint64_t *vals,
                     uint32_t count)
{
   const uint32_t timeout = poll_timeout >= 0 && poll_timeout <= INT32_MAX
                               ? poll_timeout
                               : UINT32_MAX;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WAIT_SIZE(count);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WAIT;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, &flags, sizeof(flags));
   vtest_write(vtest, &timeout, sizeof(timeout));
   for (uint32_t i = 0; i < count; i++) {
      const uint64_t val = vals[i];
      const uint32_t sync[3] = {
         syncs[i]->sync_id,
         (uint32_t)val,
         (uint32_t)(val >> 32),
      };
      vtest_write(vtest, sync, sizeof(sync));
   }

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 0);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_WAIT);

   return vtest_receive_fd(vtest);
}

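/* VCMD_SUBMIT_CMD2 payload layout: a batch count and one
 * vcmd_submit_cmd2_batch header per batch, followed by all command
 * streams, followed by all (sync id, value lo, value hi) triplets.  The
 * offsets stored in the batch headers are in dwords from the start of the
 * payload.
 */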
static void
submit_cmd2_sizes(const struct vn_renderer_submit *submit,
                  size_t *header_size,
                  size_t *cs_size,
                  size_t *sync_size)
{
   if (!submit->batch_count) {
      *header_size = 0;
      *cs_size = 0;
      *sync_size = 0;
      return;
   }

   *header_size = sizeof(uint32_t) +
                  sizeof(struct vcmd_submit_cmd2_batch) * submit->batch_count;

   *cs_size = 0;
   *sync_size = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      assert(batch->cs_size % sizeof(uint32_t) == 0);
      *cs_size += batch->cs_size;
      *sync_size += (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   assert(*header_size % sizeof(uint32_t) == 0);
   assert(*cs_size % sizeof(uint32_t) == 0);
   assert(*sync_size % sizeof(uint32_t) == 0);
}

static void
vtest_vcmd_submit_cmd2(struct vtest *vtest,
                       const struct vn_renderer_submit *submit)
{
   size_t header_size;
   size_t cs_size;
   size_t sync_size;
   submit_cmd2_sizes(submit, &header_size, &cs_size, &sync_size);
   const size_t total_size = header_size + cs_size + sync_size;
   if (!total_size)
      return;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = total_size / sizeof(uint32_t);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SUBMIT_CMD2;
   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* write batch count and batch headers */
   const uint32_t batch_count = submit->batch_count;
   size_t cs_offset = header_size;
   size_t sync_offset = cs_offset + cs_size;
   vtest_write(vtest, &batch_count, sizeof(batch_count));
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      struct vcmd_submit_cmd2_batch dst = {
         .cmd_offset = cs_offset / sizeof(uint32_t),
         .cmd_size = batch->cs_size / sizeof(uint32_t),
         .sync_offset = sync_offset / sizeof(uint32_t),
         .sync_count = batch->sync_count,
      };
      if (!batch->sync_queue_cpu) {
         dst.flags = VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE;
         dst.sync_queue_index = batch->sync_queue_index;
         dst.sync_queue_id = batch->vk_queue_id;
      }
      vtest_write(vtest, &dst, sizeof(dst));

      cs_offset += batch->cs_size;
      sync_offset +=
         (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   /* write cs */
   if (cs_size) {
      for (uint32_t i = 0; i < submit->batch_count; i++) {
         const struct vn_renderer_submit_batch *batch = &submit->batches[i];
         if (batch->cs_size)
            vtest_write(vtest, batch->cs_data, batch->cs_size);
      }
   }

   /* write syncs */
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      for (uint32_t j = 0; j < batch->sync_count; j++) {
         const uint64_t val = batch->sync_values[j];
         const uint32_t sync[3] = {
            batch->syncs[j]->sync_id,
            (uint32_t)val,
            (uint32_t)(val >> 32),
         };
         vtest_write(vtest, sync, sizeof(sync));
      }
   }
}

static VkResult
vtest_sync_write(struct vn_renderer *renderer,
                 struct vn_renderer_sync *_sync,
                 uint64_t val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_write(vtest, sync->base.sync_id, val);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_read(struct vn_renderer *renderer,
                struct vn_renderer_sync *_sync,
                uint64_t *val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   *val = vtest_vcmd_sync_read(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_reset(struct vn_renderer *renderer,
                 struct vn_renderer_sync *sync,
                 uint64_t initial_val)
{
   /* same as write */
   return vtest_sync_write(renderer, sync, initial_val);
}

static void
vtest_sync_destroy(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_unref(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   free(sync);
}

static VkResult
vtest_sync_create(struct vn_renderer *renderer,
                  uint64_t initial_val,
                  uint32_t flags,
                  struct vn_renderer_sync **out_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vtest_sync *sync = calloc(1, sizeof(*sync));
   if (!sync)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mtx_lock(&vtest->sock_mutex);
   sync->base.sync_id = vtest_vcmd_sync_create(vtest, initial_val);
   mtx_unlock(&vtest->sock_mutex);

   *out_sync = &sync->base;
   return VK_SUCCESS;
}

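/* The renderer reports no cache management (see vtest_init_renderer_info)
 * and mappings are assumed coherent, so flush and invalidate are nops.
 */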
static void
vtest_bo_invalidate(struct vn_renderer *renderer,
                    struct vn_renderer_bo *bo,
                    VkDeviceSize offset,
                    VkDeviceSize size)
{
   /* nop */
}

static void
vtest_bo_flush(struct vn_renderer *renderer,
               struct vn_renderer_bo *bo,
               VkDeviceSize offset,
               VkDeviceSize size)
{
   /* nop */
}

static void *
vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool mappable = bo->blob_flags & VCMD_BLOB_FLAG_MAPPABLE;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;

   /* not thread-safe, but the race here is considered harmless */
   if (!bo->base.mmap_ptr && mappable) {
      /* We wrongly assume that mmap(dma_buf) and vkMapMemory(VkDeviceMemory)
       * are equivalent when the blob type is VCMD_BLOB_TYPE_HOST3D.  While we
       * check for VCMD_PARAM_HOST_COHERENT_DMABUF_BLOB, we know vtest can
       * lie.
       */
      void *ptr = mmap(NULL, bo->base.mmap_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bo->res_fd, 0);
      if (ptr == MAP_FAILED) {
         vn_log(vtest->instance, "failed to mmap %d of size %zu rw: %s",
                bo->res_fd, bo->base.mmap_size, strerror(errno));
      } else {
         bo->base.mmap_ptr = ptr;
         /* we don't need the fd anymore */
         if (!shareable) {
            close(bo->res_fd);
            bo->res_fd = -1;
         }
      }
   }

   return bo->base.mmap_ptr;
}

static int
vtest_bo_export_dma_buf(struct vn_renderer *renderer,
                        struct vn_renderer_bo *_bo)
{
   const struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;
   return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
}

static bool
vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;

   if (bo->base.mmap_ptr)
      munmap(bo->base.mmap_ptr, bo->base.mmap_size);
   if (bo->res_fd >= 0)
      close(bo->res_fd);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, bo->base.res_id);
   mtx_unlock(&vtest->sock_mutex);

   return true;
}

static uint32_t
vtest_bo_blob_flags(VkMemoryPropertyFlags flags,
                    VkExternalMemoryHandleTypeFlags external_handles)
{
   uint32_t blob_flags = 0;
   if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      blob_flags |= VCMD_BLOB_FLAG_MAPPABLE;
   if (external_handles)
      blob_flags |= VCMD_BLOB_FLAG_SHAREABLE;
   if (external_handles & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      blob_flags |= VCMD_BLOB_FLAG_CROSS_DEVICE;

   return blob_flags;
}

static VkResult
vtest_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t blob_flags = vtest_bo_blob_flags(flags, external_handles);

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_HOST3D, blob_flags, size, mem_id, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   struct vtest_bo *bo = util_sparse_array_get(&vtest->bo_array, res_id);
   *bo = (struct vtest_bo){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
      },
      .res_fd = res_fd,
      .blob_flags = blob_flags,
   };

   *out_bo = &bo->base;

   return VK_SUCCESS;
}

static void
vtest_shmem_destroy_now(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *_shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, shmem->base.res_id);
   mtx_unlock(&vtest->sock_mutex);
}

static void
vtest_shmem_destroy(struct vn_renderer *renderer,
                    struct vn_renderer_shmem *shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;

   if (vn_renderer_shmem_cache_add(&vtest->shmem_cache, shmem))
      return;

   vtest_shmem_destroy_now(&vtest->base, shmem);
}

static struct vn_renderer_shmem *
vtest_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vn_renderer_shmem *cached_shmem =
      vn_renderer_shmem_cache_get(&vtest->shmem_cache, size);
   if (cached_shmem) {
      cached_shmem->refcount = VN_REFCOUNT_INIT(1);
      return cached_shmem;
   }

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, vtest->shmem_blob_mem, VCMD_BLOB_FLAG_MAPPABLE, size, 0,
      &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   void *ptr =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, res_fd, 0);
   close(res_fd);
   if (ptr == MAP_FAILED) {
      mtx_lock(&vtest->sock_mutex);
      vtest_vcmd_resource_unref(vtest, res_id);
      mtx_unlock(&vtest->sock_mutex);
      return NULL;
   }

   struct vtest_shmem *shmem =
      util_sparse_array_get(&vtest->shmem_array, res_id);
   *shmem = (struct vtest_shmem){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
   };

   return &shmem->base;
}

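/* Poll the fd returned by VCMD_SYNC_WAIT until it becomes readable, the
 * timeout expires, or an error occurs.
 */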
static VkResult
sync_wait_poll(int fd, int poll_timeout)
{
   struct pollfd pollfd = {
      .fd = fd,
      .events = POLLIN,
   };
   int ret;
   do {
      ret = poll(&pollfd, 1, poll_timeout);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret < 0 || (ret > 0 && !(pollfd.revents & POLLIN))) {
      return (ret < 0 && errno == ENOMEM) ? VK_ERROR_OUT_OF_HOST_MEMORY
                                          : VK_ERROR_DEVICE_LOST;
   }

   return ret ? VK_SUCCESS : VK_TIMEOUT;
}

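/* Convert a timeout in nanoseconds, rounding up, to a poll timeout in
 * milliseconds, where -1 means an infinite wait.
 */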
static int
timeout_to_poll_timeout(uint64_t timeout)
{
   const uint64_t ns_per_ms = 1000000;
   const uint64_t ms = (timeout + ns_per_ms - 1) / ns_per_ms;
   if (!ms && timeout)
      return -1;
   return ms <= INT_MAX ? ms : -1;
}

static VkResult
vtest_wait(struct vn_renderer *renderer, const struct vn_renderer_wait *wait)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t flags = wait->wait_any ? VCMD_SYNC_WAIT_FLAG_ANY : 0;
   const int poll_timeout = timeout_to_poll_timeout(wait->timeout);

   /*
    * vtest_vcmd_sync_wait (and some other sync commands) is executed after
    * all prior commands are dispatched.  That is far from ideal.
    *
    * In virtio-gpu, a drm_syncobj wait ioctl is executed immediately.  It
    * works because it uses virtio-gpu interrupts as a side channel.  vtest
    * needs a side channel to perform well.
    *
    * Whether on virtio-gpu or vtest, we should also set up a 1-byte coherent
    * memory region that the GPU sets to non-zero after the syncs signal.
    * That would allow us to do a quick check (or spin a bit) before waiting.
    */
   mtx_lock(&vtest->sock_mutex);
   const int fd =
      vtest_vcmd_sync_wait(vtest, flags, poll_timeout, wait->syncs,
                           wait->sync_values, wait->sync_count);
   mtx_unlock(&vtest->sock_mutex);

   VkResult result = sync_wait_poll(fd, poll_timeout);
   close(fd);

   return result;
}

static VkResult
vtest_submit(struct vn_renderer *renderer,
             const struct vn_renderer_submit *submit)
{
   struct vtest *vtest = (struct vtest *)renderer;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_submit_cmd2(vtest, submit);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static void
vtest_init_renderer_info(struct vtest *vtest)
{
   struct vn_renderer_info *info = &vtest->base.info;

   info->drm.has_primary = false;
   info->drm.primary_major = 0;
   info->drm.primary_minor = 0;
   info->drm.has_render = false;
   info->drm.render_major = 0;
   info->drm.render_minor = 0;

   info->pci.vendor_id = VTEST_PCI_VENDOR_ID;
   info->pci.device_id = VTEST_PCI_DEVICE_ID;

   info->has_dma_buf_import = false;
   info->has_cache_management = false;
   info->has_external_sync = false;
   info->has_implicit_fencing = false;

   info->max_sync_queue_count = vtest->max_sync_queue_count;

   const struct virgl_renderer_capset_venus *capset = &vtest->capset.data;
   info->wire_format_version = capset->wire_format_version;
   info->vk_xml_version = capset->vk_xml_version;
   info->vk_ext_command_serialization_spec_version =
      capset->vk_ext_command_serialization_spec_version;
   info->vk_mesa_venus_protocol_spec_version =
      capset->vk_mesa_venus_protocol_spec_version;
   info->supports_blob_id_0 = capset->supports_blob_id_0;

   /* ensure vk_extension_mask is large enough to hold all capset masks */
   STATIC_ASSERT(sizeof(info->vk_extension_mask) >=
                 sizeof(capset->vk_extension_mask1));
   memcpy(info->vk_extension_mask, capset->vk_extension_mask1,
          sizeof(capset->vk_extension_mask1));

   info->allow_vk_wait_syncs = capset->allow_vk_wait_syncs;
}

static void
vtest_destroy(struct vn_renderer *renderer,
              const VkAllocationCallbacks *alloc)
{
   struct vtest *vtest = (struct vtest *)renderer;

   vn_renderer_shmem_cache_fini(&vtest->shmem_cache);

   if (vtest->sock_fd >= 0) {
      shutdown(vtest->sock_fd, SHUT_RDWR);
      close(vtest->sock_fd);
   }

   mtx_destroy(&vtest->sock_mutex);
   util_sparse_array_finish(&vtest->shmem_array);
   util_sparse_array_finish(&vtest->bo_array);

   vk_free(alloc, vtest);
}

static VkResult
vtest_init_capset(struct vtest *vtest)
{
   vtest->capset.id = VIRGL_RENDERER_CAPSET_VENUS;
   vtest->capset.version = 0;

   if (!vtest_vcmd_get_capset(vtest, vtest->capset.id, vtest->capset.version,
                              &vtest->capset.data,
                              sizeof(vtest->capset.data))) {
      vn_log(vtest->instance, "no venus capset");
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
vtest_init_params(struct vtest *vtest)
{
   uint32_t val =
      vtest_vcmd_get_param(vtest, VCMD_PARAM_MAX_SYNC_QUEUE_COUNT);
   if (!val) {
      vn_log(vtest->instance, "no sync queue support");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   vtest->max_sync_queue_count = val;

   return VK_SUCCESS;
}

static VkResult
vtest_init_protocol_version(struct vtest *vtest)
{
   const uint32_t min_protocol_version = 3;

   const uint32_t ver = vtest_vcmd_ping_protocol_version(vtest)
                           ? vtest_vcmd_protocol_version(vtest)
                           : 0;
   if (ver < min_protocol_version) {
      vn_log(vtest->instance, "vtest protocol version (%u) too old", ver);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   vtest->protocol_version = ver;

   return VK_SUCCESS;
}

static VkResult
vtest_init(struct vtest *vtest)
{
   util_sparse_array_init(&vtest->shmem_array, sizeof(struct vtest_shmem),
                          1024);
   util_sparse_array_init(&vtest->bo_array, sizeof(struct vtest_bo), 1024);

   mtx_init(&vtest->sock_mutex, mtx_plain);
   vtest->sock_fd =
      vtest_connect_socket(vtest->instance, VTEST_DEFAULT_SOCKET_NAME);
   if (vtest->sock_fd < 0)
      return VK_ERROR_INITIALIZATION_FAILED;

   const char *renderer_name = util_get_process_name();
   if (!renderer_name)
      renderer_name = "venus";
   vtest_vcmd_create_renderer(vtest, renderer_name);

   VkResult result = vtest_init_protocol_version(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_params(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_capset(vtest);
   if (result != VK_SUCCESS)
      return result;

   /* see virtgpu_init_shmem_blob_mem */
   vtest->shmem_blob_mem = vtest->capset.data.supports_blob_id_0
                              ? VCMD_BLOB_TYPE_HOST3D
                              : VCMD_BLOB_TYPE_GUEST;

   vn_renderer_shmem_cache_init(&vtest->shmem_cache, &vtest->base,
                                vtest_shmem_destroy_now);

   vtest_vcmd_context_init(vtest, vtest->capset.id);

   vtest_init_renderer_info(vtest);

   vtest->base.ops.destroy = vtest_destroy;
   vtest->base.ops.submit = vtest_submit;
   vtest->base.ops.wait = vtest_wait;

   vtest->base.shmem_ops.create = vtest_shmem_create;
   vtest->base.shmem_ops.destroy = vtest_shmem_destroy;

   vtest->base.bo_ops.create_from_device_memory =
      vtest_bo_create_from_device_memory;
   vtest->base.bo_ops.create_from_dma_buf = NULL;
   vtest->base.bo_ops.destroy = vtest_bo_destroy;
   vtest->base.bo_ops.export_dma_buf = vtest_bo_export_dma_buf;
   vtest->base.bo_ops.map = vtest_bo_map;
   vtest->base.bo_ops.flush = vtest_bo_flush;
   vtest->base.bo_ops.invalidate = vtest_bo_invalidate;

   vtest->base.sync_ops.create = vtest_sync_create;
   vtest->base.sync_ops.create_from_syncobj = NULL;
   vtest->base.sync_ops.destroy = vtest_sync_destroy;
   vtest->base.sync_ops.export_syncobj = NULL;
   vtest->base.sync_ops.reset = vtest_sync_reset;
   vtest->base.sync_ops.read = vtest_sync_read;
   vtest->base.sync_ops.write = vtest_sync_write;

   return VK_SUCCESS;
}

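/* Create a vtest-backed renderer.  On failure, vtest_destroy cleans up
 * whatever vtest_init managed to set up (sock_fd stays -1 until the socket
 * connects).
 */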
VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer)
{
   struct vtest *vtest = vk_zalloc(alloc, sizeof(*vtest), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!vtest)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vtest->instance = instance;
   vtest->sock_fd = -1;

   VkResult result = vtest_init(vtest);
   if (result != VK_SUCCESS) {
      vtest_destroy(&vtest->base, alloc);
      return result;
   }

   *renderer = &vtest->base;

   return VK_SUCCESS;
}