/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/libsync.h"

#include "virtio_priv.h"

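/* Lazily look up the mmap offset for a BO via DRM_IOCTL_VIRTGPU_MAP and
 * cache it, so the ioctl is only issued the first time the offset is
 * needed:
 */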
static int
bo_allocate(struct virtio_bo *virtio_bo)
{
   struct fd_bo *bo = &virtio_bo->base;
   if (!virtio_bo->offset) {
      struct drm_virtgpu_map req = {
         .handle = bo->handle,
      };
      int ret;

      ret = drmIoctl(bo->dev->fd, DRM_IOCTL_VIRTGPU_MAP, &req);
      if (ret) {
         ERROR_MSG("alloc failed: %s", strerror(errno));
         return ret;
      }

      virtio_bo->offset = req.offset;
   }

   return 0;
}

static int
virtio_bo_offset(struct fd_bo *bo, uint64_t *offset)
{
   struct virtio_bo *virtio_bo = to_virtio_bo(bo);
   int ret = bo_allocate(virtio_bo);
   if (ret)
      return ret;
   *offset = virtio_bo->offset;
   return 0;
}

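/* Wait in the guest kernel for the resource to be idle, via
 * DRM_IOCTL_VIRTGPU_WAIT.  Note that only -EBUSY is propagated to the
 * caller; other errors are ignored:
 */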
static int
virtio_bo_cpu_prep_guest(struct fd_bo *bo)
{
   struct drm_virtgpu_3d_wait args = {
         .handle = bo->handle,
   };
   int ret;

   /* Side note, this ioctl is defined as IO_WR but should be IO_W: */
   ret = drmIoctl(bo->dev->fd, DRM_IOCTL_VIRTGPU_WAIT, &args);
   if (ret && errno == EBUSY)
      return -EBUSY;

   return 0;
}

static int
virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
   int ret;

   /*
    * Wait first in the guest, to avoid a blocking call in the host.
    * If implicit sync is used, we still need to *also* wait in the
    * host if it is a shared buffer, because the guest doesn't know
    * about usage of the bo in the host (or other guests).
    */

   ret = virtio_bo_cpu_prep_guest(bo);
   if (ret)
      goto out;

   /* If buffer is not shared, then it is not shared with host,
    * so we don't need to worry about implicit sync in host:
    */
   if (!bo->shared)
      goto out;

   /* If the buffer is shared, but we are using explicit sync, there is
    * no need to fall back to implicit sync in the host:
    */
   if (pipe && to_virtio_pipe(pipe)->no_implicit_sync)
      goto out;

   struct msm_ccmd_gem_cpu_prep_req req = {
         .hdr = MSM_CCMD(GEM_CPU_PREP, sizeof(req)),
         .res_id = to_virtio_bo(bo)->res_id,
         .op = op,
   };
   struct msm_ccmd_gem_cpu_prep_rsp *rsp;

   /* We can't do a blocking wait in the host, so we have to poll: */
   do {
      rsp = virtio_alloc_rsp(bo->dev, &req.hdr, sizeof(*rsp));

      ret = virtio_execbuf(bo->dev, &req.hdr, true);
      if (ret)
         goto out;

      ret = rsp->ret;
   } while (ret == -EBUSY);

out:
   return ret;
}

static void
virtio_bo_cpu_fini(struct fd_bo *bo)
{
   /* no-op */
}

static int
virtio_bo_madvise(struct fd_bo *bo, int willneed)
{
   /* TODO:
    * Currently unsupported, synchronous WILLNEED calls would introduce too
    * much latency.. ideally we'd keep state in the guest and only flush
    * down to host when host is under memory pressure.  (Perhaps virtio-balloon
    * could signal this?)
    */
   return willneed;
}

static uint64_t
virtio_bo_iova(struct fd_bo *bo)
{
   /* The shmem bo is allowed to have no iova, as it is only used for
    * guest<->host communications:
    */
   assert(bo->iova || (to_virtio_bo(bo)->blob_id == 0));
   return bo->iova;
}

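/* Forward the BO's debug name to the host via a GEM_SET_NAME ccmd, so it
 * is also visible on the host side (truncated to fit the 32 byte local
 * buffer):
 */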
static void
virtio_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   char name[32];
   int sz;

   /* Note, we cannot set name on the host for the shmem bo, as
    * that isn't a real gem obj on the host side.. not having
    * an iova is a convenient way to detect this case:
    */
   if (!bo->iova)
      return;

   sz = vsnprintf(name, sizeof(name), fmt, ap);
   sz = MIN2(sz, sizeof(name));

   unsigned req_len = sizeof(struct msm_ccmd_gem_set_name_req) + align(sz, 4);

   uint8_t buf[req_len];
   struct msm_ccmd_gem_set_name_req *req = (void *)buf;

   req->hdr = MSM_CCMD(GEM_SET_NAME, req_len);
   req->res_id = to_virtio_bo(bo)->res_id;
   req->len = sz;

   memcpy(req->payload, name, sz);

   virtio_execbuf(bo->dev, &req->hdr, false);
}

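/* Upload a single chunk of data into the host GEM object via a GEM_UPLOAD
 * ccmd, with the payload copied inline after the request header:
 */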
static void
bo_upload(struct fd_bo *bo, unsigned off, void *src, unsigned len)
{
   unsigned req_len = sizeof(struct msm_ccmd_gem_upload_req) + align(len, 4);

   uint8_t buf[req_len];
   struct msm_ccmd_gem_upload_req *req = (void *)buf;

   req->hdr = MSM_CCMD(GEM_UPLOAD, req_len);
   req->res_id = to_virtio_bo(bo)->res_id;
   req->pad = 0;
   req->off = off;
   req->len = len;

   memcpy(req->payload, src, len);

   virtio_execbuf(bo->dev, &req->hdr, false);
}

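/* Split larger uploads into 0x1000 byte chunks, presumably to bound the
 * size of any individual ccmd (and its stack-allocated request buffer):
 */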
static void
virtio_bo_upload(struct fd_bo *bo, void *src, unsigned len)
{
   unsigned off = 0;
   while (len > 0) {
      unsigned sz = MIN2(len, 0x1000);
      bo_upload(bo, off, src, sz);
      off += sz;
      src += sz;
      len -= sz;
   }
}

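/* Tell the host the guest-allocated iova for the BO.  An iova of zero
 * releases the mapping (see virtio_bo_destroy()):
 */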
static void
set_iova(struct fd_bo *bo, uint64_t iova)
{
   struct msm_ccmd_gem_set_iova_req req = {
         .hdr = MSM_CCMD(GEM_SET_IOVA, sizeof(req)),
         .res_id = to_virtio_bo(bo)->res_id,
         .iova = iova,
   };

   virtio_execbuf(bo->dev, &req.hdr, false);
}

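/* Return the BO's iova to the guest allocator and notify the host before
 * the GEM handle is closed:
 */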
static void
virtio_bo_destroy(struct fd_bo *bo)
{
   struct virtio_bo *virtio_bo = to_virtio_bo(bo);

   /* Release iova by setting to zero: */
   if (bo->iova) {
      set_iova(bo, 0);

      virtio_dev_free_iova(bo->dev, bo->iova, bo->size);

      /* Need to flush batched ccmds to ensure the host sees the iova
       * release before the GEM handle is closed (ie. detach_resource()
       * on the host side)
       */
      virtio_execbuf_flush(bo->dev);
   }

   free(virtio_bo);
}

static const struct fd_bo_funcs funcs = {
   .offset = virtio_bo_offset,
   .cpu_prep = virtio_bo_cpu_prep,
   .cpu_fini = virtio_bo_cpu_fini,
   .madvise = virtio_bo_madvise,
   .iova = virtio_bo_iova,
   .set_name = virtio_bo_set_name,
   .upload = virtio_bo_upload,
   .destroy = virtio_bo_destroy,
};

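/* Common handle -> fd_bo setup shared by the import and allocation paths,
 * including querying the host resource-id for the handle:
 */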
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct virtio_bo *virtio_bo;
   struct fd_bo *bo;

   virtio_bo = calloc(1, sizeof(*virtio_bo));
   if (!virtio_bo)
      return NULL;

   bo = &virtio_bo->base;

   /* Note we need to set these because allocation_wait_execute() could
    * run before fd_bo_init_common():
    */
   bo->dev = dev;
   p_atomic_set(&bo->refcnt, 1);

   bo->size = size;
   bo->funcs = &funcs;
   bo->handle = handle;

   /* Don't assume we can mmap an imported bo: */
   bo->alloc_flags = FD_BO_NOMAP;

   struct drm_virtgpu_resource_info args = {
         .bo_handle = handle,
   };
   int ret;

   ret = drmCommandWriteRead(dev->fd, DRM_VIRTGPU_RESOURCE_INFO, &args, sizeof(args));
   if (ret) {
      INFO_MSG("failed to get resource info: %s", strerror(errno));
      free(virtio_bo);
      return NULL;
   }

   virtio_bo->res_id = args.res_handle;

   fd_bo_init_common(bo, dev);

   return bo;
}

/* allocate a new buffer object from existing handle */
struct fd_bo *
virtio_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct fd_bo *bo = bo_from_handle(dev, size, handle);

   if (!bo)
      return NULL;

   bo->iova = virtio_dev_alloc_iova(dev, size);
   if (!bo->iova)
      goto fail;

   set_iova(bo, bo->iova);

   return bo;

fail:
   virtio_bo_destroy(bo);
   return NULL;
}

/* allocate a buffer object: */
struct fd_bo *
virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   struct drm_virtgpu_resource_create_blob args = {
         .blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
         .size       = size,
   };
   struct msm_ccmd_gem_new_req req = {
         .hdr = MSM_CCMD(GEM_NEW, sizeof(req)),
         .size = size,
   };
   int ret;

   if (flags & FD_BO_SCANOUT)
      req.flags |= MSM_BO_SCANOUT;

   if (flags & FD_BO_GPUREADONLY)
      req.flags |= MSM_BO_GPU_READONLY;

   if (flags & FD_BO_CACHED_COHERENT) {
      req.flags |= MSM_BO_CACHED_COHERENT;
   } else {
      req.flags |= MSM_BO_WC;
   }

   if (flags & _FD_BO_VIRTIO_SHM) {
      args.blob_id = 0;
      args.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   } else {
      if (flags & (FD_BO_SHARED | FD_BO_SCANOUT)) {
         args.blob_flags = VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE |
               VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
      }

      if (!(flags & FD_BO_NOMAP)) {
         args.blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
      }

      args.blob_id = p_atomic_inc_return(&virtio_dev->next_blob_id);
      args.cmd = VOID2U64(&req);
      args.cmd_size = sizeof(req);

      /* Tunneled cmds are processed separately on the host side,
       * before the renderer->get_blob() callback.. the blob_id is
       * used to link the created bo to the get_blob() call:
       */
      req.blob_id = args.blob_id;
      req.iova = virtio_dev_alloc_iova(dev, size);
      if (!req.iova) {
         ret = -ENOMEM;
         goto fail;
      }
   }

   simple_mtx_lock(&virtio_dev->eb_lock);
   if (args.cmd)
      req.hdr.seqno = ++virtio_dev->next_seqno;
   ret = drmIoctl(dev->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args);
   simple_mtx_unlock(&virtio_dev->eb_lock);
   if (ret)
      goto fail;

   /* bo_from_handle() can fail (calloc or resource-info failure): */
   struct fd_bo *bo = bo_from_handle(dev, size, args.bo_handle);
   if (!bo)
      goto fail;

   struct virtio_bo *virtio_bo = to_virtio_bo(bo);

   virtio_bo->blob_id = args.blob_id;
   bo->iova = req.iova;

   return bo;

fail:
   if (req.iova) {
      virtio_dev_free_iova(dev, req.iova, size);
   }
   return NULL;
}