/*
 * Copyright 2022 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef MSM_PROTO_H_
#define MSM_PROTO_H_

#include <stdint.h>
#include <stddef.h> /* for offsetof(), used by msm_shmem_has_field() */

/**
 * General protocol notes:
 * 1) Request (req) messages are generally sent over DRM_VIRTGPU_EXECBUFFER
 *    but can also be sent via DRM_VIRTGPU_RESOURCE_CREATE_BLOB (in which
 *    case they are processed by the host before ctx->get_blob())
 * 2) Response (rsp) messages are returned via the shmem buffer's rsp memory
 *    region, at an offset specified by the guest in the req message.  Not
 *    all req messages have a rsp.  See the example after struct msm_ccmd_req
 *    below for the overall flow.
 * 3) Host and guest could have different pointer sizes, ie. 32b guest and
 *    64b host, or vice versa, so similar to kernel uabi, req and rsp msgs
 *    should be explicitly padded to avoid 32b vs 64b struct padding issues
 */

/**
 * Defines the layout of shmem buffer used for host->guest communication.
 */
struct msm_shmem {
   /**
    * The sequence # of last cmd processed by the host
    */
   uint32_t seqno;

   /**
    * Offset to the start of rsp memory region in the shmem buffer.  This
    * is set by the host when the shmem buffer is allocated, to allow for
    * extending the shmem buffer with new fields.  The size of the rsp
    * memory region is the size of the shmem buffer (controlled by the
    * guest) minus rsp_mem_offset.
    *
    * The guest should use the msm_shmem_has_field() macro to determine
    * if the host supports a given field, ie. to handle compatibility of
    * newer guest vs older host.
    *
    * Making the guest userspace responsible for backwards compatibility
    * simplifies the host VMM.
    */
   uint32_t rsp_mem_offset;

#define msm_shmem_has_field(shmem, field) ({                         \
      struct msm_shmem *_shmem = (shmem);                            \
      (_shmem->rsp_mem_offset > offsetof(struct msm_shmem, field));  \
   })

   /**
    * Counter that is incremented on asynchronous errors, like SUBMIT
    * or GEM_NEW failures.  The guest should treat errors as context-
    * lost.
    */
   uint32_t async_error;
};
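
/*
 * Example (illustrative sketch, not part of the protocol): how a guest
 * might use msm_shmem_has_field() to stay compatible with an older host.
 * The helper name is invented for this example.
 */
static inline uint32_t msm_shmem_get_async_error(struct msm_shmem *shmem)
{
   /* An older host's rsp_mem_offset will not extend past async_error, in
    * which case the field does not exist and we report no errors:
    */
   if (!msm_shmem_has_field(shmem, async_error))
      return 0;
   return shmem->async_error;
}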

#define DEFINE_CAST(parent, child)                                             \
   static inline struct child *to_##child(const struct parent *x)              \
   {                                                                           \
      return (struct child *)x;                                                \
   }

/*
 * Possible cmd types for "command stream", ie. payload of EXECBUF ioctl:
 */
enum msm_ccmd {
   MSM_CCMD_NOP = 1,         /* No payload, can be used to sync with host */
   MSM_CCMD_IOCTL_SIMPLE,
   MSM_CCMD_GEM_NEW,
   MSM_CCMD_GEM_SET_IOVA,
   MSM_CCMD_GEM_CPU_PREP,
   MSM_CCMD_GEM_SET_NAME,
   MSM_CCMD_GEM_SUBMIT,
   MSM_CCMD_GEM_UPLOAD,
   MSM_CCMD_SUBMITQUEUE_QUERY,
   MSM_CCMD_WAIT_FENCE,
   MSM_CCMD_SET_DEBUGINFO,
   MSM_CCMD_LAST,
};

struct msm_ccmd_req {
   uint32_t cmd;
   uint32_t len;
   uint32_t seqno;

   /* Offset into shmem ctrl buffer to write response.  The host ensures
    * that it doesn't write outside the bounds of the ctrl buffer, but
    * otherwise it is up to the guest to manage allocation of where responses
    * should be written in the ctrl buf.
    */
   uint32_t rsp_off;
};
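
/*
 * Example (illustrative sketch, not part of the protocol): the overall
 * req/rsp flow as a guest might drive it.  Assumes the virtgpu uapi from
 * virtgpu_drm.h and libdrm's drmIoctl(); seqno and rsp_off allocation are
 * the guest's own bookkeeping, and alloc_rsp_mem() is hypothetical:
 *
 *    req->seqno = ++last_seqno;
 *    req->rsp_off = alloc_rsp_mem(rsp_size);
 *
 *    struct drm_virtgpu_execbuffer eb = {
 *          .command = (uintptr_t)req,
 *          .size = req->len,
 *    };
 *    drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
 *
 *    // once shmem->seqno catches up to req->seqno the host has processed
 *    // the req, and the rsp (if any) can be read at:
 *    struct msm_ccmd_rsp *rsp = (void *)((uint8_t *)shmem +
 *          shmem->rsp_mem_offset + req->rsp_off);
 */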

struct msm_ccmd_rsp {
   uint32_t len;
};

#define MSM_CCMD(_cmd, _len) (struct msm_ccmd_req){ \
       .cmd = MSM_CCMD_##_cmd,                      \
       .len = (_len),                               \
   }
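
/*
 * Example (illustrative sketch): MSM_CCMD() fills in cmd and len; seqno
 * and rsp_off remain the guest's bookkeeping.  The helper name is invented
 * for this example.
 */
static inline struct msm_ccmd_req msm_ccmd_example_hdr(uint32_t seqno)
{
   struct msm_ccmd_req hdr = MSM_CCMD(NOP, sizeof(hdr));
   hdr.seqno = seqno;
   return hdr;
}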

/*
 * MSM_CCMD_NOP
 */
struct msm_ccmd_nop_req {
   struct msm_ccmd_req hdr;
};

/*
 * MSM_CCMD_IOCTL_SIMPLE
 *
 * Forward simple/flat IOC_RW or IOC_W ioctls.  Only a limited set of
 * ioctls is supported.
 */
struct msm_ccmd_ioctl_simple_req {
   struct msm_ccmd_req hdr;

   uint32_t cmd;
   uint8_t payload[];
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_ioctl_simple_req)

struct msm_ccmd_ioctl_simple_rsp {
   struct msm_ccmd_rsp hdr;

   /* The ioctl return value.  Interrupted syscalls are handled on the host
    * without returning to the guest.
    */
   int32_t ret;

   /* The output payload for IOC_RW ioctls; the payload is the same size as
    * the request payload in msm_ccmd_ioctl_simple_req.
    *
    * For IOC_W ioctls (userspace writes, kernel reads) this is zero length.
    */
   uint8_t payload[];
};
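
/*
 * Example (illustrative sketch): the wire size of an IOCTL_SIMPLE req is
 * the header plus the ioctl's flat parameter struct, whose size is
 * presumably recovered on the host from the ioctl nr (ie. _IOC_SIZE(cmd)).
 * The helper name is invented for this example.
 */
static inline uint32_t msm_ccmd_ioctl_simple_len(uint32_t data_sz)
{
   return (uint32_t)(sizeof(struct msm_ccmd_ioctl_simple_req) + data_sz);
}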

/*
 * MSM_CCMD_GEM_NEW
 *
 * GEM buffer allocation, maps to DRM_MSM_GEM_NEW plus DRM_MSM_GEM_INFO to
 * set the BO's iova (to avoid an extra guest -> host trip)
 *
 * No response.
 */
struct msm_ccmd_gem_new_req {
   struct msm_ccmd_req hdr;

   uint64_t iova;
   uint64_t size;
   uint32_t flags;
   uint32_t blob_id;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_new_req)
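
/*
 * Example (illustrative sketch): GEM_NEW is typically sent as the cmd
 * payload of DRM_VIRTGPU_RESOURCE_CREATE_BLOB, with req.blob_id matching
 * the blob_id of the new blob resource so the host can associate the GEM
 * buffer with the guest resource.  Assumes the virtgpu uapi from
 * virtgpu_drm.h; blob_flags and seqno handling omitted:
 *
 *    struct msm_ccmd_gem_new_req req = {
 *          .hdr = MSM_CCMD(GEM_NEW, sizeof(req)),
 *          .iova = iova, .size = size, .flags = flags, .blob_id = blob_id,
 *    };
 *    struct drm_virtgpu_resource_create_blob args = {
 *          .blob_mem = VIRTGPU_BLOB_MEM_HOST3D,
 *          .size = size,
 *          .blob_id = blob_id,
 *          .cmd_size = sizeof(req),
 *          .cmd = (uintptr_t)&req,
 *    };
 *    drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args);
 */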

/*
 * MSM_CCMD_GEM_SET_IOVA
 *
 * Set the buffer iova (for imported BOs).  Also used to release the iova
 * (by setting it to zero) when a BO is freed.
 */
struct msm_ccmd_gem_set_iova_req {
   struct msm_ccmd_req hdr;

   uint64_t iova;
   uint32_t res_id;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_iova_req)
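
/*
 * Example (illustrative sketch): filling a set_iova req; the helper name
 * is invented for this example.
 */
static inline void msm_ccmd_gem_set_iova_init(struct msm_ccmd_gem_set_iova_req *req,
                                              uint32_t res_id, uint64_t iova)
{
   req->hdr = MSM_CCMD(GEM_SET_IOVA, sizeof(*req));
   req->res_id = res_id;
   req->iova = iova;    /* zero to release the iova when freeing the BO */
}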

/*
 * MSM_CCMD_GEM_CPU_PREP
 *
 * Maps to DRM_MSM_GEM_CPU_PREP
 *
 * Note: Since we don't want to block the single threaded host, this returns
 * immediately with -EBUSY if the fence is not yet signaled.  The guest
 * should poll if needed (the same pattern as the polling sketch under
 * MSM_CCMD_WAIT_FENCE, with -EBUSY in place of -ETIMEDOUT).
 */
struct msm_ccmd_gem_cpu_prep_req {
   struct msm_ccmd_req hdr;

   uint32_t res_id;
   uint32_t op;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_cpu_prep_req)

struct msm_ccmd_gem_cpu_prep_rsp {
   struct msm_ccmd_rsp hdr;

   int32_t ret;
};

/*
 * MSM_CCMD_GEM_SET_NAME
 *
 * Maps to DRM_MSM_GEM_INFO:MSM_INFO_SET_NAME
 *
 * No response.
 */
struct msm_ccmd_gem_set_name_req {
   struct msm_ccmd_req hdr;

   uint32_t res_id;
   /* Note: packet size is aligned to 4 bytes, so the string name may
    * be shorter than the packet size indicates.
    */
   uint32_t len;
   uint8_t  payload[];
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_name_req)
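
/*
 * Example (illustrative sketch): hdr.len counts the 4-byte-aligned packet,
 * while the len field holds the unpadded name length.  The helper name is
 * invented for this example.
 */
static inline uint32_t msm_ccmd_gem_set_name_len(uint32_t name_len)
{
   return (uint32_t)((sizeof(struct msm_ccmd_gem_set_name_req) + name_len + 3) & ~3u);
}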

/*
 * MSM_CCMD_GEM_SUBMIT
 *
 * Maps to DRM_MSM_GEM_SUBMIT
 *
 * The actual for-reals cmdstream submission.  Note this intentionally
 * does not support relocs, since we already require a non-ancient
 * kernel.
 *
 * Note: there is no in/out fence-fd; that synchronization is handled on
 * the guest kernel side (ugg).. we need to come up with a better story
 * for fencing, probably something that can handle syncobjs.
 *
 * Note that the bo handles referenced are the host handles, so that
 * they can be directly passed to the host kernel without translation.
 *
 * TODO we can pack the payload tighter (and enforce no-relocs) if we
 * define our own structs, at the cost of host userspace having to
 * do a bit more work.  Is it worth it?  It could probably be done
 * without extra overhead in guest userspace..
 *
 * No response.
 */
struct msm_ccmd_gem_submit_req {
   struct msm_ccmd_req hdr;

   uint32_t flags;
   uint32_t queue_id;
   uint32_t nr_bos;
   uint32_t nr_cmds;

   /**
    * What userspace expects the next seqno fence to be.  To avoid having
    * to wait for host, the guest tracks what it expects to be the next
    * returned seqno fence.  This is passed to the host just for error
    * checking.
    */
   uint32_t fence;

   /**
    * Payload is first an array of 'struct drm_msm_gem_submit_bo' of
    * length determined by nr_bos (note that handles are host handles),
    * followed by an array of 'struct drm_msm_gem_submit_cmd' of length
    * determined by nr_cmds
    */
   int8_t   payload[];
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_submit_req)
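
/*
 * Example (illustrative sketch): computing the wire size of a submit req
 * from the payload layout above, assuming the drm_msm_gem_submit_bo and
 * drm_msm_gem_submit_cmd structs from the msm_drm.h uabi header:
 *
 *    uint32_t len = sizeof(struct msm_ccmd_gem_submit_req) +
 *                   (nr_bos * sizeof(struct drm_msm_gem_submit_bo)) +
 *                   (nr_cmds * sizeof(struct drm_msm_gem_submit_cmd));
 */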

/*
 * MSM_CCMD_GEM_UPLOAD
 *
 * Upload data to a GEM buffer
 *
 * No response.
 */
struct msm_ccmd_gem_upload_req {
   struct msm_ccmd_req hdr;

   uint32_t res_id;
   uint32_t pad;
   uint32_t off;

   /* Note: packet size is aligned to 4 bytes, so the payload may
    * be shorter than the packet size indicates.
    */
   uint32_t len;
   uint8_t  payload[];
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_upload_req)
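
/*
 * Example (illustrative sketch): like GEM_SET_NAME, the packet is padded
 * out to a multiple of 4 bytes while len holds the unpadded data length.
 * The helper name is invented for this example.
 */
static inline uint32_t msm_ccmd_gem_upload_len(uint32_t data_len)
{
   return (uint32_t)((sizeof(struct msm_ccmd_gem_upload_req) + data_len + 3) & ~3u);
}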

/*
 * MSM_CCMD_SUBMITQUEUE_QUERY
 *
 * Maps to DRM_MSM_SUBMITQUEUE_QUERY
 */
struct msm_ccmd_submitqueue_query_req {
   struct msm_ccmd_req hdr;

   uint32_t queue_id;
   uint32_t param;
   uint32_t len;   /* size of payload in rsp */
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_submitqueue_query_req)

struct msm_ccmd_submitqueue_query_rsp {
   struct msm_ccmd_rsp hdr;

   int32_t  ret;
   uint32_t out_len;
   uint8_t  payload[];
};
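
/*
 * Example (illustrative sketch): the guest reserves req->len bytes of rsp
 * payload space, so the rsp memory it allocates must cover at least the
 * following; the helper name is invented for this example.
 */
static inline uint32_t msm_ccmd_submitqueue_query_rsp_len(uint32_t len)
{
   return (uint32_t)(sizeof(struct msm_ccmd_submitqueue_query_rsp) + len);
}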

/*
 * MSM_CCMD_WAIT_FENCE
 *
 * Maps to DRM_MSM_WAIT_FENCE
 *
 * Note: Since we don't want to block the single threaded host, this returns
 * immediately with -ETIMEDOUT if the fence is not yet signaled.  The guest
 * should poll if needed.
 */
struct msm_ccmd_wait_fence_req {
   struct msm_ccmd_req hdr;

   uint32_t queue_id;
   uint32_t fence;
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_wait_fence_req)

struct msm_ccmd_wait_fence_rsp {
   struct msm_ccmd_rsp hdr;

   int32_t ret;
};
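
/*
 * Example (illustrative sketch): since the host replies -ETIMEDOUT rather
 * than blocking, a guest that needs a blocking wait can resend the req
 * until the fence signals.  send_req_and_wait_rsp() is hypothetical, and
 * backoff/timeout handling is omitted:
 *
 *    do {
 *       send_req_and_wait_rsp(&req->hdr, &rsp->hdr);
 *    } while (rsp->ret == -ETIMEDOUT);
 */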

/*
 * MSM_CCMD_SET_DEBUGINFO
 *
 * Set per-guest-process debug info (comm and cmdline).  For GPU faults/
 * crashes, it isn't too useful to see the crosvm (for ex.) comm/cmdline,
 * since the host process is only a proxy.  This allows the guest to
 * pass through the guest process comm and commandline for debugging
 * purposes.
 *
 * No response.
 */
struct msm_ccmd_set_debuginfo_req {
   struct msm_ccmd_req hdr;

   uint32_t comm_len;
   uint32_t cmdline_len;

   /**
    * Payload is first the comm string followed by cmdline string, padded
    * out to a multiple of 4.
    */
   int8_t   payload[];
};
DEFINE_CAST(msm_ccmd_req, msm_ccmd_set_debuginfo_req)
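
/*
 * Example (illustrative sketch): a plausible guest-side encoding, with the
 * NUL-terminated strings packed back to back and the total packet padded
 * out to a multiple of 4 (align4() is a hypothetical helper):
 *
 *    req->comm_len = strlen(comm) + 1;
 *    req->cmdline_len = strlen(cmdline) + 1;
 *    memcpy(&req->payload[0], comm, req->comm_len);
 *    memcpy(&req->payload[req->comm_len], cmdline, req->cmdline_len);
 *    req->hdr.len = align4(sizeof(*req) + req->comm_len + req->cmdline_len);
 */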

#endif /* MSM_PROTO_H_ */