/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef X
#undef X
#endif

/*
 * TODO make this magic easier to share between msm_ringbuffer_sp and
 * virtio_ringbuffer
 */

#if PTRSZ == 32
#define X(n) n##_32
#else
#define X(n) n##_64
#endif
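
/*
 * A note on the macros above: this file is evidently meant to be compiled
 * more than once (hence the #undef of X at the top), with PTRSZ defined by
 * the including translation unit.  A hypothetical include site (not shown
 * in this file) would look roughly like:
 *
 *    #define PTRSZ 32
 *    #include "virtio_ringbuffer.h"
 *    #undef PTRSZ
 *    #define PTRSZ 64
 *    #include "virtio_ringbuffer.h"
 *
 * X() then mangles each helper's name with the pointer size, so
 * X(emit_reloc_common) expands to emit_reloc_common_32 or
 * emit_reloc_common_64, letting the 32- and 64-bit variants of the emit
 * paths below coexist in one binary.
 */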

/* Write the reloc's GPU address into the command stream: the low 32 bits
 * always, and the high 32 bits only when building the 64-bit variant.
 */
static void X(emit_reloc_common)(struct fd_ringbuffer *ring,
                                 const struct fd_reloc *reloc)
{
   (*ring->cur++) = (uint32_t)reloc->iova;
#if PTRSZ == 64
   (*ring->cur++) = (uint32_t)(reloc->iova >> 32);
#endif
}

/* Emit a reloc from a ringbuffer that is not a long-lived ringbuffer object:
 * the referenced BO is added directly to the submit's BO table.
 */
static void X(virtio_ringbuffer_emit_reloc_nonobj)(struct fd_ringbuffer *ring,
                                                   const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc);

   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   struct virtio_ringbuffer *virtio_ring = to_virtio_ringbuffer(ring);

   struct virtio_submit *virtio_submit = to_virtio_submit(virtio_ring->u.submit);

   virtio_submit_append_bo(virtio_submit, reloc->bo);
}

/* Emit a reloc from a long-lived ringbuffer object: the referenced BO is
 * tracked on the object's own reloc_bos list, and is merged into a submit
 * later, when the object itself is referenced from a submit's ringbuffer.
 */
static void X(virtio_ringbuffer_emit_reloc_obj)(struct fd_ringbuffer *ring,
                                                const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc);

   assert(ring->flags & _FD_RINGBUFFER_OBJECT);

   struct virtio_ringbuffer *virtio_ring = to_virtio_ringbuffer(ring);

   /* Avoid emitting duplicate BO references into the list.  Ringbuffer
    * objects are long-lived, so this saves ongoing work at draw time in
    * exchange for a bit at context setup/first draw.  And the number of
    * relocs per ringbuffer object is fairly small, so the O(n^2) doesn't
    * hurt much.  (See the note after this function.)
    */
   if (!virtio_ringbuffer_references_bo(ring, reloc->bo)) {
      APPEND(&virtio_ring->u, reloc_bos, fd_bo_ref(reloc->bo));
   }
}
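
/* A note on the duplicate check above: virtio_ringbuffer_references_bo() is
 * defined outside this file; it is assumed here to be a simple linear scan
 * over the object's accumulated reloc_bos, roughly:
 *
 *    for (unsigned i = 0; i < virtio_ring->u.nr_reloc_bos; i++)
 *       if (virtio_ring->u.reloc_bos[i] == bo)
 *          return true;
 *    return false;
 *
 * That per-reloc O(n) scan is what makes the overall bookkeeping O(n^2) in
 * the number of relocs, as the comment above notes.
 */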

/* Emit a reference from 'ring' to a target ringbuffer (or, for growable
 * rings, to one of its command-stream chunks), and propagate the target's
 * BO references.  Returns the size in bytes of the referenced chunk.
 */
static uint32_t X(virtio_ringbuffer_emit_reloc_ring)(
   struct fd_ringbuffer *ring, struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct virtio_ringbuffer *virtio_target = to_virtio_ringbuffer(target);
   struct fd_bo *bo;
   uint32_t size;

   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < virtio_target->u.nr_cmds)) {
      bo = virtio_target->u.cmds[cmd_idx].ring_bo;
      size = virtio_target->u.cmds[cmd_idx].size;
   } else {
      bo = virtio_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      X(virtio_ringbuffer_emit_reloc_obj)(ring, &(struct fd_reloc){
                .bo = bo,
                .iova = bo->iova + virtio_target->offset,
                .offset = virtio_target->offset,
             });
   } else {
      X(virtio_ringbuffer_emit_reloc_nonobj)(ring, &(struct fd_reloc){
                .bo = bo,
                .iova = bo->iova + virtio_target->offset,
                .offset = virtio_target->offset,
             });
   }

   if (!(target->flags & _FD_RINGBUFFER_OBJECT))
      return size;

   struct virtio_ringbuffer *virtio_ring = to_virtio_ringbuffer(ring);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < virtio_target->u.nr_reloc_bos; i++) {
         struct fd_bo *target_bo = virtio_target->u.reloc_bos[i];
         if (!virtio_ringbuffer_references_bo(ring, target_bo))
            APPEND(&virtio_ring->u, reloc_bos, fd_bo_ref(target_bo));
      }
   } else {
      // TODO it would be nice to know whether we have already
      // seen this target before.  But hopefully we hit the
      // append_bo() fast path enough for this to not matter
      // (see the note after this function):
      struct virtio_submit *virtio_submit = to_virtio_submit(virtio_ring->u.submit);

      for (unsigned i = 0; i < virtio_target->u.nr_reloc_bos; i++) {
         virtio_submit_append_bo(virtio_submit, virtio_target->u.reloc_bos[i]);
      }
   }

   return size;
}
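
/* On the append_bo() fast path mentioned in the TODO above: the details of
 * virtio_submit_append_bo() are not shown in this file, so the following is
 * only an assumption about its shape.  The usual trick is to cache, on the
 * fd_bo, its last index into a submit's BO table, so re-appending an
 * already-tracked BO is a cheap check rather than a table search:
 *
 *    uint32_t idx = bo->idx;
 *    if (idx >= submit->nr_bos || submit->bos[idx] != bo)
 *       idx = append_bo_slowpath(submit, bo);   // hypothetical helper
 *    return idx;
 */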