Lines matching refs: ring (the vn_ring ring-buffer implementation in Mesa's Venus driver)

16 vn_ring_load_head(const struct vn_ring *ring)
21 return atomic_load_explicit(ring->shared.head, memory_order_acquire);
25 vn_ring_store_tail(struct vn_ring *ring)
30 return atomic_store_explicit(ring->shared.tail, ring->cur,
35 vn_ring_load_status(const struct vn_ring *ring)
38 return atomic_load_explicit(ring->shared.status, memory_order_seq_cst);
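
The three accessors above are the shared-memory handshake between the driver and the renderer: the head is read with an acquire load that pairs with the renderer's release store, the tail is published with a release store that pairs with the renderer's acquire load, and the status word is read with seq_cst ordering. A minimal, self-contained sketch of that pairing, using made-up names (ring_ctrl, producer_*) rather than the Venus types:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical control block; in Venus the head/tail/status words live at
 * offsets inside a shared-memory mapping instead of one struct. */
struct ring_ctrl {
   _Atomic uint32_t head; /* advanced by the consumer (renderer side) */
   _Atomic uint32_t tail; /* advanced by the producer (driver side)   */
};

/* Producer: read consumer progress.  The acquire load pairs with the
 * consumer's release store of head, so everything the consumer did before
 * retiring those bytes is visible afterwards. */
static uint32_t
producer_load_head(struct ring_ctrl *c)
{
   return atomic_load_explicit(&c->head, memory_order_acquire);
}

/* Producer: publish newly written bytes.  The release store makes the
 * payload written before this call visible to the consumer once its
 * acquire load observes the new tail. */
static void
producer_store_tail(struct ring_ctrl *c, uint32_t new_tail)
{
   atomic_store_explicit(&c->tail, new_tail, memory_order_release);
}
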
42 vn_ring_write_buffer(struct vn_ring *ring, const void *data, uint32_t size)
44 assert(ring->cur + size - vn_ring_load_head(ring) <= ring->buffer_size);
46 const uint32_t offset = ring->cur & ring->buffer_mask;
47 if (offset + size <= ring->buffer_size) {
48 memcpy(ring->shared.buffer + offset, data, size);
50 const uint32_t s = ring->buffer_size - offset;
51 memcpy(ring->shared.buffer + offset, data, s);
52 memcpy(ring->shared.buffer, data + s, size - s);
55 ring->cur += size;
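
vn_ring_write_buffer relies on buffer_size being a power of two: cur is a free-running byte counter that is only masked when indexing, and a write that crosses the end of the buffer is split into two memcpy calls. A standalone sketch of the same wraparound write, with hypothetical names (byte_ring, ring_write):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct byte_ring {
   uint8_t *buf;
   uint32_t buf_size; /* power of two */
   uint32_t cur;      /* total bytes ever written (wraps at 2^32) */
};

static void
ring_write(struct byte_ring *r, uint32_t head, const void *data, uint32_t size)
{
   /* caller guarantees the consumer has freed enough space */
   assert(r->cur + size - head <= r->buf_size);

   const uint32_t offset = r->cur & (r->buf_size - 1);
   if (offset + size <= r->buf_size) {
      /* fits without wrapping */
      memcpy(r->buf + offset, data, size);
   } else {
      /* split the copy at the end of the buffer and wrap to the start */
      const uint32_t first = r->buf_size - offset;
      memcpy(r->buf + offset, data, first);
      memcpy(r->buf, (const uint8_t *)data + first, size - first);
   }
   r->cur += size;
}
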
59 vn_ring_ge_seqno(const struct vn_ring *ring, uint32_t a, uint32_t b)
68 return ring->cur >= a || ring->cur < b;
70 return ring->cur >= a && ring->cur < b;
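
vn_ring_ge_seqno answers "has seqno a reached seqno b?" for free-running 32-bit seqnos, using ring->cur (the newest seqno) to decide on which side of a wraparound each value sits. A small self-checking sketch of the same comparison, hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Plain `a >= b` breaks once the counter wraps; `cur` disambiguates. */
static bool
seqno_ge(uint32_t cur, uint32_t a, uint32_t b)
{
   if (a >= b)
      return cur >= a || cur < b;
   else
      return cur >= a && cur < b;
}

int
main(void)
{
   /* no wrap: 10 is past 5 when the counter is at 20 */
   printf("%d\n", seqno_ge(20, 10, 5));          /* 1 */
   /* wrapped: seqno 4 (after wrap) is past 0xfffffff0 (before wrap) */
   printf("%d\n", seqno_ge(8, 4, 0xfffffff0u));  /* 1 */
   /* the other direction: 0xfffffff0 has not reached 4 */
   printf("%d\n", seqno_ge(8, 0xfffffff0u, 4));  /* 0 */
   return 0;
}
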
74 vn_ring_retire_submits(struct vn_ring *ring, uint32_t seqno)
76 list_for_each_entry_safe(struct vn_ring_submit, submit, &ring->submits,
78 if (!vn_ring_ge_seqno(ring, seqno, submit->seqno))
82 vn_renderer_shmem_unref(ring->renderer, submit->shmems[i]);
85 list_add(&submit->head, &ring->free_submits);
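
vn_ring_retire_submits walks the pending submits in submission order, stops at the first one the renderer has not reached, drops the shmem references of the finished ones, and moves them to a free list for reuse. A compact sketch of that recycle pattern with a plain singly linked list and hypothetical names (struct submit, retire_submits):

#include <stdbool.h>
#include <stdint.h>

struct submit {
   uint32_t seqno;
   struct submit *next;
};

struct tracker {
   uint32_t cur;           /* latest seqno handed out */
   struct submit *pending; /* oldest first */
   struct submit *free;    /* recycled records */
};

/* same wrap-aware test as in the previous sketch */
static bool
seqno_ge(uint32_t cur, uint32_t a, uint32_t b)
{
   return a >= b ? (cur >= a || cur < b) : (cur >= a && cur < b);
}

/* Move every pending submit whose seqno has been reached onto the free
 * list; stop at the first one still in flight, since the list is kept in
 * submission order. */
static void
retire_submits(struct tracker *t, uint32_t done)
{
   while (t->pending && seqno_ge(t->cur, done, t->pending->seqno)) {
      struct submit *s = t->pending;
      t->pending = s->next;
      /* per-submit resources (shmem refs in Venus) would be released here */
      s->next = t->free;
      t->free = s;
   }
}
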
90 vn_ring_wait_seqno(const struct vn_ring *ring, uint32_t seqno)
97 const uint32_t head = vn_ring_load_head(ring);
98 if (vn_ring_ge_seqno(ring, head, seqno))
100 vn_relax(&iter, "ring seqno");
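
vn_ring_wait_seqno polls the head instead of asking the renderer to wait, backing off via vn_relax between iterations. A self-contained sketch of that poll-with-backoff loop, with a simplified relax() (the real one also logs and eventually treats a stuck ring as a lost device) and a plain >= comparison standing in for the wrap-aware one:

#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

/* Sleep a little longer on every iteration (capped here at 64 us) so a
 * long wait does not spin a core at 100%. */
static void
relax(uint32_t *iter)
{
   const long step_ns = 1000;
   struct timespec ts = {
      .tv_nsec = step_ns * (*iter < 64 ? (long)*iter + 1 : 64),
   };
   (*iter)++;
   nanosleep(&ts, NULL);
}

/* Poll the consumer's head counter until it reaches `target`.  Venus polls
 * from the driver side because a renderer round trip costs several hops
 * and the renderer may end up polling anyway. */
static uint32_t
poll_until(_Atomic uint32_t *head, uint32_t target)
{
   uint32_t iter = 0;
   for (;;) {
      const uint32_t h = atomic_load_explicit(head, memory_order_acquire);
      if (h >= target) /* the real code uses the wrap-aware comparison */
         return h;
      relax(&iter);
   }
}
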
105 vn_ring_has_space(const struct vn_ring *ring,
109 const uint32_t head = vn_ring_load_head(ring);
110 if (likely(ring->cur + size - head <= ring->buffer_size)) {
119 vn_ring_wait_space(const struct vn_ring *ring, uint32_t size)
121 assert(size <= ring->buffer_size);
124 if (likely(vn_ring_has_space(ring, size, &head)))
133 vn_relax(&iter, "ring space");
134 if (vn_ring_has_space(ring, size, &head))
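
The space check in vn_ring_has_space works on free-running counters: cur - head is the number of bytes still in flight, and because the subtraction is done in uint32_t it stays correct across wraparound, so cur + size - head <= buffer_size is exactly "this write still fits". A tiny self-checking sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
has_space(uint32_t cur, uint32_t head, uint32_t size, uint32_t buffer_size)
{
   return cur + size - head <= buffer_size;
}

int
main(void)
{
   /* 16 bytes in flight out of 64: a 48-byte write fits, 49 does not */
   printf("%d\n", has_space(16, 0, 48, 64));           /* 1 */
   printf("%d\n", has_space(16, 0, 49, 64));           /* 0 */
   /* same 16 bytes in flight, right after cur wrapped past 0 */
   printf("%d\n", has_space(8, 0xfffffff8u, 48, 64));  /* 1 */
   return 0;
}
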
170 vn_ring_init(struct vn_ring *ring,
175 memset(ring, 0, sizeof(*ring));
178 ring->renderer = renderer;
182 ring->buffer_size = layout->buffer_size;
183 ring->buffer_mask = ring->buffer_size - 1;
185 ring->shared.head = shared + layout->head_offset;
186 ring->shared.tail = shared + layout->tail_offset;
187 ring->shared.status = shared + layout->status_offset;
188 ring->shared.buffer = shared + layout->buffer_offset;
189 ring->shared.extra = shared + layout->extra_offset;
191 list_inithead(&ring->submits);
192 list_inithead(&ring->free_submits);
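
vn_ring_init only records offsets: the head, tail, status, buffer and extra regions all live inside one shared-memory allocation at positions fixed by a vn_ring_layout both sides agree on, and buffer_size must be a power of two for buffer_mask to work. A rough sketch of how such a layout could be computed; the offsets, spacing and field set here are illustrative, not the actual Venus layout:

#include <assert.h>
#include <stddef.h>

struct ring_layout {
   size_t head_offset;
   size_t tail_offset;
   size_t status_offset;
   size_t buffer_offset;
   size_t buffer_size; /* must be a power of two */
   size_t shared_size;
};

static void
ring_layout_init(struct ring_layout *l, size_t buffer_size)
{
   assert(buffer_size && !(buffer_size & (buffer_size - 1)));

   l->head_offset = 0;
   l->tail_offset = l->head_offset + 64;   /* keep the counters on      */
   l->status_offset = l->tail_offset + 64; /* separate cache lines      */
   l->buffer_offset = l->status_offset + 64;
   l->buffer_size = buffer_size;
   l->shared_size = l->buffer_offset + buffer_size;
}
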
196 vn_ring_fini(struct vn_ring *ring)
198 vn_ring_retire_submits(ring, ring->cur);
199 assert(list_is_empty(&ring->submits));
202 &ring->free_submits, head)
207 vn_ring_get_submit(struct vn_ring *ring, uint32_t shmem_count)
214 !list_is_empty(&ring->free_submits)) {
216 list_first_entry(&ring->free_submits, struct vn_ring_submit, head);
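
vn_ring_get_submit reuses a retired submit from free_submits when the request is small, and otherwise allocates a new record sized for its shmem count (the records end in a flexible array of shmem pointers). A hypothetical sketch of the same reuse-or-allocate pattern:

#include <stdint.h>
#include <stdlib.h>

struct record {
   struct record *next; /* free-list link */
   uint32_t capacity;   /* how many attachments fit in data[] */
   void *data[];        /* flexible array, sized at allocation time */
};

static struct record *
get_record(struct record **free_list, uint32_t count)
{
   const uint32_t min_count = 2;

   /* small requests fit in any recycled record, so reuse one if available */
   if (count <= min_count && *free_list) {
      struct record *r = *free_list;
      *free_list = r->next;
      return r;
   }

   if (count < min_count)
      count = min_count;
   struct record *r = malloc(sizeof(*r) + sizeof(r->data[0]) * count);
   if (r)
      r->capacity = count;
   return r;
}
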
228 vn_ring_submit(struct vn_ring *ring,
233 /* write cs to the ring */
238 cur_seqno = vn_ring_wait_space(ring, buf->committed_size);
239 vn_ring_write_buffer(ring, buf->base, buf->committed_size);
242 vn_ring_store_tail(ring);
243 const bool notify = vn_ring_load_status(ring) & VN_RING_STATUS_IDLE;
245 vn_ring_retire_submits(ring, cur_seqno);
247 submit->seqno = ring->cur;
248 list_addtail(&submit->head, &ring->submits);
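
In vn_ring_submit the ordering is the important part: the payload goes into the buffer first, the new tail is published with a release store, and only then is the status word sampled to decide whether the renderer is idle and needs a notification; sampling status before publishing the tail could race with the renderer going idle and lose the wakeup. A condensed, compilable sketch with hypothetical names (toy_ring, toy_submit) and the wraparound handling elided:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define RING_BYTES 4096u
#define STATUS_IDLE 0x1u

struct toy_ring {
   _Atomic uint32_t head;
   _Atomic uint32_t tail;
   _Atomic uint32_t status;
   uint32_t cur; /* producer-private byte counter */
   uint8_t buffer[RING_BYTES];
};

static bool
toy_submit(struct toy_ring *r, const void *data, uint32_t size)
{
   /* assume the caller already waited for space; a real write also handles
    * wraparound as in the write sketch above */
   const uint32_t offset = r->cur & (RING_BYTES - 1);
   assert(offset + size <= RING_BYTES);
   memcpy(r->buffer + offset, data, size);
   r->cur += size;

   /* 1. publish payload and new tail */
   atomic_store_explicit(&r->tail, r->cur, memory_order_release);

   /* 2. only then decide whether the consumer needs a kick */
   const bool notify =
      atomic_load_explicit(&r->status, memory_order_seq_cst) & STATUS_IDLE;
   return notify; /* caller sends the doorbell (execbuffer) when true */
}
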
258 vn_ring_wait(const struct vn_ring *ring, uint32_t seqno)
260 vn_ring_wait_seqno(ring, seqno);
264 vn_ring_wait_all(const struct vn_ring *ring)
266 /* load from tail rather than ring->cur for atomicity */
268 atomic_load_explicit(ring->shared.tail, memory_order_relaxed);
269 vn_ring_wait(ring, pending_seqno);
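
vn_ring_wait_all snapshots the shared tail instead of ring->cur because cur is a plain field owned by the submitting thread, while the tail is an atomic that any waiter can read safely; whatever value it sees is the last published seqno, and waiting for it drains the ring. A minimal sketch of that drain, hypothetical names, with plain >= standing in for the wrap-aware comparison and no backoff:

#include <stdatomic.h>
#include <stdint.h>

static void
drain(_Atomic uint32_t *head, _Atomic uint32_t *tail)
{
   /* snapshot the last published seqno */
   const uint32_t pending =
      atomic_load_explicit(tail, memory_order_relaxed);

   /* wait for the consumer to catch up to it */
   while (atomic_load_explicit(head, memory_order_acquire) < pending)
      ; /* spin; a real loop would back off as in the relax sketch */
}
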