1/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3/*
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 *    Rob Clark <robclark@freedesktop.org>
27 */
28
29#include <assert.h>
30
31#include "xf86atomic.h"
32#include "freedreno_ringbuffer.h"
33#include "kgsl_priv.h"
34
35
36/* because kgsl tries to validate the gpuaddr on kernel side in ISSUEIBCMDS,
37 * we can't use normal gem bo's for ringbuffer..  someday the kernel part
38 * needs to be reworked into a single sane drm driver :-/
39 */
/* Backing storage for a ringbuffer: raw gpu memory obtained via
 * IOCTL_KGSL_GPUMEM_ALLOC instead of a normal gem bo (see comment above).
 */
struct kgsl_rb_bo {
	struct kgsl_pipe *pipe;      /* pipe whose fd the memory was allocated/mapped from */
	void    *hostptr;            /* cpu mapping of the buffer */
	uint32_t gpuaddr;            /* gpu address returned by the alloc ioctl */
	uint32_t size;               /* size as requested by the caller (not page aligned) */
};
46
/* kgsl backend ringbuffer, wrapping the generic fd_ringbuffer: */
struct kgsl_ringbuffer {
	struct fd_ringbuffer base;   /* must be first, for to_kgsl_ringbuffer() downcast */
	struct kgsl_rb_bo *bo;       /* backing storage for the ring */
};
51
/* fd_ringbuffer is embedded as the first member of kgsl_ringbuffer, so a
 * plain cast recovers the containing struct:
 */
static inline struct kgsl_ringbuffer * to_kgsl_ringbuffer(struct fd_ringbuffer *x)
{
	struct kgsl_ringbuffer *kgsl_ring = (struct kgsl_ringbuffer *)x;
	return kgsl_ring;
}
56
57static void kgsl_rb_bo_del(struct kgsl_rb_bo *bo)
58{
59	struct kgsl_sharedmem_free req = {
60			.gpuaddr = bo->gpuaddr,
61	};
62	int ret;
63
64	drm_munmap(bo->hostptr, bo->size);
65
66	ret = ioctl(bo->pipe->fd, IOCTL_KGSL_SHAREDMEM_FREE, &req);
67	if (ret) {
68		ERROR_MSG("sharedmem free failed: %s", strerror(errno));
69	}
70
71	free(bo);
72}
73
74static struct kgsl_rb_bo * kgsl_rb_bo_new(struct kgsl_pipe *pipe, uint32_t size)
75{
76	struct kgsl_rb_bo *bo;
77	struct kgsl_gpumem_alloc req = {
78			.size = ALIGN(size, 4096),
79			.flags = KGSL_MEMFLAGS_GPUREADONLY,
80	};
81	int ret;
82
83	bo = calloc(1, sizeof(*bo));
84	if (!bo) {
85		ERROR_MSG("allocation failed");
86		return NULL;
87	}
88	ret = ioctl(pipe->fd, IOCTL_KGSL_GPUMEM_ALLOC, &req);
89	if (ret) {
90		ERROR_MSG("gpumem allocation failed: %s", strerror(errno));
91		goto fail;
92	}
93
94	bo->pipe = pipe;
95	bo->gpuaddr = req.gpuaddr;
96	bo->size = size;
97	bo->hostptr = drm_mmap(NULL, size, PROT_WRITE|PROT_READ,
98				MAP_SHARED, pipe->fd, req.gpuaddr);
99
100	return bo;
101fail:
102	if (bo)
103		kgsl_rb_bo_del(bo);
104	return NULL;
105}
106
107static void * kgsl_ringbuffer_hostptr(struct fd_ringbuffer *ring)
108{
109	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
110	return kgsl_ring->bo->hostptr;
111}
112
/* Submit the commands accumulated since last_start (up to ring->cur) to the
 * kernel as a single indirect buffer.  Returns the ioctl result (0 on
 * success).  Fence fds are not supported on kgsl, hence the asserts.
 */
static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
		int in_fence_fd, int *out_fence_fd)
{
	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
	struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe);
	/* byte offset of this submit's commands within the ringbuffer bo: */
	uint32_t offset = (uint8_t *)last_start - (uint8_t *)ring->start;
	struct kgsl_ibdesc ibdesc = {
			.gpuaddr     = kgsl_ring->bo->gpuaddr + offset,
			.hostptr     = last_start,
			.sizedwords  = ring->cur - last_start,
	};
	struct kgsl_ringbuffer_issueibcmds req = {
			.drawctxt_id = kgsl_pipe->drawctxt_id,
			.ibdesc_addr = (unsigned long)&ibdesc,
			.numibs      = 1,
			.flags       = KGSL_CONTEXT_SUBMIT_IB_LIST,
	};
	int ret;

	/* explicit fencing is not supported by this backend: */
	assert(in_fence_fd == -1);
	assert(out_fence_fd == NULL);

	kgsl_pipe_pre_submit(kgsl_pipe);

	/* z180_cmdstream_issueibcmds() is made of fail: */
	if (ring->pipe->id == FD_PIPE_2D) {
		/* fix up size field in last cmd packet */
		uint32_t last_size = (uint32_t)(ring->cur - last_start);
		/* 5 is length of first packet, 2 for the two 7f000000's */
		last_start[2] = last_size - (5 + 2);
		/* the 2d path submits the whole bo from its start with a
		 * fixed dword count, rather than just the new commands:
		 */
		ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr;
		ibdesc.hostptr = kgsl_ring->bo->hostptr;
		ibdesc.sizedwords = 0x145;
		req.timestamp = (uintptr_t)kgsl_ring->bo->hostptr;
	}

	/* retry if the submit ioctl is interrupted or asked to back off: */
	do {
		ret = ioctl(kgsl_pipe->fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &req);
	} while ((ret == -1) && ((errno == EINTR) || (errno == EAGAIN)));
	if (ret)
		ERROR_MSG("issueibcmds failed!  %d (%s)", ret, strerror(errno));

	/* on success req.timestamp is filled in by the kernel; record it so
	 * destroy/wait can synchronize against this submit:
	 */
	ring->last_timestamp = req.timestamp;
	ring->last_start = ring->cur;

	kgsl_pipe_post_submit(kgsl_pipe, req.timestamp);

	return ret;
}
162
163static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
164		const struct fd_reloc *r)
165{
166	struct kgsl_bo *kgsl_bo = to_kgsl_bo(r->bo);
167	uint32_t addr = kgsl_bo_gpuaddr(kgsl_bo, r->offset);
168	assert(addr);
169	if (r->shift < 0)
170		addr >>= -r->shift;
171	else
172		addr <<= r->shift;
173	(*ring->cur++) = addr | r->or;
174	kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo);
175}
176
177static uint32_t kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
178		struct fd_ringbuffer *target, uint32_t cmd_idx)
179{
180	struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target);
181	assert(cmd_idx == 0);
182	(*ring->cur++) = target_ring->bo->gpuaddr;
183	return 	offset_bytes(target->cur, target->start);
184}
185
186static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
187{
188	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
189	if (ring->last_timestamp)
190		fd_pipe_wait(ring->pipe, ring->last_timestamp);
191	if (kgsl_ring->bo)
192		kgsl_rb_bo_del(kgsl_ring->bo);
193	free(kgsl_ring);
194}
195
/* vtable hooking this kgsl implementation into the generic
 * fd_ringbuffer interface:
 */
static const struct fd_ringbuffer_funcs funcs = {
		.hostptr = kgsl_ringbuffer_hostptr,
		.flush = kgsl_ringbuffer_flush,
		.emit_reloc = kgsl_ringbuffer_emit_reloc,
		.emit_reloc_ring = kgsl_ringbuffer_emit_reloc_ring,
		.destroy = kgsl_ringbuffer_destroy,
};
203
204drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
205		uint32_t size, enum fd_ringbuffer_flags flags)
206{
207	struct kgsl_ringbuffer *kgsl_ring;
208	struct fd_ringbuffer *ring = NULL;
209
210	assert(!flags);
211
212	kgsl_ring = calloc(1, sizeof(*kgsl_ring));
213	if (!kgsl_ring) {
214		ERROR_MSG("allocation failed");
215		goto fail;
216	}
217
218	ring = &kgsl_ring->base;
219	atomic_set(&ring->refcnt, 1);
220
221	ring->funcs = &funcs;
222	ring->size = size;
223
224	kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size);
225	if (!kgsl_ring->bo) {
226		ERROR_MSG("ringbuffer allocation failed");
227		goto fail;
228	}
229
230	return ring;
231fail:
232	if (ring)
233		fd_ringbuffer_del(ring);
234	return NULL;
235}
236