/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

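/*
 * Custom fence wait: releases only signal once the host has processed
 * them, so nudge the device (notify OOM, queue garbage collection) and
 * poll with a small backoff until the fence signals or the timeout
 * expires.
 */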
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence, "failed to wait on release %llu after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

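/*
 * Allocate a release struct and reserve an idr handle and seqno for it.
 * The embedded dma_fence is not initialized here (base.ops stays NULL);
 * that happens later in qxl_release_fence_buffer_objects().
 */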
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

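/*
 * Drop the idr handle and the BO list. If the fence was initialized
 * (base.ops set), signal it and drop our reference; otherwise free the
 * struct directly.
 */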
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release bo's; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

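/*
 * Validate a BO from the release list into its placement, reserve a
 * shared fence slot on its reservation object and let qxl_bo_check_id()
 * allocate a host surface for surface BOs that still need one.
 */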
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

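/*
 * A surface destroy command can reuse the release BO of the matching
 * create command: the destroy's release info is stashed after the
 * create command in the same chunk. Everything else goes through the
 * regular reserved-release allocation path.
 */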
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

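/*
 * Allocate a release and suballocate a chunk for it out of the current
 * release BO for the given type (drawable, surface cmd or cursor cmd).
 * When the current BO is full, a fresh page-sized BO is allocated and
 * becomes the new suballocation source for that type.
 */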
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

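/*
 * Map the page of the release BO that holds this release and return a
 * pointer to its qxl_release_info at the suballocation offset. Paired
 * with qxl_release_unmap() below.
 */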
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

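/*
 * Turn the release into a live fence: initialize and emit the embedded
 * dma_fence, attach it as a shared fence to every reserved BO on the
 * list, bump the BOs to the LRU tail and drop their reservations.
 */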
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	spin_lock(&ttm_bo_glob.lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_shared_fence(bo->base.resv, &release->base);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	ww_acquire_fini(&release->ticket);
}