/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

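/* GEM object destructor: resume the device if necessary, tear down any
 * dma-buf import attachment, and drop the final TTM reference on the
 * backing buffer object.
 */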
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

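/* Called for each new GEM handle on this object: on NV50-and-later VMM
 * classes, create (or take another reference on) the per-client virtual
 * mapping of the buffer.  Older VMM classes have no per-client address
 * space to map into, so there is nothing to do.
 */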
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

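/* Unlink the VMA from the buffer.  If the GPU may still be using the
 * mapping (a fence is attached), defer the actual teardown to the client
 * workqueue until that fence signals; if the work item cannot be
 * allocated, fall back to a bounded synchronous wait.
 */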
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

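/* Called when a GEM handle is closed: drop the client's reference on its
 * VMA and, once the last reference is gone, unmap it (waking the device
 * around the unmap).
 */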
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

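/* Allocate a buffer object and its embedded GEM object, defaulting to the
 * CPU domain when neither VRAM nor GART was requested.
 */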
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret;

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret)
		return ret;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
	*pnvbo = nvbo;
	return 0;
}

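/* Fill in the userspace view of a buffer: domain, size, mmap offset and
 * tiling state, plus the per-client virtual address on NV50-and-later VMMs.
 */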
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}

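/* Choose the preferred placement for a pushbuf buffer from the domains
 * userspace allows: keep the current placement when it is still valid,
 * otherwise prefer VRAM over GART.
 */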
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_domains = 0;

	if (!domains)
		return -EINVAL;

	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

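/* Undo validation for every buffer still on the list: attach the submission
 * fence (if any) to each buffer and its per-client VMA, drop any temporary
 * kmap, then unreserve and drop the GEM reference taken during lookup.
 */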
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

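/* Look up and reserve every buffer on the pushbuf's validation list under a
 * single ww_acquire ticket, retrying through the slowpath on -EDEADLK, and
 * sort the buffers into VRAM-only, GART-only and either-domain lists.
 */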
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}

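/* For each reserved buffer: pick its placement, validate it with TTM, and
 * synchronise against the channel.  On pre-Tesla chips also refresh the
 * presumed offset/domain that userspace relocations were written against,
 * counting how many entries changed so the caller knows relocations must
 * be re-applied.
 */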
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	} else if (ret > 0) {
		*apply_relocs = true;
	}

	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

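/* Patch presumed addresses into the submitted buffers.  Each relocation
 * identifies a word inside one buffer and rewrites it from another buffer's
 * presumed offset: the LOW/HIGH flags select which half of the address to
 * write, and the OR flag mixes in a domain-dependent value (vor/tor).
 */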
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}

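/* Main submission ioctl: copy in the push, buffer and (if needed) relocation
 * arrays, validate and fence the buffer list, then feed the pushbuf entries
 * to the channel using whichever method the hardware supports (IB ring,
 * CALL on NV25 and later, or JUMP with a patched jump back into the ring on
 * older chips).
 */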
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

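/* CPU_PREP ioctl: wait (up to 30 seconds, or not at all with NOWAIT) for
 * pending GPU work on the buffer, then make its backing pages coherent for
 * CPU access.
 */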
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}