/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

static void ttm_bo_global_kobj_release(struct kobject *kobj);

/**
 * ttm_global_mutex - protecting the global BO state
 */
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
EXPORT_SYMBOL(ttm_bo_glob);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
		   bo, bo->mem.num_pages, bo->mem.size >> 10,
		   bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
				atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

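/*
 * Add the bo to the LRU list of the resource manager backing @mem, and to
 * the global swap LRU when it is backed by a TT object that can actually be
 * swapped out. Pinned and NO_EVICT buffers are left off the lists.
 */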
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;

	if (!list_empty(&bo->lru) || bo->pin_count)
		return;

	if (mem->placement & TTM_PL_FLAG_NO_EVICT)
		return;

	man = ttm_manager_type(bdev, mem->mem_type);
	list_add_tail(&bo->lru, &man->lru[bo->priority]);

	if (man->use_tt && bo->ttm &&
	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
				     TTM_PAGE_FLAG_SWAPPED))) {
		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
	}
}

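/*
 * Remove the bo from the LRU and swap lists it is currently on and notify
 * the driver through del_from_lru_notify if anything was actually removed.
 */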
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool notify = false;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		notify = true;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		notify = true;
	}

	if (notify && bdev->driver->del_from_lru_notify)
		bdev->driver->del_from_lru_notify(bo);
}

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
				     struct ttm_buffer_object *bo)
{
	if (!pos->first)
		pos->first = bo;
	pos->last = bo;
}

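/*
 * Re-add a reserved bo at the tail of its LRU lists so it becomes the last
 * eviction candidate again, optionally remembering its position in @bulk so
 * a whole range can later be moved in one go by ttm_bo_bulk_move_lru_tail().
 */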
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
			     struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_mem_to_lru(bo, &bo->mem);

	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT) &&
	    !bo->pin_count) {
		switch (bo->mem.mem_type) {
		case TTM_PL_TT:
			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
			break;

		case TTM_PL_VRAM:
			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
			break;
		}
		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
	}
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
		struct ttm_resource_manager *man;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
		struct ttm_resource_manager *man;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
		struct list_head *lru;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		lru = &ttm_bo_glob.swap_lru[i];
		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
	}
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);

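/*
 * Move the buffer to the location described by @mem: create, populate and
 * bind a TT object when the new placement needs one, call the driver's
 * move_notify hook, and then hand the actual copy to ttm_bo_move_ttm(), the
 * driver's move callback or the generic memcpy fallback.
 */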
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
	int ret;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_man->use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_man->use_tt);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;

			ret = ttm_bo_tt_bind(bo, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (old_man->use_tt && new_man->use_tt)
		ret = ttm_bo_move_ttm(bo, ctx, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, ctx, mem);
	else
		ret = ttm_bo_move_memcpy(bo, ctx, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			swap(*mem, bo->mem);
			bdev->driver->move_notify(bo, false, mem);
			swap(*mem, bo->mem);
		}

		goto out_err;
	}

moved:
	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
	return 0;

out_err:
	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
	if (!new_man->use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Called with the bo reserved. Releases the GPU memory type usage of the
 * buffer on destruction; this is the place to put driver-specific hooks
 * that release driver-private resources. The caller is responsible for
 * releasing the reservation lock afterwards.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->mem);
}

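/*
 * Give the bo a private copy of its reservation object so the delayed
 * destroy path no longer depends on fences added to a shared object after
 * this point; only the fences present at this time are copied over.
 */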
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it any more. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&ttm_bo_glob.lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&ttm_bo_glob.lru_lock);
	}

	return r;
}

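/*
 * Enable software signaling on all fences attached to the individualized
 * reservation object so that they actually signal and the delayed destroy
 * path can make progress.
 */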
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i;

	rcu_read_lock();
	fobj = rcu_dereference(resv->fence);
	fence = rcu_dereference(resv->fence_excl);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference(fobj->shared[i]);

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	rcu_read_unlock();
}

/*
 * ttm_bo_cleanup_refs - clean up a delayed-destroy buffer object
 * @bo: The buffer object to clean up.
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for the GPU, return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 *
 * If the bo is idle, remove it from the LRU lists and unreference it.
 * If it is not idle, block if possible.
 *
 * Must be called with lru_lock and the reservation held; this function will
 * drop the lru lock and optionally the reservation lock before returning.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&ttm_bo_glob.lru_lock);

		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
						 30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&ttm_bo_glob.lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
			/*
			 * We raced and lost; someone else holds the reservation
			 * now, and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if that's not the case, any delayed destruction
			 * would now succeed because we finished waiting, so just
			 * return success here.
			 */
			spin_unlock(&ttm_bo_glob.lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&ttm_bo_glob.lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	spin_unlock(&ttm_bo_glob.lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		list_move_tail(&bo->ddestroy, &removed);
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		if (remove_all || bo->base.resv != &bo->base._resv) {
			spin_unlock(&glob->lru_lock);
			dma_resv_lock(bo->base.resv, NULL);

			spin_lock(&glob->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (dma_resv_trylock(bo->base.resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&glob->lru_lock);
		}

		ttm_bo_put(bo);
		spin_lock(&glob->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

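/*
 * Final kref callback: individualize the reservation object, notify the
 * driver, and either destroy the bo immediately when it is idle or put it
 * on the device's delayed destroy list otherwise.
 */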
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;
	int ret;

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort: if we fail to allocate memory for the
			 * fences, block for the BO to become idle.
			 */
			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
						  30 * HZ);
		}

		if (bo->bdev->driver->release_notify)
			bo->bdev->driver->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, &bo->mem);
	}

	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
	    !dma_resv_trylock(bo->base.resv)) {
		/* The BO is not idle, resurrect it for delayed destroy */
		ttm_bo_flush_all_fences(bo);
		bo->deleted = true;

		spin_lock(&ttm_bo_glob.lru_lock);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT || bo->pin_count) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			bo->pin_count = 0;
			ttm_bo_del_from_lru(bo);
			ttm_bo_add_mem_to_lru(bo, &bo->mem);
		}

		kref_init(&bo->kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&ttm_bo_glob.lru_lock);

		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		return;
	}

	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_del_from_lru(bo);
	list_del(&bo->ddestroy);
	spin_unlock(&ttm_bo_glob.lru_lock);

	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);

	atomic_dec(&ttm_bo_glob.bo_count);
	dma_fence_put(bo->moving);
	if (!ttm_bo_uses_embedded_gem_object(bo))
		dma_resv_fini(&bo->base._resv);
	bo->destroy(bo);
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

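/*
 * Evict a reserved buffer object: ask the driver for eviction placements and
 * move the bo there, or drop the backing store entirely when the driver
 * returns no placements at all.
 */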
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ttm_bo_wait(bo, false, false);

		ttm_bo_cleanup_memtype_use(bo);
		return ttm_tt_create(bo, false);
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.offset = 0;
	evict_mem.bus.addr = NULL;

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_resource_free(bo, &evict_mem);
	}
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

/*
 * Check whether the target bo is allowed to be evicted or swapped out:
 *
 * a. If it shares the same reservation object as ctx->resv, assume the
 * reservation is already locked, so don't lock it again; return true
 * directly when the operation allows reserved eviction or the target bo
 * is already on the delayed free list;
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
	bool ret = false;

	if (bo->pin_count) {
		*locked = false;
		if (busy)
			*busy = false;
		return false;
	}

	if (bo->base.resv == ctx->resv) {
		dma_resv_assert_held(bo->base.resv);
		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
			ret = true;
		*locked = false;
		if (busy)
			*busy = false;
	} else {
		ret = dma_resv_trylock(bo->base.resv);
		*locked = ret;
		if (busy)
			*busy = !ret;
	}

	return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
							  ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

	/*
	 * TODO: It would be better to keep the BO locked until allocation is at
	 * least tried one more time, but that would mean a much larger rework
	 * of TTM.
	 */
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	return r == -EDEADLK ? -EBUSY : r;
}

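/*
 * Walk the LRU lists of @man and evict the first buffer object we can
 * reserve and that the driver considers valuable to evict for @place. If
 * only busy objects are found, block on one of them before returning so
 * that the caller can retry the allocation.
 */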
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&ttm_bo_glob.lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			bool busy;

			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
							    &busy)) {
				if (busy && !busy_bo && ticket !=
				    dma_resv_locking_ctx(bo->base.resv))
					busy_bo = bo;
				continue;
			}

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}
			if (!ttm_bo_get_unless_zero(bo)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&ttm_bo_glob.lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&ttm_bo_glob.lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);
	else
		ttm_bo_move_to_lru_tail_unlocked(bo);

	ttm_bo_put(bo);
	return ret;
}

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 struct ttm_resource *mem,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		dma_fence_put(fence);
		return -EBUSY;
	}

	dma_resv_add_shared_fence(bo->base.resv, fence);

	ret = dma_resv_reserve_shared(bo->base.resv, 1);
	if (unlikely(ret)) {
		dma_fence_put(fence);
		return ret;
	}

	dma_fence_put(bo->moving);
	bo->moving = fence;
	return 0;
}

/*
 * Repeatedly evict memory from the LRU for @mem->mem_type until we create
 * enough space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
	struct ww_acquire_ctx *ticket;
	int ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_resource_alloc(bo, place, mem);
		if (likely(!ret))
			break;
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}

static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

/**
 * ttm_bo_mem_placement - check if placement is compatible
 * @bo: BO to find memory for
 * @place: where to search
 * @mem: the memory object to fill in
 * @ctx: operation context
 *
 * Check if the placement is compatible and fill in the mem structure.
 * Returns 0 when the placement can be used and @mem has been filled in,
 * or -EBUSY when the corresponding memory type is not usable.
 */
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource *mem,
				struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;
	uint32_t cur_flags = 0;

	man = ttm_manager_type(bdev, place->mem_type);
	if (!man || !ttm_resource_manager_used(man))
		return -EBUSY;

	cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
					  place->flags);
	cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;

	mem->mem_type = place->mem_type;
	mem->placement = cur_flags;

	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_del_from_lru(bo);
	ttm_bo_add_mem_to_lru(bo, mem);
	spin_unlock(&ttm_bo_glob.lru_lock);

	return 0;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_resource *mem,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	ret = dma_resv_reserve_shared(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
		if (ret)
			continue;

		type_found = true;
		ret = ttm_resource_alloc(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		man = ttm_manager_type(bdev, mem->mem_type);
		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
		if (ret)
			continue;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	ret = -ENOMEM;
	if (!type_found) {
		pr_err("No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
		ttm_bo_move_to_lru_tail_unlocked(bo);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	int ret = 0;
	struct ttm_resource mem;

	dma_resv_assert_held(bo->base.resv);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.offset = 0;
	mem.bus.addr = NULL;
	mem.mm_node = NULL;

	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
	if (ret)
		ttm_resource_free(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_resource *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if ((mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (mem->mem_type == heap->mem_type) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_resource *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

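/*
 * Validate the bo against @placement: if the current resource is already
 * compatible only the placement flags are updated, otherwise the buffer is
 * moved to a suitable location. An empty placement drops the backing store
 * entirely.
 */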
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;
	uint32_t new_flags;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement) {
		ret = ttm_bo_pipeline_gutting(bo);
		if (ret)
			return ret;

		return ttm_tt_create(bo, false);
	}

	/*
	 * Check whether we need to move the buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	} else {
		bo->mem.placement &= TTM_PL_MASK_CACHING;
		bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

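/*
 * Initialize a (possibly embedded) buffer object, account its kernel memory
 * use, set up an initial system-memory placement and validate it against
 * @placement. On success the bo is returned reserved, unless @resv was
 * given, in which case the caller already holds the reservation.
 */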
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	int ret = 0;
	unsigned long num_pages;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

	kref_init(&bo->kref);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.offset = 0;
	bo->mem.bus.addr = NULL;
	bo->moving = NULL;
	bo->mem.placement = TTM_PL_FLAG_CACHED;
	bo->acc_size = acc_size;
	bo->pin_count = 0;
	bo->sg = sg;
	if (resv) {
		bo->base.resv = resv;
		dma_resv_assert_held(bo->base.resv);
	} else {
		bo->base.resv = &bo->base._resv;
	}
	if (!ttm_bo_uses_embedded_gem_object(bo)) {
		/*
		 * bo.gem is not initialized, so we have to setup the
		 * struct elements we want to use regardless.
		 */
		dma_resv_init(&bo->base._resv);
		drm_vma_node_reset(&bo->base.vma_node);
	}
	atomic_inc(&ttm_bo_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = dma_resv_trylock(bo->base.resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_put(bo);
		return ret;
	}

	ttm_bo_move_to_lru_tail_unlocked(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

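/*
 * Convenience wrapper around ttm_bo_init_reserved() that drops the
 * reservation again on success when no external @resv was provided.
 *
 * Typical driver usage is roughly the following sketch (error handling and
 * driver-specific placement setup omitted; dbo, page_align and
 * driver_bo_destroy are illustrative names only):
 *
 *	size_t acc = ttm_bo_dma_acc_size(bdev, size, sizeof(*dbo));
 *
 *	ret = ttm_bo_init(bdev, &dbo->tbo, size, ttm_bo_type_device,
 *			  &dbo->placement, page_align, true, acc,
 *			  NULL, NULL, driver_bo_destroy);
 */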
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		size_t acc_size,
		struct sg_table *sg,
		struct dma_resv *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
			      unsigned long bo_size,
			      unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_resource_manager_force_list_clean(bdev, man);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	__free_page(glob->dummy_read_page);
}

static void ttm_bo_global_release(void)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_bo_glob_use_count > 0)
		goto out;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
	ttm_mem_global_release(&ttm_mem_glob);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_bo_global_init(void)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret = 0;
	unsigned i;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_bo_glob_use_count > 1)
		goto out;

	ret = ttm_mem_global_init(&ttm_mem_glob);
	if (ret)
		goto out;

	spin_lock_init(&glob->lru_lock);
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
out:
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret = 0;
	unsigned i;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	if (!ret)
		ttm_bo_global_release();

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
{
	struct ttm_resource_manager *man = &bdev->sysman;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	man->use_tt = true;

	ttm_resource_manager_init(man, 0);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
	ttm_resource_manager_set_used(man, true);
}

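/*
 * Initialize a struct ttm_bo_device: take a reference on the global BO
 * state, set up the built-in system memory manager, the delayed destroy
 * work and the address space used for mmap offsets.
 */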
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       struct drm_vma_offset_manager *vma_manager,
		       bool need_dma32)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_bo_global_init();
	if (ret)
		return ret;

	bdev->driver = driver;

	ttm_bo_init_sysman(bdev);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

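/*
 * Wait for all fences on the bo's reservation object to signal, or just poll
 * them when @no_wait is set. Returns -EBUSY when the bo is still busy and
 * waiting is not allowed or the 15 second timeout expires; on success any
 * exclusive fence is cleared as well.
 */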
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	dma_resv_add_excl_fence(bo->base.resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	bool locked;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
							    NULL))
				continue;

			if (!ttm_bo_get_unless_zero(bo)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}

			ret = 0;
			break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/*
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_resource evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
		if (unlikely(ret != 0))
			goto out;
	}

	/*
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);

void ttm_bo_swapout_all(void)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}

int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
{
	return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
}

void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
{
	bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
}