162306a36Sopenharmony_ci/*
262306a36Sopenharmony_ci * Copyright © 2016 Intel Corporation
362306a36Sopenharmony_ci *
462306a36Sopenharmony_ci * Permission is hereby granted, free of charge, to any person obtaining a
562306a36Sopenharmony_ci * copy of this software and associated documentation files (the "Software"),
662306a36Sopenharmony_ci * to deal in the Software without restriction, including without limitation
762306a36Sopenharmony_ci * the rights to use, copy, modify, merge, publish, distribute, sublicense,
862306a36Sopenharmony_ci * and/or sell copies of the Software, and to permit persons to whom the
962306a36Sopenharmony_ci * Software is furnished to do so, subject to the following conditions:
1062306a36Sopenharmony_ci *
1162306a36Sopenharmony_ci * The above copyright notice and this permission notice (including the next
1262306a36Sopenharmony_ci * paragraph) shall be included in all copies or substantial portions of the
1362306a36Sopenharmony_ci * Software.
1462306a36Sopenharmony_ci *
1562306a36Sopenharmony_ci * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1662306a36Sopenharmony_ci * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1762306a36Sopenharmony_ci * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1862306a36Sopenharmony_ci * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1962306a36Sopenharmony_ci * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2062306a36Sopenharmony_ci * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2162306a36Sopenharmony_ci * IN THE SOFTWARE.
2262306a36Sopenharmony_ci *
2362306a36Sopenharmony_ci */
2462306a36Sopenharmony_ci
2562306a36Sopenharmony_ci#include <linux/sched/mm.h>
2662306a36Sopenharmony_ci#include <linux/dma-fence-array.h>
2762306a36Sopenharmony_ci#include <drm/drm_gem.h>
2862306a36Sopenharmony_ci
2962306a36Sopenharmony_ci#include "display/intel_display.h"
3062306a36Sopenharmony_ci#include "display/intel_frontbuffer.h"
3162306a36Sopenharmony_ci#include "gem/i915_gem_lmem.h"
3262306a36Sopenharmony_ci#include "gem/i915_gem_tiling.h"
3362306a36Sopenharmony_ci#include "gt/intel_engine.h"
3462306a36Sopenharmony_ci#include "gt/intel_engine_heartbeat.h"
3562306a36Sopenharmony_ci#include "gt/intel_gt.h"
3662306a36Sopenharmony_ci#include "gt/intel_gt_requests.h"
3762306a36Sopenharmony_ci#include "gt/intel_tlb.h"
3862306a36Sopenharmony_ci
3962306a36Sopenharmony_ci#include "i915_drv.h"
4062306a36Sopenharmony_ci#include "i915_gem_evict.h"
4162306a36Sopenharmony_ci#include "i915_sw_fence_work.h"
4262306a36Sopenharmony_ci#include "i915_trace.h"
4362306a36Sopenharmony_ci#include "i915_vma.h"
4462306a36Sopenharmony_ci#include "i915_vma_resource.h"
4562306a36Sopenharmony_ci
/*
 * Assert that the vma's object dma-resv lock is held (shared mode
 * suffices) on eviction paths. The single exception is when the vm
 * itself is already dead (its refcount has dropped to zero) and we are
 * unbinding purely to tear the vm down.
 */
static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 * being held.
	 */
	if (kref_read(&vma->vm->ref))
		assert_object_held_shared(vma->obj);
}
5662306a36Sopenharmony_ci
5762306a36Sopenharmony_cistatic struct kmem_cache *slab_vmas;
5862306a36Sopenharmony_ci
5962306a36Sopenharmony_cistatic struct i915_vma *i915_vma_alloc(void)
6062306a36Sopenharmony_ci{
6162306a36Sopenharmony_ci	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
6262306a36Sopenharmony_ci}
6362306a36Sopenharmony_ci
6462306a36Sopenharmony_cistatic void i915_vma_free(struct i915_vma *vma)
6562306a36Sopenharmony_ci{
6662306a36Sopenharmony_ci	return kmem_cache_free(slab_vmas, vma);
6762306a36Sopenharmony_ci}
6862306a36Sopenharmony_ci
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

/*
 * Debug aid: print the stack trace recorded (via stackdepot) at the time
 * vma->node was inserted into the drm_mm, tagged with @reason. Only built
 * when both GEM error logging and drm_mm debugging are enabled; otherwise
 * the stub below compiles to nothing.
 */
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		drm_dbg(vma->obj->base.dev,
			"vma.node [%08llx + %08llx] %s: unknown owner\n",
			vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	drm_dbg(vma->obj->base.dev,
		"vma.node [%08llx + %08llx] %s: inserted at %s\n",
		vma->node.start, vma->node.size, reason, buf);
}

#else

/* No-op stub when allocation-site tracking is not configured in. */
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif
9762306a36Sopenharmony_ci
/* Map an embedded i915_active tracker back to its containing vma. */
static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}
10262306a36Sopenharmony_ci
/*
 * i915_active activation callback: pin the vma by reference for the
 * duration of the activity. Returns -ENOENT if the vma's refcount has
 * already dropped to zero (it is being destroyed).
 */
static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
10762306a36Sopenharmony_ci
/* i915_active retirement callback: drop the reference taken on activation. */
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}
11262306a36Sopenharmony_ci
/*
 * Allocate and initialise a new vma binding @obj into @vm with the
 * optional GGTT @view, then insert it into the object's vma rb-tree
 * (under obj->vma.lock) and the vm's unbound list (under vm->mutex).
 *
 * Returns the new vma on success, the pre-existing vma if another thread
 * won the insertion race, or an ERR_PTR: -ENOMEM on allocation failure,
 * -E2BIG if the (view-adjusted) size exceeds the address space, or the
 * error from the interruptible vm mutex lock.
 */
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int err;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);
	INIT_LIST_HEAD(&vma->obj_link);
	RB_CLEAR_NODE(&vma->obj_node);

	/* Non-normal views map only a subset/permutation of the object. */
	if (view && view->type != I915_GTT_VIEW_NORMAL) {
		vma->gtt_view = *view;
		if (view->type == I915_GTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	/* 'pos' still holds ERR_PTR(-E2BIG) for this bail-out. */
	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	err = mutex_lock_interruptible(&vm->mutex);
	if (err) {
		pos = ERR_PTR(err);
		goto err_vma;
	}

	vma->vm = vm;
	list_add_tail(&vma->vm_link, &vm->unbound_list);

	spin_lock(&obj->vma.lock);
	if (i915_is_ggtt(vm)) {
		/* GGTT offsets/fences are limited to 32 bits. */
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);
	mutex_unlock(&vm->mutex);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
	list_del_init(&vma->vm_link);
	mutex_unlock(&vm->mutex);
err_vma:
	/* On a lost race, 'pos' is the winning vma; otherwise an ERR_PTR. */
	i915_vma_free(vma);
	return pos;
}
25162306a36Sopenharmony_ci
25262306a36Sopenharmony_cistatic struct i915_vma *
25362306a36Sopenharmony_cii915_vma_lookup(struct drm_i915_gem_object *obj,
25462306a36Sopenharmony_ci	   struct i915_address_space *vm,
25562306a36Sopenharmony_ci	   const struct i915_gtt_view *view)
25662306a36Sopenharmony_ci{
25762306a36Sopenharmony_ci	struct rb_node *rb;
25862306a36Sopenharmony_ci
25962306a36Sopenharmony_ci	rb = obj->vma.tree.rb_node;
26062306a36Sopenharmony_ci	while (rb) {
26162306a36Sopenharmony_ci		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
26262306a36Sopenharmony_ci		long cmp;
26362306a36Sopenharmony_ci
26462306a36Sopenharmony_ci		cmp = i915_vma_compare(vma, vm, view);
26562306a36Sopenharmony_ci		if (cmp == 0)
26662306a36Sopenharmony_ci			return vma;
26762306a36Sopenharmony_ci
26862306a36Sopenharmony_ci		if (cmp < 0)
26962306a36Sopenharmony_ci			rb = rb->rb_right;
27062306a36Sopenharmony_ci		else
27162306a36Sopenharmony_ci			rb = rb->rb_left;
27262306a36Sopenharmony_ci	}
27362306a36Sopenharmony_ci
27462306a36Sopenharmony_ci	return NULL;
27562306a36Sopenharmony_ci}
27662306a36Sopenharmony_ci
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view)
{
	struct i915_vma *vma;

	/* Views only make sense for GGTT/DPT mappings. */
	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!kref_read(&vm->ref));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
31162306a36Sopenharmony_ci
/*
 * Deferred vma-bind work item, run as a dma_fence_work so that PTE setup
 * can be queued behind outstanding dependencies (async unbinds, moves).
 */
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;		/* preallocated page-table memory */
	struct i915_vma_resource *vma_res;	/* resource consumed by the bind */
	struct drm_i915_gem_object *obj;	/* reference held until release */
	struct i915_sw_dma_fence_cb cb;		/* chains to the previous bind fence */
	unsigned int pat_index;			/* PAT index to encode in the PTEs */
	unsigned int flags;			/* bind flags (global/local) */
};
32262306a36Sopenharmony_ci
/*
 * dma_fence_work callback: perform the actual PTE binding for the vma
 * resource captured in the work item, unless the backing object ended up
 * in an unknown state (e.g. a failed clear/move), in which case the bind
 * must be skipped.
 */
static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	/*
	 * We are about the bind the object, which must mean we have already
	 * signaled the work to potentially clear/move the pages underneath. If
	 * something went wrong at that stage then the object should have
	 * unknown_state set, in which case we need to skip the bind.
	 */
	if (i915_gem_object_has_unknown_state(vw->obj))
		return;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->pat_index, vw->flags);
}
34062306a36Sopenharmony_ci
/*
 * dma_fence_work release callback: drop the object reference and free the
 * page-table stash and vma resource owned by the work item. Both obj and
 * vma_res may be NULL if the work was never armed.
 */
static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->obj)
		i915_gem_object_put(vw->obj);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}
35262306a36Sopenharmony_ci
/* Fence-work ops tying the bind callback and its cleanup together. */
static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};
35862306a36Sopenharmony_ci
/*
 * Allocate and initialise a vma bind work item. The work starts out
 * disabled (dma.error = -EAGAIN) and is only armed by i915_vma_bind()
 * once a bind is actually queued. Returns NULL on allocation failure.
 */
struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
37262306a36Sopenharmony_ci
/*
 * Wait (interruptibly) for any async bind tracked as the vma's exclusive
 * fence to complete. Returns 0 if there is nothing to wait for or the
 * wait succeeded, otherwise the error from dma_fence_wait().
 */
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		/* Take a reference under RCU; the fence may be replaced. */
		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}
39162306a36Sopenharmony_ci
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/*
 * Debug-only check that the vma's bind has completed: returns 0 when no
 * bind fence exists or it has signalled successfully, the fence's error
 * if it failed, or -EBUSY while the bind is still in flight. Compiles to
 * a constant 0 when GEM debugging is disabled.
 */
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif
41362306a36Sopenharmony_ci
/*
 * Populate a preallocated vma resource with a snapshot of the vma's
 * current backing state (pages, page sizes, placement, offsets) so the
 * resource can outlive the vma for async unbind. Exported for selftests.
 */
I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, __i915_vma_offset(vma),
			       __i915_vma_size(vma), vma->size, vma->guard);
}
42662306a36Sopenharmony_ci
/**
 * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
 * @vma: VMA to map
 * @pat_index: PAT index to set in PTE
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 *
 * Caller must hold vma->vm->mutex. Returns 0 on success or a negative
 * error code; @vma_res is always consumed or freed, even on error.
 */
int i915_vma_bind(struct i915_vma *vma,
		  unsigned int pat_index,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > i915_vma_size(vma));

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	/* Only attempt bind types not already in place on this vma. */
	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->pat_index = pat_index;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */
		work->obj = i915_gem_object_get(vma->obj);
	} else {
		/* Synchronous bind: wait for any pending move, then map now. */
		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
		if (ret) {
			i915_vma_resource_free(vma->resource);
			vma->resource = NULL;

			return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
54862306a36Sopenharmony_ci
/*
 * Pin a GGTT vma and return a CPU-side I/O mapping of it, creating and
 * caching the mapping on first use. The cached pointer has its low bit
 * set (page_pack_bits) when it came from i915_gem_object_pin_map() so
 * that unbind knows which release path to use. Also pins a fence and
 * marks the vma for GGTT-write flushing. Returns an IOMEM_ERR_PTR on
 * failure; the mapping itself persists until unbind/evict.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IOMEM_ERR_PTR(-EINVAL);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj)) {
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		} else if (i915_vma_is_map_and_fenceable(vma)) {
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						i915_vma_offset(vma),
						i915_vma_size(vma));
		} else {
			ptr = (void __iomem *)
				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				goto err;
			}
			/* Tag the pointer so unmap uses release_map below. */
			ptr = page_pack_bits(ptr, 1);
		}

		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		/* Racing mappers: keep the first, undo ours. */
		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			if (page_unmask_bits(ptr))
				__i915_gem_object_release_map(vma->obj);
			else
				io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return page_mask_bits(ptr);

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IOMEM_ERR_PTR(err);
}
61662306a36Sopenharmony_ci
61762306a36Sopenharmony_civoid i915_vma_flush_writes(struct i915_vma *vma)
61862306a36Sopenharmony_ci{
61962306a36Sopenharmony_ci	if (i915_vma_unset_ggtt_write(vma))
62062306a36Sopenharmony_ci		intel_gt_flush_ggtt_writes(vma->vm->gt);
62162306a36Sopenharmony_ci}
62262306a36Sopenharmony_ci
62362306a36Sopenharmony_civoid i915_vma_unpin_iomap(struct i915_vma *vma)
62462306a36Sopenharmony_ci{
62562306a36Sopenharmony_ci	GEM_BUG_ON(vma->iomap == NULL);
62662306a36Sopenharmony_ci
62762306a36Sopenharmony_ci	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */
62862306a36Sopenharmony_ci
62962306a36Sopenharmony_ci	i915_vma_flush_writes(vma);
63062306a36Sopenharmony_ci
63162306a36Sopenharmony_ci	i915_vma_unpin_fence(vma);
63262306a36Sopenharmony_ci	i915_vma_unpin(vma);
63362306a36Sopenharmony_ci}
63462306a36Sopenharmony_ci
63562306a36Sopenharmony_civoid i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
63662306a36Sopenharmony_ci{
63762306a36Sopenharmony_ci	struct i915_vma *vma;
63862306a36Sopenharmony_ci	struct drm_i915_gem_object *obj;
63962306a36Sopenharmony_ci
64062306a36Sopenharmony_ci	vma = fetch_and_zero(p_vma);
64162306a36Sopenharmony_ci	if (!vma)
64262306a36Sopenharmony_ci		return;
64362306a36Sopenharmony_ci
64462306a36Sopenharmony_ci	obj = vma->obj;
64562306a36Sopenharmony_ci	GEM_BUG_ON(!obj);
64662306a36Sopenharmony_ci
64762306a36Sopenharmony_ci	i915_vma_unpin(vma);
64862306a36Sopenharmony_ci
64962306a36Sopenharmony_ci	if (flags & I915_VMA_RELEASE_MAP)
65062306a36Sopenharmony_ci		i915_gem_object_unpin_map(obj);
65162306a36Sopenharmony_ci
65262306a36Sopenharmony_ci	i915_gem_object_put(obj);
65362306a36Sopenharmony_ci}
65462306a36Sopenharmony_ci
65562306a36Sopenharmony_cibool i915_vma_misplaced(const struct i915_vma *vma,
65662306a36Sopenharmony_ci			u64 size, u64 alignment, u64 flags)
65762306a36Sopenharmony_ci{
65862306a36Sopenharmony_ci	if (!drm_mm_node_allocated(&vma->node))
65962306a36Sopenharmony_ci		return false;
66062306a36Sopenharmony_ci
66162306a36Sopenharmony_ci	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
66262306a36Sopenharmony_ci		return true;
66362306a36Sopenharmony_ci
66462306a36Sopenharmony_ci	if (i915_vma_size(vma) < size)
66562306a36Sopenharmony_ci		return true;
66662306a36Sopenharmony_ci
66762306a36Sopenharmony_ci	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
66862306a36Sopenharmony_ci	if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
66962306a36Sopenharmony_ci		return true;
67062306a36Sopenharmony_ci
67162306a36Sopenharmony_ci	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
67262306a36Sopenharmony_ci		return true;
67362306a36Sopenharmony_ci
67462306a36Sopenharmony_ci	if (flags & PIN_OFFSET_BIAS &&
67562306a36Sopenharmony_ci	    i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
67662306a36Sopenharmony_ci		return true;
67762306a36Sopenharmony_ci
67862306a36Sopenharmony_ci	if (flags & PIN_OFFSET_FIXED &&
67962306a36Sopenharmony_ci	    i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
68062306a36Sopenharmony_ci		return true;
68162306a36Sopenharmony_ci
68262306a36Sopenharmony_ci	if (flags & PIN_OFFSET_GUARD &&
68362306a36Sopenharmony_ci	    vma->guard < (flags & PIN_OFFSET_MASK))
68462306a36Sopenharmony_ci		return true;
68562306a36Sopenharmony_ci
68662306a36Sopenharmony_ci	return false;
68762306a36Sopenharmony_ci}
68862306a36Sopenharmony_ci
68962306a36Sopenharmony_civoid __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
69062306a36Sopenharmony_ci{
69162306a36Sopenharmony_ci	bool mappable, fenceable;
69262306a36Sopenharmony_ci
69362306a36Sopenharmony_ci	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
69462306a36Sopenharmony_ci	GEM_BUG_ON(!vma->fence_size);
69562306a36Sopenharmony_ci
69662306a36Sopenharmony_ci	fenceable = (i915_vma_size(vma) >= vma->fence_size &&
69762306a36Sopenharmony_ci		     IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
69862306a36Sopenharmony_ci
69962306a36Sopenharmony_ci	mappable = i915_ggtt_offset(vma) + vma->fence_size <=
70062306a36Sopenharmony_ci		   i915_vm_to_ggtt(vma->vm)->mappable_end;
70162306a36Sopenharmony_ci
70262306a36Sopenharmony_ci	if (mappable && fenceable)
70362306a36Sopenharmony_ci		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
70462306a36Sopenharmony_ci	else
70562306a36Sopenharmony_ci		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
70662306a36Sopenharmony_ci}
70762306a36Sopenharmony_ci
70862306a36Sopenharmony_cibool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
70962306a36Sopenharmony_ci{
71062306a36Sopenharmony_ci	struct drm_mm_node *node = &vma->node;
71162306a36Sopenharmony_ci	struct drm_mm_node *other;
71262306a36Sopenharmony_ci
71362306a36Sopenharmony_ci	/*
71462306a36Sopenharmony_ci	 * On some machines we have to be careful when putting differing types
71562306a36Sopenharmony_ci	 * of snoopable memory together to avoid the prefetcher crossing memory
71662306a36Sopenharmony_ci	 * domains and dying. During vm initialisation, we decide whether or not
71762306a36Sopenharmony_ci	 * these constraints apply and set the drm_mm.color_adjust
71862306a36Sopenharmony_ci	 * appropriately.
71962306a36Sopenharmony_ci	 */
72062306a36Sopenharmony_ci	if (!i915_vm_has_cache_coloring(vma->vm))
72162306a36Sopenharmony_ci		return true;
72262306a36Sopenharmony_ci
72362306a36Sopenharmony_ci	/* Only valid to be called on an already inserted vma */
72462306a36Sopenharmony_ci	GEM_BUG_ON(!drm_mm_node_allocated(node));
72562306a36Sopenharmony_ci	GEM_BUG_ON(list_empty(&node->node_list));
72662306a36Sopenharmony_ci
72762306a36Sopenharmony_ci	other = list_prev_entry(node, node_list);
72862306a36Sopenharmony_ci	if (i915_node_color_differs(other, color) &&
72962306a36Sopenharmony_ci	    !drm_mm_hole_follows(other))
73062306a36Sopenharmony_ci		return false;
73162306a36Sopenharmony_ci
73262306a36Sopenharmony_ci	other = list_next_entry(node, node_list);
73362306a36Sopenharmony_ci	if (i915_node_color_differs(other, color) &&
73462306a36Sopenharmony_ci	    !drm_mm_hole_follows(node))
73562306a36Sopenharmony_ci		return false;
73662306a36Sopenharmony_ci
73762306a36Sopenharmony_ci	return true;
73862306a36Sopenharmony_ci}
73962306a36Sopenharmony_ci
74062306a36Sopenharmony_ci/**
74162306a36Sopenharmony_ci * i915_vma_insert - finds a slot for the vma in its address space
74262306a36Sopenharmony_ci * @vma: the vma
74362306a36Sopenharmony_ci * @ww: An optional struct i915_gem_ww_ctx
74462306a36Sopenharmony_ci * @size: requested size in bytes (can be larger than the VMA)
74562306a36Sopenharmony_ci * @alignment: required alignment
74662306a36Sopenharmony_ci * @flags: mask of PIN_* flags to use
74762306a36Sopenharmony_ci *
74862306a36Sopenharmony_ci * First we try to allocate some free space that meets the requirements for
74962306a36Sopenharmony_ci * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
75062306a36Sopenharmony_ci * preferrably the oldest idle entry to make room for the new VMA.
75162306a36Sopenharmony_ci *
75262306a36Sopenharmony_ci * Returns:
75362306a36Sopenharmony_ci * 0 on success, negative error code otherwise.
75462306a36Sopenharmony_ci */
75562306a36Sopenharmony_cistatic int
75662306a36Sopenharmony_cii915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
75762306a36Sopenharmony_ci		u64 size, u64 alignment, u64 flags)
75862306a36Sopenharmony_ci{
75962306a36Sopenharmony_ci	unsigned long color, guard;
76062306a36Sopenharmony_ci	u64 start, end;
76162306a36Sopenharmony_ci	int ret;
76262306a36Sopenharmony_ci
76362306a36Sopenharmony_ci	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
76462306a36Sopenharmony_ci	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
76562306a36Sopenharmony_ci	GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
76662306a36Sopenharmony_ci
76762306a36Sopenharmony_ci	size = max(size, vma->size);
76862306a36Sopenharmony_ci	alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
76962306a36Sopenharmony_ci	if (flags & PIN_MAPPABLE) {
77062306a36Sopenharmony_ci		size = max_t(typeof(size), size, vma->fence_size);
77162306a36Sopenharmony_ci		alignment = max_t(typeof(alignment),
77262306a36Sopenharmony_ci				  alignment, vma->fence_alignment);
77362306a36Sopenharmony_ci	}
77462306a36Sopenharmony_ci
77562306a36Sopenharmony_ci	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
77662306a36Sopenharmony_ci	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
77762306a36Sopenharmony_ci	GEM_BUG_ON(!is_power_of_2(alignment));
77862306a36Sopenharmony_ci
77962306a36Sopenharmony_ci	guard = vma->guard; /* retain guard across rebinds */
78062306a36Sopenharmony_ci	if (flags & PIN_OFFSET_GUARD) {
78162306a36Sopenharmony_ci		GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
78262306a36Sopenharmony_ci		guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
78362306a36Sopenharmony_ci	}
78462306a36Sopenharmony_ci	/*
78562306a36Sopenharmony_ci	 * As we align the node upon insertion, but the hardware gets
78662306a36Sopenharmony_ci	 * node.start + guard, the easiest way to make that work is
78762306a36Sopenharmony_ci	 * to make the guard a multiple of the alignment size.
78862306a36Sopenharmony_ci	 */
78962306a36Sopenharmony_ci	guard = ALIGN(guard, alignment);
79062306a36Sopenharmony_ci
79162306a36Sopenharmony_ci	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
79262306a36Sopenharmony_ci	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
79362306a36Sopenharmony_ci
79462306a36Sopenharmony_ci	end = vma->vm->total;
79562306a36Sopenharmony_ci	if (flags & PIN_MAPPABLE)
79662306a36Sopenharmony_ci		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
79762306a36Sopenharmony_ci	if (flags & PIN_ZONE_4G)
79862306a36Sopenharmony_ci		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
79962306a36Sopenharmony_ci	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
80062306a36Sopenharmony_ci
80162306a36Sopenharmony_ci	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
80262306a36Sopenharmony_ci
80362306a36Sopenharmony_ci	/*
80462306a36Sopenharmony_ci	 * If binding the object/GGTT view requires more space than the entire
80562306a36Sopenharmony_ci	 * aperture has, reject it early before evicting everything in a vain
80662306a36Sopenharmony_ci	 * attempt to find space.
80762306a36Sopenharmony_ci	 */
80862306a36Sopenharmony_ci	if (size > end - 2 * guard) {
80962306a36Sopenharmony_ci		drm_dbg(vma->obj->base.dev,
81062306a36Sopenharmony_ci			"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
81162306a36Sopenharmony_ci			size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
81262306a36Sopenharmony_ci		return -ENOSPC;
81362306a36Sopenharmony_ci	}
81462306a36Sopenharmony_ci
81562306a36Sopenharmony_ci	color = 0;
81662306a36Sopenharmony_ci
81762306a36Sopenharmony_ci	if (i915_vm_has_cache_coloring(vma->vm))
81862306a36Sopenharmony_ci		color = vma->obj->pat_index;
81962306a36Sopenharmony_ci
82062306a36Sopenharmony_ci	if (flags & PIN_OFFSET_FIXED) {
82162306a36Sopenharmony_ci		u64 offset = flags & PIN_OFFSET_MASK;
82262306a36Sopenharmony_ci		if (!IS_ALIGNED(offset, alignment) ||
82362306a36Sopenharmony_ci		    range_overflows(offset, size, end))
82462306a36Sopenharmony_ci			return -EINVAL;
82562306a36Sopenharmony_ci		/*
82662306a36Sopenharmony_ci		 * The caller knows not of the guard added by others and
82762306a36Sopenharmony_ci		 * requests for the offset of the start of its buffer
82862306a36Sopenharmony_ci		 * to be fixed, which may not be the same as the position
82962306a36Sopenharmony_ci		 * of the vma->node due to the guard pages.
83062306a36Sopenharmony_ci		 */
83162306a36Sopenharmony_ci		if (offset < guard || offset + size > end - guard)
83262306a36Sopenharmony_ci			return -ENOSPC;
83362306a36Sopenharmony_ci
83462306a36Sopenharmony_ci		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
83562306a36Sopenharmony_ci					   size + 2 * guard,
83662306a36Sopenharmony_ci					   offset - guard,
83762306a36Sopenharmony_ci					   color, flags);
83862306a36Sopenharmony_ci		if (ret)
83962306a36Sopenharmony_ci			return ret;
84062306a36Sopenharmony_ci	} else {
84162306a36Sopenharmony_ci		size += 2 * guard;
84262306a36Sopenharmony_ci		/*
84362306a36Sopenharmony_ci		 * We only support huge gtt pages through the 48b PPGTT,
84462306a36Sopenharmony_ci		 * however we also don't want to force any alignment for
84562306a36Sopenharmony_ci		 * objects which need to be tightly packed into the low 32bits.
84662306a36Sopenharmony_ci		 *
84762306a36Sopenharmony_ci		 * Note that we assume that GGTT are limited to 4GiB for the
84862306a36Sopenharmony_ci		 * forseeable future. See also i915_ggtt_offset().
84962306a36Sopenharmony_ci		 */
85062306a36Sopenharmony_ci		if (upper_32_bits(end - 1) &&
85162306a36Sopenharmony_ci		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
85262306a36Sopenharmony_ci		    !HAS_64K_PAGES(vma->vm->i915)) {
85362306a36Sopenharmony_ci			/*
85462306a36Sopenharmony_ci			 * We can't mix 64K and 4K PTEs in the same page-table
85562306a36Sopenharmony_ci			 * (2M block), and so to avoid the ugliness and
85662306a36Sopenharmony_ci			 * complexity of coloring we opt for just aligning 64K
85762306a36Sopenharmony_ci			 * objects to 2M.
85862306a36Sopenharmony_ci			 */
85962306a36Sopenharmony_ci			u64 page_alignment =
86062306a36Sopenharmony_ci				rounddown_pow_of_two(vma->page_sizes.sg |
86162306a36Sopenharmony_ci						     I915_GTT_PAGE_SIZE_2M);
86262306a36Sopenharmony_ci
86362306a36Sopenharmony_ci			/*
86462306a36Sopenharmony_ci			 * Check we don't expand for the limited Global GTT
86562306a36Sopenharmony_ci			 * (mappable aperture is even more precious!). This
86662306a36Sopenharmony_ci			 * also checks that we exclude the aliasing-ppgtt.
86762306a36Sopenharmony_ci			 */
86862306a36Sopenharmony_ci			GEM_BUG_ON(i915_vma_is_ggtt(vma));
86962306a36Sopenharmony_ci
87062306a36Sopenharmony_ci			alignment = max(alignment, page_alignment);
87162306a36Sopenharmony_ci
87262306a36Sopenharmony_ci			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
87362306a36Sopenharmony_ci				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
87462306a36Sopenharmony_ci		}
87562306a36Sopenharmony_ci
87662306a36Sopenharmony_ci		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
87762306a36Sopenharmony_ci					  size, alignment, color,
87862306a36Sopenharmony_ci					  start, end, flags);
87962306a36Sopenharmony_ci		if (ret)
88062306a36Sopenharmony_ci			return ret;
88162306a36Sopenharmony_ci
88262306a36Sopenharmony_ci		GEM_BUG_ON(vma->node.start < start);
88362306a36Sopenharmony_ci		GEM_BUG_ON(vma->node.start + vma->node.size > end);
88462306a36Sopenharmony_ci	}
88562306a36Sopenharmony_ci	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
88662306a36Sopenharmony_ci	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
88762306a36Sopenharmony_ci
88862306a36Sopenharmony_ci	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
88962306a36Sopenharmony_ci	vma->guard = guard;
89062306a36Sopenharmony_ci
89162306a36Sopenharmony_ci	return 0;
89262306a36Sopenharmony_ci}
89362306a36Sopenharmony_ci
89462306a36Sopenharmony_cistatic void
89562306a36Sopenharmony_cii915_vma_detach(struct i915_vma *vma)
89662306a36Sopenharmony_ci{
89762306a36Sopenharmony_ci	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
89862306a36Sopenharmony_ci	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
89962306a36Sopenharmony_ci
90062306a36Sopenharmony_ci	/*
90162306a36Sopenharmony_ci	 * And finally now the object is completely decoupled from this
90262306a36Sopenharmony_ci	 * vma, we can drop its hold on the backing storage and allow
90362306a36Sopenharmony_ci	 * it to be reaped by the shrinker.
90462306a36Sopenharmony_ci	 */
90562306a36Sopenharmony_ci	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
90662306a36Sopenharmony_ci}
90762306a36Sopenharmony_ci
90862306a36Sopenharmony_cistatic bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
90962306a36Sopenharmony_ci{
91062306a36Sopenharmony_ci	unsigned int bound;
91162306a36Sopenharmony_ci
91262306a36Sopenharmony_ci	bound = atomic_read(&vma->flags);
91362306a36Sopenharmony_ci
91462306a36Sopenharmony_ci	if (flags & PIN_VALIDATE) {
91562306a36Sopenharmony_ci		flags &= I915_VMA_BIND_MASK;
91662306a36Sopenharmony_ci
91762306a36Sopenharmony_ci		return (flags & bound) == flags;
91862306a36Sopenharmony_ci	}
91962306a36Sopenharmony_ci
92062306a36Sopenharmony_ci	/* with the lock mandatory for unbind, we don't race here */
92162306a36Sopenharmony_ci	flags &= I915_VMA_BIND_MASK;
92262306a36Sopenharmony_ci	do {
92362306a36Sopenharmony_ci		if (unlikely(flags & ~bound))
92462306a36Sopenharmony_ci			return false;
92562306a36Sopenharmony_ci
92662306a36Sopenharmony_ci		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
92762306a36Sopenharmony_ci			return false;
92862306a36Sopenharmony_ci
92962306a36Sopenharmony_ci		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
93062306a36Sopenharmony_ci	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
93162306a36Sopenharmony_ci
93262306a36Sopenharmony_ci	return true;
93362306a36Sopenharmony_ci}
93462306a36Sopenharmony_ci
93562306a36Sopenharmony_cistatic struct scatterlist *
93662306a36Sopenharmony_cirotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
93762306a36Sopenharmony_ci	     unsigned int width, unsigned int height,
93862306a36Sopenharmony_ci	     unsigned int src_stride, unsigned int dst_stride,
93962306a36Sopenharmony_ci	     struct sg_table *st, struct scatterlist *sg)
94062306a36Sopenharmony_ci{
94162306a36Sopenharmony_ci	unsigned int column, row;
94262306a36Sopenharmony_ci	pgoff_t src_idx;
94362306a36Sopenharmony_ci
94462306a36Sopenharmony_ci	for (column = 0; column < width; column++) {
94562306a36Sopenharmony_ci		unsigned int left;
94662306a36Sopenharmony_ci
94762306a36Sopenharmony_ci		src_idx = src_stride * (height - 1) + column + offset;
94862306a36Sopenharmony_ci		for (row = 0; row < height; row++) {
94962306a36Sopenharmony_ci			st->nents++;
95062306a36Sopenharmony_ci			/*
95162306a36Sopenharmony_ci			 * We don't need the pages, but need to initialize
95262306a36Sopenharmony_ci			 * the entries so the sg list can be happily traversed.
95362306a36Sopenharmony_ci			 * The only thing we need are DMA addresses.
95462306a36Sopenharmony_ci			 */
95562306a36Sopenharmony_ci			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
95662306a36Sopenharmony_ci			sg_dma_address(sg) =
95762306a36Sopenharmony_ci				i915_gem_object_get_dma_address(obj, src_idx);
95862306a36Sopenharmony_ci			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
95962306a36Sopenharmony_ci			sg = sg_next(sg);
96062306a36Sopenharmony_ci			src_idx -= src_stride;
96162306a36Sopenharmony_ci		}
96262306a36Sopenharmony_ci
96362306a36Sopenharmony_ci		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
96462306a36Sopenharmony_ci
96562306a36Sopenharmony_ci		if (!left)
96662306a36Sopenharmony_ci			continue;
96762306a36Sopenharmony_ci
96862306a36Sopenharmony_ci		st->nents++;
96962306a36Sopenharmony_ci
97062306a36Sopenharmony_ci		/*
97162306a36Sopenharmony_ci		 * The DE ignores the PTEs for the padding tiles, the sg entry
97262306a36Sopenharmony_ci		 * here is just a conenience to indicate how many padding PTEs
97362306a36Sopenharmony_ci		 * to insert at this spot.
97462306a36Sopenharmony_ci		 */
97562306a36Sopenharmony_ci		sg_set_page(sg, NULL, left, 0);
97662306a36Sopenharmony_ci		sg_dma_address(sg) = 0;
97762306a36Sopenharmony_ci		sg_dma_len(sg) = left;
97862306a36Sopenharmony_ci		sg = sg_next(sg);
97962306a36Sopenharmony_ci	}
98062306a36Sopenharmony_ci
98162306a36Sopenharmony_ci	return sg;
98262306a36Sopenharmony_ci}
98362306a36Sopenharmony_ci
98462306a36Sopenharmony_cistatic noinline struct sg_table *
98562306a36Sopenharmony_ciintel_rotate_pages(struct intel_rotation_info *rot_info,
98662306a36Sopenharmony_ci		   struct drm_i915_gem_object *obj)
98762306a36Sopenharmony_ci{
98862306a36Sopenharmony_ci	unsigned int size = intel_rotation_info_size(rot_info);
98962306a36Sopenharmony_ci	struct drm_i915_private *i915 = to_i915(obj->base.dev);
99062306a36Sopenharmony_ci	struct sg_table *st;
99162306a36Sopenharmony_ci	struct scatterlist *sg;
99262306a36Sopenharmony_ci	int ret = -ENOMEM;
99362306a36Sopenharmony_ci	int i;
99462306a36Sopenharmony_ci
99562306a36Sopenharmony_ci	/* Allocate target SG list. */
99662306a36Sopenharmony_ci	st = kmalloc(sizeof(*st), GFP_KERNEL);
99762306a36Sopenharmony_ci	if (!st)
99862306a36Sopenharmony_ci		goto err_st_alloc;
99962306a36Sopenharmony_ci
100062306a36Sopenharmony_ci	ret = sg_alloc_table(st, size, GFP_KERNEL);
100162306a36Sopenharmony_ci	if (ret)
100262306a36Sopenharmony_ci		goto err_sg_alloc;
100362306a36Sopenharmony_ci
100462306a36Sopenharmony_ci	st->nents = 0;
100562306a36Sopenharmony_ci	sg = st->sgl;
100662306a36Sopenharmony_ci
100762306a36Sopenharmony_ci	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
100862306a36Sopenharmony_ci		sg = rotate_pages(obj, rot_info->plane[i].offset,
100962306a36Sopenharmony_ci				  rot_info->plane[i].width, rot_info->plane[i].height,
101062306a36Sopenharmony_ci				  rot_info->plane[i].src_stride,
101162306a36Sopenharmony_ci				  rot_info->plane[i].dst_stride,
101262306a36Sopenharmony_ci				  st, sg);
101362306a36Sopenharmony_ci
101462306a36Sopenharmony_ci	return st;
101562306a36Sopenharmony_ci
101662306a36Sopenharmony_cierr_sg_alloc:
101762306a36Sopenharmony_ci	kfree(st);
101862306a36Sopenharmony_cierr_st_alloc:
101962306a36Sopenharmony_ci
102062306a36Sopenharmony_ci	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
102162306a36Sopenharmony_ci		obj->base.size, rot_info->plane[0].width,
102262306a36Sopenharmony_ci		rot_info->plane[0].height, size);
102362306a36Sopenharmony_ci
102462306a36Sopenharmony_ci	return ERR_PTR(ret);
102562306a36Sopenharmony_ci}
102662306a36Sopenharmony_ci
102762306a36Sopenharmony_cistatic struct scatterlist *
102862306a36Sopenharmony_ciadd_padding_pages(unsigned int count,
102962306a36Sopenharmony_ci		  struct sg_table *st, struct scatterlist *sg)
103062306a36Sopenharmony_ci{
103162306a36Sopenharmony_ci	st->nents++;
103262306a36Sopenharmony_ci
103362306a36Sopenharmony_ci	/*
103462306a36Sopenharmony_ci	 * The DE ignores the PTEs for the padding tiles, the sg entry
103562306a36Sopenharmony_ci	 * here is just a convenience to indicate how many padding PTEs
103662306a36Sopenharmony_ci	 * to insert at this spot.
103762306a36Sopenharmony_ci	 */
103862306a36Sopenharmony_ci	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
103962306a36Sopenharmony_ci	sg_dma_address(sg) = 0;
104062306a36Sopenharmony_ci	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
104162306a36Sopenharmony_ci	sg = sg_next(sg);
104262306a36Sopenharmony_ci
104362306a36Sopenharmony_ci	return sg;
104462306a36Sopenharmony_ci}
104562306a36Sopenharmony_ci
104662306a36Sopenharmony_cistatic struct scatterlist *
104762306a36Sopenharmony_ciremap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
104862306a36Sopenharmony_ci			      unsigned long offset, unsigned int alignment_pad,
104962306a36Sopenharmony_ci			      unsigned int width, unsigned int height,
105062306a36Sopenharmony_ci			      unsigned int src_stride, unsigned int dst_stride,
105162306a36Sopenharmony_ci			      struct sg_table *st, struct scatterlist *sg,
105262306a36Sopenharmony_ci			      unsigned int *gtt_offset)
105362306a36Sopenharmony_ci{
105462306a36Sopenharmony_ci	unsigned int row;
105562306a36Sopenharmony_ci
105662306a36Sopenharmony_ci	if (!width || !height)
105762306a36Sopenharmony_ci		return sg;
105862306a36Sopenharmony_ci
105962306a36Sopenharmony_ci	if (alignment_pad)
106062306a36Sopenharmony_ci		sg = add_padding_pages(alignment_pad, st, sg);
106162306a36Sopenharmony_ci
106262306a36Sopenharmony_ci	for (row = 0; row < height; row++) {
106362306a36Sopenharmony_ci		unsigned int left = width * I915_GTT_PAGE_SIZE;
106462306a36Sopenharmony_ci
106562306a36Sopenharmony_ci		while (left) {
106662306a36Sopenharmony_ci			dma_addr_t addr;
106762306a36Sopenharmony_ci			unsigned int length;
106862306a36Sopenharmony_ci
106962306a36Sopenharmony_ci			/*
107062306a36Sopenharmony_ci			 * We don't need the pages, but need to initialize
107162306a36Sopenharmony_ci			 * the entries so the sg list can be happily traversed.
107262306a36Sopenharmony_ci			 * The only thing we need are DMA addresses.
107362306a36Sopenharmony_ci			 */
107462306a36Sopenharmony_ci
107562306a36Sopenharmony_ci			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
107662306a36Sopenharmony_ci
107762306a36Sopenharmony_ci			length = min(left, length);
107862306a36Sopenharmony_ci
107962306a36Sopenharmony_ci			st->nents++;
108062306a36Sopenharmony_ci
108162306a36Sopenharmony_ci			sg_set_page(sg, NULL, length, 0);
108262306a36Sopenharmony_ci			sg_dma_address(sg) = addr;
108362306a36Sopenharmony_ci			sg_dma_len(sg) = length;
108462306a36Sopenharmony_ci			sg = sg_next(sg);
108562306a36Sopenharmony_ci
108662306a36Sopenharmony_ci			offset += length / I915_GTT_PAGE_SIZE;
108762306a36Sopenharmony_ci			left -= length;
108862306a36Sopenharmony_ci		}
108962306a36Sopenharmony_ci
109062306a36Sopenharmony_ci		offset += src_stride - width;
109162306a36Sopenharmony_ci
109262306a36Sopenharmony_ci		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
109362306a36Sopenharmony_ci
109462306a36Sopenharmony_ci		if (!left)
109562306a36Sopenharmony_ci			continue;
109662306a36Sopenharmony_ci
109762306a36Sopenharmony_ci		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
109862306a36Sopenharmony_ci	}
109962306a36Sopenharmony_ci
110062306a36Sopenharmony_ci	*gtt_offset += alignment_pad + dst_stride * height;
110162306a36Sopenharmony_ci
110262306a36Sopenharmony_ci	return sg;
110362306a36Sopenharmony_ci}
110462306a36Sopenharmony_ci
110562306a36Sopenharmony_cistatic struct scatterlist *
110662306a36Sopenharmony_ciremap_contiguous_pages(struct drm_i915_gem_object *obj,
110762306a36Sopenharmony_ci		       pgoff_t obj_offset,
110862306a36Sopenharmony_ci		       unsigned int count,
110962306a36Sopenharmony_ci		       struct sg_table *st, struct scatterlist *sg)
111062306a36Sopenharmony_ci{
111162306a36Sopenharmony_ci	struct scatterlist *iter;
111262306a36Sopenharmony_ci	unsigned int offset;
111362306a36Sopenharmony_ci
111462306a36Sopenharmony_ci	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
111562306a36Sopenharmony_ci	GEM_BUG_ON(!iter);
111662306a36Sopenharmony_ci
111762306a36Sopenharmony_ci	do {
111862306a36Sopenharmony_ci		unsigned int len;
111962306a36Sopenharmony_ci
112062306a36Sopenharmony_ci		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
112162306a36Sopenharmony_ci			  count << PAGE_SHIFT);
112262306a36Sopenharmony_ci		sg_set_page(sg, NULL, len, 0);
112362306a36Sopenharmony_ci		sg_dma_address(sg) =
112462306a36Sopenharmony_ci			sg_dma_address(iter) + (offset << PAGE_SHIFT);
112562306a36Sopenharmony_ci		sg_dma_len(sg) = len;
112662306a36Sopenharmony_ci
112762306a36Sopenharmony_ci		st->nents++;
112862306a36Sopenharmony_ci		count -= len >> PAGE_SHIFT;
112962306a36Sopenharmony_ci		if (count == 0)
113062306a36Sopenharmony_ci			return sg;
113162306a36Sopenharmony_ci
113262306a36Sopenharmony_ci		sg = __sg_next(sg);
113362306a36Sopenharmony_ci		iter = __sg_next(iter);
113462306a36Sopenharmony_ci		offset = 0;
113562306a36Sopenharmony_ci	} while (1);
113662306a36Sopenharmony_ci}
113762306a36Sopenharmony_ci
113862306a36Sopenharmony_cistatic struct scatterlist *
113962306a36Sopenharmony_ciremap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
114062306a36Sopenharmony_ci			       pgoff_t obj_offset, unsigned int alignment_pad,
114162306a36Sopenharmony_ci			       unsigned int size,
114262306a36Sopenharmony_ci			       struct sg_table *st, struct scatterlist *sg,
114362306a36Sopenharmony_ci			       unsigned int *gtt_offset)
114462306a36Sopenharmony_ci{
114562306a36Sopenharmony_ci	if (!size)
114662306a36Sopenharmony_ci		return sg;
114762306a36Sopenharmony_ci
114862306a36Sopenharmony_ci	if (alignment_pad)
114962306a36Sopenharmony_ci		sg = add_padding_pages(alignment_pad, st, sg);
115062306a36Sopenharmony_ci
115162306a36Sopenharmony_ci	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
115262306a36Sopenharmony_ci	sg = sg_next(sg);
115362306a36Sopenharmony_ci
115462306a36Sopenharmony_ci	*gtt_offset += alignment_pad + size;
115562306a36Sopenharmony_ci
115662306a36Sopenharmony_ci	return sg;
115762306a36Sopenharmony_ci}
115862306a36Sopenharmony_ci
115962306a36Sopenharmony_cistatic struct scatterlist *
116062306a36Sopenharmony_ciremap_color_plane_pages(const struct intel_remapped_info *rem_info,
116162306a36Sopenharmony_ci			struct drm_i915_gem_object *obj,
116262306a36Sopenharmony_ci			int color_plane,
116362306a36Sopenharmony_ci			struct sg_table *st, struct scatterlist *sg,
116462306a36Sopenharmony_ci			unsigned int *gtt_offset)
116562306a36Sopenharmony_ci{
116662306a36Sopenharmony_ci	unsigned int alignment_pad = 0;
116762306a36Sopenharmony_ci
116862306a36Sopenharmony_ci	if (rem_info->plane_alignment)
116962306a36Sopenharmony_ci		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
117062306a36Sopenharmony_ci
117162306a36Sopenharmony_ci	if (rem_info->plane[color_plane].linear)
117262306a36Sopenharmony_ci		sg = remap_linear_color_plane_pages(obj,
117362306a36Sopenharmony_ci						    rem_info->plane[color_plane].offset,
117462306a36Sopenharmony_ci						    alignment_pad,
117562306a36Sopenharmony_ci						    rem_info->plane[color_plane].size,
117662306a36Sopenharmony_ci						    st, sg,
117762306a36Sopenharmony_ci						    gtt_offset);
117862306a36Sopenharmony_ci
117962306a36Sopenharmony_ci	else
118062306a36Sopenharmony_ci		sg = remap_tiled_color_plane_pages(obj,
118162306a36Sopenharmony_ci						   rem_info->plane[color_plane].offset,
118262306a36Sopenharmony_ci						   alignment_pad,
118362306a36Sopenharmony_ci						   rem_info->plane[color_plane].width,
118462306a36Sopenharmony_ci						   rem_info->plane[color_plane].height,
118562306a36Sopenharmony_ci						   rem_info->plane[color_plane].src_stride,
118662306a36Sopenharmony_ci						   rem_info->plane[color_plane].dst_stride,
118762306a36Sopenharmony_ci						   st, sg,
118862306a36Sopenharmony_ci						   gtt_offset);
118962306a36Sopenharmony_ci
119062306a36Sopenharmony_ci	return sg;
119162306a36Sopenharmony_ci}
119262306a36Sopenharmony_ci
119362306a36Sopenharmony_cistatic noinline struct sg_table *
119462306a36Sopenharmony_ciintel_remap_pages(struct intel_remapped_info *rem_info,
119562306a36Sopenharmony_ci		  struct drm_i915_gem_object *obj)
119662306a36Sopenharmony_ci{
119762306a36Sopenharmony_ci	unsigned int size = intel_remapped_info_size(rem_info);
119862306a36Sopenharmony_ci	struct drm_i915_private *i915 = to_i915(obj->base.dev);
119962306a36Sopenharmony_ci	struct sg_table *st;
120062306a36Sopenharmony_ci	struct scatterlist *sg;
120162306a36Sopenharmony_ci	unsigned int gtt_offset = 0;
120262306a36Sopenharmony_ci	int ret = -ENOMEM;
120362306a36Sopenharmony_ci	int i;
120462306a36Sopenharmony_ci
120562306a36Sopenharmony_ci	/* Allocate target SG list. */
120662306a36Sopenharmony_ci	st = kmalloc(sizeof(*st), GFP_KERNEL);
120762306a36Sopenharmony_ci	if (!st)
120862306a36Sopenharmony_ci		goto err_st_alloc;
120962306a36Sopenharmony_ci
121062306a36Sopenharmony_ci	ret = sg_alloc_table(st, size, GFP_KERNEL);
121162306a36Sopenharmony_ci	if (ret)
121262306a36Sopenharmony_ci		goto err_sg_alloc;
121362306a36Sopenharmony_ci
121462306a36Sopenharmony_ci	st->nents = 0;
121562306a36Sopenharmony_ci	sg = st->sgl;
121662306a36Sopenharmony_ci
121762306a36Sopenharmony_ci	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
121862306a36Sopenharmony_ci		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
121962306a36Sopenharmony_ci
122062306a36Sopenharmony_ci	i915_sg_trim(st);
122162306a36Sopenharmony_ci
122262306a36Sopenharmony_ci	return st;
122362306a36Sopenharmony_ci
122462306a36Sopenharmony_cierr_sg_alloc:
122562306a36Sopenharmony_ci	kfree(st);
122662306a36Sopenharmony_cierr_st_alloc:
122762306a36Sopenharmony_ci
122862306a36Sopenharmony_ci	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
122962306a36Sopenharmony_ci		obj->base.size, rem_info->plane[0].width,
123062306a36Sopenharmony_ci		rem_info->plane[0].height, size);
123162306a36Sopenharmony_ci
123262306a36Sopenharmony_ci	return ERR_PTR(ret);
123362306a36Sopenharmony_ci}
123462306a36Sopenharmony_ci
123562306a36Sopenharmony_cistatic noinline struct sg_table *
123662306a36Sopenharmony_ciintel_partial_pages(const struct i915_gtt_view *view,
123762306a36Sopenharmony_ci		    struct drm_i915_gem_object *obj)
123862306a36Sopenharmony_ci{
123962306a36Sopenharmony_ci	struct sg_table *st;
124062306a36Sopenharmony_ci	struct scatterlist *sg;
124162306a36Sopenharmony_ci	unsigned int count = view->partial.size;
124262306a36Sopenharmony_ci	int ret = -ENOMEM;
124362306a36Sopenharmony_ci
124462306a36Sopenharmony_ci	st = kmalloc(sizeof(*st), GFP_KERNEL);
124562306a36Sopenharmony_ci	if (!st)
124662306a36Sopenharmony_ci		goto err_st_alloc;
124762306a36Sopenharmony_ci
124862306a36Sopenharmony_ci	ret = sg_alloc_table(st, count, GFP_KERNEL);
124962306a36Sopenharmony_ci	if (ret)
125062306a36Sopenharmony_ci		goto err_sg_alloc;
125162306a36Sopenharmony_ci
125262306a36Sopenharmony_ci	st->nents = 0;
125362306a36Sopenharmony_ci
125462306a36Sopenharmony_ci	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
125562306a36Sopenharmony_ci
125662306a36Sopenharmony_ci	sg_mark_end(sg);
125762306a36Sopenharmony_ci	i915_sg_trim(st); /* Drop any unused tail entries. */
125862306a36Sopenharmony_ci
125962306a36Sopenharmony_ci	return st;
126062306a36Sopenharmony_ci
126162306a36Sopenharmony_cierr_sg_alloc:
126262306a36Sopenharmony_ci	kfree(st);
126362306a36Sopenharmony_cierr_st_alloc:
126462306a36Sopenharmony_ci	return ERR_PTR(ret);
126562306a36Sopenharmony_ci}
126662306a36Sopenharmony_ci
126762306a36Sopenharmony_cistatic int
126862306a36Sopenharmony_ci__i915_vma_get_pages(struct i915_vma *vma)
126962306a36Sopenharmony_ci{
127062306a36Sopenharmony_ci	struct sg_table *pages;
127162306a36Sopenharmony_ci
127262306a36Sopenharmony_ci	/*
127362306a36Sopenharmony_ci	 * The vma->pages are only valid within the lifespan of the borrowed
127462306a36Sopenharmony_ci	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
127562306a36Sopenharmony_ci	 * must be the vma->pages. A simple rule is that vma->pages must only
127662306a36Sopenharmony_ci	 * be accessed when the obj->mm.pages are pinned.
127762306a36Sopenharmony_ci	 */
127862306a36Sopenharmony_ci	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
127962306a36Sopenharmony_ci
128062306a36Sopenharmony_ci	switch (vma->gtt_view.type) {
128162306a36Sopenharmony_ci	default:
128262306a36Sopenharmony_ci		GEM_BUG_ON(vma->gtt_view.type);
128362306a36Sopenharmony_ci		fallthrough;
128462306a36Sopenharmony_ci	case I915_GTT_VIEW_NORMAL:
128562306a36Sopenharmony_ci		pages = vma->obj->mm.pages;
128662306a36Sopenharmony_ci		break;
128762306a36Sopenharmony_ci
128862306a36Sopenharmony_ci	case I915_GTT_VIEW_ROTATED:
128962306a36Sopenharmony_ci		pages =
129062306a36Sopenharmony_ci			intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
129162306a36Sopenharmony_ci		break;
129262306a36Sopenharmony_ci
129362306a36Sopenharmony_ci	case I915_GTT_VIEW_REMAPPED:
129462306a36Sopenharmony_ci		pages =
129562306a36Sopenharmony_ci			intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
129662306a36Sopenharmony_ci		break;
129762306a36Sopenharmony_ci
129862306a36Sopenharmony_ci	case I915_GTT_VIEW_PARTIAL:
129962306a36Sopenharmony_ci		pages = intel_partial_pages(&vma->gtt_view, vma->obj);
130062306a36Sopenharmony_ci		break;
130162306a36Sopenharmony_ci	}
130262306a36Sopenharmony_ci
130362306a36Sopenharmony_ci	if (IS_ERR(pages)) {
130462306a36Sopenharmony_ci		drm_err(&vma->vm->i915->drm,
130562306a36Sopenharmony_ci			"Failed to get pages for VMA view type %u (%ld)!\n",
130662306a36Sopenharmony_ci			vma->gtt_view.type, PTR_ERR(pages));
130762306a36Sopenharmony_ci		return PTR_ERR(pages);
130862306a36Sopenharmony_ci	}
130962306a36Sopenharmony_ci
131062306a36Sopenharmony_ci	vma->pages = pages;
131162306a36Sopenharmony_ci
131262306a36Sopenharmony_ci	return 0;
131362306a36Sopenharmony_ci}
131462306a36Sopenharmony_ci
131562306a36Sopenharmony_ciI915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
131662306a36Sopenharmony_ci{
131762306a36Sopenharmony_ci	int err;
131862306a36Sopenharmony_ci
131962306a36Sopenharmony_ci	if (atomic_add_unless(&vma->pages_count, 1, 0))
132062306a36Sopenharmony_ci		return 0;
132162306a36Sopenharmony_ci
132262306a36Sopenharmony_ci	err = i915_gem_object_pin_pages(vma->obj);
132362306a36Sopenharmony_ci	if (err)
132462306a36Sopenharmony_ci		return err;
132562306a36Sopenharmony_ci
132662306a36Sopenharmony_ci	err = __i915_vma_get_pages(vma);
132762306a36Sopenharmony_ci	if (err)
132862306a36Sopenharmony_ci		goto err_unpin;
132962306a36Sopenharmony_ci
133062306a36Sopenharmony_ci	vma->page_sizes = vma->obj->mm.page_sizes;
133162306a36Sopenharmony_ci	atomic_inc(&vma->pages_count);
133262306a36Sopenharmony_ci
133362306a36Sopenharmony_ci	return 0;
133462306a36Sopenharmony_ci
133562306a36Sopenharmony_cierr_unpin:
133662306a36Sopenharmony_ci	__i915_gem_object_unpin_pages(vma->obj);
133762306a36Sopenharmony_ci
133862306a36Sopenharmony_ci	return err;
133962306a36Sopenharmony_ci}
134062306a36Sopenharmony_ci
134162306a36Sopenharmony_civoid vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
134262306a36Sopenharmony_ci{
134362306a36Sopenharmony_ci	struct intel_gt *gt;
134462306a36Sopenharmony_ci	int id;
134562306a36Sopenharmony_ci
134662306a36Sopenharmony_ci	if (!tlb)
134762306a36Sopenharmony_ci		return;
134862306a36Sopenharmony_ci
134962306a36Sopenharmony_ci	/*
135062306a36Sopenharmony_ci	 * Before we release the pages that were bound by this vma, we
135162306a36Sopenharmony_ci	 * must invalidate all the TLBs that may still have a reference
135262306a36Sopenharmony_ci	 * back to our physical address. It only needs to be done once,
135362306a36Sopenharmony_ci	 * so after updating the PTE to point away from the pages, record
135462306a36Sopenharmony_ci	 * the most recent TLB invalidation seqno, and if we have not yet
135562306a36Sopenharmony_ci	 * flushed the TLBs upon release, perform a full invalidation.
135662306a36Sopenharmony_ci	 */
135762306a36Sopenharmony_ci	for_each_gt(gt, vm->i915, id)
135862306a36Sopenharmony_ci		WRITE_ONCE(tlb[id],
135962306a36Sopenharmony_ci			   intel_gt_next_invalidate_tlb_full(gt));
136062306a36Sopenharmony_ci}
136162306a36Sopenharmony_ci
136262306a36Sopenharmony_cistatic void __vma_put_pages(struct i915_vma *vma, unsigned int count)
136362306a36Sopenharmony_ci{
136462306a36Sopenharmony_ci	/* We allocate under vma_get_pages, so beware the shrinker */
136562306a36Sopenharmony_ci	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
136662306a36Sopenharmony_ci
136762306a36Sopenharmony_ci	if (atomic_sub_return(count, &vma->pages_count) == 0) {
136862306a36Sopenharmony_ci		if (vma->pages != vma->obj->mm.pages) {
136962306a36Sopenharmony_ci			sg_free_table(vma->pages);
137062306a36Sopenharmony_ci			kfree(vma->pages);
137162306a36Sopenharmony_ci		}
137262306a36Sopenharmony_ci		vma->pages = NULL;
137362306a36Sopenharmony_ci
137462306a36Sopenharmony_ci		i915_gem_object_unpin_pages(vma->obj);
137562306a36Sopenharmony_ci	}
137662306a36Sopenharmony_ci}
137762306a36Sopenharmony_ci
137862306a36Sopenharmony_ciI915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
137962306a36Sopenharmony_ci{
138062306a36Sopenharmony_ci	if (atomic_add_unless(&vma->pages_count, -1, 1))
138162306a36Sopenharmony_ci		return;
138262306a36Sopenharmony_ci
138362306a36Sopenharmony_ci	__vma_put_pages(vma, 1);
138462306a36Sopenharmony_ci}
138562306a36Sopenharmony_ci
138662306a36Sopenharmony_cistatic void vma_unbind_pages(struct i915_vma *vma)
138762306a36Sopenharmony_ci{
138862306a36Sopenharmony_ci	unsigned int count;
138962306a36Sopenharmony_ci
139062306a36Sopenharmony_ci	lockdep_assert_held(&vma->vm->mutex);
139162306a36Sopenharmony_ci
139262306a36Sopenharmony_ci	/* The upper portion of pages_count is the number of bindings */
139362306a36Sopenharmony_ci	count = atomic_read(&vma->pages_count);
139462306a36Sopenharmony_ci	count >>= I915_VMA_PAGES_BIAS;
139562306a36Sopenharmony_ci	GEM_BUG_ON(!count);
139662306a36Sopenharmony_ci
139762306a36Sopenharmony_ci	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
139862306a36Sopenharmony_ci}
139962306a36Sopenharmony_ci
/*
 * Pin @vma into its address space, binding it (PIN_USER and/or PIN_GLOBAL)
 * as requested by @flags, under the ww transaction @ww. Returns 0 on
 * success or a negative errno. On success the vma holds an extra pin,
 * unless PIN_VALIDATE was passed.
 *
 * NOTE(review): on success, control deliberately falls through the err_*
 * labels at the bottom -- each step there is either a no-op for a bound
 * vma or the correct release of a temporary reference.
 */
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	/* The pin flags double as bind flags; keep them in sync. */
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))
		return 0;

	/* Taken here, dropped at the bottom (the bind keeps its own ref). */
	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	/* A global (GGTT) binding requires the device to be awake. */
	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = vma->vm;

		/* Order the async bind after any pending object move. */
		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
		if (err)
			goto err_rpm;

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to used PTE */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Refuse to pin if the pin count would overflow into the flags. */
	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	/* Already bound in every way requested? Just take the pin. */
	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		/* First binding: find address space room for the vma. */
		err = i915_vma_insert(vma, ww, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->pat_index,
			    flags, work, vma_res);
	/* vma_res is consumed by i915_vma_bind(), even on error. */
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	/* The binding holds its own pages reference (see vma_unbind_pages). */
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));
	}
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

	/* Success falls through: the labels below are shared cleanup. */
err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	i915_vma_resource_free(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	/* Drops the reference taken by i915_vma_get_pages() above. */
	i915_vma_put_pages(vma);
	return err;
}
157262306a36Sopenharmony_ci
/* Flush the barriers on every engine, then wait for the GT to go idle. */
static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
158362306a36Sopenharmony_ci
158462306a36Sopenharmony_cistatic int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
158562306a36Sopenharmony_ci			   u32 align, unsigned int flags)
158662306a36Sopenharmony_ci{
158762306a36Sopenharmony_ci	struct i915_address_space *vm = vma->vm;
158862306a36Sopenharmony_ci	struct intel_gt *gt;
158962306a36Sopenharmony_ci	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
159062306a36Sopenharmony_ci	int err;
159162306a36Sopenharmony_ci
159262306a36Sopenharmony_ci	do {
159362306a36Sopenharmony_ci		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
159462306a36Sopenharmony_ci
159562306a36Sopenharmony_ci		if (err != -ENOSPC) {
159662306a36Sopenharmony_ci			if (!err) {
159762306a36Sopenharmony_ci				err = i915_vma_wait_for_bind(vma);
159862306a36Sopenharmony_ci				if (err)
159962306a36Sopenharmony_ci					i915_vma_unpin(vma);
160062306a36Sopenharmony_ci			}
160162306a36Sopenharmony_ci			return err;
160262306a36Sopenharmony_ci		}
160362306a36Sopenharmony_ci
160462306a36Sopenharmony_ci		/* Unlike i915_vma_pin, we don't take no for an answer! */
160562306a36Sopenharmony_ci		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
160662306a36Sopenharmony_ci			flush_idle_contexts(gt);
160762306a36Sopenharmony_ci		if (mutex_lock_interruptible(&vm->mutex) == 0) {
160862306a36Sopenharmony_ci			/*
160962306a36Sopenharmony_ci			 * We pass NULL ww here, as we don't want to unbind
161062306a36Sopenharmony_ci			 * locked objects when called from execbuf when pinning
161162306a36Sopenharmony_ci			 * is removed. This would probably regress badly.
161262306a36Sopenharmony_ci			 */
161362306a36Sopenharmony_ci			i915_gem_evict_vm(vm, NULL, NULL);
161462306a36Sopenharmony_ci			mutex_unlock(&vm->mutex);
161562306a36Sopenharmony_ci		}
161662306a36Sopenharmony_ci	} while (1);
161762306a36Sopenharmony_ci}
161862306a36Sopenharmony_ci
161962306a36Sopenharmony_ciint i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
162062306a36Sopenharmony_ci		  u32 align, unsigned int flags)
162162306a36Sopenharmony_ci{
162262306a36Sopenharmony_ci	struct i915_gem_ww_ctx _ww;
162362306a36Sopenharmony_ci	int err;
162462306a36Sopenharmony_ci
162562306a36Sopenharmony_ci	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
162662306a36Sopenharmony_ci
162762306a36Sopenharmony_ci	if (ww)
162862306a36Sopenharmony_ci		return __i915_ggtt_pin(vma, ww, align, flags);
162962306a36Sopenharmony_ci
163062306a36Sopenharmony_ci	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
163162306a36Sopenharmony_ci
163262306a36Sopenharmony_ci	for_i915_gem_ww(&_ww, err, true) {
163362306a36Sopenharmony_ci		err = i915_gem_object_lock(vma->obj, &_ww);
163462306a36Sopenharmony_ci		if (!err)
163562306a36Sopenharmony_ci			err = __i915_ggtt_pin(vma, &_ww, align, flags);
163662306a36Sopenharmony_ci	}
163762306a36Sopenharmony_ci
163862306a36Sopenharmony_ci	return err;
163962306a36Sopenharmony_ci}
164062306a36Sopenharmony_ci
/**
 * i915_ggtt_clear_scanout - Clear scanout flag for all objects ggtt vmas
 * @obj: i915 GEM object
 *
 * This function clears scanout flags for objects ggtt vmas. These flags are set
 * when object is pinned for display use and this function to clear them all is
 * targeted to be called by frontbuffer tracking code when the frontbuffer is
 * about to be released.
 */
void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* obj->vma.lock protects the object's list of vmas. */
	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		i915_vma_clear_scanout(vma);
		/* Drop any display-specific alignment back to the minimum. */
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
	}
	spin_unlock(&obj->vma.lock);
}
166062306a36Sopenharmony_ci
/* Caller must hold gt->closed_lock (see i915_vma_close()). */
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}
167862306a36Sopenharmony_ci
167962306a36Sopenharmony_civoid i915_vma_close(struct i915_vma *vma)
168062306a36Sopenharmony_ci{
168162306a36Sopenharmony_ci	struct intel_gt *gt = vma->vm->gt;
168262306a36Sopenharmony_ci	unsigned long flags;
168362306a36Sopenharmony_ci
168462306a36Sopenharmony_ci	if (i915_vma_is_ggtt(vma))
168562306a36Sopenharmony_ci		return;
168662306a36Sopenharmony_ci
168762306a36Sopenharmony_ci	GEM_BUG_ON(!atomic_read(&vma->open_count));
168862306a36Sopenharmony_ci	if (atomic_dec_and_lock_irqsave(&vma->open_count,
168962306a36Sopenharmony_ci					&gt->closed_lock,
169062306a36Sopenharmony_ci					flags)) {
169162306a36Sopenharmony_ci		__vma_close(vma, gt);
169262306a36Sopenharmony_ci		spin_unlock_irqrestore(&gt->closed_lock, flags);
169362306a36Sopenharmony_ci	}
169462306a36Sopenharmony_ci}
169562306a36Sopenharmony_ci
/* Caller must hold gt->closed_lock. */
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	list_del_init(&vma->closed_link);
}
170062306a36Sopenharmony_ci
/* Pull a vma back off the deferred-close list, undoing i915_vma_close(). */
void i915_vma_reopen(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);
}
171062306a36Sopenharmony_ci
static void force_unbind(struct i915_vma *vma)
{
	/* Nothing to do if the vma has no address space node. */
	if (!drm_mm_node_allocated(&vma->node))
		return;

	/* Strip all pins so that __i915_vma_unbind() cannot be refused. */
	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	WARN_ON(__i915_vma_unbind(vma));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}
172062306a36Sopenharmony_ci
/*
 * Drop every remaining weak reference to @vma and free it.
 * @vm_ddestroy: whether a deferred vm reservation reference must also be
 * dropped via i915_vm_resv_put() (see i915_vma_destroy()).
 */
static void release_references(struct i915_vma *vma, struct intel_gt *gt,
			       bool vm_ddestroy)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(i915_vma_is_active(vma));

	/* Unlink the vma from the object's list and lookup tree. */
	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);

	spin_unlock(&obj->vma.lock);

	/* Make sure the vma is off the gt's deferred-close list. */
	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

	if (vm_ddestroy)
		i915_vm_resv_put(vma->vm);

	/* Wait for async active retire */
	i915_active_wait(&vma->active);
	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}
174862306a36Sopenharmony_ci
/*
 * i915_vma_destroy_locked - Remove all weak reference to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 * the object.
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 * on the vm and a reference on the object. Also takes the object lock so
 * destruction from __i915_vma_parked() can be blocked by holding the
 * object lock. Since the object lock is only allowed from within i915 with
 * an object refcount, holding the object lock also implicitly blocks the
 * vma freeing from __i915_gem_object_pages_fini().
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 * - vm->mutex
 * - obj->vma.lock
 * - gt->closed_lock
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);

	/* Unbind and unlink from the vm while we still hold its mutex. */
	force_unbind(vma);
	list_del_init(&vma->vm_link);
	release_references(vma, vma->vm->gt, false);
}
178362306a36Sopenharmony_ci
/* As i915_vma_destroy_locked(), but takes and drops vm->mutex itself. */
void i915_vma_destroy(struct i915_vma *vma)
{
	struct intel_gt *gt;
	bool vm_ddestroy;

	mutex_lock(&vma->vm->mutex);
	force_unbind(vma);
	list_del_init(&vma->vm_link);
	/* Claim the deferred vm-resv put so release_references drops it. */
	vm_ddestroy = vma->vm_ddestroy;
	vma->vm_ddestroy = false;

	/* vma->vm may be freed when releasing vma->vm->mutex. */
	gt = vma->vm->gt;
	mutex_unlock(&vma->vm->mutex);
	release_references(vma, gt, vm_ddestroy);
}
180062306a36Sopenharmony_ci
/*
 * Destroy the vmas that were deferred onto @gt's closed list, in two
 * phases: first collect them (with object/vm references) under the
 * closed_lock, then destroy them outside that lock.
 */
void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	/*
	 * Phase 1: move each closed vma whose object and vm we can still
	 * grab references to onto a private list; skip vmas whose object
	 * or vm is already being torn down.
	 */
	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryget(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/*
		 * Phase 2: destruction requires the object lock; if it
		 * cannot be taken without blocking, put the vma back on
		 * the gt's closed list for a later attempt.
		 */
		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			i915_vma_destroy(vma);
			i915_gem_object_unlock(obj);
		} else {
			/* back you go.. */
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);
		}

		/* Drop the references taken in phase 1. */
		i915_gem_object_put(obj);
		i915_vm_put(vm);
	}
}
184562306a36Sopenharmony_ci
184662306a36Sopenharmony_cistatic void __i915_vma_iounmap(struct i915_vma *vma)
184762306a36Sopenharmony_ci{
184862306a36Sopenharmony_ci	GEM_BUG_ON(i915_vma_is_pinned(vma));
184962306a36Sopenharmony_ci
185062306a36Sopenharmony_ci	if (vma->iomap == NULL)
185162306a36Sopenharmony_ci		return;
185262306a36Sopenharmony_ci
185362306a36Sopenharmony_ci	if (page_unmask_bits(vma->iomap))
185462306a36Sopenharmony_ci		__i915_gem_object_release_map(vma->obj);
185562306a36Sopenharmony_ci	else
185662306a36Sopenharmony_ci		io_mapping_unmap(vma->iomap);
185762306a36Sopenharmony_ci	vma->iomap = NULL;
185862306a36Sopenharmony_ci}
185962306a36Sopenharmony_ci
/**
 * i915_vma_revoke_mmap - zap user CPU PTEs pointing at this vma
 * @vma: the vma to revoke; asserted to be map-and-fenceable
 *
 * If userspace has faulted in a GTT mmap of this vma, remove those PTEs so
 * the next user access refaults. No-op when no userfault is outstanding.
 */
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	/* Byte offset of this (possibly partial) view within the mmap. */
	vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	/* Last revoked userfault takes the object off the userfault list. */
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}
188262306a36Sopenharmony_ci
/*
 * Order @rq after the vma's binding: the request must not execute until the
 * (possibly asynchronous) bind tracked by vma->active has completed.
 */
static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}
188862306a36Sopenharmony_ci
188962306a36Sopenharmony_cistatic int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
189062306a36Sopenharmony_ci{
189162306a36Sopenharmony_ci	int err;
189262306a36Sopenharmony_ci
189362306a36Sopenharmony_ci	/* Wait for the vma to be bound before we start! */
189462306a36Sopenharmony_ci	err = __i915_request_await_bind(rq, vma);
189562306a36Sopenharmony_ci	if (err)
189662306a36Sopenharmony_ci		return err;
189762306a36Sopenharmony_ci
189862306a36Sopenharmony_ci	return i915_active_add_request(&vma->active, rq);
189962306a36Sopenharmony_ci}
190062306a36Sopenharmony_ci
/**
 * _i915_vma_move_to_active - mark a vma (and its object) as used by a request
 * @vma: the vma being used
 * @rq: the request using the vma
 * @fence: optional fence (possibly a dma_fence_array) to install into the
 *	object's dma_resv; NULL to skip installation
 * @flags: EXEC_OBJECT_* / __EXEC_OBJECT_* control bits
 *
 * Caller must hold the object's dma_resv lock. Orders @rq after the vma's
 * bind, registers it with the vma's active tracker, and for GPU writes
 * invalidates the frontbuffer and updates GEM domain tracking.
 *
 * Returns: 0 on success or a negative error code.
 */
int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	/* Serialise @rq against prior users of the object, unless opted out. */
	if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
		err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
		if (unlikely(err))
			return err;
	}
	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	/*
	 * Reserve fences slot early to prevent an allocation after preparing
	 * the workload and associating fences with dma_resv.
	 */
	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
		struct dma_fence *curr;
		int idx;

		/* Empty-bodied walk: merely counts the component fences. */
		dma_fence_array_for_each(curr, idx, fence)
			;
		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
		if (unlikely(err))
			return err;
	}

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		/* GPU writes to a scanout buffer must flush the frontbuffer. */
		front = i915_gem_object_get_frontbuffer(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}
	}

	if (fence) {
		struct dma_fence *curr;
		enum dma_resv_usage usage;
		int idx;

		if (flags & EXEC_OBJECT_WRITE) {
			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		} else {
			usage = DMA_RESV_USAGE_READ;
			obj->write_domain = 0;
		}

		/* Install every component fence with the chosen usage. */
		dma_fence_array_for_each(curr, idx, fence)
			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}

	/* Keep the fence register alive for as long as the request runs. */
	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
197562306a36Sopenharmony_ci
/**
 * __i915_vma_evict - tear down a vma's binding
 * @vma: the vma to evict; must be unpinned, see assert_vma_held_evict()
 * @async: if true, return the unbind fence instead of waiting for it
 *
 * Revokes user mmaps and the fence register (for map-and-fenceable GGTT
 * vmas), releases any iomap, then unbinds the vma resource. In the
 * synchronous case the unbind fence is waited upon and the TLBs are
 * invalidated before returning.
 *
 * Returns: the unbind fence when @async (may be NULL), NULL otherwise.
 */
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind, other due to non-strict nature of those
		 * indirect writes they may end up referencing the GGTT PTE
		 * after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}

	__i915_vma_iounmap(vma);

	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		kref_read(&vma->vm->ref);
	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
		vma->vm->skip_pte_rewrite;
	trace_i915_vma_unbind(vma);

	/* Async unbinds must invalidate the TLBs when the fence signals. */
	if (async)
		unbind_fence = i915_vma_resource_unbind(vma_res,
							vma->obj->mm.tlb);
	else
		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);

	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async) {
		if (unbind_fence) {
			dma_fence_wait(unbind_fence, false);
			dma_fence_put(unbind_fence);
			unbind_fence = NULL;
		}
		vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */

	vma_unbind_pages(vma);
	return unbind_fence;
}
205562306a36Sopenharmony_ci
/**
 * __i915_vma_unbind - synchronously unbind a vma
 * @vma: the vma to unbind
 *
 * Caller must hold vma->vm->mutex (plus whatever object-level locking
 * assert_vma_held_evict() enforces).
 *
 * Returns: 0 on success (or if the vma was not bound), -EAGAIN if the vma
 * is pinned, or a negative error code from waiting for activity to idle.
 */
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	/* Not bound: nothing to do. */
	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	/* Synchronous eviction: the unbind fence is waited on internally. */
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}
208662306a36Sopenharmony_ci
/**
 * __i915_vma_unbind_async - unbind a vma without waiting for idle
 * @vma: the vma to unbind
 *
 * Caller must hold vma->vm->mutex. Instead of waiting for the vma to
 * idle, the unbind is deferred behind the vma's outstanding activity via
 * the vma resource's chain fence.
 *
 * Returns: the unbind fence (may be NULL), NULL if the vma was not bound,
 * or an ERR_PTR on failure (-EAGAIN if pinned or the backing store has
 * changed, -EBUSY if the activity await fails).
 */
static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	/* Bail if pinned, or the resource no longer maps the object's sgt. */
	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}
212162306a36Sopenharmony_ci
212262306a36Sopenharmony_ciint i915_vma_unbind(struct i915_vma *vma)
212362306a36Sopenharmony_ci{
212462306a36Sopenharmony_ci	struct i915_address_space *vm = vma->vm;
212562306a36Sopenharmony_ci	intel_wakeref_t wakeref = 0;
212662306a36Sopenharmony_ci	int err;
212762306a36Sopenharmony_ci
212862306a36Sopenharmony_ci	assert_object_held_shared(vma->obj);
212962306a36Sopenharmony_ci
213062306a36Sopenharmony_ci	/* Optimistic wait before taking the mutex */
213162306a36Sopenharmony_ci	err = i915_vma_sync(vma);
213262306a36Sopenharmony_ci	if (err)
213362306a36Sopenharmony_ci		return err;
213462306a36Sopenharmony_ci
213562306a36Sopenharmony_ci	if (!drm_mm_node_allocated(&vma->node))
213662306a36Sopenharmony_ci		return 0;
213762306a36Sopenharmony_ci
213862306a36Sopenharmony_ci	if (i915_vma_is_pinned(vma)) {
213962306a36Sopenharmony_ci		vma_print_allocator(vma, "is pinned");
214062306a36Sopenharmony_ci		return -EAGAIN;
214162306a36Sopenharmony_ci	}
214262306a36Sopenharmony_ci
214362306a36Sopenharmony_ci	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
214462306a36Sopenharmony_ci		/* XXX not always required: nop_clear_range */
214562306a36Sopenharmony_ci		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
214662306a36Sopenharmony_ci
214762306a36Sopenharmony_ci	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
214862306a36Sopenharmony_ci	if (err)
214962306a36Sopenharmony_ci		goto out_rpm;
215062306a36Sopenharmony_ci
215162306a36Sopenharmony_ci	err = __i915_vma_unbind(vma);
215262306a36Sopenharmony_ci	mutex_unlock(&vm->mutex);
215362306a36Sopenharmony_ci
215462306a36Sopenharmony_ciout_rpm:
215562306a36Sopenharmony_ci	if (wakeref)
215662306a36Sopenharmony_ci		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
215762306a36Sopenharmony_ci	return err;
215862306a36Sopenharmony_ci}
215962306a36Sopenharmony_ci
/**
 * i915_vma_unbind_async - unbind a vma, deferring behind an unbind fence
 * @vma: the vma to unbind
 * @trylock_vm: if true, only trylock the vm mutex (returning -EBUSY on
 *	contention) instead of sleeping for it
 *
 * Caller must hold the object's dma_resv lock, since the resulting unbind
 * fence is installed into the object's dma_resv.
 *
 * Returns: 0 on success, -EAGAIN if the vma is pinned, -EBUSY if there is
 * no backing rsgt / fence reservation fails / locks are contended, or
 * another negative error code.
 */
int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/* Async unbind requires a refcounted sg-table backing store. */
	if (!obj->mm.rsgt)
		return -EBUSY;

	/* Reserve slots up front; fence allocation later must not fail. */
	err = dma_resv_reserve_fences(obj->base.resv, 2);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
		err = -EBUSY;
		goto out_rpm;
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
		if (err)
			goto out_rpm;
	}

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		/* NULL fence means nothing was bound: report success. */
		err = PTR_ERR_OR_ZERO(fence);
		goto out_rpm;
	}

	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
	dma_fence_put(fence);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}
222162306a36Sopenharmony_ci
222262306a36Sopenharmony_ciint i915_vma_unbind_unlocked(struct i915_vma *vma)
222362306a36Sopenharmony_ci{
222462306a36Sopenharmony_ci	int err;
222562306a36Sopenharmony_ci
222662306a36Sopenharmony_ci	i915_gem_object_lock(vma->obj, NULL);
222762306a36Sopenharmony_ci	err = i915_vma_unbind(vma);
222862306a36Sopenharmony_ci	i915_gem_object_unlock(vma->obj);
222962306a36Sopenharmony_ci
223062306a36Sopenharmony_ci	return err;
223162306a36Sopenharmony_ci}
223262306a36Sopenharmony_ci
/* Move the backing object out of the shrinker's reach; returns @vma for chaining. */
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}
223862306a36Sopenharmony_ci
/* Allow the shrinker to reclaim the vma's backing object again. */
void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}
224362306a36Sopenharmony_ci
/* Mark the vma's backing object as purgeable (contents may be discarded). */
void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}
224862306a36Sopenharmony_ci
224962306a36Sopenharmony_ci#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
225062306a36Sopenharmony_ci#include "selftests/i915_vma.c"
225162306a36Sopenharmony_ci#endif
225262306a36Sopenharmony_ci
/* Free the i915_vma slab cache on module unload. */
void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}
225762306a36Sopenharmony_ci
225862306a36Sopenharmony_ciint __init i915_vma_module_init(void)
225962306a36Sopenharmony_ci{
226062306a36Sopenharmony_ci	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
226162306a36Sopenharmony_ci	if (!slab_vmas)
226262306a36Sopenharmony_ci		return -ENOMEM;
226362306a36Sopenharmony_ci
226462306a36Sopenharmony_ci	return 0;
226562306a36Sopenharmony_ci}
2266