162306a36Sopenharmony_ci// SPDX-License-Identifier: MIT
262306a36Sopenharmony_ci/*
362306a36Sopenharmony_ci * Copyright © 2020 Intel Corporation
462306a36Sopenharmony_ci */
562306a36Sopenharmony_ci
662306a36Sopenharmony_ci#include <linux/slab.h> /* fault-inject.h is not standalone! */
762306a36Sopenharmony_ci
862306a36Sopenharmony_ci#include <linux/fault-inject.h>
962306a36Sopenharmony_ci#include <linux/sched/mm.h>
1062306a36Sopenharmony_ci
1162306a36Sopenharmony_ci#include <drm/drm_cache.h>
1262306a36Sopenharmony_ci
1362306a36Sopenharmony_ci#include "gem/i915_gem_internal.h"
1462306a36Sopenharmony_ci#include "gem/i915_gem_lmem.h"
1562306a36Sopenharmony_ci#include "i915_reg.h"
1662306a36Sopenharmony_ci#include "i915_trace.h"
1762306a36Sopenharmony_ci#include "i915_utils.h"
1862306a36Sopenharmony_ci#include "intel_gt.h"
1962306a36Sopenharmony_ci#include "intel_gt_mcr.h"
2062306a36Sopenharmony_ci#include "intel_gt_print.h"
2162306a36Sopenharmony_ci#include "intel_gt_regs.h"
2262306a36Sopenharmony_ci#include "intel_gtt.h"
2362306a36Sopenharmony_ci
2462306a36Sopenharmony_ci
2562306a36Sopenharmony_cistatic bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
2662306a36Sopenharmony_ci{
2762306a36Sopenharmony_ci	return IS_BROXTON(i915) && i915_vtd_active(i915);
2862306a36Sopenharmony_ci}
2962306a36Sopenharmony_ci
3062306a36Sopenharmony_cibool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
3162306a36Sopenharmony_ci{
3262306a36Sopenharmony_ci	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
3362306a36Sopenharmony_ci}
3462306a36Sopenharmony_ci
3562306a36Sopenharmony_cistruct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
3662306a36Sopenharmony_ci{
3762306a36Sopenharmony_ci	struct drm_i915_gem_object *obj;
3862306a36Sopenharmony_ci
3962306a36Sopenharmony_ci	/*
4062306a36Sopenharmony_ci	 * To avoid severe over-allocation when dealing with min_page_size
4162306a36Sopenharmony_ci	 * restrictions, we override that behaviour here by allowing an object
4262306a36Sopenharmony_ci	 * size and page layout which can be smaller. In practice this should be
4362306a36Sopenharmony_ci	 * totally fine, since GTT paging structures are not typically inserted
4462306a36Sopenharmony_ci	 * into the GTT.
4562306a36Sopenharmony_ci	 *
4662306a36Sopenharmony_ci	 * Note that we also hit this path for the scratch page, and for this
4762306a36Sopenharmony_ci	 * case it might need to be 64K, but that should work fine here since we
4862306a36Sopenharmony_ci	 * used the passed in size for the page size, which should ensure it
4962306a36Sopenharmony_ci	 * also has the same alignment.
5062306a36Sopenharmony_ci	 */
5162306a36Sopenharmony_ci	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
5262306a36Sopenharmony_ci						    vm->lmem_pt_obj_flags);
5362306a36Sopenharmony_ci	/*
5462306a36Sopenharmony_ci	 * Ensure all paging structures for this vm share the same dma-resv
5562306a36Sopenharmony_ci	 * object underneath, with the idea that one object_lock() will lock
5662306a36Sopenharmony_ci	 * them all at once.
5762306a36Sopenharmony_ci	 */
5862306a36Sopenharmony_ci	if (!IS_ERR(obj)) {
5962306a36Sopenharmony_ci		obj->base.resv = i915_vm_resv_get(vm);
6062306a36Sopenharmony_ci		obj->shares_resv_from = vm;
6162306a36Sopenharmony_ci	}
6262306a36Sopenharmony_ci
6362306a36Sopenharmony_ci	return obj;
6462306a36Sopenharmony_ci}
6562306a36Sopenharmony_ci
6662306a36Sopenharmony_cistruct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
6762306a36Sopenharmony_ci{
6862306a36Sopenharmony_ci	struct drm_i915_gem_object *obj;
6962306a36Sopenharmony_ci
7062306a36Sopenharmony_ci	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
7162306a36Sopenharmony_ci		i915_gem_shrink_all(vm->i915);
7262306a36Sopenharmony_ci
7362306a36Sopenharmony_ci	obj = i915_gem_object_create_internal(vm->i915, sz);
7462306a36Sopenharmony_ci	/*
7562306a36Sopenharmony_ci	 * Ensure all paging structures for this vm share the same dma-resv
7662306a36Sopenharmony_ci	 * object underneath, with the idea that one object_lock() will lock
7762306a36Sopenharmony_ci	 * them all at once.
7862306a36Sopenharmony_ci	 */
7962306a36Sopenharmony_ci	if (!IS_ERR(obj)) {
8062306a36Sopenharmony_ci		obj->base.resv = i915_vm_resv_get(vm);
8162306a36Sopenharmony_ci		obj->shares_resv_from = vm;
8262306a36Sopenharmony_ci	}
8362306a36Sopenharmony_ci
8462306a36Sopenharmony_ci	return obj;
8562306a36Sopenharmony_ci}
8662306a36Sopenharmony_ci
8762306a36Sopenharmony_ciint map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
8862306a36Sopenharmony_ci{
8962306a36Sopenharmony_ci	enum i915_map_type type;
9062306a36Sopenharmony_ci	void *vaddr;
9162306a36Sopenharmony_ci
9262306a36Sopenharmony_ci	type = intel_gt_coherent_map_type(vm->gt, obj, true);
9362306a36Sopenharmony_ci	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
9462306a36Sopenharmony_ci	if (IS_ERR(vaddr))
9562306a36Sopenharmony_ci		return PTR_ERR(vaddr);
9662306a36Sopenharmony_ci
9762306a36Sopenharmony_ci	i915_gem_object_make_unshrinkable(obj);
9862306a36Sopenharmony_ci	return 0;
9962306a36Sopenharmony_ci}
10062306a36Sopenharmony_ci
10162306a36Sopenharmony_ciint map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
10262306a36Sopenharmony_ci{
10362306a36Sopenharmony_ci	enum i915_map_type type;
10462306a36Sopenharmony_ci	void *vaddr;
10562306a36Sopenharmony_ci
10662306a36Sopenharmony_ci	type = intel_gt_coherent_map_type(vm->gt, obj, true);
10762306a36Sopenharmony_ci	vaddr = i915_gem_object_pin_map(obj, type);
10862306a36Sopenharmony_ci	if (IS_ERR(vaddr))
10962306a36Sopenharmony_ci		return PTR_ERR(vaddr);
11062306a36Sopenharmony_ci
11162306a36Sopenharmony_ci	i915_gem_object_make_unshrinkable(obj);
11262306a36Sopenharmony_ci	return 0;
11362306a36Sopenharmony_ci}
11462306a36Sopenharmony_ci
/*
 * clear_vm_list - drain every vma from one of the vm's lists
 * @list: &vm->bound_list or &vm->unbound_list
 *
 * Called with &vm->mutex held (see __i915_vm_close()). Each vma is either
 * destroyed on the spot (when we can still take a reference on its object)
 * or force-unbound and left for the object destructor to finish off.
 */
static void clear_vm_list(struct list_head *list)
{
	struct i915_vma *vma, *vn;

	list_for_each_entry_safe(vma, vn, list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (!i915_gem_object_get_rcu(obj)) {
			/*
			 * Object is dying, but has not yet cleared its
			 * vma list.
			 * Unbind the dying vma to ensure our list
			 * is completely drained. We leave the destruction to
			 * the object destructor to avoid the vma
			 * disappearing under it.
			 */
			/* Clear any pin counts so the forced unbind can proceed. */
			atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
			WARN_ON(__i915_vma_unbind(vma));

			/* Remove from the unbound list */
			list_del_init(&vma->vm_link);

			/*
			 * Delay the vm and vm mutex freeing until the
			 * object is done with destruction.
			 */
			i915_vm_resv_get(vma->vm);
			vma->vm_ddestroy = true;
		} else {
			/* We hold a reference: tear the vma down right away. */
			i915_vma_destroy_locked(vma);
			i915_gem_object_put(obj);
		}

	}
}
15062306a36Sopenharmony_ci
/*
 * Drain all vmas, bound and unbound, from the address space.
 *
 * Holds &vm->mutex across the drain so clear_vm_list() walks the lists
 * without racing against concurrent binds/unbinds.
 */
static void __i915_vm_close(struct i915_address_space *vm)
{
	mutex_lock(&vm->mutex);

	clear_vm_list(&vm->bound_list);
	clear_vm_list(&vm->unbound_list);

	/* Check for must-fix unanticipated side-effects */
	GEM_BUG_ON(!list_empty(&vm->bound_list));
	GEM_BUG_ON(!list_empty(&vm->unbound_list));

	mutex_unlock(&vm->mutex);
}
16462306a36Sopenharmony_ci
16562306a36Sopenharmony_ci/* lock the vm into the current ww, if we lock one, we lock all */
16662306a36Sopenharmony_ciint i915_vm_lock_objects(struct i915_address_space *vm,
16762306a36Sopenharmony_ci			 struct i915_gem_ww_ctx *ww)
16862306a36Sopenharmony_ci{
16962306a36Sopenharmony_ci	if (vm->scratch[0]->base.resv == &vm->_resv) {
17062306a36Sopenharmony_ci		return i915_gem_object_lock(vm->scratch[0], ww);
17162306a36Sopenharmony_ci	} else {
17262306a36Sopenharmony_ci		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
17362306a36Sopenharmony_ci
17462306a36Sopenharmony_ci		/* We borrowed the scratch page from ggtt, take the top level object */
17562306a36Sopenharmony_ci		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
17662306a36Sopenharmony_ci	}
17762306a36Sopenharmony_ci}
17862306a36Sopenharmony_ci
17962306a36Sopenharmony_civoid i915_address_space_fini(struct i915_address_space *vm)
18062306a36Sopenharmony_ci{
18162306a36Sopenharmony_ci	drm_mm_takedown(&vm->mm);
18262306a36Sopenharmony_ci}
18362306a36Sopenharmony_ci
18462306a36Sopenharmony_ci/**
18562306a36Sopenharmony_ci * i915_vm_resv_release - Final struct i915_address_space destructor
18662306a36Sopenharmony_ci * @kref: Pointer to the &i915_address_space.resv_ref member.
18762306a36Sopenharmony_ci *
18862306a36Sopenharmony_ci * This function is called when the last lock sharer no longer shares the
18962306a36Sopenharmony_ci * &i915_address_space._resv lock, and also if we raced when
19062306a36Sopenharmony_ci * destroying a vma by the vma destruction
19162306a36Sopenharmony_ci */
19262306a36Sopenharmony_civoid i915_vm_resv_release(struct kref *kref)
19362306a36Sopenharmony_ci{
19462306a36Sopenharmony_ci	struct i915_address_space *vm =
19562306a36Sopenharmony_ci		container_of(kref, typeof(*vm), resv_ref);
19662306a36Sopenharmony_ci
19762306a36Sopenharmony_ci	dma_resv_fini(&vm->_resv);
19862306a36Sopenharmony_ci	mutex_destroy(&vm->mutex);
19962306a36Sopenharmony_ci
20062306a36Sopenharmony_ci	kfree(vm);
20162306a36Sopenharmony_ci}
20262306a36Sopenharmony_ci
/*
 * Deferred vm destruction, run from the driver workqueue (queued by
 * i915_vm_release()). Drains all vmas, waits for async unbinds, runs the
 * vm's own cleanup hook, then drops the final resv reference -- which may
 * free the vm itself via i915_vm_resv_release().
 */
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	__i915_vm_close(vm);

	/* Synchronize async unbinds. */
	i915_vma_resource_bind_dep_sync_all(vm);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}
21862306a36Sopenharmony_ci
21962306a36Sopenharmony_civoid i915_vm_release(struct kref *kref)
22062306a36Sopenharmony_ci{
22162306a36Sopenharmony_ci	struct i915_address_space *vm =
22262306a36Sopenharmony_ci		container_of(kref, struct i915_address_space, ref);
22362306a36Sopenharmony_ci
22462306a36Sopenharmony_ci	GEM_BUG_ON(i915_is_ggtt(vm));
22562306a36Sopenharmony_ci	trace_i915_ppgtt_release(vm);
22662306a36Sopenharmony_ci
22762306a36Sopenharmony_ci	queue_work(vm->i915->wq, &vm->release_work);
22862306a36Sopenharmony_ci}
22962306a36Sopenharmony_ci
/*
 * i915_address_space_init - finish constructing an address space
 * @vm: address space to initialise; @vm->i915 and @vm->total must already
 *      be set by the caller
 * @subclass: lockdep subclass for &vm->mutex
 */
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for GGTT that has already done an early
	 * kref_init here.
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	vm->pending_unbind = RB_ROOT_CACHED;
	INIT_WORK(&vm->release_work, __i915_vm_release);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * CHV + BXT VTD workaround use stop_machine(),
		 * which is allowed to allocate memory. This means &vm->mutex
		 * is the outer lock, and in theory we can allocate memory inside
		 * it through stop_machine().
		 *
		 * Add the annotation for this, we use trylock in shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);

	/* Default every memory region to the minimum GTT alignment. */
	memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
		 ARRAY_SIZE(vm->min_alignment));

	/* Platforms with 64K device pages require 64K-aligned local memory. */
	if (HAS_64K_PAGES(vm->i915)) {
		vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
		vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
	}

	/* Colour the drm_mm head node I915_COLOR_UNEVICTABLE. */
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
	INIT_LIST_HEAD(&vm->unbound_list);
}
28562306a36Sopenharmony_ci
28662306a36Sopenharmony_civoid *__px_vaddr(struct drm_i915_gem_object *p)
28762306a36Sopenharmony_ci{
28862306a36Sopenharmony_ci	enum i915_map_type type;
28962306a36Sopenharmony_ci
29062306a36Sopenharmony_ci	GEM_BUG_ON(!i915_gem_object_has_pages(p));
29162306a36Sopenharmony_ci	return page_unpack_bits(p->mm.mapping, &type);
29262306a36Sopenharmony_ci}
29362306a36Sopenharmony_ci
29462306a36Sopenharmony_cidma_addr_t __px_dma(struct drm_i915_gem_object *p)
29562306a36Sopenharmony_ci{
29662306a36Sopenharmony_ci	GEM_BUG_ON(!i915_gem_object_has_pages(p));
29762306a36Sopenharmony_ci	return sg_dma_address(p->mm.pages->sgl);
29862306a36Sopenharmony_ci}
29962306a36Sopenharmony_ci
30062306a36Sopenharmony_cistruct page *__px_page(struct drm_i915_gem_object *p)
30162306a36Sopenharmony_ci{
30262306a36Sopenharmony_ci	GEM_BUG_ON(!i915_gem_object_has_pages(p));
30362306a36Sopenharmony_ci	return sg_page(p->mm.pages->sgl);
30462306a36Sopenharmony_ci}
30562306a36Sopenharmony_ci
30662306a36Sopenharmony_civoid
30762306a36Sopenharmony_cifill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
30862306a36Sopenharmony_ci{
30962306a36Sopenharmony_ci	void *vaddr = __px_vaddr(p);
31062306a36Sopenharmony_ci
31162306a36Sopenharmony_ci	memset64(vaddr, val, count);
31262306a36Sopenharmony_ci	drm_clflush_virt_range(vaddr, PAGE_SIZE);
31362306a36Sopenharmony_ci}
31462306a36Sopenharmony_ci
31562306a36Sopenharmony_cistatic void poison_scratch_page(struct drm_i915_gem_object *scratch)
31662306a36Sopenharmony_ci{
31762306a36Sopenharmony_ci	void *vaddr = __px_vaddr(scratch);
31862306a36Sopenharmony_ci	u8 val;
31962306a36Sopenharmony_ci
32062306a36Sopenharmony_ci	val = 0;
32162306a36Sopenharmony_ci	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
32262306a36Sopenharmony_ci		val = POISON_FREE;
32362306a36Sopenharmony_ci
32462306a36Sopenharmony_ci	memset(vaddr, val, scratch->base.size);
32562306a36Sopenharmony_ci	drm_clflush_virt_range(vaddr, scratch->base.size);
32662306a36Sopenharmony_ci}
32762306a36Sopenharmony_ci
/*
 * setup_scratch_page - allocate and map the vm's level-0 scratch page
 * @vm: address space to provide with scratch
 *
 * Tries a 64K scratch first where huge-gtt-pages require it, retrying with
 * a plain 4K page on any failure. Returns 0 on success and sets
 * @vm->scratch[0]/@vm->scratch_order, or -ENOMEM if even the 4K attempt
 * fails.
 */
int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
	    !HAS_64K_PAGES(vm->i915))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_scratch_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		/* 4K was the fallback already: nothing smaller to try. */
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		/* Retry once more with the minimal 4K scratch. */
		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}
39162306a36Sopenharmony_ci
39262306a36Sopenharmony_civoid free_scratch(struct i915_address_space *vm)
39362306a36Sopenharmony_ci{
39462306a36Sopenharmony_ci	int i;
39562306a36Sopenharmony_ci
39662306a36Sopenharmony_ci	if (!vm->scratch[0])
39762306a36Sopenharmony_ci		return;
39862306a36Sopenharmony_ci
39962306a36Sopenharmony_ci	for (i = 0; i <= vm->top; i++)
40062306a36Sopenharmony_ci		i915_gem_object_put(vm->scratch[i]);
40162306a36Sopenharmony_ci}
40262306a36Sopenharmony_ci
/*
 * gtt_write_workarounds - apply GTT-related workaround register writes
 * @gt: the GT whose uncore registers are programmed
 *
 * Called on driver load and after a GPU reset (per the note below), so
 * everything written here must be safe to re-apply.
 */
void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages, and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		/* Warn if the enable did not stick (read back as zero). */
		gt_WARN_ON_ONCE(gt, can_use_gtt_cache &&
				intel_uncore_read(uncore,
						  HSW_GTT_CACHE_EN) == 0);
	}
}
47062306a36Sopenharmony_ci
47162306a36Sopenharmony_cistatic void xelpmp_setup_private_ppat(struct intel_uncore *uncore)
47262306a36Sopenharmony_ci{
47362306a36Sopenharmony_ci	intel_uncore_write(uncore, XELPMP_PAT_INDEX(0),
47462306a36Sopenharmony_ci			   MTL_PPAT_L4_0_WB);
47562306a36Sopenharmony_ci	intel_uncore_write(uncore, XELPMP_PAT_INDEX(1),
47662306a36Sopenharmony_ci			   MTL_PPAT_L4_1_WT);
47762306a36Sopenharmony_ci	intel_uncore_write(uncore, XELPMP_PAT_INDEX(2),
47862306a36Sopenharmony_ci			   MTL_PPAT_L4_3_UC);
47962306a36Sopenharmony_ci	intel_uncore_write(uncore, XELPMP_PAT_INDEX(3),
48062306a36Sopenharmony_ci			   MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
48162306a36Sopenharmony_ci	intel_uncore_write(uncore, XELPMP_PAT_INDEX(4),
48262306a36Sopenharmony_ci			   MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
48362306a36Sopenharmony_ci
48462306a36Sopenharmony_ci	/*
48562306a36Sopenharmony_ci	 * Remaining PAT entries are left at the hardware-default
48662306a36Sopenharmony_ci	 * fully-cached setting
48762306a36Sopenharmony_ci	 */
48862306a36Sopenharmony_ci}
48962306a36Sopenharmony_ci
49062306a36Sopenharmony_cistatic void xelpg_setup_private_ppat(struct intel_gt *gt)
49162306a36Sopenharmony_ci{
49262306a36Sopenharmony_ci	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0),
49362306a36Sopenharmony_ci				     MTL_PPAT_L4_0_WB);
49462306a36Sopenharmony_ci	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1),
49562306a36Sopenharmony_ci				     MTL_PPAT_L4_1_WT);
49662306a36Sopenharmony_ci	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2),
49762306a36Sopenharmony_ci				     MTL_PPAT_L4_3_UC);
49862306a36Sopenharmony_ci	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3),
49962306a36Sopenharmony_ci				     MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
50062306a36Sopenharmony_ci	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4),
50162306a36Sopenharmony_ci				     MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
50262306a36Sopenharmony_ci
50362306a36Sopenharmony_ci	/*
50462306a36Sopenharmony_ci	 * Remaining PAT entries are left at the hardware-default
50562306a36Sopenharmony_ci	 * fully-cached setting
50662306a36Sopenharmony_ci	 */
50762306a36Sopenharmony_ci}
50862306a36Sopenharmony_ci
50962306a36Sopenharmony_cistatic void tgl_setup_private_ppat(struct intel_uncore *uncore)
51062306a36Sopenharmony_ci{
51162306a36Sopenharmony_ci	/* TGL doesn't support LLC or AGE settings */
51262306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
51362306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
51462306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
51562306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
51662306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
51762306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
51862306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
51962306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
52062306a36Sopenharmony_ci}
52162306a36Sopenharmony_ci
/*
 * Program the XeHP private PAT via raw multicast MCR writes.
 *
 * Holds the forcewake domains needed to write the first PAT register
 * (presumably the same domains cover the whole PAT range -- confirm
 * against the register layout) and the MCR lock, as required for the
 * _fw multicast write variants used below.
 */
static void xehp_setup_private_ppat(struct intel_gt *gt)
{
	enum forcewake_domains fw;
	unsigned long flags;

	fw = intel_uncore_forcewake_for_reg(gt->uncore, _MMIO(XEHP_PAT_INDEX(0).reg),
					    FW_REG_WRITE);
	intel_uncore_forcewake_get(gt->uncore, fw);

	intel_gt_mcr_lock(gt, &flags);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
	intel_gt_mcr_unlock(gt, flags);

	intel_uncore_forcewake_put(gt->uncore, fw);
}
54462306a36Sopenharmony_ci
54562306a36Sopenharmony_cistatic void icl_setup_private_ppat(struct intel_uncore *uncore)
54662306a36Sopenharmony_ci{
54762306a36Sopenharmony_ci	intel_uncore_write(uncore,
54862306a36Sopenharmony_ci			   GEN10_PAT_INDEX(0),
54962306a36Sopenharmony_ci			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
55062306a36Sopenharmony_ci	intel_uncore_write(uncore,
55162306a36Sopenharmony_ci			   GEN10_PAT_INDEX(1),
55262306a36Sopenharmony_ci			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
55362306a36Sopenharmony_ci	intel_uncore_write(uncore,
55462306a36Sopenharmony_ci			   GEN10_PAT_INDEX(2),
55562306a36Sopenharmony_ci			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
55662306a36Sopenharmony_ci	intel_uncore_write(uncore,
55762306a36Sopenharmony_ci			   GEN10_PAT_INDEX(3),
55862306a36Sopenharmony_ci			   GEN8_PPAT_UC);
55962306a36Sopenharmony_ci	intel_uncore_write(uncore,
56062306a36Sopenharmony_ci			   GEN10_PAT_INDEX(4),
56162306a36Sopenharmony_ci			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
56262306a36Sopenharmony_ci	intel_uncore_write(uncore,
56362306a36Sopenharmony_ci			   GEN10_PAT_INDEX(5),
56462306a36Sopenharmony_ci			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
56562306a36Sopenharmony_ci	intel_uncore_write(uncore,
56662306a36Sopenharmony_ci			   GEN10_PAT_INDEX(6),
56762306a36Sopenharmony_ci			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
56862306a36Sopenharmony_ci	intel_uncore_write(uncore,
56962306a36Sopenharmony_ci			   GEN10_PAT_INDEX(7),
57062306a36Sopenharmony_ci			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
57162306a36Sopenharmony_ci}
57262306a36Sopenharmony_ci
57362306a36Sopenharmony_ci/*
57462306a36Sopenharmony_ci * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
57562306a36Sopenharmony_ci * bits. When using advanced contexts each context stores its own PAT, but
57662306a36Sopenharmony_ci * writing this data shouldn't be harmful even in those cases.
57762306a36Sopenharmony_ci */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	/* Compose all eight 8b PAT entries into one 64b value. */
	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	/* The 64b PAT value is split across two 32b registers. */
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
60062306a36Sopenharmony_ci
60162306a36Sopenharmony_cistatic void chv_setup_private_ppat(struct intel_uncore *uncore)
60262306a36Sopenharmony_ci{
60362306a36Sopenharmony_ci	u64 pat;
60462306a36Sopenharmony_ci
60562306a36Sopenharmony_ci	/*
60662306a36Sopenharmony_ci	 * Map WB on BDW to snooped on CHV.
60762306a36Sopenharmony_ci	 *
60862306a36Sopenharmony_ci	 * Only the snoop bit has meaning for CHV, the rest is
60962306a36Sopenharmony_ci	 * ignored.
61062306a36Sopenharmony_ci	 *
61162306a36Sopenharmony_ci	 * The hardware will never snoop for certain types of accesses:
61262306a36Sopenharmony_ci	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
61362306a36Sopenharmony_ci	 * - PPGTT page tables
61462306a36Sopenharmony_ci	 * - some other special cycles
61562306a36Sopenharmony_ci	 *
61662306a36Sopenharmony_ci	 * As with BDW, we also need to consider the following for GT accesses:
61762306a36Sopenharmony_ci	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
61862306a36Sopenharmony_ci	 * so RTL will always use the value corresponding to
61962306a36Sopenharmony_ci	 * pat_sel = 000".
62062306a36Sopenharmony_ci	 * Which means we must set the snoop bit in PAT entry 0
62162306a36Sopenharmony_ci	 * in order to keep the global status page working.
62262306a36Sopenharmony_ci	 */
62362306a36Sopenharmony_ci
62462306a36Sopenharmony_ci	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
62562306a36Sopenharmony_ci	      GEN8_PPAT(1, 0) |
62662306a36Sopenharmony_ci	      GEN8_PPAT(2, 0) |
62762306a36Sopenharmony_ci	      GEN8_PPAT(3, 0) |
62862306a36Sopenharmony_ci	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
62962306a36Sopenharmony_ci	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
63062306a36Sopenharmony_ci	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
63162306a36Sopenharmony_ci	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
63262306a36Sopenharmony_ci
63362306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
63462306a36Sopenharmony_ci	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
63562306a36Sopenharmony_ci}
63662306a36Sopenharmony_ci
63762306a36Sopenharmony_civoid setup_private_pat(struct intel_gt *gt)
63862306a36Sopenharmony_ci{
63962306a36Sopenharmony_ci	struct intel_uncore *uncore = gt->uncore;
64062306a36Sopenharmony_ci	struct drm_i915_private *i915 = gt->i915;
64162306a36Sopenharmony_ci
64262306a36Sopenharmony_ci	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
64362306a36Sopenharmony_ci
64462306a36Sopenharmony_ci	if (gt->type == GT_MEDIA) {
64562306a36Sopenharmony_ci		xelpmp_setup_private_ppat(gt->uncore);
64662306a36Sopenharmony_ci		return;
64762306a36Sopenharmony_ci	}
64862306a36Sopenharmony_ci
64962306a36Sopenharmony_ci	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
65062306a36Sopenharmony_ci		xelpg_setup_private_ppat(gt);
65162306a36Sopenharmony_ci	else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
65262306a36Sopenharmony_ci		xehp_setup_private_ppat(gt);
65362306a36Sopenharmony_ci	else if (GRAPHICS_VER(i915) >= 12)
65462306a36Sopenharmony_ci		tgl_setup_private_ppat(uncore);
65562306a36Sopenharmony_ci	else if (GRAPHICS_VER(i915) >= 11)
65662306a36Sopenharmony_ci		icl_setup_private_ppat(uncore);
65762306a36Sopenharmony_ci	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
65862306a36Sopenharmony_ci		chv_setup_private_ppat(uncore);
65962306a36Sopenharmony_ci	else
66062306a36Sopenharmony_ci		bdw_setup_private_ppat(uncore);
66162306a36Sopenharmony_ci}
66262306a36Sopenharmony_ci
66362306a36Sopenharmony_cistruct i915_vma *
66462306a36Sopenharmony_ci__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
66562306a36Sopenharmony_ci{
66662306a36Sopenharmony_ci	struct drm_i915_gem_object *obj;
66762306a36Sopenharmony_ci	struct i915_vma *vma;
66862306a36Sopenharmony_ci
66962306a36Sopenharmony_ci	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
67062306a36Sopenharmony_ci	if (IS_ERR(obj))
67162306a36Sopenharmony_ci		return ERR_CAST(obj);
67262306a36Sopenharmony_ci
67362306a36Sopenharmony_ci	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
67462306a36Sopenharmony_ci
67562306a36Sopenharmony_ci	vma = i915_vma_instance(obj, vm, NULL);
67662306a36Sopenharmony_ci	if (IS_ERR(vma)) {
67762306a36Sopenharmony_ci		i915_gem_object_put(obj);
67862306a36Sopenharmony_ci		return vma;
67962306a36Sopenharmony_ci	}
68062306a36Sopenharmony_ci
68162306a36Sopenharmony_ci	return vma;
68262306a36Sopenharmony_ci}
68362306a36Sopenharmony_ci
68462306a36Sopenharmony_cistruct i915_vma *
68562306a36Sopenharmony_ci__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
68662306a36Sopenharmony_ci{
68762306a36Sopenharmony_ci	struct i915_vma *vma;
68862306a36Sopenharmony_ci	int err;
68962306a36Sopenharmony_ci
69062306a36Sopenharmony_ci	vma = __vm_create_scratch_for_read(vm, size);
69162306a36Sopenharmony_ci	if (IS_ERR(vma))
69262306a36Sopenharmony_ci		return vma;
69362306a36Sopenharmony_ci
69462306a36Sopenharmony_ci	err = i915_vma_pin(vma, 0, 0,
69562306a36Sopenharmony_ci			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
69662306a36Sopenharmony_ci	if (err) {
69762306a36Sopenharmony_ci		i915_vma_put(vma);
69862306a36Sopenharmony_ci		return ERR_PTR(err);
69962306a36Sopenharmony_ci	}
70062306a36Sopenharmony_ci
70162306a36Sopenharmony_ci	return vma;
70262306a36Sopenharmony_ci}
70362306a36Sopenharmony_ci
70462306a36Sopenharmony_ci#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
70562306a36Sopenharmony_ci#include "selftests/mock_gtt.c"
70662306a36Sopenharmony_ci#endif
707