// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

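/*
 * PDE/PTE encoding: each hardware descriptor is simply the 4K-aligned dma
 * address of the next level (or of the final page) OR'ed with the
 * present/RW bits and a PPAT cacheability selection. For example, an
 * LLC-cached, writable PTE for a page at dma address 0x1234000 encodes as
 * 0x1234000 | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED.
 */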
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
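/*
 * Worked example: GEN8_PDES = 4096 / 8 = 512, so each level consumes
 * ilog2(512) = 9 bits of the address above GEN8_PTE_SHIFT = 12. A GPU
 * virtual address in the 4-level layout therefore decomposes as
 * bits [20:12] -> PTE index, [29:21] -> PDE, [38:30] -> PDPE and
 * [47:39] -> PML4E, i.e. __gen8_pte_index(addr, lvl) for lvl = 0..3.
 */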

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

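/*
 * Count the directory entries [*idx, *idx + return value) spanned by the
 * range [start, end) at level @lvl. If the range extends beyond the
 * current directory, the count is clamped at the directory boundary; the
 * caller, operating one level up, supplies the following directories.
 */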
static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

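/*
 * Recursively tear down a page directory: at each level above the final
 * page tables, visit every populated entry and free its subtree, then
 * release the directory itself back to the vm.
 */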
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

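/*
 * Unbind the range [start, end) (in page units) from the tree rooted at
 * @pd. A subtree that is both fully populated and entirely covered by the
 * range is unhooked and freed wholesale; otherwise we descend and rewrite
 * the individual PTEs with the scratch encoding. Returns the first page
 * index not yet processed, so the recursion can continue across directory
 * boundaries.
 */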
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

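/*
 * Populate the range [*start, end) (in page units), pulling preallocated
 * page tables and directories from @stash as needed. A new table is first
 * filled with the scratch encoding of the level below and only then hooked
 * into its parent; pd->lock is dropped around that initialisation so only
 * the installation of the entry itself is serialised against concurrent
 * allocators.
 */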
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);
			i915_gem_object_make_unshrinkable(pt->base);

			fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

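/*
 * Write out 4K PTEs for the scatterlist, starting at page index @idx. The
 * kmap of the current page table is reused until we step over a
 * page-table (or page-directory) boundary, at which point the next table
 * is mapped. Returns the next index still to be filled, or 0 once the
 * scatterlist has been exhausted.
 */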
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);

	return idx;
}

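/*
 * Insert using the largest page size each chunk of the scatterlist
 * permits: 2M PDEs when the dma address, remaining length and GTT offset
 * all line up, 64K PTEs (via the PDE's IPS bit) when a whole page table
 * can be described that way, and ordinary 4K PTEs otherwise.
 */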
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);
		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or we have
		 * filled part of it, reached the end of the sg table and have
		 * enough scratch padding to cover the remainder.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg);
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

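/*
 * Build the scratch hierarchy: scratch[0] is the data page every unused
 * PTE points at, and each scratch[i] above it is a page directory filled
 * entirely with the encoding of scratch[i - 1]. Unpopulated parts of the
 * address space therefore always resolve to the scratch page.
 */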
static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj))
			goto free_scratch;

		ret = pin_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return -ENOMEM;
}

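/*
 * With the legacy 3-level layout the top level holds only 4 entries (the
 * PDPs). Allocate and pin all of them up front so that every PDP always
 * points at a valid page directory filled with scratch entries.
 */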
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = pin_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

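/*
 * Allocate the root of the page-table tree. Its size follows from the
 * address-space size: e.g. 512 PML4 entries for a full 48b, 4-level vm,
 * but only the 4 PDP slots for the 32b legacy layout.
 */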
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = pin_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of legacy 32b
 * address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}