/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
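
/*
 * Minimal usage sketch (illustrative, not part of this header): a driver
 * mapping device memory into userspace would typically do
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * so the mapping is created with the UC- cache mode.
 */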

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
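
/*
 * Illustrative sketch (assumption, not taken from this header): with SME
 * active, memory that must be shared unencrypted with a device would be
 * mapped with the encryption bit cleared, e.g.
 *
 *	prot = pgprot_decrypted(PAGE_KERNEL);
 *
 * When SME is not active, __sme_set()/__sme_clr() are no-ops and the
 * protection value is left unchanged.
 */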

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return rdpkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	struct pkru_state *pk;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);

	/*
	 * The PKRU value in xstate needs to be in sync with the value that is
	 * written to the CPU. The FPU restore on return to userland would
	 * otherwise load the previous value again.
	 */
	fpregs_lock();
	if (pk)
		pk->pkru = pkru;
	__write_pkru(pkru);
	fpregs_unlock();
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
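
/*
 * Sketch of the intent (the actual predicate lives in asm/pgtable-invert.h):
 * the PFN bits of non-present entries (e.g. PROT_NONE) are stored inverted
 * as an L1TF mitigation.  protnone_mask() is all-ones for such entries and
 * zero otherwise, so the XOR above recovers the true PFN while leaving
 * present entries untouched.
 */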

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when testing for a huge page, consider also pmd_devmap(), or use pmd_large() */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}
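
/*
 * Worked example (illustrative assumption): on a CPU without NX support,
 * __supported_pte_mask has _PAGE_NX cleared, so massage_pgprot(PAGE_KERNEL)
 * silently drops the NX bit, while check_pgprot(PAGE_KERNEL) additionally
 * warns once under CONFIG_DEBUG_VM that an unsupported bit was requested.
 * A non-present value such as PAGE_NONE is passed through untouched.
 */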

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}
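
/*
 * Illustrative sketch (assumption about the caller, not a definition): when
 * a write-combining mapping has its permissions changed, the generic code
 * conceptually does
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(newflags));
 *
 * and because the cache-attribute and encryption bits are part of
 * _PAGE_CHG_MASK, the existing cache mode survives the permission change.
 */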

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
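
/*
 * Example of the rule above (illustrative): a caller that requested
 * write-combine (pcm == _PAGE_CACHE_MODE_WC) but whose range resolves to a
 * write-back memtype (new_pcm == _PAGE_CACHE_MODE_WB) gets 0, i.e. the more
 * cached return type is refused; the reverse direction (WB requested, WC
 * returned) is still allowed.
 */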

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
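
/*
 * Minimal usage sketch (assumption): code that writes a kernel PGD entry
 * funnels the value through this helper so the PTI user copy stays in sync,
 * e.g.
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 *
 * With PTI disabled this degenerates to a plain store of @pgd.
 */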
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLY__ */


#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif
/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
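
/*
 * Worked example (illustrative): with 4-level paging PGD_KERNEL_START is
 * PTRS_PER_PGD / 2 = 256, so a pointer to slot 10 of a PGD page maps
 * userspace (10 < 256) while a pointer to slot 300 maps the kernel half.
 */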

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
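
/*
 * Address arithmetic sketch (illustrative values): if the kernel PGD page
 * sits at ...c000, the user copy sits at ...d000, and
 *
 *	kernel_to_user_pgdp((pgd_t *)...c000) == (pgd_t *)...d000
 *
 * because the two halves of the 8k allocation differ only in bit
 * PTI_PGTABLE_SWITCH_BIT (bit 12, i.e. 4k).
 */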
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}
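
/*
 * Typical use (sketch of the common pattern, not a definition): copying the
 * kernel half of init_mm's page tables into a freshly allocated PGD, e.g.
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */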

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
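
/*
 * Worked example (x86-64, PTE_SHIFT == 9): PG_LEVEL_4K gives a shift of 12
 * (4 KiB), PG_LEVEL_2M gives 21 (2 MiB), PG_LEVEL_1G gives 30 (1 GiB);
 * page_level_mask() is then the mask that rounds an address down to that
 * boundary.
 */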

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define PKRU_AD_BIT 0x1u
#define PKRU_WD_BIT 0x2u
#define PKRU_BITS_PER_PKEY 2

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
extern u32 init_pkru_value;
#else
#define init_pkru_value	0
#endif

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
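
/*
 * PKRU layout example (illustrative): each pkey owns two adjacent bits, AD
 * at bit (2 * pkey) and WD at bit (2 * pkey + 1).  With pkru == 0x8 only
 * WD of pkey 1 is set, so __pkru_allows_read(0x8, 1) is true while
 * __pkru_allows_write(0x8, 1) is false.
 */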

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_faults_on_old_pte arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	return false;
}

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */
