/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;
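	/*
	 * (Assignment sketch: init_new_context() in <asm/mmu_context.h>
	 * hands out ctx_ids from a global, monotonically increasing
	 * atomic64 counter, so a value is never recycled; init_mm is
	 * statically given ctx_id 1 by INIT_MM_CONTEXT() below.)
	 */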

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush.  This lets the low-level
	 * flushing code keep track of what needs flushing.  (A sketch
	 * of this sequence follows the struct definition below.)
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
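	/*
	 * ldt_usr_sem serializes userspace-visible LDT updates: taken
	 * for write by modify_ldt() and for read when fork() duplicates
	 * the LDT (see arch/x86/kernel/ldt.c).
	 */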
	struct rw_semaphore	ldt_usr_sem;
	struct ldt_struct	*ldt;
#endif

#ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
	unsigned short ia32_compat;
#endif

	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  Protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
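	/*
	 * The pkey reserved for execute-only mappings, or -1 while no
	 * such pkey has been allocated for this mm.
	 */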
	s16 execute_only_pkey;
#endif
} mm_context_t;
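
/*
 * A sketch of the tlb_gen protocol described above, illustrative
 * only: in practice the increment and the flush are both driven by
 * flush_tlb_mm_range() in arch/x86/mm/tlb.c, roughly:
 *
 *	set_pte_at(mm, addr, ptep, pte);	// 1) change the page tables
 *	inc_mm_tlb_gen(mm);			// 2) atomically bump
 *						//    mm->context.tlb_gen
 *	flush_tlb_others(mm_cpumask(mm), info);	// 3) tell other CPUs to
 *						//    catch up to the new
 *						//    generation
 */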
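/*
 * Static initializer for init_mm's context: init_mm owns ctx_id 1;
 * every other mm gets a fresh ctx_id from init_new_context().
 */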
#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
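/*
 * leave_mm() (arch/x86/mm/tlb.c) switches a CPU that is lazily
 * running this mm over to init_mm.  The #define lets generic code
 * detect that the architecture supplies its own implementation.
 */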
void leave_mm(int cpu);
#define leave_mm leave_mm

#endif /* _ASM_X86_MMU_H */