/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
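/*
 * The hardware ASID occupies bits 63:48 of the TTBR. With the kernel
 * unmapped at EL0 (kpti), user and kernel run under distinct ASIDs:
 * USER_ASID_FLAG marks the userspace ASID in TTBR format and
 * TTBR_ASID_MASK covers the whole ASID field.
 */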
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

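/*
 * Number of EL2 vector slots reserved for branch-predictor hardening
 * sequences; each slot is SZ_2K, giving __BP_HARDEN_HYP_VECS_SZ in total.
 */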
#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)

#ifndef __ASSEMBLY__

#include <linux/refcount.h>

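/*
 * Per-'mm_struct' MMU context:
 *
 *  @id:      ASID, with the rollover generation in the upper bits
 *  @sigpage: AArch32 signal trampoline page (compat tasks only)
 *  @pinned:  callers holding this ASID stable across rollover
 *  @vdso:    base of the vDSO mapping
 *  @flags:   MMCF_* flags (e.g. MMCF_AARCH32)
 */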
typedef struct {
	atomic64_t	id;
#ifdef CONFIG_COMPAT
	void		*sigpage;
#endif
	refcount_t	pinned;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                    CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                                                  <rollover>
 *         |                   new = new_context(mm)
 *         \-----------------> atomic64_set(mm->context.id, new)
 *                             cpu_switch_mm(mm)
 *                             // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
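/*
 * Only the bottom 16 bits of context.id form the hardware ASID; the
 * upper bits hold the rollover generation.
 */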
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)

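/* True when kernel mappings are hidden from EL0, i.e. kpti is in use. */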
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

typedef void (*bp_hardening_cb_t)(void);

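/*
 * Per-CPU branch-predictor hardening state: the EL2 vector slot to use
 * and the callback that invalidates the branch predictor (typically an
 * SMCCC ARCH_WORKAROUND_1 call into firmware).
 */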
struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;

	/*
	 * template_start is only used by the BHB mitigation to identify
	 * which vector sequence is installed in hyp_vectors_slot.
	 */
	const char *template_start;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

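/* Invoke the branch-predictor hardening callback; a no-op on CPUs that
 * do not need the Spectre-v2 mitigation. */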
static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}

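/* Boot-time memory and page-table set-up hooks. */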
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

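/* init_mm starts out on the boot-time init_pg_dir page tables. */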
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

#endif	/* !__ASSEMBLY__ */
#endif	/* __ASM_MMU_H */