/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
58c2ecf20Sopenharmony_ci#ifndef __ASM_MMU_H
68c2ecf20Sopenharmony_ci#define __ASM_MMU_H
78c2ecf20Sopenharmony_ci
88c2ecf20Sopenharmony_ci#include <asm/cputype.h>
98c2ecf20Sopenharmony_ci
#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
/* The hardware ASID lives in bits [63:48] of TTBR; bit 48 tags user ASIDs. */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

/* Number of EL2 (hyp) vector slots reserved for branch-predictor hardening. */
#define BP_HARDEN_EL2_SLOTS 4
/* Total size of the hyp hardening vectors: one 2K vector per slot. */
#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)
178c2ecf20Sopenharmony_ci
188c2ecf20Sopenharmony_ci#ifndef __ASSEMBLY__
198c2ecf20Sopenharmony_ci
208c2ecf20Sopenharmony_ci#include <linux/refcount.h>
218c2ecf20Sopenharmony_ci
/*
 * Architecture-specific per-'mm_struct' context.
 */
typedef struct {
	atomic64_t	id;		/* ASID for this mm (see ASID() below) */
#ifdef CONFIG_COMPAT
	void		*sigpage;	/* AArch32 signal trampoline page (compat only) */
#endif
	refcount_t	pinned;		/* presumably a pin count keeping the ASID stable — confirm against ASID allocator */
	void		*vdso;		/* base of this mm's vDSO mapping */
	unsigned long	flags;	/* MMCF_* flags, e.g. MMCF_AARCH32 */
} mm_context_t;
318c2ecf20Sopenharmony_ci
/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                    CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                                                  <rollover>
 *         |                   new = new_context(mm)
 *         \-----------------> atomic_set(mm->context.id, new)
 *                             cpu_switch_mm(mm)
 *                             // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
/* Extract the 16-bit hardware ASID (same width as TTBR_ASID_MASK >> 48). */
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
598c2ecf20Sopenharmony_ci
608c2ecf20Sopenharmony_cistatic inline bool arm64_kernel_unmapped_at_el0(void)
618c2ecf20Sopenharmony_ci{
628c2ecf20Sopenharmony_ci	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
638c2ecf20Sopenharmony_ci}
648c2ecf20Sopenharmony_ci
/* Callback type invoked to harden the branch predictor. */
typedef void (*bp_hardening_cb_t)(void);

/*
 * Per-CPU branch-predictor hardening state.
 */
struct bp_hardening_data {
	int			hyp_vectors_slot;	/* EL2 vector slot index — presumably < BP_HARDEN_EL2_SLOTS; confirm */
	bp_hardening_cb_t	fn;		/* hardening callback, NULL if none */

	/*
	 * template_start is only used by the BHB mitigation to identify the
	 * hyp_vectors_slot sequence.
	 */
	const char *template_start;
};

/* One bp_hardening_data instance per CPU, defined elsewhere. */
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
798c2ecf20Sopenharmony_ci
808c2ecf20Sopenharmony_cistatic inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
818c2ecf20Sopenharmony_ci{
828c2ecf20Sopenharmony_ci	return this_cpu_ptr(&bp_hardening_data);
838c2ecf20Sopenharmony_ci}
848c2ecf20Sopenharmony_ci
858c2ecf20Sopenharmony_cistatic inline void arm64_apply_bp_hardening(void)
868c2ecf20Sopenharmony_ci{
878c2ecf20Sopenharmony_ci	struct bp_hardening_data *d;
888c2ecf20Sopenharmony_ci
898c2ecf20Sopenharmony_ci	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
908c2ecf20Sopenharmony_ci		return;
918c2ecf20Sopenharmony_ci
928c2ecf20Sopenharmony_ci	d = arm64_get_bp_hardening_data();
938c2ecf20Sopenharmony_ci	if (d->fn)
948c2ecf20Sopenharmony_ci		d->fn();
958c2ecf20Sopenharmony_ci}
968c2ecf20Sopenharmony_ci
/* Early memory/paging bring-up entry points, implemented elsewhere. */
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
/* Map [phys, phys+size) at 'virt' in 'mm' with 'prot';
 * page_mappings_only forbids block (huge) mappings. */
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
/* Map the device tree blob through the fixmap; *size receives its length. */
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

/* Initializer for init_mm's context: point its pgd at the initial page dir. */
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,
1118c2ecf20Sopenharmony_ci
1128c2ecf20Sopenharmony_ci#endif	/* !__ASSEMBLY__ */
1138c2ecf20Sopenharmony_ci#endif
114