// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
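/*
 * The mask covering the linear-map bits of the kernel VA that are kept
 * unchanged in the HYP VA (bits [tag_lsb - 1:0])
 */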
static u64 va_mask;

/*
 * Compute HYP VA by using the same computation as kern_hyp_va().
 */
static u64 __early_kern_hyp_va(u64 addr)
{
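	/* Keep the low kernel VA bits covered by va_mask, then OR in the tag at tag_lsb. */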
	addr &= va_mask;
	addr |= tag_val << tag_lsb;
	return addr;
}

/*
 * Store a hyp VA <-> PA offset into an EL2-owned variable.
 */
static void init_hyp_physvirt_offset(void)
{
	u64 kern_va, hyp_va;

	/* Compute the offset between the hyp VA and the PA of a random symbol. */
	kern_va = (u64)lm_alias(__hyp_text_start);
	hyp_va = __early_kern_hyp_va(kern_va);
	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
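/*
 * Illustrative example (not a fixed configuration): with vabits_actual == 48
 * and tag_lsb == 40, va_mask covers bits [39:0] of the kernel linear VA,
 * bit 47 holds hyp_va_msb, and bits [46:40] can carry a random tag when
 * CONFIG_RANDOMIZE_BASE is enabled.
 */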
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

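	/*
	 * tag_lsb is the lowest bit that is identical for all linear-map
	 * addresses: everything below it is taken verbatim from the kernel VA,
	 * everything above it can hold the tag.
	 */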
	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;

	init_hyp_physvirt_offset();
}

/*
 * The .hyp.reloc ELF section contains a list of kimg positions that
 * contain kimg VAs but are accessed only in the hyp execution context.
 * Convert them to hyp VAs. See gen-hyprel.c for more details.
 */
__init void kvm_apply_hyp_relocations(void)
{
	int32_t *rel;
	int32_t *begin = (int32_t *)__hyp_reloc_begin;
	int32_t *end = (int32_t *)__hyp_reloc_end;

	for (rel = begin; rel < end; ++rel) {
		uintptr_t *ptr, kimg_va;

		/*
		 * Each entry contains a 32-bit relative offset from itself
		 * to a kimg VA position.
		 */
		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);

		/* Read the kimg VA value at the relocation address. */
		kimg_va = *ptr;

		/* Convert to hyp VA and store back to the relocation address. */
		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
	}
}

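/*
 * Generate the n-th instruction of the kern_hyp_va() patching sequence:
 * AND with va_mask, ROR by tag_lsb, ADD of tag_val[11:0], ADD of
 * tag_val[23:12] (shifted), and a final ROR by (64 - tag_lsb) to undo the
 * first rotation. The placeholder sequence lives in the kern_hyp_va macro
 * (see asm/kvm_mmu.h).
 */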
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

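/*
 * For CPUs affected by Spectre-v3a, replace the 4-instruction alternative
 * with a movz/movk/movk sequence that builds the hyp VA of the matching
 * vector slot in x0, followed by a "br x0" into it.
 */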
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 4);

	if (!cpus_have_cap(ARM64_SPECTRE_V3A) ||
	    WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)))
		return;

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}

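/*
 * Patch a 4-instruction alternative with a movz + 3 x movk sequence that
 * loads the 64-bit constant 'val' into the destination register of the
 * first original instruction.
 */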
static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u32 insn, oinsn, rd;

	BUG_ON(nr_inst != 4);

	/* Compute target register */
	oinsn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

	/* movz rd, #(val & 0xffff) */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)val,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 48),
					 48,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);
}

void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}

void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
		       origptr, updptr, nr_inst);
}