// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

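	/*
	 * All kernel linear map addresses agree on bits [63:tag_lsb];
	 * only bits [tag_lsb - 1:0] vary across the linear map, and
	 * va_mask preserves exactly those.
	 */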
	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
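	/*
	 * Shift the tag down to bit 0 so that compute_instruction() can
	 * split it into the two 12-bit ADD immediates.
	 */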
	tag_val >>= tag_lsb;
}

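/*
 * Generate the n-th instruction of the kern_hyp_va() patching sequence:
 *
 *   0: and  rd, rn, #va_mask                  - keep the linear map bits
 *   1: ror  rd, rn, #tag_lsb                  - rotate the tag field to bit 0
 *   2: add  rd, rn, #(tag_val & 0xfff)        - insert the low 12 tag bits
 *   3: add  rd, rn, #(tag_val >> 12), lsl #12 - insert the upper tag bits
 *   4: ror  rd, rn, #(64 - tag_lsb)           - rotate everything back
 */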
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
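		/* AND with va_mask: keep only the linear map offset bits */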
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
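		/* ADD the low 12 bits of the (pre-shifted) tag */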
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
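		/* ADD bits [23:12] of the tag (emitted as an ADD ..., LSL #12) */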
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

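/*
 * Hyp VA of the hardened vectors mapping and the vector slot reserved
 * in it when ARM64_HARDEN_EL2_VECTORS is enabled; both are consumed
 * when selecting the EL2 vectors (see kvm_get_hyp_vector()).
 */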
void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

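	/*
	 * Only !VHE with ARM64_HARDEN_EL2_VECTORS needs this branch
	 * patched in; in all other cases the original instructions are
	 * left untouched. Seeing the capability together with VHE would
	 * be unexpected, hence the WARN.
	 */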
	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

	/*
	 * Use PC[10:7] to branch to the same vector in KVM: each vector
	 * entry is 0x80 bytes, so these four bits select one of the 16
	 * entries of __kvm_hyp_vector.
	 */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}