// SPDX-License-Identifier: GPL-2.0-only

/* hypercalls: Check the ARM64's pseudo-firmware bitmap register interface.
 *
 * The test validates the basic hypercall functionalities that are exposed
 * via the pseudo-firmware bitmap register. This includes the registers'
 * read/write behavior before and after the VM has started, and whether the
 * hypercalls are properly masked or unmasked to the guest when disabled or
 * enabled from the KVM userspace, respectively.
 */
#include <errno.h>
#include <linux/arm-smccc.h>
#include <asm/kvm.h>
#include <kvm_util.h>

#include "processor.h"

#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0))

/* Last valid bits of the bitmapped firmware registers */
#define KVM_REG_ARM_STD_BMAP_BIT_MAX		0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX	0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX	1

struct kvm_fw_reg_info {
	uint64_t reg;		/* Register definition */
	uint64_t max_feat_bit;	/* Bit that represents the upper limit of the feature-map */
};

#define FW_REG_INFO(r)			\
	{					\
		.reg = r,			\
		.max_feat_bit = r##_BIT_MAX,	\
	}

static const struct kvm_fw_reg_info fw_reg_info[] = {
	FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
	FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
	FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
};

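/*
 * Stages of the test. The guest loops through them in order; the host bumps
 * 'stage' and syncs it back to the guest after each GUEST_SYNC.
 */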
enum test_stage {
	TEST_STAGE_REG_IFACE,
	TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
	TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
	TEST_STAGE_HVC_IFACE_FALSE_INFO,
	TEST_STAGE_END,
};

static int stage = TEST_STAGE_REG_IFACE;

struct test_hvc_info {
	uint32_t func_id;
	uint64_t arg1;
};

#define TEST_HVC_INFO(f, a1)	\
	{			\
		.func_id = f,	\
		.arg1 = a1,	\
	}

static const struct test_hvc_info hvc_info[] = {
	/* KVM_REG_ARM_STD_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),

	/* KVM_REG_ARM_STD_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),

	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
			ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
};

/* Feed false hypercall info to test the KVM behavior */
static const struct test_hvc_info false_hvc_info[] = {
	/* Feature support check against a different family of hypercalls */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
};

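/*
 * Issue each hypercall in the given table from the guest and check the SMCCC
 * return value against the current stage: calls must return
 * SMCCC_RET_NOT_SUPPORTED while the features are disabled (or when probed with
 * mismatched feature arguments), and anything else once they are enabled.
 */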
static void guest_test_hvc(const struct test_hvc_info *hc_info)
{
	unsigned int i;
	struct arm_smccc_res res;
	unsigned int hvc_info_arr_sz;

	hvc_info_arr_sz =
		hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);

	for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
		memset(&res, 0, sizeof(res));
		smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);

		switch (stage) {
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			__GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
				       "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
				       res.a0, hc_info->func_id, hc_info->arg1, stage);
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			__GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
				       "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
				       res.a0, hc_info->func_id, hc_info->arg1, stage);
			break;
		default:
			GUEST_FAIL("Unexpected stage = %u", stage);
		}
	}
}

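/*
 * Guest entry point: for each stage, run the matching hypercall checks and
 * sync back to the host until the end stage is reached.
 */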
static void guest_code(void)
{
	while (stage != TEST_STAGE_END) {
		switch (stage) {
		case TEST_STAGE_REG_IFACE:
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			guest_test_hvc(hvc_info);
			break;
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			guest_test_hvc(false_hvc_info);
			break;
		default:
			GUEST_FAIL("Unexpected stage = %u", stage);
		}

		GUEST_SYNC(stage);
	}

	GUEST_DONE();
}

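/*
 * Mirrors the layout of the steal-time record that KVM shares with the guest
 * (revision, attributes, stolen time). Only used here to size the memslot
 * backing it.
 */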
struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
#define ST_GPA_BASE		(1 << 30)

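/*
 * Back the PV time hypercalls with memory: add a memslot for the steal-time
 * record and point the vCPU's PVTIME IPA attribute at it.
 */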
static void steal_time_init(struct kvm_vcpu *vcpu)
{
	uint64_t st_ipa = (ulong)ST_GPA_BASE;
	unsigned int gpages;

	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
	vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);

	vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
			     KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
}

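/*
 * Before the VM runs, the bitmap registers must expose every supported
 * feature, accept writes that clear features, and reject attempts to set
 * unsupported bits with EINVAL.
 */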
static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/* First 'read' should be an upper limit of the features supported */
		vcpu_get_reg(vcpu, reg_info->reg, &val);
		TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
			"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
			reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);

		/* Test a 'write' by disabling all the features of the register map */
		ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
		TEST_ASSERT(ret == 0,
			"Failed to clear all the features of reg: 0x%lx; errno: %d\n",
			reg_info->reg, errno);

		vcpu_get_reg(vcpu, reg_info->reg, &val);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);

		/*
		 * Test enabling a feature that's not supported.
		 * Avoid this check if all the bits are occupied.
		 */
		if (reg_info->max_feat_bit < 63) {
			ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
				errno, reg_info->reg);
		}
	}
}

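/*
 * Once the VM has run, the registers must retain the cleared state and any
 * further write attempt must fail with EBUSY.
 */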
static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/*
		 * Before starting the VM, the test clears all the bits.
		 * Check if that's still the case.
		 */
		vcpu_get_reg(vcpu, reg_info->reg, &val);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx\n",
			reg_info->reg);

		/*
		 * Since the VM has run at least once, KVM shouldn't allow modification of
		 * the registers and should return EBUSY. Set the registers and check for
		 * the expected errno.
		 */
		ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
		TEST_ASSERT(ret != 0 && errno == EBUSY,
			"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
			errno, reg_info->reg);
	}
}

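/* Create a single-vCPU VM running guest_code, with the steal-time region set up */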
static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
{
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);

	steal_time_init(*vcpu);

	return vm;
}

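/*
 * Advance to the next stage on each GUEST_SYNC and perform the host-side work
 * for the stage that just completed. The VM is recreated after the
 * feature-disabled stage so that all the features come back enabled by default.
 */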
static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
{
	int prev_stage = stage;

	pr_debug("Stage: %d\n", prev_stage);

	/* Sync the stage early, the VM might be freed below. */
	stage++;
	sync_global_to_guest(*vm, stage);

	switch (prev_stage) {
	case TEST_STAGE_REG_IFACE:
		test_fw_regs_after_vm_start(*vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		/* Start a new VM so that all the features are now enabled by default */
		kvm_vm_free(*vm);
		*vm = test_vm_create(vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
	case TEST_STAGE_HVC_IFACE_FALSE_INFO:
		break;
	default:
		TEST_FAIL("Unknown test stage: %d\n", prev_stage);
	}
}

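/* Main loop: run the guest and handle its ucalls until it signals UCALL_DONE */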
static void test_run(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	bool guest_done = false;

	vm = test_vm_create(&vcpu);

	test_fw_regs_before_vm_start(vcpu);

	while (!guest_done) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			test_guest_stage(&vm, &vcpu);
			break;
		case UCALL_DONE:
			guest_done = true;
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		default:
			TEST_FAIL("Unexpected guest exit\n");
		}
	}

	kvm_vm_free(vm);
}

int main(void)
{
	test_run();
	return 0;
}