// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#define KVM_ARM_SMCCC_STD_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES			\
	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
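
/*
 * At the time of writing (an assumption worth re-checking against the uapi
 * headers), these masks cover one standard bit (TRNG), one standard-hyp bit
 * (PV time) and two vendor-hyp bits (FUNC_FEAT, PTP), so every bitmapped
 * feature defaults to being exposed to the guest.
 */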

static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
	struct system_time_snapshot systime_snapshot;
	u64 cycles = ~0UL;
	u32 feature;

	/*
	 * The system time and counter value must be captured at the
	 * same time to keep consistency and precision.
	 */
	ktime_get_snapshot(&systime_snapshot);

	/*
	 * This is only valid if the current clocksource is the
	 * architected counter, as this is the only one the guest
	 * can see.
	 */
	if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
		return;

	/*
	 * The guest selects one of the two reference counters
	 * (virtual or physical) with the first argument of the SMCCC
	 * call. In case the identifier is not supported, error out.
	 */
	feature = smccc_get_arg1(vcpu);
	switch (feature) {
	case KVM_PTP_VIRT_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
		break;
	case KVM_PTP_PHYS_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
		break;
	default:
		return;
	}

	/*
	 * This relies on the top bit of val[0] never being set for
	 * valid values of system time, because that is *really* far
	 * in the future (about 292 years from 1970, and at that stage
	 * nobody will give a damn about it).
	 */
	val[0] = upper_32_bits(systime_snapshot.real);
	val[1] = lower_32_bits(systime_snapshot.real);
	val[2] = upper_32_bits(cycles);
	val[3] = lower_32_bits(cycles);
}
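
/*
 * For reference, a minimal sketch of the guest side of this call, modelled
 * on the ptp_kvm guest driver (assuming an SMCCC 1.1 conduit; the local
 * variables are hypothetical):
 *
 *	struct arm_smccc_res res;
 *	u64 wall_ns, cycles;
 *
 *	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
 *			     KVM_PTP_VIRT_COUNTER, &res);
 *	if ((int)res.a0 < 0)
 *		return -EOPNOTSUPP;
 *
 *	wall_ns = (res.a0 << 32) | res.a1;
 *	cycles  = (res.a2 << 32) | res.a3;
 *
 * The sign check on the low word of a0 works precisely because of the
 * top-bit reasoning above: SMCCC_RET_NOT_SUPPORTED is negative, while the
 * upper half of any valid system time keeps bit 31 clear.
 */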

static bool kvm_smccc_default_allowed(u32 func_id)
{
	switch (func_id) {
	/*
	 * List of function-ids that are not gated by the bitmapped
	 * feature firmware registers, and are to be allowed for
	 * servicing the call by default.
	 */
	case ARM_SMCCC_VERSION_FUNC_ID:
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		return true;
	default:
		/* PSCI 0.2 and up is in the 0:0x1f range */
		if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
		    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
			return true;

		/*
		 * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
		 * its own function-id base and range
		 */
		if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
			return true;

		return false;
	}
}
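
/*
 * To make the range check above concrete: PSCI_0_2_FN_PSCI_VERSION, for
 * instance, is
 *
 *	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,
 *			   ARM_SMCCC_OWNER_STANDARD, 0)
 *
 * i.e. 0x84000000: owner STANDARD with function number 0, which lands in
 * the 0:0x1f window and is therefore allowed by default.
 */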

static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

	switch (func_id) {
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
				&smccc_feat->std_bmap);
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				&smccc_feat->std_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
				&smccc_feat->vendor_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
				&smccc_feat->vendor_hyp_bmap);
	default:
		return false;
	}
}

#define SMC32_ARCH_RANGE_BEGIN	ARM_SMCCC_VERSION_FUNC_ID
#define SMC32_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_32,		\
						   0, ARM_SMCCC_FUNC_MASK)

#define SMC64_ARCH_RANGE_BEGIN	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_64,		\
						   0, 0)
#define SMC64_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_64,		\
						   0, ARM_SMCCC_FUNC_MASK)
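
/*
 * These evaluate to the Arm Architecture Service ranges: 0x80000000 to
 * 0x8000ffff for SMC32 and 0xc0000000 to 0xc000ffff for SMC64.
 */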

static void init_smccc_filter(struct kvm *kvm)
{
	int r;

	mt_init(&kvm->arch.smccc_filter);

	/*
	 * Prevent userspace from handling any SMCCC calls in the architecture
	 * range, avoiding the risk of misrepresenting Spectre mitigation status
	 * to the guest.
	 */
	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	WARN_ON_ONCE(r);

	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	WARN_ON_ONCE(r);
}

static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	struct kvm_smccc_filter filter;
	u32 start, end;
	int r;

	if (copy_from_user(&filter, uaddr, sizeof(filter)))
		return -EFAULT;

	if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
		return -EINVAL;

	start = filter.base;
	end = start + filter.nr_functions - 1;

	if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm)) {
		r = -EBUSY;
		goto out_unlock;
	}

	r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
			       xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_unlock;

	set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags);

out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return r;
}
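
/*
 * A minimal userspace sketch of installing a filter entry, assuming a VM
 * file descriptor vm_fd (see Documentation/virt/kvm/devices/vm.rst for the
 * authoritative ABI):
 *
 *	struct kvm_smccc_filter filter = {
 *		.base		= ARM_SMCCC_TRNG_VERSION,
 *		.nr_functions	= 1,
 *		.action		= KVM_SMCCC_FILTER_FWD_TO_USER,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VM_SMCCC_CTRL,
 *		.attr	= KVM_ARM_VM_SMCCC_FILTER,
 *		.addr	= (__u64)&filter,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * This must happen before the first KVM_RUN, per the -EBUSY check above,
 * and the range must not overlap anything previously inserted (including
 * the reserved architecture ranges).
 */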

static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
{
	unsigned long idx = func_id;
	void *val;

	if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags))
		return KVM_SMCCC_FILTER_HANDLE;

	/*
	 * But where's the error handling, you say?
	 *
	 * mt_find() returns NULL if no entry was found, and xa_to_value(NULL)
	 * is 0, which just so happens to match KVM_SMCCC_FILTER_HANDLE.
	 */
	val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
	return xa_to_value(val);
}

static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
{
	/*
	 * Intervening actions in the SMCCC filter take precedence over the
	 * pseudo-firmware register bitmaps.
	 */
	u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
	if (action != KVM_SMCCC_FILTER_HANDLE)
		return action;

	if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
	    kvm_smccc_default_allowed(func_id))
		return KVM_SMCCC_FILTER_HANDLE;

	return KVM_SMCCC_FILTER_DENY;
}

static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
{
	u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
	struct kvm_run *run = vcpu->run;
	u64 flags = 0;

	if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
		flags |= KVM_HYPERCALL_EXIT_SMC;

	if (!kvm_vcpu_trap_il_is32bit(vcpu))
		flags |= KVM_HYPERCALL_EXIT_16BIT;

	run->exit_reason = KVM_EXIT_HYPERCALL;
	run->hypercall = (typeof(run->hypercall)) {
		.nr	= func_id,
		.flags	= flags,
	};
}
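
/*
 * Userspace observes the resulting exit as KVM_EXIT_HYPERCALL, with
 * run->hypercall.nr holding the function ID and run->hypercall.flags
 * describing how the call was made: KVM_HYPERCALL_EXIT_SMC distinguishes
 * SMC from HVC, and KVM_HYPERCALL_EXIT_16BIT flags a 16-bit (Thumb)
 * instruction encoding. There is no completion ioctl: the VMM is expected
 * to place the SMCCC return values in the vCPU's GPRs before the next
 * KVM_RUN.
 */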

int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	u32 func_id = smccc_get_function(vcpu);
	u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
	u32 feature;
	u8 action;
	gpa_t gpa;

	action = kvm_smccc_get_action(vcpu, func_id);
	switch (action) {
	case KVM_SMCCC_FILTER_HANDLE:
		break;
	case KVM_SMCCC_FILTER_DENY:
		goto out;
	case KVM_SMCCC_FILTER_FWD_TO_USER:
		kvm_prepare_hypercall_exit(vcpu, func_id);
		return 0;
	default:
		WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
		goto out;
	}

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (arm64_get_spectre_v2_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (arm64_get_spectre_v4_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				/*
				 * SSBS everywhere: Indicate no firmware
				 * support, as the SSBS support will be
				 * indicated to the guest and the default is
				 * safe.
				 *
				 * Otherwise, expose a permanent mitigation
				 * to the guest, and hide SSBS so that the
				 * guest stays protected.
				 */
				if (cpus_have_final_cap(ARM64_SSBS))
					break;
				fallthrough;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_3:
			switch (arm64_get_spectre_bhb_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				     &smccc_feat->std_hyp_bmap))
				val[0] = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val[0] = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != INVALID_GPA)
			val[0] = gpa;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = smccc_feat->vendor_hyp_bmap;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		kvm_ptp_get_time(vcpu, val);
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

out:
	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return 1;
}

static const u64 kvm_arm_fw_reg_ids[] = {
	KVM_REG_ARM_PSCI_VERSION,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
	KVM_REG_ARM_STD_BMAP,
	KVM_REG_ARM_STD_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP,
};

void kvm_arm_init_hypercalls(struct kvm *kvm)
{
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

	smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
	smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
	smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;

	init_smccc_filter(kvm);
}

void kvm_arm_teardown_hypercalls(struct kvm *kvm)
{
	mtree_destroy(&kvm->arch.smccc_filter);
}

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
		if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
			return -EFAULT;
	}

	return 0;
}

#define KVM_REG_FEATURE_LEVEL_MASK	GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection. For WORKAROUND_1, for instance, the encoding
 * is NOT_AVAIL (0) < AVAIL (1) < NOT_REQUIRED (2).
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As for the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is there at
			 * all times.
			 */
			if (cpus_have_final_cap(ARM64_SSBS))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_STD_BMAP:
		val = READ_ONCE(smccc_feat->std_bmap);
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		val = READ_ONCE(smccc_feat->std_hyp_bmap);
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
	int ret = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
	unsigned long *fw_reg_bmap, fw_reg_features;

	switch (reg_id) {
	case KVM_REG_ARM_STD_BMAP:
		fw_reg_bmap = &smccc_feat->std_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->std_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
		break;
	default:
		return -ENOENT;
	}

	/* Check for unsupported bit */
	if (val & ~fw_reg_features)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
		ret = -EBUSY;
		goto out;
	}

	WRITE_ONCE(*fw_reg_bmap, val);
out:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}
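
/*
 * A minimal userspace sketch, assuming a vCPU file descriptor vcpu_fd, of
 * trimming the vendor-hyp bitmap down to FUNC_FEAT only (i.e. hiding PTP
 * from the guest):
 *
 *	__u64 val = 1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT;
 *	struct kvm_one_reg reg = {
 *		.id	= KVM_REG_ARM_VENDOR_HYP_BMAP,
 *		.addr	= (__u64)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * As with the SMCCC filter, this must be done before the VM first runs;
 * afterwards only a write of the current value is accepted (see the
 * -EBUSY check above).
 */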

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (KVM_REG_SIZE(reg->id) != sizeof(val))
		return -ENOENT;
	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_STD_BMAP:
	case KVM_REG_ARM_STD_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
	default:
		return -ENOENT;
	}

	return -EINVAL;
}

int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return 0;
	default:
		return -ENXIO;
	}
}

int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	void __user *uaddr = (void __user *)attr->addr;

	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return kvm_smccc_set_filter(kvm, uaddr);
	default:
		return -ENXIO;
	}
}