xref: /kernel/linux/linux-5.10/arch/arm64/kvm/pvtime.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2019 Arm Ltd.
3
4#include <linux/arm-smccc.h>
5#include <linux/kvm_host.h>
6#include <linux/sched/stat.h>
7
8#include <asm/kvm_mmu.h>
9#include <asm/pvclock-abi.h>
10
11#include <kvm/arm_hypercalls.h>
12
/*
 * Fold newly accumulated steal time into the guest-visible stolen_time
 * record, called whenever the vCPU is about to run again.
 *
 * The delta is computed from current->sched_info.run_delay (time this
 * task spent waiting on a runqueue) between the last update and now,
 * and added to the little-endian 64-bit counter the guest published at
 * steal.base (see struct pvclock_vcpu_stolen_time).
 */
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	u64 last_steal = vcpu->arch.steal.last_steal;
	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	u64 steal = 0;
	int idx;

	/* Guest has not enabled stolen-time reporting; nothing to do. */
	if (base == GPA_INVALID)
		return;

	/* SRCU protects the memslot lookups inside the guest accessors. */
	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_get_guest(kvm, base + offset, steal)) {
		/* Record is little-endian per the PV-time ABI. */
		steal = le64_to_cpu(steal);
		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
		/* Add only the delta accrued since the previous update. */
		steal += vcpu->arch.steal.last_steal - last_steal;
		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
	}
	srcu_read_unlock(&kvm->srcu, idx);
}
34
35long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
36{
37	u32 feature = smccc_get_arg1(vcpu);
38	long val = SMCCC_RET_NOT_SUPPORTED;
39
40	switch (feature) {
41	case ARM_SMCCC_HV_PV_TIME_FEATURES:
42	case ARM_SMCCC_HV_PV_TIME_ST:
43		if (vcpu->arch.steal.base != GPA_INVALID)
44			val = SMCCC_RET_SUCCESS;
45		break;
46	}
47
48	return val;
49}
50
51gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
52{
53	struct pvclock_vcpu_stolen_time init_values = {};
54	struct kvm *kvm = vcpu->kvm;
55	u64 base = vcpu->arch.steal.base;
56	int idx;
57
58	if (base == GPA_INVALID)
59		return base;
60
61	/*
62	 * Start counting stolen time from the time the guest requests
63	 * the feature enabled.
64	 */
65	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
66
67	idx = srcu_read_lock(&kvm->srcu);
68	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
69	srcu_read_unlock(&kvm->srcu, idx);
70
71	return base;
72}
73
74bool kvm_arm_pvtime_supported(void)
75{
76	return !!sched_info_on();
77}
78
79int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
80			    struct kvm_device_attr *attr)
81{
82	u64 __user *user = (u64 __user *)attr->addr;
83	struct kvm *kvm = vcpu->kvm;
84	u64 ipa;
85	int ret = 0;
86	int idx;
87
88	if (!kvm_arm_pvtime_supported() ||
89	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
90		return -ENXIO;
91
92	if (get_user(ipa, user))
93		return -EFAULT;
94	if (!IS_ALIGNED(ipa, 64))
95		return -EINVAL;
96	if (vcpu->arch.steal.base != GPA_INVALID)
97		return -EEXIST;
98
99	/* Check the address is in a valid memslot */
100	idx = srcu_read_lock(&kvm->srcu);
101	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
102		ret = -EINVAL;
103	srcu_read_unlock(&kvm->srcu, idx);
104
105	if (!ret)
106		vcpu->arch.steal.base = ipa;
107
108	return ret;
109}
110
111int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
112			    struct kvm_device_attr *attr)
113{
114	u64 __user *user = (u64 __user *)attr->addr;
115	u64 ipa;
116
117	if (!kvm_arm_pvtime_supported() ||
118	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
119		return -ENXIO;
120
121	ipa = vcpu->arch.steal.base;
122
123	if (put_user(ipa, user))
124		return -EFAULT;
125	return 0;
126}
127
128int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
129			    struct kvm_device_attr *attr)
130{
131	switch (attr->attr) {
132	case KVM_ARM_VCPU_PVTIME_IPA:
133		if (kvm_arm_pvtime_supported())
134			return 0;
135	}
136	return -ENXIO;
137}
138