// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"

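/*
 * GFN range to flush, passed as opaque data to the Hyper-V flush-list
 * fill callback.
 */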
struct kvm_hv_tlb_range {
	u64 start_gfn;
	u64 pages;
};

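/*
 * Callback invoked while building the Hyper-V flush hypercall input: add
 * the requested GFN range to the guest mapping flush list.
 */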
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_hv_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}

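/*
 * Flush a single root via Hyper-V: use the ranged hypercall
 * (HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST) when a GFN range is given,
 * else flush the entire address space
 * (HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE).
 */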
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_hv_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}

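/*
 * Flush remote TLBs for all vCPUs.  When all vCPUs are known to share a
 * single root, flush just that root; otherwise flush every unique valid
 * root and (re)detect whether the vCPUs have converged on a common root
 * so that future flushes can take the single-root fast path.
 */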
static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
					struct kvm_hv_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;

	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low-effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}

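/*
 * Flush remote TLB entries for a range of guest physical pages.  Vendor
 * code wires this into kvm_x86_ops.flush_remote_tlbs_range when running
 * on Hyper-V.
 */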
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
{
	struct kvm_hv_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = nr_pages,
	};

	return __hv_flush_remote_tlbs_range(kvm, &range);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);

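/*
 * Flush all remote TLB entries, i.e. the entire guest physical address
 * space; the kvm_x86_ops.flush_remote_tlbs counterpart of the ranged
 * flush above.
 */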
int hv_flush_remote_tlbs(struct kvm *kvm)
{
	return __hv_flush_remote_tlbs_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);

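/*
 * Track the vCPU's current TDP (EPT/NPT) root for use by the Hyper-V
 * remote flush optimization.  If the new root diverges from the VM-wide
 * common root, invalidate the latter so that the next remote flush walks
 * all vCPUs instead of taking the single-root fast path.
 */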
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);