xref: /kernel/linux/linux-5.10/arch/x86/hyperv/nested.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V nested virtualization code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
 */
#define pr_fmt(fmt)  "Hyper-V: " fmt


#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>

#include <asm/trace/hyperv.h>

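/**
 * hyperv_flush_guest_mapping() - Ask the underlying Hyper-V hypervisor to
 *				  flush all mappings of a guest physical
 *				  address space.
 * @as:	Address space identifier (the root of the guest physical address
 *	space, e.g. the EPT pointer) as defined by the Hyper-V TLFS.
 *
 * Issues the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall using the
 * per-cpu hypercall input page, with interrupts disabled around the
 * hypercall.
 *
 * Return: 0 on success, -ENOTSUPP if the hypercall infrastructure is
 * unavailable or the hypercall fails.
 */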
int hyperv_flush_guest_mapping(u64 as)
{
	struct hv_guest_mapping_flush **flush_pcpu;
	struct hv_guest_mapping_flush *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;

	if (!hv_hypercall_pg)
		goto fault;

	local_irq_save(flags);

	flush_pcpu = (struct hv_guest_mapping_flush **)
		this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
				 flush, NULL);
	local_irq_restore(flags);

	if (!(status & HV_HYPERCALL_RESULT_MASK))
		ret = 0;

fault:
	trace_hyperv_nested_flush_guest_mapping(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
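
/*
 * Example (hypothetical caller): a nested hypervisor such as KVM would
 * typically invoke this after modifying a guest's second-level page
 * tables, passing the root of that guest's physical address space, e.g.:
 *
 *	if (hyperv_flush_guest_mapping(eptp))
 *		(fall back to a non-enlightened flush)
 *
 * where "eptp" is an illustrative name for the address space root; it is
 * not defined in this file.
 */
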
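/**
 * hyperv_fill_flush_guest_mapping_list() - Fill a flush list with one
 *					    contiguous GPA range.
 * @flush:	Hypercall input page whose gpa_list is filled, starting at
 *		entry 0.
 * @start_gfn:	First guest frame number of the range.
 * @pages:	Number of pages to flush, starting at @start_gfn.
 *
 * Splits the range into gpa_list entries of at most HV_MAX_FLUSH_PAGES
 * pages each.
 *
 * Return: the number of gpa_list entries used, or -ENOSPC if the range
 * needs more than HV_MAX_FLUSH_REP_COUNT entries.
 */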
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 pages)
{
	u64 cur = start_gfn;
	u64 additional_pages;
	int gpa_n = 0;

	do {
		/*
		 * If the request needs more than the maximum number of
		 * flush entries, have the caller fall back to flushing
		 * the TLB without a range.
		 */
		if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
			return -ENOSPC;

		additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;

		flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
		flush->gpa_list[gpa_n].page.largepage = false;
		flush->gpa_list[gpa_n].page.basepfn = cur;

		pages -= additional_pages + 1;
		cur += additional_pages + 1;
		gpa_n++;
	} while (pages > 0);

	return gpa_n;
}
EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
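
/*
 * Worked example (illustrative; assumes HV_MAX_FLUSH_PAGES == 2048, i.e.
 * one gpa_list entry can describe at most 2048 pages): filling a list for
 * 5000 pages starting at GFN 0x1000 produces three entries
 *
 *	gpa_list[0]: basepfn 0x1000, additional_pages 2047  (2048 pages)
 *	gpa_list[1]: basepfn 0x1800, additional_pages 2047  (2048 pages)
 *	gpa_list[2]: basepfn 0x2000, additional_pages  903  ( 904 pages)
 *
 * and the function returns 3.
 */
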
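/**
 * hyperv_flush_guest_mapping_range() - Ask the underlying Hyper-V
 *					hypervisor to flush a list of guest
 *					physical address ranges.
 * @as:			Address space identifier, as for
 *			hyperv_flush_guest_mapping().
 * @fill_flush_list_func: Callback that fills the gpa_list of the hypercall
 *			input page and returns the number of entries used,
 *			or a negative value on error.
 * @data:		Opaque pointer passed through to @fill_flush_list_func.
 *
 * Issues the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST rep hypercall using
 * the per-cpu hypercall input page, with interrupts disabled around the
 * hypercall.
 *
 * Return: 0 on success, -ENOTSUPP if the hypercall infrastructure or the
 * callback is unavailable or the callback fails, otherwise the nonzero
 * hypercall status.
 */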
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_flush_list_func, void *data)
{
	struct hv_guest_mapping_flush_list **flush_pcpu;
	struct hv_guest_mapping_flush_list *flush;
	u64 status = 0;
	unsigned long flags;
	int ret = -ENOTSUPP;
	int gpa_n = 0;

	if (!hv_hypercall_pg || !fill_flush_list_func)
		goto fault;

	local_irq_save(flags);

	flush_pcpu = (struct hv_guest_mapping_flush_list **)
		this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	gpa_n = fill_flush_list_func(flush, data);
	if (gpa_n < 0) {
		local_irq_restore(flags);
		goto fault;
	}

	status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
				     gpa_n, 0, flush, NULL);

	local_irq_restore(flags);

	if (!(status & HV_HYPERCALL_RESULT_MASK))
		ret = 0;
	else
		ret = status;
fault:
	trace_hyperv_nested_flush_guest_mapping_range(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
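
/*
 * Example (hypothetical caller): a nested hypervisor supplies a callback
 * that converts its own range representation into gpa_list entries,
 * typically via hyperv_fill_flush_guest_mapping_list(), and passes it
 * here.  "example_fill", "struct example_range", "eptp" and "range" below
 * are illustrative names only:
 *
 *	static int example_fill(struct hv_guest_mapping_flush_list *flush,
 *				void *data)
 *	{
 *		struct example_range *r = data;
 *
 *		return hyperv_fill_flush_guest_mapping_list(flush,
 *				r->start_gfn, r->pages);
 *	}
 *
 *	...
 *	ret = hyperv_flush_guest_mapping_range(eptp, example_fill, &range);
 *
 * A nonzero return here means the ranged flush was not performed and the
 * caller is expected to fall back to a coarser flush (see the comment in
 * hyperv_fill_flush_guest_mapping_list()).
 */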