xref: /kernel/linux/linux-5.10/drivers/hv/hv.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}
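
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * are expected to invoke hv_init() exactly once, before any other
 * routine in this file, and to propagate its error code. The function
 * name example_hv_early_init is hypothetical.
 */
static int example_hv_early_init(void)
{
	int ret;

	/* Allocates hv_context.cpu_context; fails only with -ENOMEM. */
	ret = hv_init();
	if (ret)
		return ret;

	/* Per-cpu contexts now exist; hv_synic_alloc() may run next. */
	return 0;
}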

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	/*
	 * Preemption must remain disabled until after the hypercall
	 * so some other thread can't get scheduled onto this cpu and
	 * corrupt the per-cpu post_msg_page.
	 */
	put_cpu_ptr(hv_cpu);

	return status & 0xFFFF;
}
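
/*
 * Caller sketch (illustrative): posting a payload on the VMBus message
 * connection. This mirrors the shape of vmbus_post_msg(), minus its
 * retry loop; the message type value 1 follows VMBus usage, but
 * example_post_on_vmbus and the -EAGAIN mapping are hypothetical.
 */
static int example_post_on_vmbus(void *buf, size_t len)
{
	union hv_connection_id conn_id;
	int status;

	conn_id.asu32 = 0;
	conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;

	/* Returns the low 16 bits of the hypercall status; 0 is success. */
	status = hv_post_message(conn_id, 1, buf, len);
	return (status == HV_STATUS_SUCCESS) ? 0 : -EAGAIN;
}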

int hv_synic_alloc(void)
{
	int cpu;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and clean up properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		hv_cpu->synic_message_page =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_message_page == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_event_page == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->post_msg_page == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}
	}

	return 0;
err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free().
	 */
	return -ENOMEM;
}


void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
		free_page((unsigned long)hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
}
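
/*
 * Pairing sketch (illustrative): because hv_synic_alloc() zeroes every
 * per-cpu area before allocating anything, and free_page()/kfree()
 * tolerate 0/NULL, a caller may invoke hv_synic_free() unconditionally
 * on failure. example_bus_init is a hypothetical caller name.
 */
static int example_bus_init(void)
{
	int ret;

	ret = hv_synic_alloc();
	if (ret) {
		/* Frees only the pages that were actually allocated. */
		hv_synic_free();
		return ret;
	}

	return 0;
}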

/*
 * hv_synic_enable_regs - Program a CPU's SynIC registers.
 *
 * Points the SynIC message and event pages at the per-cpu pages
 * allocated in hv_synic_alloc(), unmasks the shared SINT, and sets
 * the global SynIC enable bit. Called from hv_synic_init() when a
 * CPU comes online.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	/* Set up the SynIC message page */
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> HV_HYP_PAGE_SHIFT;

	hv_set_simp(simp.as_uint64);

	/* Set up the SynIC event page */
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> HV_HYP_PAGE_SHIFT;

	hv_set_siefp(siefp.as_uint64);

	/* Set up the shared SINT */
	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.vector = hv_get_vector();
	shared_sint.masked = false;
	shared_sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global SynIC bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 1;

	hv_set_synic_state(sctrl.as_uint64);
}
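
/*
 * Pattern sketch (illustrative): each register update above is the
 * same read-modify-write sequence, distilled here for the SIMP
 * register. 'page' is assumed to be one of the pages obtained in
 * hv_synic_alloc(); example_point_simp_at is hypothetical.
 */
static void example_point_simp_at(void *page)
{
	union hv_synic_simp simp;

	/* Read the current register value... */
	hv_get_simp(simp.as_uint64);
	/* ...touch only the fields this driver owns... */
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(page) >> HV_HYP_PAGE_SHIFT;
	/* ...and write the whole 64-bit value back. */
	hv_set_simp(simp.as_uint64);
}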

int hv_synic_init(unsigned int cpu)
{
	hv_synic_enable_regs(cpu);

	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}

/*
 * hv_synic_disable_regs - Mask the shared SINT and disable a CPU's
 * SynIC message page, event page, and global enable bit; the inverse
 * of hv_synic_enable_regs(). Called from hv_synic_cleanup().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;

	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* TODO: clean up correctly in the SMP case. */
	/* Disable the interrupt */
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	hv_set_simp(simp.as_uint64);

	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	hv_set_siefp(siefp.as_uint64);

	/* Disable the global SynIC bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 0;
	hv_set_synic_state(sctrl.as_uint64);
}

int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU &&
	    vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	/*
	 * Search for channels that are bound to the CPU we're about to
	 * clean up. If we find one and vmbus is still connected, we
	 * fail; this effectively prevents the CPU from being offlined.
	 *
	 * TODO: Re-bind the channels to different CPUs.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	hv_stimer_legacy_cleanup(cpu);

	hv_synic_disable_regs(cpu);

	return 0;
}
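
/*
 * Hotplug wiring sketch (illustrative): hv_synic_init() and
 * hv_synic_cleanup() have the int (*)(unsigned int cpu) shape of CPU
 * hotplug callbacks, and the VMBus driver registers them roughly as
 * below (simplified from vmbus_bus_init(); needs <linux/cpuhotplug.h>).
 * An -EBUSY return from hv_synic_cleanup() is what vetoes offlining a
 * CPU that still hosts channels. example_register_synic_hotplug is a
 * hypothetical name.
 */
static int example_register_synic_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	/* A dynamic state returns a positive state id on success. */
	return (ret < 0) ? ret : 0;
}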
289