xref: /kernel/linux/linux-5.10/arch/x86/xen/smp.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

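/*
 * Per-CPU bookkeeping for the IPIs shared by PV and PVHVM guests: each
 * entry records the Linux irq number returned by the bind operation and
 * the kasprintf()-allocated name passed to it.  An irq of -1 marks the
 * slot as unbound so teardown can run safely at any point.
 */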
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

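/*
 * Tear down every IPI binding that xen_smp_intr_init() may have set up
 * for @cpu.  kfree(NULL) is a no-op and unbound slots stay at -1, so
 * this is safe to call for a partially initialised CPU, which is
 * exactly what the error path in xen_smp_intr_init() relies on.
 */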
void xen_smp_intr_free(unsigned int cpu)
{
	kfree(per_cpu(xen_resched_irq, cpu).name);
	per_cpu(xen_resched_irq, cpu).name = NULL;
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfunc_irq, cpu).name);
	per_cpu(xen_callfunc_irq, cpu).name = NULL;
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_debug_irq, cpu).name);
	per_cpu(xen_debug_irq, cpu).name = NULL;
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
	}
}

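/*
 * Bind the IPIs every vCPU needs: reschedule, call-function and
 * call-function-single, plus the VIRQ_DEBUG virq when the 2-level
 * event channel ABI is in use (xen_debug_interrupt() dumps 2-level
 * state, so it has nothing to report under the FIFO ABI).  Any
 * failure unwinds the bindings made so far via xen_smp_intr_free().
 */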
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	per_cpu(xen_resched_irq, cpu).name = resched_name;
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;

	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		per_cpu(xen_debug_irq, cpu).name = debug_name;
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

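/*
 * Late SMP bring-up fixup.  Without vcpu_info placement a CPU can only
 * use the vcpu_info slots embedded in the legacy shared_info page, and
 * there are just MAX_VIRT_CPUS of those, so every online CPU with a
 * Xen vcpu id beyond that limit has to be taken down again.
 */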
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	int cpu, rc, count = 0;

	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();

	if (xen_have_vcpu_info_placement)
		return;

	for_each_online_cpu(cpu) {
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		rc = remove_cpu(cpu);

		if (rc == 0) {
			/*
			 * Reset vcpu_info so this cpu cannot be onlined again.
			 */
			xen_vcpu_info_reset(cpu);
			count++;
		} else {
			pr_warn("%s: failed to bring CPU %d down, error %d\n",
				__func__, cpu, rc);
		}
	}
	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

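/* Kick a single vCPU with the reschedule IPI. */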
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

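/*
 * Deliver an already-mapped Xen IPI to every online CPU in @mask, one
 * event channel notification per CPU.
 */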
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

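/*
 * Send the call-function IPI and, if any targeted vCPU is currently
 * preempted by the hypervisor (its time is being "stolen"), yield this
 * vCPU once so the target gets a chance to run and service the call
 * sooner.
 */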
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

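/* Single-CPU variant of the call-function IPI. */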
void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

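/*
 * Translate a native x86 IPI vector number into the corresponding
 * xen_ipi_vector, or return -1 (after logging an error) for vectors
 * that have no Xen equivalent.
 */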
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		pr_err("xen: vector 0x%x is not implemented\n", vector);
	}

	return xen_vector;
}

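/*
 * Generic IPI entry points used by the smp_ops layer.  Each maps the
 * native vector first and silently drops the request if no Xen
 * equivalent exists.  For example, a hypothetical caller kicking CPU 1
 * would go through:
 *
 *	xen_send_IPI_mask(cpumask_of(1), RESCHEDULE_VECTOR);
 *	  -> xen_map_vector() == XEN_RESCHEDULE_VECTOR
 *	  -> __xen_send_IPI_mask() -> xen_send_IPI_one(1, ...)
 */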
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

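/*
 * IPI everyone in @mask except the sending CPU.  A single online CPU
 * means there is nobody else to signal, so bail out early.
 */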
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (num_online_cpus() <= 1 || xen_vector < 0)
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

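/*
 * Handlers for the call-function IPIs: run the generic SMP cross-call
 * machinery inside an irq_enter()/irq_exit() section and account the
 * interrupt in irq_call_count.
 */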
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}