1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Author: Huacai Chen <chenhuacai@loongson.cn>
4 * Copyright (C) 2020 Loongson Technology Corporation Limited
5 */
6#ifndef __ASM_SMP_H
7#define __ASM_SMP_H
8
9#include <linux/atomic.h>
10#include <linux/bitops.h>
11#include <linux/linkage.h>
12#include <linux/smp.h>
13#include <linux/threads.h>
14#include <linux/cpumask.h>
15
16#ifdef CONFIG_SMP
17
18struct task_struct;
19
/*
 * Table of platform SMP operations.  Platform code fills one of these in
 * and installs it with register_smp_ops(); the generic SMP code then
 * dispatches through the global mp_ops pointer (see the inline wrappers
 * below).
 */
struct plat_smp_ops {
	/* Deliver the IPI bits in @action to one CPU. */
	void (*send_ipi_single)(int cpu, unsigned int action);
	/* Deliver the IPI bits in @action to every CPU in @mask. */
	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
	/* Early SMP setup, reached via plat_smp_setup(). */
	void (*smp_setup)(void);
	/* Prepare to bring up at most @max_cpus secondary CPUs. */
	void (*prepare_cpus)(unsigned int max_cpus);
	/* Start secondary @cpu; @idle is its initial task. */
	int (*boot_secondary)(int cpu, struct task_struct *idle);
	/* Per-CPU initialisation run on a secondary CPU. */
	void (*init_secondary)(void);
	/* Final step of SMP bringup. */
	void (*smp_finish)(void);
#ifdef CONFIG_HOTPLUG_CPU
	/* Take the calling CPU out of service (CPU hotplug). */
	int (*cpu_disable)(void);
	/* Platform handling after @cpu has gone offline. */
	void (*cpu_die)(unsigned int cpu);
#endif
};
33
/* Ops table used by the inline dispatch wrappers in this header. */
extern struct plat_smp_ops *mp_ops;
void register_smp_ops(const struct plat_smp_ops *ops);

/* Hand early SMP setup off to the platform's smp_setup hook. */
static inline void plat_smp_setup(void)
{
	mp_ops->smp_setup();
}
41
42#else /* !CONFIG_SMP */
43
struct plat_smp_ops;	/* opaque here; only referenced by pointer */

/* UP build: there is no platform SMP setup to perform. */
static inline void plat_smp_setup(void)
{
	/* UP, nothing to do ...  */
}

/* UP build: no-op stub matching the SMP declaration above. */
static inline void register_smp_ops(const struct plat_smp_ops *ops)
{
}
54
55#endif /* !CONFIG_SMP */
56
/*
 * SMP topology/bookkeeping state; the definitions live in the arch SMP
 * setup code.  cpu_foreign_map[] is rebuilt by calculate_cpu_foreign_map()
 * (declared below).
 */
extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
63
/*
 * raw_smp_processor_id() - current CPU number, read from thread_info.
 * In VDSO (userspace) builds there is no valid "current CPU", so any
 * call is turned into a compile-time error instead.
 */
static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
	extern int vdso_smp_processor_id(void)
		__compiletime_error("VDSO should not call smp_processor_id()");
	return vdso_smp_processor_id();
#else
	return current_thread_info()->cpu;
#endif
}
/* Mark the override so generic code uses our definition. */
#define raw_smp_processor_id raw_smp_processor_id
75
/* Map from cpu id to sequential logical cpu number.  This map differs
   from the identity only when some cpus failed to come online.  */
extern int __cpu_number_map[NR_CPUS];
#define cpu_number_map(cpu)  __cpu_number_map[cpu]

/* The reverse map from sequential logical cpu number to cpu id.  */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]

/* A logical CPU's physical id is its entry in the logical map. */
#define cpu_physical_id(cpu)	cpu_logical_map(cpu)

/* IPI action bit values, as passed to the send_ipi_* ops. */
#define SMP_BOOT_CPU		0x1
#define SMP_RESCHEDULE		0x2
#define SMP_CALL_FUNCTION	0x4
90
/*
 * Startup parameters for a secondary CPU: its initial stack and
 * thread_info values.  NOTE(review): presumably consumed by the
 * smpboot_entry() boot path — confirm against the arch boot code.
 */
struct secondary_data {
	unsigned long stack;
	unsigned long thread_info;
};
extern struct secondary_data cpuboot_data;
96
/* Entry points for a freshly booted secondary CPU (defined elsewhere). */
extern asmlinkage void smpboot_entry(void);
extern asmlinkage void start_secondary(void);

/* Maintain cpu_sibling_map[] / cpu_core_map[] entries for @cpu. */
extern void set_cpu_sibling_map(int cpu);
extern void clear_cpu_sibling_map(int cpu);
/* Recompute cpu_foreign_map[] after a topology change. */
extern void calculate_cpu_foreign_map(void);

/*
 * Generate IPI list text
 */
extern void show_ipi_list(struct seq_file *p, int prec);
108
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static inline void smp_send_reschedule(int cpu)
{
	/* Dispatch SMP_RESCHEDULE through the platform ops. */
	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE);
}
118
#ifdef CONFIG_HOTPLUG_CPU
/* CPU-hotplug hook: ask the platform to disable the calling CPU. */
static inline int __cpu_disable(void)
{
	return mp_ops->cpu_disable();
}

/* CPU-hotplug hook: platform handling after @cpu has gone down. */
static inline void __cpu_die(unsigned int cpu)
{
	mp_ops->cpu_die(cpu);
}
#endif
130
/* Send a SMP_CALL_FUNCTION IPI to a single CPU. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
}

/* Send a SMP_CALL_FUNCTION IPI to every CPU in @mask. */
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
140
141#endif /* __ASM_SMP_H */
142