/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. Otherwise, if local irqs are disabled
 * around the loop, we can deadlock: retry() would keep comparing the latest
 * value of the mems_allowed seqcount against 0, because begin() would still
 * see cpusets_enabled() as false. The enabled -> disabled transition must
 * happen in reverse order for the same reason (we want retry() to stop
 * looking at the real value of mems_allowed.sequence first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
    return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
    /*
     * begin() must start returning real sequence numbers before retry()
     * starts checking them, so flip the pre_enable key first.
     */
    static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
    static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
    /*
     * Reverse order: retry() stops checking the seqcount before begin()
     * goes back to returning 0.
     */
    static_branch_dec_cpuslocked(&cpusets_enabled_key);
    static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool _cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
    if (cpusets_enabled()) {
        return _cpuset_node_allowed(node, gfp_mask);
    }
    return true;
}

/* Callers must have already checked cpusets_enabled(). */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    return _cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    if (cpusets_enabled()) {
        return __cpuset_zone_allowed(z, gfp_mask);
    }
    return true;
}
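
/*
 * A minimal usage sketch (not part of this header, and not a verbatim
 * copy of any in-tree caller): an allocator-style loop can use the
 * fast-path wrapper above to skip zones the current task's cpuset does
 * not permit. for_each_zone() and populated_zone() are the stock
 * iteration helpers from <linux/mmzone.h>.
 *
 *    struct zone *zone;
 *
 *    for_each_zone(zone) {
 *        if (!populated_zone(zone))
 *            continue;
 *        if (!cpuset_zone_allowed(zone, GFP_KERNEL))
 *            continue;
 *        // zone is an eligible allocation target
 *    }
 */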

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

/*
 * Bump this cpuset's memory pressure meter. The global enable flag is
 * tested inline so that the disabled case costs a single load and branch
 * rather than a function call.
 */
#define cpuset_memory_pressure_bump()                \
    do {                                             \
        if (cpuset_memory_pressure_enabled)          \
            _cpuset_memory_pressure_bump();          \
    } while (0)
extern int cpuset_memory_pressure_enabled;
extern void _cpuset_memory_pressure_bump(void);
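
/*
 * Usage note (a sketch, not a verbatim caller): because the macro reduces
 * to one flag test when the feature is off, it is cheap enough to sit on
 * an allocation slow path:
 *
 *    if (!page)                           // had to work hard for memory
 *        cpuset_memory_pressure_bump();
 */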

extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
    return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
    return task_spread_slab(current);
}
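
/*
 * A minimal sketch (modelled loosely on page cache allocation, not a
 * verbatim copy of any caller) of how the spread predicates above pair
 * with cpuset_mem_spread_node(): when page spreading is enabled for the
 * current task, allocate on the rotor-selected node instead of locally.
 *
 *    struct page *page;
 *
 *    if (cpuset_do_page_mem_spread()) {
 *        int nid = cpuset_mem_spread_node();
 *
 *        page = __alloc_pages_node(nid, gfp, 0);
 *    } else {
 *        page = alloc_pages(gfp, 0);
 *    }
 */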

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
    if (!static_branch_unlikely(&cpusets_pre_enable_key)) {
        return 0;
    }

    return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
    if (!static_branch_unlikely(&cpusets_enabled_key)) {
        return false;
    }

    return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
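
/*
 * The canonical pairing of the two helpers above (a sketch in the style
 * of the page allocator's retry loop; try_to_allocate() is a hypothetical
 * stand-in for the actual allocation attempt): snapshot the seqcount,
 * attempt the operation, and retry only if a failure may have been caused
 * by a concurrent mems_allowed update.
 *
 *    struct page *page;
 *    unsigned int cpuset_mems_cookie;
 *
 *    do {
 *        cpuset_mems_cookie = read_mems_allowed_begin();
 *        page = try_to_allocate(gfp, order);  // hypothetical helper
 *    } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */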

static inline void set_mems_allowed(nodemask_t nodemask)
{
    unsigned long flags;

    /*
     * task_lock() serializes this update against other writers of
     * current->mems_allowed; irqs are disabled so that a reader running
     * in interrupt context on this CPU cannot spin forever on an open
     * write seqcount section.
     */
    task_lock(current);
    local_irq_save(flags);
    write_seqcount_begin(&current->mems_allowed_seq);
    current->mems_allowed = nodemask;
    write_seqcount_end(&current->mems_allowed_seq);
    local_irq_restore(flags);
    task_unlock(current);
}
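
/*
 * Usage note (a sketch): kernel threads that should be free to allocate
 * from any node with memory typically initialize themselves with the
 * current set of memory nodes:
 *
 *    set_mems_allowed(node_states[N_MEMORY]);
 */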

extern void cpuset_hotplug_workfn(struct work_struct *work);

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void)
{
    return false;
}

static inline int cpuset_init(void)
{
    return 0;
}

static inline void cpuset_init_smp(void)
{
}

static inline void cpuset_force_rebuild(void)
{
}

static inline void cpuset_update_active_cpus(void)
{
    partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void)
{
}

static inline void cpuset_read_lock(void)
{
}

static inline void cpuset_read_unlock(void)
{
}

static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask)
{
    cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
    return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])

static inline void cpuset_init_current_mems_allowed(void)
{
}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
    return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
    return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
    return 1;
}

static inline void cpuset_memory_pressure_bump(void)
{
}

static inline void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
    return 0;
}

static inline int cpuset_slab_spread_node(void)
{
    return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
    return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
    return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
    return false;
}

static inline void rebuild_sched_domains(void)
{
    partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
    return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
    return false;
}

static inline void cpuset_hotplug_workfn(struct work_struct *work)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */