Lines Matching refs:cpuset
2 * kernel/cpuset.c
27 #include <linux/cpuset.h>
81 struct cpuset {
90 * cpuset.cpus and cpuset.mems, and won't be limited by the
94 * in the cpuset. They may be changed if the configured masks are
129 * - A new cpuset's old_mems_allowed is initialized when some
132 * cpuset.mems_allowed and have tasks' nodemask updated, and
140 * Tasks are being attached to this cpuset. Used to prevent
175 * subparts_cpus. In this case, the cpuset is not a real partition
177 * and the cpuset can be restored back to a partition root if the
178 * parent cpuset can give more CPUs back to this child cpuset.
193 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
195 return css ? container_of(css, struct cpuset, css) : NULL;
198 /* Retrieve the cpuset for a task */
199 static inline struct cpuset *task_cs(struct task_struct *task)
204 static inline struct cpuset *parent_cs(struct cpuset *cs)
209 /* bits in struct cpuset flags field */
222 static inline bool is_cpuset_online(struct cpuset *cs)
227 static inline int is_cpu_exclusive(const struct cpuset *cs)
232 static inline int is_mem_exclusive(const struct cpuset *cs)
237 static inline int is_mem_hardwall(const struct cpuset *cs)
242 static inline int is_sched_load_balance(const struct cpuset *cs)
247 static inline int is_memory_migrate(const struct cpuset *cs)
252 static inline int is_spread_page(const struct cpuset *cs)
257 static inline int is_spread_slab(const struct cpuset *cs)
262 static inline int is_partition_root(const struct cpuset *cs)
267 static struct cpuset top_cpuset = {
273 * cpuset_for_each_child - traverse online children of a cpuset
276 * @parent_cs: target cpuset to walk children of
285 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 288 * @root_cs: target cpuset to walk descendants of
299 * There are two global locks guarding cpuset structures - cpuset_mutex and
301 * task's cpuset pointer. See "The task_lock() exception", at the end of this
307 * modify cpusets. It can perform various checks on the cpuset structure
316 * from one of the callbacks into the cpuset code from within
330 * Accessing a task's cpuset should be done in accordance with the
363 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
376 * walk up the cpuset hierarchy until we find one that does have some
387 struct cpuset *cs;
400 * The top cpuset doesn't have any online cpu as a
403 * cpuset's effective_cpus is on its way to be
418 * up the cpuset hierarchy until we find one that does have some
419 * online mems. The top cpuset always has some mems online.
426 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
435 * update task's spread flag if cpuset's page/slab spread flag is set
439 static void cpuset_update_task_spread_flag(struct cpuset *cs, struct task_struct *tsk)
455 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
457 * One cpuset is a subset of another if all its allowed CPUs and
462 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
469 * alloc_cpumasks - allocate three cpumasks for cpuset
470 * @cs: the cpuset that have cpumasks to be allocated.
476 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
524 * @cs: the cpuset that have cpumasks to be free.
527 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
543 * alloc_trial_cpuset - allocate a trial cpuset
544 * @cs: the cpuset that the trial cpuset duplicates
546 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
548 struct cpuset *trial;
567 * free_cpuset - free the cpuset
568 * @cs: the cpuset to be freed
570 static inline void free_cpuset(struct cpuset *cs)
577 * validate_change() - Used to validate that any proposed cpuset change
580 * If we replaced the flag and mask values of the current cpuset
581 * (cur) with those values in the trial cpuset (trial), would
585 * 'cur' is the address of an actual, in-use cpuset. Operations
587 * cpuset in the list must use cur below, not trial.
596 static int validate_change(struct cpuset *cur, struct cpuset *trial)
599 struct cpuset *c, *par;
608 /* Remaining checks don't apply to root cpuset */
 616 /* On legacy hierarchy, we must be a subset of our parent cpuset. */
672 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
677 static void update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
685 static void update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *root_cs)
687 struct cpuset *cp;
708 /* jump label reference count + the top-level cpuset */
734 * cp - cpuset pointer, used (together with pos_css) to perform a
742 * cpus_allowed of every cpuset marked is_sched_load_balance
753 * load balanced cpusets (using the array of cpuset pointers in
767 struct cpuset *cp; /* top-down scan of cpusets */
768 struct cpuset **csa; /* array of all cpuset ptrs */
769 int csn; /* how many cpuset ptrs in csa so far */
854 struct cpuset *a = csa[i];
858 struct cpuset *b = csa[j];
863 struct cpuset *c = csa[k];
891 struct cpuset *a = csa[i];
917 struct cpuset *b = csa[j];
950 static void update_tasks_root_domain(struct cpuset *cs)
966 struct cpuset *cs = NULL;
1011 * If the flag 'sched_load_balance' of any cpuset with non-empty
1012 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1013 * which has that flag enabled, or if any cpuset with a non-empty
1024 struct cpuset *cs;
1083 static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, const struct cpumask *new_mask)
1089 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1090 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1093 * effective cpuset's. As this function is called with cpuset_mutex held,
1094 * cpuset membership stays stable.
1096 static void update_tasks_cpumask(struct cpuset *cs)
1109 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1111 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1112 * @parent: the parent cpuset
1119 static void compute_effective_cpumask(struct cpumask *new_cpus, struct cpuset *cs, struct cpuset *parent)
1140 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1141 * @cpuset: The cpuset that requests change in partition root state
1147 * For partcmd_enable, the cpuset is being transformed from a non-partition
1148 * root to a partition root. The cpus_allowed mask of the given cpuset will
1153 * For partcmd_disable, the cpuset is being transformed from a partition
1160 * cpus_allowed is assumed to remain the same. The cpuset should either
1181 static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, struct cpumask *newmask, struct tmpmasks *tmp)
1183 struct cpuset *parent = parent_cs(cpuset);
1197 (!newmask && cpumask_empty(cpuset->cpus_allowed))) {
1205 if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) {
1214 if ((cmd == partcmd_enable) && (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
1215 cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) {
1223 new_prs = cpuset->partition_root_state;
1225 cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
1228 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, parent->subparts_cpus);
1237 cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
1269 adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, parent->effective_cpus);
1274 int prev_prs = cpuset->partition_root_state;
1280 switch (cpuset->partition_root_state) {
1309 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, parent->subparts_cpus);
1312 if (!adding && !deleting && (new_prs == cpuset->partition_root_state)) {
1337 if (cpuset->partition_root_state != new_prs) {
1338 cpuset->partition_root_state = new_prs;
1347 * @cs: the cpuset to consider
1350 * When configured cpumask is changed, the effective cpumasks of this cpuset
1357 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
1359 struct cpuset *cp;
1366 struct cpuset *parent = parent_cs(cp);
1399 * cpuset if the parent's subparts_cpus changes.
1482 * empty cpuset is changed, we need to rebuild sched domains.
1483 * On default hierarchy, the cpuset needs to be a partition
1503 * @parent: Parent cpuset
1504 * @cs: Current cpuset
1507 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct tmpmasks *tmp)
1509 struct cpuset *sibling;
1538 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1539 * @cs: the cpuset to consider
1540 * @trialcs: trial cpuset
1541 * @buf: buffer of cpu numbers written to this cpuset
1543 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, const char *buf)
1554 * An empty cpus_requested is ok only if the cpuset has no tasks.
1620 struct cpuset *parent = parent_cs(cs);
1709 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1710 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1713 * effective cpuset's. As this function is called with cpuset_mutex held,
1714 * cpuset membership stays stable.
1716 static void update_tasks_nodemask(struct cpuset *cs)
1765 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1771 * @cs: the cpuset to consider
1774 * When configured nodemask is changed, the effective nodemasks of this cpuset
1781 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1783 struct cpuset *cp;
1788 struct cpuset *parent = parent_cs(cp);
1827 * of a cpuset. Needs to validate the request, update the
1828 * cpusets mems_allowed, and for each task in the cpuset,
1830 * mempolicies and if the cpuset is marked 'memory_migrate',
1834 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1838 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, const char *buf)
1852 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1901 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1920 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1921 * @cs: the cpuset in which each task's spread flags needs to be changed
1924 * function is called with cpuset_mutex held, cpuset membership stays
1927 static void update_tasks_flags(struct cpuset *cs)
1942 * cs: the cpuset to update
1948 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on)
1950 struct cpuset *trialcs;
1994 * cs: the cpuset to update
1999 static int update_prstate(struct cpuset *cs, int new_prs)
2002 struct cpuset *parent = parent_cs(cs);
2064 * cpuset as some system daemons cannot be mapped to other CPUs.
2189 static struct cpuset *cpuset_attach_old_cs;
2191 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2195 struct cpuset *cs;
2205 /* allow moving tasks into an empty cpuset if on default hierarchy */
2259 struct cpuset *cs;
2260 struct cpuset *oldcs = cpuset_attach_old_cs;
2324 /* The various types of files and directories in a cpuset file system */
2347 struct cpuset *cs = css_cs(css);
2395 struct cpuset *cs = css_cs(css);
2424 struct cpuset *cs = css_cs(of_css(of));
2425 struct cpuset *trialcs;
2497 struct cpuset *cs = css_cs(seq_css(sf));
2529 struct cpuset *cs = css_cs(css);
2560 struct cpuset *cs = css_cs(css);
2575 struct cpuset *cs = css_cs(seq_css(seq));
2595 struct cpuset *cs = css_cs(of_css(of));
2786 * cpuset_css_alloc - allocate a cpuset css
2787 * cgrp: control group that the new cpuset will be part of
2792 struct cpuset *cs;
2819 struct cpuset *cs = css_cs(css);
2820 struct cpuset *parent = parent_cs(cs);
2821 struct cpuset *tmp_cs;
2890 * If the cpuset being removed has its flag 'sched_load_balance'
2896 * If the cpuset has the 'sched.partition' flag enabled, simulate
2902 struct cpuset *cs = css_cs(css);
2916 struct cpuset *parent = parent_cs(cs);
2931 struct cpuset *cs = css_cs(css);
2955 * which could have been changed by cpuset just after it inherits the
3015 * or memory nodes, we need to walk over the cpuset hierarchy,
3017 * last CPU or node from a cpuset, then move the tasks in the empty
3018 * cpuset to its next-highest non-empty parent.
3020 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3022 struct cpuset *parent;
3025 * Find its next-highest non-empty parent, (top cpuset
3034 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3040 static void hotplug_update_tasks_legacy(struct cpuset *cs, struct cpumask *new_cpus, nodemask_t *new_mems,
3053 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3070 * cpuset. Should be done outside any lock.
3079 static void hotplug_update_tasks(struct cpuset *cs, struct cpumask *new_cpus, nodemask_t *new_mems, bool cpus_updated,
3110 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3111 * @cs: cpuset in interest
3118 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3124 struct cpuset *parent;
3133 * is finished, so we won't attach a task to an empty cpuset.
3212 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3215 * changed and updates cpuset accordingly. The top_cpuset is always
3301 struct cpuset *cs;
3362 * Description: Finish top cpuset after cpu, node maps are initialized
3369 * cpuset_bind() call when v1 cpuset is mounted.
3383 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3384 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3387 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3390 * tasks cpuset.
3456 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
3457 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3459 * Description: Returns the nodemask_t mems_allowed of the cpuset
3462 * tasks cpuset.
3492 * mem_hardwall ancestor to the specified cpuset. Call holding
3494 * (an unusual configuration), then returns the root cpuset.
3496 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3511 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3516 * and do not allow allocations outside the current tasks cpuset
3519 * nearest enclosing hardwalled ancestor cpuset.
3526 * cpuset are short of memory, might require taking the callback_lock.
3530 * so no allocation on a node outside the cpuset is allowed (unless
3541 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
3546 struct cpuset *cs; /* current cpuset ancestors */
3588 * tasks in a cpuset with is_spread_page or is_spread_slab set),
3652 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3654 * Description: Prints current's name, cpuset name, and cached copy of its
3664 pr_cont(",cpuset=");
3674 * cpuset file 'memory_pressure_enabled' in the root cpuset.
3680 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3683 * page reclaim efforts initiated by tasks in each cpuset.
3685 * This represents the rate at which some task in the cpuset
3691 * Display to user space in the per-cpuset read-only file
3694 * (direct) page reclaim by any task attached to the cpuset.
3707 * - Print tasks cpuset path into seq_file.
3708 * - Used for /proc/<pid>/cpuset.
3709 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3710 * doesn't really matter if tsk->cpuset changes after we read it,