Lines Matching refs:parent

91 * parent masks.
97 * effective_mask == configured_mask & parent's effective_mask,
98 * and if it ends up empty, it will inherit the parent's mask.
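A minimal sketch of that rule using the standard cpumask helpers (cs and parent are illustrative names here, not lines from this file):

	/* effective = configured & parent's effective; inherit if empty */
	cpumask_and(cs->effective_cpus, cs->cpus_allowed, parent->effective_cpus);
	if (cpumask_empty(cs->effective_cpus))
		cpumask_copy(cs->effective_cpus, parent->effective_cpus);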
117 * - CPUs granted by the parent = effective_cpus U subparts_cpus
159 * use_parent_ecpus - set if using parent's effective_cpus
174 * None of the cpus in cpus_allowed can be put into the parent's
178 * parent cpuset can give more CPUs back to this child cpuset.
206 return css_cs(cs->css.parent);
616 /* On legacy hierarchy, we must be a subset of our parent cpuset. */
818 * parent's cpus, so just skip them, and then we call
1112 * @parent: the parent cpuset
1114 * If the parent has subpartition CPUs, include them in the list of
1119 static void compute_effective_cpumask(struct cpumask *new_cpus, struct cpuset *cs, struct cpuset *parent)
1121 if (parent->nr_subparts_cpus) {
1122 cpumask_or(new_cpus, parent->effective_cpus, parent->subparts_cpus);
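Only the lines containing "parent" are listed, so the helper appears truncated. A sketch of the whole function, with the omitted branch assumed to apply the effective_mask == configured_mask & parent's effective_mask rule documented above; treat the exact masking order as an assumption:

	static void compute_effective_cpumask(struct cpumask *new_cpus,
					      struct cpuset *cs, struct cpuset *parent)
	{
		if (parent->nr_subparts_cpus) {
			/* CPUs lent to child partitions still count as granted */
			cpumask_or(new_cpus, parent->effective_cpus,
				   parent->subparts_cpus);
			cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		} else {
			/* assumed else branch: plain intersection with the parent */
			cpumask_and(new_cpus, cs->cpus_allowed,
				    parent->effective_cpus);
		}
	}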
1136 partcmd_update, /* Update parent's subparts_cpus */
1140 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1149 * be put into parent's subparts_cpus and taken away from parent's
1155 * parent's subparts_cpus will be taken away from that cpumask and put back
1156 * into parent's effective_cpus. 0 should always be returned.
1163 * be granted by the parent. The function will return 1 if changes to
1164 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
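A hypothetical call site showing only the documented return-value convention; the scratch-mask setup is assumed:

	struct tmpmasks tmp;	/* pre-allocated scratch cpumasks */
	int ret;

	/* try to turn cs into a partition root under its parent */
	ret = update_parent_subparts_cpumask(cs, partcmd_enable, NULL, &tmp);
	if (ret < 0)
		return ret;	/* the parent could not grant the request */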
1183 struct cpuset *parent = parent_cs(cpuset);
1192 * The parent must be a partition root.
1196 if (!is_partition_root(parent) || (newmask && cpumask_empty(newmask)) ||
1211 * can be granted from parent's effective_cpus or at least one
1214 if ((cmd == partcmd_enable) && (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
1215 cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) {
1220 * A cpumask update cannot make parent's effective_cpus become empty.
1228 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, parent->subparts_cpus);
1233 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1234 * addmask = newmask & parent->effective_cpus
1235 * & ~parent->subparts_cpus
1238 deleting = cpumask_and(tmp->delmask, tmp->delmask, parent->subparts_cpus);
1240 cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
1241 adding = cpumask_andnot(tmp->addmask, tmp->addmask, parent->subparts_cpus);
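Purely illustrative values for the two formulas above (hypothetical masks, not from the source):

	/*
	 * cpus_allowed           = 0-7   (old configured mask)
	 * newmask                = 0-1   (requested new mask)
	 * parent->subparts_cpus  = 4-7
	 * parent->effective_cpus = 0-3
	 *
	 * delmask = 0-7 & ~(0-1) & 4-7 = 4-7  (returned to the parent)
	 * addmask = 0-1 & 0-3 & ~(4-7) = 0-1  (newly taken from the parent)
	 */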
1245 if (adding && cpumask_equal(parent->effective_cpus, tmp->addmask)) {
1257 cpumask_copy(tmp->addmask, parent->effective_cpus);
1263 * addmask = cpus_allowed & parent->effective_cpus
1265 * Note that parent's subparts_cpus may have been
1269 adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, parent->effective_cpus);
1270 part_error = cpumask_equal(tmp->addmask, parent->effective_cpus);
1306 * Remove all its cpus from parent's subparts_cpus.
1309 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, parent->subparts_cpus);
1317 * Change the parent's subparts_cpus.
1323 cpumask_or(parent->subparts_cpus, parent->subparts_cpus, tmp->addmask);
1324 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, tmp->addmask);
1327 cpumask_andnot(parent->subparts_cpus, parent->subparts_cpus, tmp->delmask);
1332 cpumask_or(parent->effective_cpus, parent->effective_cpus, tmp->delmask);
1335 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
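Read together, and assuming the adding/deleting guards and the callback_lock critical section that the match filtered out, the commit step is roughly:

	spin_lock_irq(&callback_lock);
	if (adding) {
		/* move granted CPUs out of the parent's effective set */
		cpumask_or(parent->subparts_cpus,
			   parent->subparts_cpus, tmp->addmask);
		cpumask_andnot(parent->effective_cpus,
			       parent->effective_cpus, tmp->addmask);
	}
	if (deleting) {
		/* give released CPUs back to the parent's effective set */
		cpumask_andnot(parent->subparts_cpus,
			       parent->subparts_cpus, tmp->delmask);
		cpumask_or(parent->effective_cpus,
			   parent->effective_cpus, tmp->delmask);
	}
	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
	spin_unlock_irq(&callback_lock);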
1366 struct cpuset *parent = parent_cs(cp);
1368 compute_effective_cpumask(tmp->new_cpus, cp, parent);
1372 * parent, which is guaranteed to have some CPUs.
1375 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1378 parent->child_ecpus_count++;
1382 WARN_ON_ONCE(!parent->child_ecpus_count);
1383 parent->child_ecpus_count--;
1398 * update_tasks_cpumask() again for tasks in the parent
1399 * cpuset if the parent's subparts_cpus changes.
1403 switch (parent->partition_root_state) {
1406 * If parent is not a partition root or an
1425 update_tasks_cpumask(parent);
1431 * When the parent is invalid, this cpuset has to be invalid too.
1503 * @parent: Parent cpuset
1507 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct tmpmasks *tmp)
1519 cpuset_for_each_child(sibling, pos_css, parent) {
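A sketch of the loop body, assuming the conventional self-skip, the use_parent_ecpus flag that appears earlier in the matches, and a per-sibling recompute helper (the helper name is an assumption):

	rcu_read_lock();
	cpuset_for_each_child(sibling, pos_css, parent) {
		if (sibling == cs)
			continue;
		if (!sibling->use_parent_ecpus)
			continue;
		/* recompute effective CPUs for siblings borrowing from parent */
		update_cpumasks_hier(sibling, tmp);
	}
	rcu_read_unlock();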
1620 struct cpuset *parent = parent_cs(cs);
1624 * cpusets if they use parent's effective_cpus.
1626 if (parent->child_ecpus_count) {
1627 update_sibling_cpumasks(parent, cs, &tmp);
1788 struct cpuset *parent = parent_cs(cp);
1790 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1794 * parent, which is guaranteed to have some MEMs.
1797 *new_mems = parent->effective_mems;
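The memory-node analogue of the CPU fallback; a sketch assuming the same empty-mask structure as the cpumask path:

	nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
	/* inherit the parent's mask rather than run with no nodes */
	if (nodes_empty(*new_mems))
		*new_mems = parent->effective_mems;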
2002 struct cpuset *parent = parent_cs(cs);
2063 * Update cpumask of parent's tasks except when it is the top
2066 if (parent != &top_cpuset) {
2067 update_tasks_cpumask(parent);
2070 if (parent->child_ecpus_count) {
2071 update_sibling_cpumasks(parent, cs, &tmpmask);
2820 struct cpuset *parent = parent_cs(cs);
2824 if (!parent) {
2832 if (is_spread_page(parent)) {
2835 if (is_spread_slab(parent)) {
2843 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
2844 cs->effective_mems = parent->effective_mems;
2846 parent->child_ecpus_count++;
2855 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2864 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2868 cpuset_for_each_child(tmp_cs, pos_css, parent) {
2877 cs->mems_allowed = parent->mems_allowed;
2878 cs->effective_mems = parent->mems_allowed;
2879 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2880 cpumask_copy(cs->cpus_requested, parent->cpus_requested);
2881 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2916 struct cpuset *parent = parent_cs(cs);
2919 parent->child_ecpus_count--;
2954 * Make sure the new task conforms to the current state of its parent,
2956 * state from the parent and before it sits on the cgroup's task list.
3018 * cpuset to its next-highest non-empty parent.
3022 struct cpuset *parent;
3025 * Find its next-highest non-empty parent (top cpuset
3028 parent = parent_cs(cs);
3029 while (cpumask_empty(parent->cpus_allowed) || nodes_empty(parent->mems_allowed)) {
3030 parent = parent_cs(parent);
3033 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
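Assembled from the matched lines, the empty-cpuset walk is roughly the following sketch; the error path taken on a failed transfer is assumed:

	parent = parent_cs(cs);
	while (cpumask_empty(parent->cpus_allowed) ||
	       nodes_empty(parent->mems_allowed))
		parent = parent_cs(parent);

	/* move the stranded tasks up to the first usable ancestor */
	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup))
		pr_err("cpuset: failed to transfer tasks out of empty cpuset\n");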
3124 struct cpuset *parent;
3142 parent = parent_cs(cs);
3143 compute_effective_cpumask(&new_cpus, cs, parent);
3144 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3160 * effective_cpus or its parent becomes erroneous, we have to
3163 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || (parent->partition_root_state == PRS_ERROR))) {
3169 compute_effective_cpumask(&new_cpus, cs, parent);
3178 if ((parent->partition_root_state == PRS_ERROR) || cpumask_empty(&new_cpus)) {
3190 * from the parent may change to erroneous.
3192 if (is_partition_root(parent) &&
3193 ((cs->partition_root_state == PRS_ERROR) || !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
3521 * Scanning up parent cpusets requires callback_lock. The