Lines matching refs:cp in kernel/cgroup/cpuset.c
687 struct cpuset *cp;
691 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
692 /* skip the whole subtree if @cp doesn't have any CPU */
693 if (cpumask_empty(cp->cpus_allowed)) {
698 if (is_sched_load_balance(cp)) {
699 update_domain_attr(dattr, cp);
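The 687-699 hits are update_domain_attr_tree(): a pre-order walk in which an empty cpuset prunes its whole subtree (children can only hold a subset of the parent's CPUs, so nothing below an empty cpuset can be non-empty); in the kernel the pruning is done by advancing pos_css with css_rightmost_descendant(). A minimal userspace sketch of that pruning idiom, where struct node and walk() are invented stand-ins for struct cpuset and the css iterator, and the continue plays the role of the css_rightmost_descendant() skip:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for struct cpuset: each node only records
 * whether it has any CPUs, mirroring the cpumask_empty() check. */
struct node {
    const char *name;
    bool has_cpus;
    struct node *child;     /* first child */
    struct node *sibling;   /* next sibling */
};

/* Pre-order walk over descendants. Skipping an empty node here skips
 * all of its children too, the "skip the whole subtree" effect. */
static void walk(struct node *n)
{
    for (; n; n = n->sibling) {
        if (!n->has_cpus) {
            printf("%s: subtree skipped (no CPUs)\n", n->name);
            continue;
        }
        printf("%s: visited\n", n->name);
        walk(n->child);
    }
}

int main(void)
{
    struct node gc = { "grandchild", true,  NULL, NULL };
    struct node c1 = { "child1",     false, &gc,  NULL };  /* empty */
    struct node c2 = { "child2",     true,  NULL, NULL };
    c1.sibling = &c2;
    struct node root = { "root", true, &c1, NULL };

    walk(&root);    /* grandchild is never reached through child1 */
    return 0;
}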
734 * cp - cpuset pointer, used (together with pos_css) to perform a
767 struct cpuset *cp; /* top-down scan of cpusets */
800 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
810 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
811 if (cp == &top_cpuset) {
815 * Continue traversing beyond @cp iff @cp has some CPUs and
822 * If root is load-balancing, we can skip @cp if it
825 if (!cpumask_empty(cp->cpus_allowed) &&
826 !(is_sched_load_balance(cp) &&
827 cpumask_intersects(cp->cpus_allowed, housekeeping_cpumask(HK_FLAG_DOMAIN)))) {
831 if (root_load_balance && cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) {
835 if (is_sched_load_balance(cp) && !cpumask_empty(cp->effective_cpus)) {
836 csa[csn++] = cp;
839 /* skip @cp's subtree if not a partition root */
840 if (!is_partition_root(cp)) {
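The 767-840 hits are generate_sched_domains() gathering candidates: only load-balancing cpusets with a non-empty effective_cpus are collected into csa[] (835-836), subtrees that cannot contribute are pruned (839-840), and csa[] is later partitioned into sched domains. A flat userspace sketch of just the collection filter; the names and mask values are invented, and toy cpumasks are plain words:

#include <stdio.h>
#include <stdbool.h>

struct cs {
    const char *name;
    bool load_balance;              /* is_sched_load_balance()       */
    unsigned long effective_cpus;   /* toy cpumask: one bit per CPU  */
};

int main(void)
{
    struct cs all[] = {
        { "a", true,  0x3 },    /* balanced, has CPUs -> collected   */
        { "b", true,  0x0 },    /* balanced but empty -> skipped     */
        { "c", false, 0xc },    /* not load balancing -> skipped     */
    };
    struct cs *csa[sizeof(all) / sizeof(all[0])];
    int csn = 0;

    for (size_t i = 0; i < sizeof(all) / sizeof(all[0]); i++)
        if (all[i].load_balance && all[i].effective_cpus)
            csa[csn++] = &all[i];   /* as at 835-836 */

    for (int i = 0; i < csn; i++)
        printf("csa[%d] = %s\n", i, csa[i]->name);
    return 0;
}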
1359 struct cpuset *cp;
1365 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1366 struct cpuset *parent = parent_cs(cp);
1368 compute_effective_cpumask(tmp->new_cpus, cp, parent);
1376 if (!cp->use_parent_ecpus) {
1377 cp->use_parent_ecpus = true;
1380 } else if (cp->use_parent_ecpus) {
1381 cp->use_parent_ecpus = false;
1390 if (!cp->partition_root_state && cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
1401 new_prs = cp->partition_root_state;
1402 if ((cp != cs) && new_prs) {
1410 WARN_ON_ONCE(cp->partition_root_state != PRS_ERROR);
1420 clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
1424 if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) {
1438 if (!css_tryget_online(&cp->css)) {
1445 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1446 if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
1447 cp->nr_subparts_cpus = 0;
1448 cpumask_clear(cp->subparts_cpus);
1449 } else if (cp->nr_subparts_cpus) {
1455 * becomes empty, we clear cp->nr_subparts_cpus and
1459 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, cp->subparts_cpus);
1460 if (cpumask_empty(cp->effective_cpus)) {
1461 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1462 cpumask_clear(cp->subparts_cpus);
1463 cp->nr_subparts_cpus = 0;
1464 } else if (!cpumask_subset(cp->subparts_cpus, tmp->new_cpus)) {
1465 cpumask_andnot(cp->subparts_cpus, cp->subparts_cpus, tmp->new_cpus);
1466 cp->nr_subparts_cpus = cpumask_weight(cp->subparts_cpus);
1470 if (new_prs != cp->partition_root_state) {
1471 cp->partition_root_state = new_prs;
1476 WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1478 update_tasks_cpumask(cp);
1486 if (!cpumask_empty(cp->cpus_allowed) && is_sched_load_balance(cp) &&
1487 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || is_partition_root(cp))) {
1492 css_put(&cp->css);
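The 1359-1492 hits are update_cpumasks_hier() propagating a cpumask change top-down; lines 1459-1466 keep effective_cpus and subparts_cpus mutually exclusive, handing all CPUs back when child partitions would otherwise consume every one of them. A toy sketch of just that mask algebra, with plain words standing in for cpumask_t and invented values; the helpers map one-to-one (cpumask_andnot(d,a,b) is a & ~b, cpumask_empty(m) is m == 0, cpumask_subset(a,b) is !(a & ~b)):

#include <stdio.h>

int main(void)
{
    unsigned long new_cpus       = 0x0f; /* tmp->new_cpus            */
    unsigned long effective_cpus = 0x0f; /* copied as at line 1445   */
    unsigned long subparts_cpus  = 0x33; /* cp->subparts_cpus        */

    /* Keep effective_cpus and subparts_cpus mutually exclusive. */
    effective_cpus &= ~subparts_cpus;

    if (effective_cpus == 0) {
        /* Child partitions took every CPU: reclaim them all (1461-1463). */
        effective_cpus = new_cpus;
        subparts_cpus = 0;
    } else if (subparts_cpus & ~new_cpus) {
        /* Mirror of the !cpumask_subset() branch at 1464-1466. */
        subparts_cpus &= ~new_cpus;
    }

    /* __builtin_popcountl is the gcc/clang stand-in for cpumask_weight() */
    printf("effective=%#lx subparts=%#lx nr_subparts=%d\n",
           effective_cpus, subparts_cpus,
           __builtin_popcountl(subparts_cpus));
    return 0;
}

With these values it prints effective=0xc subparts=0x30 nr_subparts=2: the two masks end up disjoint, and subparts keeps only CPUs outside the new mask.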
1783 struct cpuset *cp;
1787 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1788 struct cpuset *parent = parent_cs(cp);
1790 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1801 if (nodes_equal(*new_mems, cp->effective_mems)) {
1806 if (!css_tryget_online(&cp->css)) {
1812 cp->effective_mems = *new_mems;
1815 WARN_ON(!is_in_v2_mode() && !nodes_equal(cp->mems_allowed, cp->effective_mems));
1817 update_tasks_nodemask(cp);
1820 css_put(&cp->css);
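The 1783-1820 hits are the nodemask counterpart, update_nodemasks_hier(): each descendant's effective_mems is recomputed as the intersection of its own mems_allowed with the parent's effective_mems (1790), with an early exit when nothing changed (1801) and the write under lock at 1812. A toy sketch of that intersection; nodemasks are plain words (nodes_and(d,a,b) is a & b, nodes_equal(a,b) is a == b) and the values are invented:

#include <stdio.h>

int main(void)
{
    unsigned long parent_effective = 0x3; /* parent->effective_mems  */
    unsigned long child_allowed    = 0x6; /* cp->mems_allowed        */
    unsigned long child_effective  = 0x6; /* stale cp->effective_mems */

    /* nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems) */
    unsigned long new_mems = child_allowed & parent_effective;

    if (new_mems != child_effective) {    /* else: the 1801 early exit */
        child_effective = new_mems;       /* as at 1812 */
        printf("child effective_mems -> %#lx\n", child_effective);
    }
    return 0;
}

A child can only shrink toward what its parent actually has: here the stale 0x6 collapses to 0x2, the one node both masks share.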