Lines matching defs:cpus (kernel/sched/fair.c)

200 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
208 factor = cpus;
212 factor = 1 + ilog2(cpus);
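
These first hits are in get_update_sysctl_factor(), which scales scheduler tunables by the online CPU count, capped at 8: linear scaling uses the count directly, logarithmic scaling uses 1 + ilog2(cpus). A sketch of the surrounding function, modeled on mainline fair.c (exact shape varies by kernel version):

        static unsigned int get_update_sysctl_factor(void)
        {
                unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
                unsigned int factor;

                switch (sysctl_sched_tunable_scaling) {
                case SCHED_TUNABLESCALING_NONE:
                        factor = 1;
                        break;
                case SCHED_TUNABLESCALING_LINEAR:
                        factor = cpus;
                        break;
                case SCHED_TUNABLESCALING_LOG:
                default:
                        factor = 1 + ilog2(cpus);
                        break;
                }

                return factor;
        }

With the default logarithmic policy an 8-CPU machine gets factor 1 + ilog2(8) = 4, so latency tunables grow gently rather than linearly with core count.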
6399 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6410 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6412 cpumask_andnot(cpus, cpus, cpu_isolated_mask);
6415 for_each_cpu_wrap(core, cpus, target)
6426 cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
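
Lines 6399-6426 are the core scan of select_idle_core(): the per-CPU select_idle_mask scratch mask is narrowed to the domain span, the task's affinity, and (in this tree) the non-isolated CPUs; each candidate core is checked for being fully idle, and its whole SMT sibling group is crossed off the mask whether or not it matched. A sketch, modeled on mainline select_idle_core() circa v5.4-v5.10 plus the cpu_isolated_mask filter this kernel carries; the sched_smt_present and has-idle-cores early exits are omitted:

        static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
        {
                struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
                int core, cpu;

                cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
                cpumask_andnot(cpus, cpus, cpu_isolated_mask);

                for_each_cpu_wrap(core, cpus, target) {
                        bool idle = true;

                        for_each_cpu(cpu, cpu_smt_mask(core)) {
                                if (!available_idle_cpu(cpu)) {
                                        idle = false;
                                        break;
                                }
                        }
                        /* drop the whole SMT core from the scan set at once */
                        cpumask_andnot(cpus, cpus, cpu_smt_mask(core));

                        if (idle)
                                return core;
                }

                return -1;
        }

Clearing siblings via cpumask_andnot() rather than one CPU at a time is what keeps the scan O(cores) instead of O(threads).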
6489 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6523 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6525 for_each_cpu_wrap(cpu, cpus, target)
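
Lines 6489-6525 are the analogous per-CPU scan in select_idle_cpu(): the same restricted mask, walked wrapping around target, under a scan budget that mainline derives from the domain's average scan cost. A condensed sketch, with the SIS_PROP cost accounting reduced to a fixed budget (nr below is a stand-in, not the mainline computation):

        static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
        {
                struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
                int cpu, nr = INT_MAX;  /* mainline derives this from avg_idle / avg_scan_cost */

                cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

                for_each_cpu_wrap(cpu, cpus, target) {
                        if (!--nr)
                                return -1;
                        if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
                                return cpu;
                }

                return -1;
        }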
6553 struct cpumask *cpus;
6555 cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6556 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6560 for_each_cpu_wrap(cpu, cpus, target)
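
The block at 6553-6560 matches the shape of select_idle_capacity(), added in v5.6 for asymmetric-capacity (big.LITTLE-style) systems: the same mask setup, but the walk looks for an idle CPU whose capacity fits the task's utilization, remembering the largest idle CPU as a fallback. A sketch modeled on mainline v5.6-v5.10, with uclamp and load-sync details dropped:

        static int select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
        {
                unsigned long best_cap = 0;
                int cpu, best_cpu = -1;
                struct cpumask *cpus;

                cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
                cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

                for_each_cpu_wrap(cpu, cpus, target) {
                        unsigned long cpu_cap = capacity_of(cpu);

                        if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
                                continue;
                        if (fits_capacity(task_util(p), cpu_cap))
                                return cpu;

                        /* remember the biggest idle CPU as a fallback */
                        if (cpu_cap > best_cap) {
                                best_cap = cpu_cap;
                                best_cpu = cpu;
                        }
                }

                return best_cpu;
        }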
7870 struct cpumask *cpus;
8034 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus)
8737 __maybe_unused cpumask_t *cpus = sched_group_span(group);
8739 if (!cpu_isolated(cpumask_first(cpus))) {
8946 for_each_cpu_and(i, sched_group_span(group), env->cpus)
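
From 7870 onward, cpus is the candidate mask carried in struct lb_env: 8034 uses it to pick an alternative dst_cpu for tasks pinned away from the current one, 8737-8739 skip groups whose first CPU is isolated, and 8946 restricts the per-group statistics walk. A trimmed sketch of that walk, modeled on mainline update_sg_lb_stats(); only a few representative fields are kept:

        static void update_sg_lb_stats(struct lb_env *env, struct sched_group *group,
                                       struct sg_lb_stats *sgs)
        {
                int i;

                memset(sgs, 0, sizeof(*sgs));

                /* visit only CPUs still eligible for this balance attempt */
                for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                        struct rq *rq = cpu_rq(i);

                        sgs->group_load += cpu_load(rq);
                        sgs->group_util += cpu_util(i);
                        sgs->sum_h_nr_running += rq->cfs.h_nr_running;

                        if (!rq->nr_running && idle_cpu(i))
                                sgs->idle_cpus++;
                }
        }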
9122 * Select not overloaded group with lowest number of idle cpus
9691 * idle cpus.
9921 for_each_cpu_and(i, sched_group_span(group), env->cpus)
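
The comment at 9122 belongs to the group_has_spare tie-break in update_sd_pick_busiest(): among groups that still have spare capacity, the one with fewer idle CPUs (and more running tasks) is treated as busier, since it offers less room to pull from. The fragment below is reconstructed from memory of v5.7-era mainline, so treat it as approximate:

        case group_has_spare:
                /*
                 * Select not overloaded group with lowest number of idle cpus
                 * and highest number of running tasks.
                 */
                if (sgs->idle_cpus > busiest->idle_cpus)
                        return false;
                else if ((sgs->idle_cpus == busiest->idle_cpus) &&
                         (sgs->sum_nr_running <= busiest->sum_nr_running))
                        return false;
                break;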
10101 cpumask_t cpus;
10103 cpumask_and(&cpus, sched_group_span(sg), group_balance_mask(sg));
10104 cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
10105 return cpumask_first(&cpus);
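
Lines 10101-10105 form a small helper that returns the first non-isolated balance CPU of a group. The body below is taken directly from those lines; only the function name and signature are guesses (some vendor trees call it group_balance_cpu_not_isolated()):

        static int group_balance_cpu_not_isolated(struct sched_group *sg)  /* name hypothetical */
        {
                cpumask_t cpus;

                cpumask_and(&cpus, sched_group_span(sg), group_balance_mask(sg));
                cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
                return cpumask_first(&cpus);
        }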
10120 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) {
10133 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus)
10159 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
10168 .cpus = cpus,
10173 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
10266 __cpumask_clear_cpu(env.dst_cpu, env.cpus);
10294 __cpumask_clear_cpu(cpu_of(busiest), cpus);
10303 if (!cpumask_subset(cpus, env.dst_grpmask)) {
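
Lines 10159-10303 are load_balance() itself: env.cpus starts as the domain's active CPUs (10173) and shrinks as candidates prove useless. When every task is pinned away from dst_cpu, that CPU is removed so a retry picks another destination (10266); when the busiest rq yields nothing, it is removed from the set and the search redone as long as CPUs outside the local group remain (10294-10303). A fragment sketching the all-pinned retry path, modeled on mainline:

        if (unlikely(env.flags & LBF_ALL_PINNED)) {
                /* busiest rq held only pinned tasks: drop it and retry */
                __cpumask_clear_cpu(cpu_of(busiest), cpus);
                /*
                 * Redo only while candidate source CPUs remain outside
                 * the local group; otherwise give up on this domain.
                 */
                if (!cpumask_subset(cpus, env.dst_grpmask)) {
                        env.loop = 0;
                        goto redo;
                }
                goto out_all_pinned;
        }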
11051 * enable the periodic update of the load of idle cpus
11057 * Internal function that runs load balance for all idle cpus. The load balance
11074 cpumask_t cpus;
11095 cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
11097 cpumask_copy(&cpus, nohz.idle_cpus_mask);
11100 for_each_cpu(balance_cpu, &cpus)
11177 * rebalancing for all the cpus for whom scheduler ticks are stopped.
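
This final cluster (11051-11177) is the nohz idle balancer, which runs load balancing on behalf of every tickless idle CPU. Lines 11095 and 11097 build the same mask two different ways, presumably on opposite sides of an #ifdef for the isolation feature. A condensed sketch of _nohz_idle_balance(), with a placeholder config name standing in for that guard:

        static void _nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
        {
                int this_cpu = this_rq->cpu;
                int balance_cpu;
                cpumask_t cpus;

        #ifdef CONFIG_CPU_ISOLATION_OPT  /* hypothetical guard */
                cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
        #else
                cpumask_copy(&cpus, nohz.idle_cpus_mask);
        #endif

                for_each_cpu(balance_cpu, &cpus) {
                        if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
                                continue;

                        /* rebalance sched domains on behalf of the remote idle CPU */
                        rebalance_domains(cpu_rq(balance_cpu), CPU_IDLE);
                }
        }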