Lines Matching refs:cpu

143 int __weak arch_asym_cpu_priority(int cpu)
145 return -cpu;
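
The weak default at 143-145 assigns each CPU an asym-packing priority of -cpu. Assuming the mainline sched_asym_prefer() comparison (not part of these matches), that default reduces to preferring lower-numbered CPUs; a minimal sketch:

int arch_asym_cpu_priority_default(int cpu)
{
        /* weak default from 143-145: priority falls as the CPU number rises */
        return -cpu;
}

int sched_asym_prefer_sketch(int a, int b)
{
        /* assumed mainline comparison: the higher priority wins */
        return arch_asym_cpu_priority_default(a) > arch_asym_cpu_priority_default(b);
}

/* sched_asym_prefer_sketch(0, 3) is non-zero: CPU0 is preferred over CPU3. */
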
344 int cpu = cpu_of(rq);
361 if (cfs_rq->tg->parent && cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
368 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
787 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
1579 static inline bool is_core_idle(int cpu)
1584 for_each_cpu(sibling, cpu_smt_mask(cpu))
1586 if (cpu == sibling) {
1638 static inline bool test_idle_cores(int cpu, bool def);
1639 static inline int numa_idle_core(int idle_core, int cpu)
1641 if (!static_branch_likely(&sched_smt_present) || idle_core >= 0 || !test_idle_cores(cpu, false)) {
1649 if (is_core_idle(cpu)) {
1650 idle_core = cpu;
1656 static inline int numa_idle_core(int idle_core, int cpu)
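
The matches at 1579-1656 show the NUMA stats code testing whether a whole SMT core is idle: is_core_idle() walks the core's sibling mask, skips the CPU itself, and gives up as soon as one sibling is busy, while numa_idle_core() only runs the test while SMT is active and no idle core has been found yet. A user-space sketch of the sibling scan, with cpu_smt_mask() and idle_cpu() replaced by stub tables (hypothetical names, for illustration only):

#include <stdbool.h>

#define NR_CPUS   8
#define SMT_WIDTH 2

/* stand-ins for idle_cpu() and cpu_smt_mask() */
static bool cpu_idle[NR_CPUS];
static const int smt_siblings[NR_CPUS][SMT_WIDTH] = {
        {0, 4}, {1, 5}, {2, 6}, {3, 7},
        {0, 4}, {1, 5}, {2, 6}, {3, 7},
};

bool is_core_idle_sketch(int cpu)
{
        for (int i = 0; i < SMT_WIDTH; i++) {
                int sibling = smt_siblings[cpu][i];

                if (sibling == cpu)
                        continue;               /* skip the CPU itself, as at 1586 */
                if (!cpu_idle[sibling])
                        return false;           /* any busy sibling means the core is busy */
        }
        return true;
}
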
1670 int cpu, idle_core = -1;
1676 for_each_cpu(cpu, cpumask_of_node(nid))
1678 struct rq *rq = cpu_rq(cpu);
1682 ns->util += cpu_util(cpu);
1684 ns->compute_capacity += capacity_of(cpu);
1686 if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
1687 if (READ_ONCE(rq->numa_migrate_on) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1692 ns->idle_cpu = cpu;
1695 idle_core = numa_idle_core(idle_core, cpu);
1715 int cpu;
1719 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start)
1721 if (cpu == env->best_cpu || !idle_cpu(cpu) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1725 env->dst_cpu = cpu;
1837 /* Skip this swap candidate if it cannot move to the source cpu. */
1941 int cpu = env->dst_stats.idle_cpu;
1944 if (cpu < 0) {
1945 cpu = env->dst_cpu;
1952 if (!idle_cpu(cpu) && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) {
1953 cpu = env->best_cpu;
1956 env->dst_cpu = cpu;
1986 int cpu;
2027 for_each_cpu(cpu, cpumask_of_node(env->dst_nid))
2030 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
2034 env->dst_cpu = cpu;
2575 int cpu = cpupid_to_cpu(cpupid);
2605 tsk = READ_ONCE(cpu_rq(cpu)->curr);
4221 bool task_fits_max(struct task_struct *p, int cpu)
4223 unsigned long capacity = capacity_orig_of(cpu);
4224 unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity;
4237 int cpu = cpu_of(rq);
4253 task_fits = capacity_orig_of(cpu) >= capacity_orig_of(cpumask_first(rtg_target));
5409 static void sync_throttle(struct task_group *tg, int cpu)
5421 cfs_rq = tg->cfs_rq[cpu];
5422 pcfs_rq = tg->parent->cfs_rq[cpu];
5425 cfs_rq->throttled_clock_pelt = rq_clock_task(cpu_rq(cpu));
5489 pr_warn_ratelimited("cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, "
5494 pr_warn_ratelimited("cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing "
5565 /* cpu online callback */
5585 /* cpu offline callback */
5636 static inline void sync_throttle(struct task_group *tg, int cpu)
5739 static inline bool cpu_overutilized(int cpu)
5741 return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
5746 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
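
cpu_overutilized() at 5739-5741 is a single fits_capacity() test on the CPU's utilization. fits_capacity() itself is not among these matches; assuming the usual mainline definition with roughly 20% headroom (util * 1280 < capacity * 1024), the check works out as:

#include <stdbool.h>

/* Assumed mainline margin: utilization "fits" only while it stays below
 * roughly 80% of the capacity (1024/1280). */
bool fits_capacity_sketch(unsigned long util, unsigned long capacity)
{
        return util * 1280 < capacity * 1024;
}

/* cpu_overutilized() from 5739-5741: the CPU is overutilized once its
 * utilization no longer fits. E.g. util 900 on a capacity-1024 CPU:
 * 900 * 1280 = 1152000 >= 1024 * 1024 = 1048576, so it is overutilized. */
bool cpu_overutilized_sketch(unsigned long util, unsigned long capacity)
{
        return !fits_capacity_sketch(util, capacity);
}
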
5764 static int sched_idle_cpu(int cpu)
5766 return sched_idle_rq(cpu_rq(cpu));
6007 * @cpu: the CPU whose load is requested
6276 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag)
6278 int new_cpu = cpu;
6302 group = find_idlest_group(sd, p, cpu);
6308 new_cpu = find_idlest_group_cpu(group, p, cpu);
6309 if (new_cpu == cpu) {
6310 /* Now try balancing at a lower domain level of 'cpu': */
6316 cpu = new_cpu;
6319 for_each_domain(cpu, tmp)
6337 static inline void set_idle_cores(int cpu, int val)
6341 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6347 static inline bool test_idle_cores(int cpu, bool def)
6351 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6369 int cpu;
6376 for_each_cpu(cpu, cpu_smt_mask(core))
6378 if (cpu == core) {
6382 if (!available_idle_cpu(cpu)) {
6400 int core, cpu;
6419 for_each_cpu(cpu, cpu_smt_mask(core))
6421 if (!available_idle_cpu(cpu)) {
6446 int cpu;
6452 for_each_cpu(cpu, cpu_smt_mask(target))
6454 if (!cpumask_test_cpu(cpu, p->cpus_ptr) || !cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6457 if (cpu_isolated(cpu)) {
6460 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) {
6461 return cpu;
6494 int cpu, nr = INT_MAX;
6525 for_each_cpu_wrap(cpu, cpus, target)
6530 if (cpu_isolated(cpu)) {
6533 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) {
6541 return cpu;
6552 int cpu, best_cpu = -1;
6560 for_each_cpu_wrap(cpu, cpus, target)
6562 unsigned long cpu_cap = capacity_of(cpu);
6564 if (cpu_isolated(cpu)) {
6568 if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) {
6572 return cpu;
6577 best_cpu = cpu;
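
The scan at 6552-6577 walks the candidate CPUs, skips isolated and busy ones, returns the first idle CPU whose capacity is sufficient for the task, and otherwise remembers the largest-capacity idle CPU seen. A self-contained sketch with stub idle/capacity tables (and the same assumed fits margin as above) standing in for the kernel helpers:

#include <stdbool.h>

#define NR_CPUS 8

/* stand-ins for available_idle_cpu()/sched_idle_cpu(), cpu_isolated()
 * and capacity_of() */
static bool cpu_idle_tbl[NR_CPUS]     = { false, true, true, false, true, false, false, true };
static bool cpu_isolated_tbl[NR_CPUS] = { false, false, true, false, false, false, false, false };
static unsigned long capacity_tbl[NR_CPUS] = { 512, 512, 512, 512, 1024, 1024, 1024, 1024 };

int select_idle_capacity_sketch(unsigned long task_util)
{
        unsigned long best_cap = 0;
        int best_cpu = -1;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_isolated_tbl[cpu] || !cpu_idle_tbl[cpu])
                        continue;

                unsigned long cap = capacity_tbl[cpu];

                if (task_util * 1280 < cap * 1024)
                        return cpu;             /* first idle CPU that fits wins */

                if (cap > best_cap) {           /* otherwise track the biggest idle CPU */
                        best_cap = cap;
                        best_cpu = cpu;
                }
        }
        return best_cpu;
}
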
6584 static inline bool asym_fits_capacity(int task_util, int cpu)
6587 return fits_capacity(task_util, capacity_of(cpu));
6604 * that the task fits with cpu's capacity.
6675 * @cpu: the CPU to get the utilization of
6711 unsigned long cpu_util(int cpu)
6718 u64 walt_cpu_util = cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
6720 return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
6724 cfs_rq = &cpu_rq(cpu)->cfs;
6731 return min_t(unsigned long, util, capacity_orig_of(cpu));
6735 * cpu_util_without: compute cpu utilization without any contributions from *p
6736 * @cpu: the CPU whose utilization is requested
6747 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
6756 * utilization from cpu utilization. Instead just use
6760 return cpu_util(cpu);
6765 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6766 return cpu_util(cpu);
6771 util = max_t(long, cpu_util(cpu) - task_util(p), 0);
6772 return min_t(unsigned long, util, capacity_orig_of(cpu));
6776 cfs_rq = &cpu_rq(cpu)->cfs;
6840 return min_t(unsigned long, util, capacity_orig_of(cpu));
6844 unsigned long capacity_spare_without(int cpu, struct task_struct *p)
6846 return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
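
Lines 6747-6846 show the waking-task bookkeeping: cpu_util_without() subtracts the task's own utilization from the CPU's (clamped at zero and at the CPU's original capacity), and capacity_spare_without() is the headroom left after that. A sketch of the arithmetic, with plain parameters standing in for cpu_util(), task_util() and capacity_of():

/* Sketch of 6765-6772 and 6844-6846: remove @p's contribution from the
 * CPU utilization, then report the spare capacity left over. */
unsigned long cpu_util_without_sketch(unsigned long cpu_util, unsigned long task_util,
                                      unsigned long capacity_orig)
{
        unsigned long util = cpu_util > task_util ? cpu_util - task_util : 0;

        return util < capacity_orig ? util : capacity_orig;
}

unsigned long capacity_spare_without_sketch(unsigned long capacity, unsigned long cpu_util,
                                            unsigned long task_util, unsigned long capacity_orig)
{
        unsigned long util = cpu_util_without_sketch(cpu_util, task_util, capacity_orig);

        return capacity > util ? capacity - util : 0;
}

/* e.g. capacity 1024, cpu_util 700, task_util 200: util without p is 500,
 * spare capacity is 524. */
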
6850 * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
6853 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
6855 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
6859 * If @p migrates from @cpu to another, remove its contribution. Or,
6860 * if @p migrates from another CPU to @cpu, add its contribution. In
6861 * the other cases, @cpu is not impacted by the migration, so the
6864 if (task_cpu(p) == cpu && dst_cpu != cpu) {
6866 } else if (task_cpu(p) != cpu && dst_cpu == cpu) {
6879 if (dst_cpu == cpu) {
6886 return min(util, capacity_orig_of(cpu));
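
cpu_util_next() at 6853-6886 predicts the utilization of @cpu if @p ran on @dst_cpu: the task's utilization is removed when it would leave @cpu, added when @cpu is the destination, and the result is clamped to the CPU's original capacity. A sketch of just that adjustment (the cfs_rq and util_est details are omitted):

unsigned long cpu_util_next_sketch(unsigned long cpu_util, unsigned long task_util,
                                   int cpu, int task_cpu, int dst_cpu,
                                   unsigned long capacity_orig)
{
        unsigned long util = cpu_util;

        if (task_cpu == cpu && dst_cpu != cpu)          /* p migrates away from cpu */
                util = util > task_util ? util - task_util : 0;
        else if (task_cpu != cpu && dst_cpu == cpu)     /* p migrates onto cpu */
                util += task_util;

        return util < capacity_orig ? util : capacity_orig;
}
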
6890 * Returns the current capacity of cpu after applying both
6891 * cpu and freq scaling.
6893 unsigned long capacity_curr_of(int cpu)
6895 unsigned long max_cap = cpu_rq(cpu)->cpu_capacity_orig;
6896 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
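
capacity_curr_of() at 6893-6896 scales the CPU's original capacity by the current frequency factor. Assuming the usual SCHED_CAPACITY_SHIFT of 10 (not visible in these matches), the computation is:

#define SCHED_CAPACITY_SHIFT 10

/* e.g. max_cap = 1024 and scale_freq = 512 (the CPU running at half of
 * fmax) gives a current capacity of 512. */
unsigned long capacity_curr_sketch(unsigned long max_cap, unsigned long scale_freq)
{
        return (max_cap * scale_freq) >> SCHED_CAPACITY_SHIFT;
}
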
6913 int cpu;
6924 for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
6926 unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
6927 struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
6935 sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap, ENERGY_UTIL, NULL);
6944 cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap, FREQUENCY_UTIL, tsk);
6995 int cpu, best_energy_cpu = prev_cpu;
7031 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd))
7033 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) {
7037 util = cpu_util_next(cpu, p, cpu);
7038 cpu_cap = capacity_of(cpu);
7049 util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
7055 if (cpu == prev_cpu) {
7067 max_spare_cap_cpu = cpu;
7119 int cpu = smp_processor_id();
7142 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
7146 for_each_domain(cpu, tmp)
7149 * If both 'cpu' and 'prev_cpu' are part of this domain,
7150 * cpu is a valid SD_WAKE_AFFINE target.
7153 if (cpu != prev_cpu) {
7154 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
7170 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
7177 current->recent_used_cpu = cpu;
8015 int cpu;
8034 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus)
8036 if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
8038 env->new_dst_cpu = cpu;
8454 int cpu = cpu_of(rq);
8472 se = cfs_rq->tg->se[cpu];
8560 static void update_blocked_averages(int cpu)
8563 struct rq *rq = cpu_rq(cpu);
8641 static unsigned long scale_rt_capacity(int cpu)
8643 struct rq *rq = cpu_rq(cpu);
8644 unsigned long max = arch_scale_cpu_capacity(cpu);
8671 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
8673 unsigned long capacity = scale_rt_capacity(cpu);
8676 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
8682 cpu_rq(cpu)->cpu_capacity = capacity;
8683 trace_sched_cpu_capacity_tp(cpu_rq(cpu));
8690 void update_group_capacity(struct sched_domain *sd, int cpu)
8702 update_cpu_capacity(sd, cpu);
8716 for_each_cpu(cpu, sched_group_span(sdg))
8718 unsigned long cpu_cap = capacity_of(cpu);
8720 if (cpu_isolated(cpu)) {
8786 * cpu 3 and leave one of the CPUs in the second group unused.
8908 unsigned int cpu = rq->cpu;
8914 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) {
8922 update_blocked_averages(cpu);
8983 /* Idle cpu can't have misfit task */
8991 /* Check for a misfit task on the cpu */
9188 * task_running_on_cpu - return 1 if @p is running on @cpu.
9191 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
9194 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
9207 * @cpu: the processor on which idleness is tested.
9212 static int idle_cpu_without(int cpu, struct task_struct *p)
9214 struct rq *rq = cpu_rq(cpu);
9222 * impact of p on cpu must be used instead. The updated nr_running
10114 int cpu;
10133 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus)
10135 if (!idle_cpu(cpu) || cpu_isolated(cpu)) {
10140 return cpu == env->dst_cpu;
10199 env.src_cpu = busiest->cpu;
10520 .src_cpu = busiest_rq->cpu,
10550 .src_cpu = busiest_rq->cpu,
10635 int cpu = rq->cpu;
10636 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10646 for_each_domain(cpu, sd)
10681 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10687 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10688 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10713 * When the cpu is attached to the null domain, for example, it will not be
10815 int nr_busy, i, cpu = rq->cpu;
10873 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
10882 if (sched_asym_prefer(i, cpu)) {
10889 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
10910 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10935 static void set_cpu_sd_state_busy(int cpu)
10940 sd = rcu_dereference(per_cpu(sd_llc, cpu));
10960 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10963 set_cpu_sd_state_busy(rq->cpu);
10966 static void set_cpu_sd_state_idle(int cpu)
10971 sd = rcu_dereference(per_cpu(sd_llc, cpu));
10986 void nohz_balance_enter_idle(int cpu)
10988 struct rq *rq = cpu_rq(cpu);
10990 SCHED_WARN_ON(cpu != smp_processor_id());
10992 if (!cpu_active(cpu)) {
11008 if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) {
11036 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
11046 set_cpu_sd_state_idle(cpu);
11050 * Each time a cpu enters idle, we assume that it has blocked load and
11070 int this_cpu = this_rq->cpu;
11080 * the has_blocked flag. If a cpu enters idle in the meantime, it will
11082 * Because a cpu that becomes idle is added to idle_cpus_mask before
11084 * check the load of an idle cpu.
11200 int this_cpu = this_rq->cpu;
11260 int this_cpu = this_rq->cpu;
11389 * is a possibility this nohz-kicked cpu could be isolated. Hence
11390 * return if the cpu is isolated.
11392 if (cpu_isolated(this_rq->cpu)) {
11409 update_blocked_averages(this_rq->cpu);
11419 * cpu is isolated.
11931 int cpu;
11933 for_each_possible_cpu(cpu)
11935 if (tg->se[cpu]) {
11936 remove_entity_load_avg(tg->se[cpu]);
11943 if (!tg->cfs_rq[cpu]->on_list) {
11947 rq = cpu_rq(cpu);
11950 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
11955 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
11958 struct rq *rq = cpu_rq(cpu);
11964 tg->cfs_rq[cpu] = cfs_rq;
11965 tg->se[cpu] = se;
12117 void print_cfs_stats(struct seq_file *m, int cpu)
12122 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) print_cfs_rq(m, cpu, cfs_rq);