Lines Matching defs:dst_cpu

1909 int src_nid, int dst_cpu)
1912 int dst_nid = cpu_to_node(dst_cpu);
1946 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
2047 int dst_cpu, dst_nid;
2154 struct rq *rq = cpu_rq(env->dst_cpu);
2157 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
2159 int start = env->dst_cpu;
2168 env->dst_cpu = cpu;
2169 rq = cpu_rq(env->dst_cpu);
2183 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
2195 env->best_cpu = env->dst_cpu;
2235 * be improved if the source tasks was migrated to the target dst_cpu taking
2236 * into account that it might be best if task running on the dst_cpu should
2243 struct rq *dst_rq = cpu_rq(env->dst_cpu);
2389 cpu = env->dst_cpu;
2400 env->dst_cpu = cpu;
2457 env->dst_cpu = env->dst_stats.idle_cpu;
2479 env->dst_cpu = cpu;
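
The entries around 2154-2195 and 2389-2479 show how a NUMA balancing pass picks and then reserves env->dst_cpu so that two concurrent passes cannot target the same runqueue. Below is a minimal user-space sketch of that reservation step, assuming a flat per-CPU flag array in place of rq->numa_migrate_on; every name in it is illustrative, not the kernel's.

/*
 * Simplified model: reserve a destination CPU with an atomic exchange,
 * probing the CPUs after env->dst_cpu when the first choice is taken,
 * much like the xchg(&rq->numa_migrate_on, 1) pattern listed above.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_MODEL 8

static atomic_int numa_migrate_on[NR_CPUS_MODEL];      /* 0 = free, 1 = reserved */

struct numa_env {
        int dst_cpu;
        int best_cpu;
};

static bool numa_reserve_dst(struct numa_env *env)
{
        int start = env->dst_cpu;

        for (int i = 0; i < NR_CPUS_MODEL; i++) {
                int cpu = (start + i) % NR_CPUS_MODEL;

                /* atomic_exchange models xchg(&rq->numa_migrate_on, 1) */
                if (atomic_exchange(&numa_migrate_on[cpu], 1) == 0) {
                        /* Release a previously reserved CPU before switching. */
                        if (env->best_cpu >= 0 && env->best_cpu != cpu)
                                atomic_store(&numa_migrate_on[env->best_cpu], 0);
                        env->dst_cpu = cpu;
                        env->best_cpu = cpu;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        struct numa_env env = { .dst_cpu = 2, .best_cpu = -1 };

        atomic_store(&numa_migrate_on[2], 1);           /* CPU 2 already claimed */
        if (numa_reserve_dst(&env))
                printf("reserved dst_cpu %d\n", env.dst_cpu);   /* prints 3 */
        return 0;
}
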
7718 * @dst_cpu: CPU @p migrates to, -1 if @p moves from @cpu or @p == NULL
7756 cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
7778 * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
7783 if (p && task_cpu(p) == cpu && dst_cpu != cpu)
7785 else if (p && task_cpu(p) != cpu && dst_cpu == cpu)
7796 * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
7799 * During exec (@dst_cpu = -1) @p is enqueued and does
7819 if (dst_cpu == cpu)
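
The kerneldoc fragments at 7718-7819 describe how cpu_util() adjusts a CPU's utilization depending on whether @p is leaving @cpu or arriving because @dst_cpu == @cpu. The following stand-alone sketch models that bookkeeping, assuming flat arrays and a hypothetical cpu_util_with() helper rather than the kernel's PELT signals.

/*
 * Simplified model: predict a CPU's utilization as if the task had
 * already been moved to dst_cpu (dst_cpu == -1 means no migration).
 */
#include <stdio.h>

#define NCPU 4

static unsigned long cpu_runnable_util[NCPU] = { 400, 150, 300, 50 };

struct task_model {
        int cpu;                        /* CPU the task is currently accounted on */
        unsigned long util;             /* task's utilization contribution */
};

static unsigned long cpu_util_with(int cpu, const struct task_model *t, int dst_cpu)
{
        unsigned long util = cpu_runnable_util[cpu];

        if (t) {
                /* Task leaves @cpu: remove its contribution (clamped at zero). */
                if (t->cpu == cpu && dst_cpu != cpu)
                        util -= (util < t->util) ? util : t->util;
                /* Task arrives on @cpu: add its contribution. */
                else if (t->cpu != cpu && dst_cpu == cpu)
                        util += t->util;
        }
        return util;
}

int main(void)
{
        struct task_model p = { .cpu = 0, .util = 120 };

        printf("cpu0 if p moves to cpu3: %lu\n", cpu_util_with(0, &p, 3));      /* 280 */
        printf("cpu3 if p moves to cpu3: %lu\n", cpu_util_with(3, &p, 3));      /* 170 */
        return 0;
}
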
7974 * is placed on the cpu @dst_cpu.
7981 struct task_struct *p, int dst_cpu)
7987 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
7988 unsigned long util = cpu_util(cpu, p, dst_cpu, 1);
8007 * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
8012 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
8014 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
8017 if (dst_cpu >= 0)
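
Lines 7974-8017 feed that per-CPU utilization into an energy estimate for a performance domain, with dst_cpu < 0 standing for the base landscape without the task. Here is a simplified Energy-Model-style cost function, assuming an invented operating-point table rather than the kernel's em_perf_domain.

/*
 * Simplified model: the highest per-CPU utilization selects an
 * operating point, and the domain's summed utilization scales that
 * point's cost.  Table values are made up.
 */
#include <stdio.h>

struct perf_state {
        unsigned long cap;              /* compute capacity at this OPP */
        unsigned long cost;             /* relative energy cost at this OPP */
};

static const struct perf_state pd_table[] = {
        { .cap = 256,  .cost = 100 },
        { .cap = 512,  .cost = 300 },
        { .cap = 1024, .cost = 900 },
};

static unsigned long pd_energy(unsigned long max_util, unsigned long sum_util)
{
        const struct perf_state *ps = &pd_table[0];

        for (size_t i = 0; i < sizeof(pd_table) / sizeof(pd_table[0]); i++) {
                ps = &pd_table[i];
                if (ps->cap >= max_util)
                        break;
        }
        return ps->cost * sum_util / ps->cap;
}

int main(void)
{
        /* Same domain with and without an extra task's utilization. */
        printf("without task: %lu\n", pd_energy(300, 600));     /* 351 */
        printf("with task   : %lu\n", pd_energy(450, 750));     /* 439 */
        return 0;
}
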
8995 int dst_cpu;
9049 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
9079 dst_nid = cpu_to_node(env->dst_cpu);
9137 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
9144 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
9165 /* Prevent to re-select dst_cpu via env's CPUs: */
9177 /* Record that we found at least one task that could run on dst_cpu */
9183 !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
9232 set_task_cpu(p, env->dst_cpu);
9300 if (!same_cluster(env->dst_cpu, env->src_cpu))
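
Lines 9137-9183 are the affinity checks on the migration path: a task pinned away from env->dst_cpu is skipped, but one allowed CPU of the destination group is remembered as new_dst_cpu for a later retry. The sketch below is a stand-alone model of that bookkeeping; the LBF_* names echo the kernel's, while their values and the bitmask layout are assumptions.

/*
 * Simplified model: reject a task that cannot run on dst_cpu and
 * record an alternate CPU of the destination group, mirroring the
 * LBF_SOME_PINNED/LBF_DST_PINNED handling listed above.
 */
#include <stdbool.h>
#include <stdio.h>

#define LBF_SOME_PINNED 0x01
#define LBF_DST_PINNED  0x02

struct lb_env_model {
        int dst_cpu;
        int new_dst_cpu;
        unsigned int flags;
        unsigned long dst_grpmask;      /* bitmask: other CPUs of the dst group */
};

struct task_model {
        unsigned long cpus_allowed;     /* bitmask of CPUs the task may run on */
};

static bool can_migrate_to_dst(const struct task_model *p, struct lb_env_model *env)
{
        if (!(p->cpus_allowed & (1UL << env->dst_cpu))) {
                env->flags |= LBF_SOME_PINNED;

                /* Remember one allowed CPU in the rest of the dst group. */
                unsigned long candidates = p->cpus_allowed & env->dst_grpmask;

                if (candidates && !(env->flags & LBF_DST_PINNED)) {
                        env->new_dst_cpu = __builtin_ctzl(candidates);
                        env->flags |= LBF_DST_PINNED;
                }
                return false;
        }
        return true;
}

int main(void)
{
        struct lb_env_model env = {
                .dst_cpu = 0, .new_dst_cpu = -1, .dst_grpmask = 0x6,    /* CPUs 1,2 */
        };
        struct task_model p = { .cpus_allowed = 0x4 };                  /* CPU 2 only */

        if (!can_migrate_to_dst(&p, &env))
                printf("pinned, retry on CPU %d\n", env.new_dst_cpu);   /* CPU 2 */
        return 0;
}
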
10019 * @env::dst_cpu can do asym_packing if it has higher priority than the
10022 * SMT is a special case. If we are balancing load between cores, @env::dst_cpu
10030 * Return: true if @env::dst_cpu can do with asym_packing load balance. False
10038 if (!sched_use_asym_prio(env->sd, env->dst_cpu))
10050 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
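
Lines 10019-10050 gate asym_packing on CPU priority: env->dst_cpu may pull load only if it is preferred over the candidate group's asym_prefer_cpu. A minimal model of that comparison follows, with an invented per-CPU priority table standing in for the architecture's priority hook.

/*
 * Simplified model in the spirit of sched_asym_prefer(): higher
 * architectural priority wins.  Priorities below are made up.
 */
#include <stdbool.h>
#include <stdio.h>

static const int asym_prio[] = { 10, 20, 30, 40 };      /* per-CPU priority */

static bool asym_prefer(int a, int b)
{
        return asym_prio[a] > asym_prio[b];
}

int main(void)
{
        int dst_cpu = 3, group_asym_prefer_cpu = 1;

        printf("dst_cpu can asym-pack: %d\n",
               asym_prefer(dst_cpu, group_asym_prefer_cpu));    /* 1 */
        return 0;
}
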
10265 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
10384 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
10780 llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
10784 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
10852 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
10859 update_group_capacity(env->sd, env->dst_cpu);
11107 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
11316 !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
11329 sched_asym_prefer(i, env->dst_cpu) &&
11423 * the lower priority @env::dst_cpu help it. Do not follow
11427 sched_use_asym_prio(env->sd, env->dst_cpu) &&
11428 (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
11460 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
11463 * available on dst_cpu.
11468 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
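
Line 11468 is the capacity margin behind the "dst_cpu is idle and src_cpu has only 1 CFS task" case: pulling that lone task is only worthwhile when capacity(src) * imbalance_pct < capacity(dst) * 100. A worked check with made-up capacities; the helper name and the 117 margin are assumptions.

#include <stdbool.h>
#include <stdio.h>

/* True when dst has at least the required capacity margin over src. */
static bool dst_much_bigger(unsigned long cap_src, unsigned long cap_dst,
                            unsigned int imbalance_pct)
{
        return cap_src * imbalance_pct < cap_dst * 100;
}

int main(void)
{
        printf("%d\n", dst_much_bigger(430, 1024, 117));        /* little -> big: 1 */
        printf("%d\n", dst_much_bigger(1024, 1024, 117));       /* equal cores:  0 */
        return 0;
}
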
11500 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
11545 return cpu == env->dst_cpu;
11550 return idle_smt == env->dst_cpu;
11553 return group_balance_cpu_not_isolated(sg) == env->dst_cpu;
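
Lines 11500-11553 decide which CPU of the local group actually runs a balance pass: the first idle CPU when one exists, otherwise the group's designated balance CPU, and only when that CPU is env->dst_cpu. The model below works under that simplification; the bitmask layout and helper names are assumptions.

#include <stdbool.h>
#include <stdio.h>

struct swb_env {
        int dst_cpu;                    /* CPU considering doing the balance */
        unsigned long group_cpus;       /* bitmask of CPUs in the local group */
        unsigned long idle_cpus;        /* bitmask of currently idle CPUs */
};

static bool should_we_balance_model(const struct swb_env *env)
{
        unsigned long idle_in_group = env->group_cpus & env->idle_cpus;

        /* An idle CPU exists: the first one gets to balance. */
        if (idle_in_group)
                return env->dst_cpu == __builtin_ctzl(idle_in_group);

        /* Otherwise the group's designated balance CPU does it. */
        return env->dst_cpu == __builtin_ctzl(env->group_cpus);
}

int main(void)
{
        struct swb_env env = { .dst_cpu = 3, .group_cpus = 0xF, .idle_cpus = 0x8 };

        printf("cpu3 balances: %d\n", should_we_balance_model(&env));   /* 1 */
        env.idle_cpus = 0;
        printf("cpu3 balances: %d\n", should_we_balance_model(&env));   /* 0 */
        return 0;
}
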
11572 .dst_cpu = this_cpu,
11659 * us and move them to an alternate dst_cpu in our sched_group
11678 /* Prevent to re-select dst_cpu via env's CPUs */
11679 __cpumask_clear_cpu(env.dst_cpu, env.cpus);
11682 env.dst_cpu = env.new_dst_cpu;
11916 .dst_cpu = target_cpu,
11946 .dst_cpu = target_cpu,
12090 * env->dst_cpu, so we can't know our idle
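
Finally, the entries from 11572 onward show load_balance() seeding env.dst_cpu with this_cpu (or target_cpu for an active balance) and, when every movable task turned out to be pinned, clearing that CPU from env.cpus and retrying with new_dst_cpu (lines 11678-11682). A compact model of that redo loop; attempt_balance() is a placeholder for the real pull logic, not a kernel function.

#include <stdbool.h>
#include <stdio.h>

#define LBF_DST_PINNED 0x1

struct lb_env_model {
        int dst_cpu;
        int new_dst_cpu;
        unsigned int flags;
        unsigned long cpus;             /* bitmask of CPUs still usable as dst */
};

/* Placeholder: pretend nothing can be pulled to CPU 0, only to CPU 2. */
static int attempt_balance(struct lb_env_model *env)
{
        if (env->dst_cpu == 0) {
                env->flags |= LBF_DST_PINNED;
                env->new_dst_cpu = 2;
                return 0;
        }
        return 1;                       /* pulled one task */
}

int main(void)
{
        struct lb_env_model env = { .dst_cpu = 0, .new_dst_cpu = -1, .cpus = 0x7 };
        int pulled;

        for (;;) {
                pulled = attempt_balance(&env);
                if (!(env.flags & LBF_DST_PINNED))
                        break;

                /* Prevent re-selecting this dst_cpu, then retry elsewhere. */
                env.cpus &= ~(1UL << env.dst_cpu);
                env.dst_cpu = env.new_dst_cpu;
                env.flags &= ~LBF_DST_PINNED;

                if (!(env.cpus & (1UL << env.dst_cpu)))
                        break;          /* alternate CPU no longer a candidate */
        }
        printf("pulled=%d on dst_cpu=%d\n", pulled, env.dst_cpu);       /* 1 on CPU 2 */
        return 0;
}
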