Lines matching refs:rd (uses of the root_domain pointer rd in kernel/sched/fair.c)
4224 unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity;
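rd->max_cpu_capacity caches the largest CPU capacity present in the root domain, so a fit check can compare one CPU against the biggest CPU available without rescanning. A minimal sketch of such a check, assuming mainline's fits_capacity()/capacity_orig_of()/task_util_est() helpers; task_fits_max() itself is a hypothetical wrapper, not a mainline function:

/* Hypothetical wrapper; helpers as in mainline kernel/sched/fair.c. */
static inline bool task_fits_max(struct task_struct *p, int cpu)
{
	unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity;
	unsigned long capacity = capacity_orig_of(cpu);

	/* On the biggest CPUs of the rd, everything fits by definition. */
	if (capacity >= max_capacity)
		return true;

	/* Otherwise the task's estimated utilization must fit with margin. */
	return fits_capacity(task_util_est(p), capacity);
}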
5746 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
5747 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
5748 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
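Lines 5746-5748 are the body of update_overutilized_status() in mainline fair.c. The flag is tested with READ_ONCE() before being set, so the hot enqueue/tick path normally costs one read and no store to shared root-domain state, and the tracepoint fires only on the clear-to-set transition. The surrounding helper:

static inline void update_overutilized_status(struct rq *rq)
{
	/* Check first: skip the write (and tracepoint) if already set. */
	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
	}
}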
6916 * The capacity state of CPUs of the current rd can be driven by CPUs
6917 * of another rd if they belong to the same pd. So, account for the
6918 * utilization of these CPUs too by masking pd->span with cpu_online_mask
6919 * instead of the rd span.
6920 *
6921 * If an entire pd is outside of the current rd, it will not appear in
6922 * its pd list and will not be accounted by compute_energy().
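In mainline this comment documents the estimation loop of compute_energy(). A simplified sketch of the loop it describes, with helper names approximately as in mainline (cpu_util_next() and em_cpu_energy() have shifted across kernel versions):

/* Simplified: iterate the whole perf domain, not just rd->span. */
static long compute_energy(struct task_struct *p, int dst_cpu,
			   struct perf_domain *pd)
{
	struct cpumask *pd_mask = perf_domain_span(pd);
	unsigned long max_util = 0, sum_util = 0;
	int cpu;

	/* Online CPUs of another rd that share this pd still drive its
	 * frequency, so they must be counted. */
	for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
		unsigned long util = cpu_util_next(cpu, p, dst_cpu);

		sum_util += util;
		max_util = max(max_util, util);
	}

	return em_cpu_energy(pd->em_pd, max_util, sum_util);
}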
6993 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
7000 pd = rcu_dereference(rd->pd);
7001 if (!pd || READ_ONCE(rd->overutilized)) {
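Lines 6993-7001 are the entry checks of find_energy_efficient_cpu(). The perf-domain list hangs off the root domain and is RCU-managed, so it is dereferenced under rcu_read_lock(); EAS placement is skipped entirely when there are no perf domains or the rd has tipped into the overutilized state. Roughly:

/* Simplified entry of find_energy_efficient_cpu(). */
static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
{
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	struct perf_domain *pd;

	rcu_read_lock();
	pd = rcu_dereference(rd->pd);	/* rd->pd is RCU-protected */

	/* No perf domains (EAS off for this rd), or past the tipping
	 * point: fall back to the regular wakeup path. */
	if (!pd || READ_ONCE(rd->overutilized))
		goto fail;

	/* ... energy-aware candidate selection ... */

fail:
	rcu_read_unlock();
	return -1;
}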
8770 return rq->misfit_task_load && (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || check_cpu_capacity(rq, sd));
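Line 8770 is the condition of check_misfit_status() in mainline: a misfit kick is warranted only if the rq actually carries a misfit task, and either a bigger CPU exists somewhere in the root domain or this CPU's effective capacity is being eaten by other activity. The same condition with the reasoning spelled out:

/* Mainline check_misfit_status(), reflowed with comments. */
static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
{
	/* A task on this rq does not fit here... */
	return rq->misfit_task_load &&
		/* ...and the rd has a higher-capacity CPU to move it to, */
		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
		/* ...or this CPU's usable capacity is badly reduced. */
		 check_cpu_capacity(rq, sd));
}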
9573 struct root_domain *rd = env->dst_rq->rd;
9576 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
9579 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
9580 trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
9582 struct root_domain *rd = env->dst_rq->rd;
9584 WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
9585 trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
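Lines 9573-9585 are the tail of update_sd_lb_stats(). The SG_OVERLOAD/SG_OVERUTILIZED bits collected while walking the groups are published to the root domain only at the top of the sched-domain hierarchy (no parent); at lower levels, only an overutilized transition is propagated upward, and the flag is never cleared from there. The surrounding structure:

if (!env->sd->parent) {
	/* Topmost domain: publish both root-domain indicators. */
	struct root_domain *rd = env->dst_rq->rd;

	WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
	WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
	trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
} else if (sg_status & SG_OVERUTILIZED) {
	/* Lower levels may set overutilized, but never clear it. */
	struct root_domain *rd = env->dst_rq->rd;

	WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
	trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
}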
9789 struct root_domain *rd = env->dst_rq->rd;
9791 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) {
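Lines 9789-9791 are the EAS gate in find_busiest_group(): while energy-aware placement is active (rd->pd exists) and the root domain is not overutilized, periodic load balancing declares itself balanced rather than undo EAS task placement. Roughly:

if (sched_energy_enabled()) {
	struct root_domain *rd = env->dst_rq->rd;

	/* EAS owns placement until the tipping point is crossed. */
	if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
		goto out_balanced;
}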
11291 if (this_rq->avg_idle < sysctl_sched_migration_cost || !READ_ONCE(this_rq->rd->overload)) {
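Line 11291 is the early-exit gate of newidle_balance(): if this rq's average idle duration is shorter than the migration cost, or no rq anywhere in the root domain is overloaded, a full balance pass is not worth starting. The READ_ONCE() pairs with the WRITE_ONCE() publication in update_sd_lb_stats() above. Approximately:

/* Simplified early exit from newidle_balance(). */
if (this_rq->avg_idle < sysctl_sched_migration_cost ||
    !READ_ONCE(this_rq->rd->overload)) {

	/* Too expensive, or nothing to pull: just record when the
	 * next regular balance of this domain is due. */
	rcu_read_lock();
	sd = rcu_dereference_check_sched_domain(this_rq->sd);
	if (sd)
		update_next_balance(sd, &next_balance);
	rcu_read_unlock();

	goto out;
}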
12324 const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
12327 return rd ? rd->span : NULL;
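sched_trace_rd_span() is one of the accessors fair.c exports so that external probes attached to the bare scheduler tracepoints can read root_domain internals without the private struct definition. A sketch of a module hooking sched_overutilized_tp and printing the rd span with it (the register_trace_* stubs are generated by the tracepoint declaration in include/trace/events/sched.h):

#include <linux/module.h>
#include <linux/cpumask.h>
#include <trace/events/sched.h>

/* Probe proto: (void *data, struct root_domain *rd, bool overutilized) */
static void overutilized_probe(void *data, struct root_domain *rd,
			       bool overutilized)
{
	pr_info("rd %*pbl overutilized=%d\n",
		cpumask_pr_args(sched_trace_rd_span(rd)), overutilized);
}

static int __init rd_probe_init(void)
{
	return register_trace_sched_overutilized_tp(overutilized_probe, NULL);
}

static void __exit rd_probe_exit(void)
{
	unregister_trace_sched_overutilized_tp(overutilized_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(rd_probe_init);
module_exit(rd_probe_exit);
MODULE_LICENSE("GPL");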