Lines matching refs:rd (the leading number on each match is the line number in the source file)

339  *    5. schedutil is driving the frequency of all CPUs of the rd;
364 struct root_domain *rd = cpu_rq(cpu)->rd;
375 pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n", cpumask_pr_args(cpu_map));
382 pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n", cpumask_pr_args(cpu_map));
401 if (rd->pd) {
402 pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n", cpumask_pr_args(cpu_map));
425 WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n", cpumask_pr_args(cpu_map));
432 tmp = rd->pd;
433 rcu_assign_pointer(rd->pd, pd);
442 tmp = rd->pd;
443 rcu_assign_pointer(rd->pd, NULL);
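
The matches from line 339 through 443 all fall in build_perf_domains(): after the sanity checks that produce the pr_info/pr_warn/WARN messages above, the new perf-domain list is published into rd->pd and the old one is retired under RCU. A minimal sketch of that publish/retire pattern, not the kernel's exact code: set_rd_pd() is a hypothetical helper name, and destroy_perf_domain_rcu is assumed to be the mainline callback.

static void set_rd_pd(struct root_domain *rd, struct perf_domain *pd)
{
	struct perf_domain *tmp;

	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);		/* publish the new list (or NULL on teardown) */
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);	/* free the old list after a grace period */
}
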
458 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
460 cpupri_cleanup(&rd->cpupri);
461 cpudl_cleanup(&rd->cpudl);
462 free_cpumask_var(rd->dlo_mask);
463 free_cpumask_var(rd->rto_mask);
464 free_cpumask_var(rd->online);
465 free_cpumask_var(rd->span);
466 free_pd(rd->pd);
467 kfree(rd);
470 void rq_attach_root(struct rq *rq, struct root_domain *rd)
477 if (rq->rd) {
478 old_rd = rq->rd;
496 atomic_inc(&rd->refcount);
497 rq->rd = rd;
499 cpumask_set_cpu(rq->cpu, rd->span);
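
rq_attach_root() (lines 470-499) moves a runqueue from its old root domain to a new one, keeping the refcounts and rd->span in sync. A simplified sketch; the rq locking and the set_rq_online()/set_rq_offline() handling of the real function are omitted.

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;

	if (rq->rd) {
		old_rd = rq->rd;
		cpumask_clear_cpu(rq->cpu, old_rd->span);	/* leave the old domain */
	}

	atomic_inc(&rd->refcount);	/* the rq now holds a reference on the new rd */
	rq->rd = rd;
	cpumask_set_cpu(rq->cpu, rd->span);

	if (old_rd && atomic_dec_and_test(&old_rd->refcount))
		call_rcu(&old_rd->rcu, free_rootdomain);	/* dropped the last reference on the old rd */
}
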
511 void sched_get_rd(struct root_domain *rd)
513 atomic_inc(&rd->refcount);
516 void sched_put_rd(struct root_domain *rd)
518 if (!atomic_dec_and_test(&rd->refcount)) {
522 call_rcu(&rd->rcu, free_rootdomain);
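
sched_get_rd()/sched_put_rd() (lines 511-522) are the reference-counting entry points used by the RT/DL push machinery; the last put defers the actual free to the free_rootdomain() RCU callback listed above. Roughly:

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	/* Last reference dropped: free once all RCU readers are done. */
	call_rcu(&rd->rcu, free_rootdomain);
}
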
525 static int init_rootdomain(struct root_domain *rd)
527 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) {
530 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) {
533 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) {
536 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) {
541 rd->rto_cpu = -1;
542 raw_spin_lock_init(&rd->rto_lock);
543 init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
546 init_dl_bw(&rd->dl_bw);
547 if (cpudl_init(&rd->cpudl) != 0) {
551 if (cpupri_init(&rd->cpupri) != 0) {
556 rd->max_cap_orig_cpu = -1;
561 cpudl_cleanup(&rd->cpudl);
563 free_cpumask_var(rd->rto_mask);
565 free_cpumask_var(rd->dlo_mask);
567 free_cpumask_var(rd->online);
569 free_cpumask_var(rd->span);
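
init_rootdomain() (lines 525-569) follows the usual allocate-then-unwind pattern: each allocation failure jumps to a label that releases everything set up so far, in reverse order. A condensed sketch; the RT push IRQ-work setup and this tree's max_cap_orig_cpu initialisation are left out.

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;
	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;

	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}
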
589 struct root_domain *rd;
591 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
592 if (!rd) {
596 if (init_rootdomain(rd) != 0) {
597 kfree(rd);
601 return rd;
711 static void cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
758 rq_attach_root(rq, rd);
769 struct root_domain *rd;
1358 if (!atomic_read(&d->rd->refcount)) {
1359 free_rootdomain(&d->rd->rcu);
1384 d->rd = alloc_rootdomain();
1385 if (!d->rd) {
2198 int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
2206 WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
2211 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) {
2212 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2215 cpu_attach_domain(sd, d.rd, i);
2225 rq->rd->max_cpu_capacity);
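
Lines 2198-2225 come from the attach loop at the end of build_sched_domains(): as each CPU is attached to the new root domain, the rd records which CPU has the largest original capacity. A hedged sketch of that bookkeeping, pulled out into a hypothetical helper; track_rd_capacity() is not a real kernel function, and max_cap_orig_cpu is a field of this (Android-derived) tree, mainline only tracks max_cpu_capacity.

static void track_rd_capacity(struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	int max_cpu = READ_ONCE(rd->max_cap_orig_cpu);

	/* Remember the CPU with the largest original capacity in this rd. */
	if (max_cpu < 0 ||
	    rq->cpu_capacity_orig > cpu_rq(max_cpu)->cpu_capacity_orig)
		WRITE_ONCE(rd->max_cap_orig_cpu, cpu);

	/* And the capacity value itself, used by the printout at line 2225. */
	if (rq->cpu_capacity_orig > READ_ONCE(rd->max_cpu_capacity))
		WRITE_ONCE(rd->max_cpu_capacity, rq->cpu_capacity_orig);
}
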
2402 struct root_domain *rd;
2410 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2411 dl_clear_root_domain(rd);
2443 if (cpumask_equal(doms_new[i], doms_cur[j]) && cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
2448 /* No match - add perf. domains for a new rd */
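
The final group (lines 2402-2448) is from partition_sched_domains_locked(): existing root domains first have their deadline bandwidth cleared via dl_clear_root_domain(), and perf domains are rebuilt only for new spans that do not match an existing rd that already has rd->pd set. A rough sketch of that matching loop; it is a fragment, with i, j, n, ndoms_new, doms_new and doms_cur as in the real function, and the label name is illustrative.

for (i = 0; i < ndoms_new; i++) {
	for (j = 0; j < n; j++) {
		if (cpumask_equal(doms_new[i], doms_cur[j]) &&
		    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
			goto match;	/* existing rd already has perf domains */
	}
	/* No match - add perf. domains for a new rd */
	build_perf_domains(doms_new[i]);
match:
	;
}
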