Lines Matching refs:sd

42 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, struct cpumask *groupmask)
44 struct sched_group *group = sd->groups;
45 unsigned long flags = sd->flags;
51 printk(KERN_CONT "span=%*pbl level=%s\n", cpumask_pr_args(sched_domain_span(sd)), sd->name);
53 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
65 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && !(sd->child->flags & flag)) {
69 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && !(sd->parent->flags & flag)) {
88 if (!(sd->flags & SD_OVERLAP) && cpumask_intersects(groupmask, sched_group_span(group))) {
98 if ((sd->flags & SD_OVERLAP) && !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
106 if (group == sd->groups && sd->child && !cpumask_equal(sched_domain_span(sd->child), sched_group_span(group))) {
114 if (group != sd->groups) {
117 } while (group != sd->groups);
120 if (!cpumask_equal(sched_domain_span(sd), groupmask)) {
124 if (sd->parent && !cpumask_subset(groupmask, sched_domain_span(sd->parent))) {
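
The matches above are the per-level sanity check in sched_domain_debug_one(): walk the circular sd->groups list, verify that the groups of a non-overlapping domain are pairwise disjoint, and verify that their union covers the domain span. A minimal user-space sketch of that walk follows; toy_domain, toy_group and debug_one are invented stand-ins for the kernel types, and a plain unsigned long replaces struct cpumask.

#include <stdio.h>

/* Toy stand-ins for struct sched_group / struct sched_domain. */
struct toy_group {
    unsigned long span;        /* bitmask of CPUs in this group */
    struct toy_group *next;    /* groups form a circular list   */
};

struct toy_domain {
    unsigned long span;        /* bitmask of CPUs in this domain */
    struct toy_group *groups;  /* first group in the ring        */
};

/* One pass over the group ring: check disjointness and full coverage. */
static int debug_one(const struct toy_domain *sd)
{
    unsigned long covered = 0;
    const struct toy_group *g = sd->groups;

    do {
        if (covered & g->span) {
            printf("ERROR: repeated CPUs in groups\n");
            return -1;
        }
        covered |= g->span;
        g = g->next;
    } while (g != sd->groups);

    if (covered != sd->span) {
        printf("ERROR: groups do not cover the domain span\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    struct toy_group g1 = { 0x3, 0 }, g0 = { 0xc, &g1 };
    struct toy_domain sd = { 0xf, &g0 };

    g1.next = &g0;             /* close the ring */
    printf("debug_one: %d\n", debug_one(&sd));
    return 0;
}
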
130 static void sched_domain_debug(struct sched_domain *sd, int cpu)
138 if (!sd) {
146 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) {
150 sd = sd->parent;
151 if (!sd) {
159 #define sched_domain_debug(sd, cpu) \
175 static int sd_degenerate(struct sched_domain *sd)
177 if (cpumask_weight(sched_domain_span(sd)) == 1) {
182 if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) && (sd->groups != sd->groups->next)) {
187 if (sd->flags & (SD_WAKE_AFFINE)) {
194 static int sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
196 unsigned long cflags = sd->flags, pflags = parent->flags;
202 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) {
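
sd_degenerate() and sd_parent_degenerate(), matched above, encode the pruning rules: a domain is redundant when its span holds a single CPU, or when it has only one group and none of the flags that need multiple groups or wake-affine placement. A hedged sketch of the same decision on toy types (toy_domain, toy_degenerate and the TOY_FLAG_* values are illustrative, not the kernel's SD_* constants):

#include <stdio.h>

#define TOY_FLAG_NEEDS_GROUPS 0x1   /* stands in for SD_DEGENERATE_GROUPS_MASK */
#define TOY_FLAG_WAKE_AFFINE  0x2   /* stands in for SD_WAKE_AFFINE            */

struct toy_group { struct toy_group *next; };

struct toy_domain {
    unsigned long span;         /* bitmask of CPUs         */
    unsigned long flags;
    struct toy_group *groups;   /* circular list of groups */
};

/* A domain that cannot influence balancing decisions is degenerate. */
static int toy_degenerate(const struct toy_domain *sd)
{
    if (__builtin_popcountl(sd->span) == 1)
        return 1;                       /* single CPU: nothing to balance  */
    if ((sd->flags & TOY_FLAG_NEEDS_GROUPS) &&
        sd->groups != sd->groups->next)
        return 0;                       /* at least two groups: keep it    */
    if (sd->flags & TOY_FLAG_WAKE_AFFINE)
        return 0;                       /* still useful for wake placement */
    return 1;
}

int main(void)
{
    struct toy_group gb = { 0 }, ga = { &gb };
    struct toy_group single = { &single };
    struct toy_domain one_cpu  = { 0x1, TOY_FLAG_NEEDS_GROUPS, &single };
    struct toy_domain two_grps = { 0x3, TOY_FLAG_NEEDS_GROUPS, &ga };

    gb.next = &ga;
    printf("%d %d\n", toy_degenerate(&one_cpu), toy_degenerate(&two_grps));
    return 0;
}
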
627 static void destroy_sched_domain(struct sched_domain *sd)
634 free_sched_groups(sd->groups, 1);
636 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) {
637 kfree(sd->shared);
639 kfree(sd);
644 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
646 while (sd) {
647 struct sched_domain *parent = sd->parent;
648 destroy_sched_domain(sd);
649 sd = parent;
653 static void destroy_sched_domains(struct sched_domain *sd)
655 if (sd) {
656 call_rcu(&sd->rcu, destroy_sched_domains_rcu);
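
The destroy_* matches show the teardown order: destroy_sched_domains() defers the work through call_rcu(), and the callback then walks up the sd->parent chain, freeing each level (group ring first, then the shared state once its refcount drops, then the domain itself). A simplified user-space walk without the RCU deferral; toy_domain, toy_shared, destroy_one and destroy_chain are invented names:

#include <stdio.h>
#include <stdlib.h>

struct toy_shared { int ref; };          /* stands in for sd->shared and its refcount */

struct toy_domain {
    struct toy_domain *parent;
    struct toy_shared *shared;
};

static void destroy_one(struct toy_domain *sd)
{
    /* The kernel also frees the group ring here (free_sched_groups()). */
    if (sd->shared && --sd->shared->ref == 0)
        free(sd->shared);
    free(sd);
}

/* In the kernel this loop runs as the RCU callback destroy_sched_domains_rcu(). */
static void destroy_chain(struct toy_domain *sd)
{
    while (sd) {
        struct toy_domain *parent = sd->parent;   /* read before freeing */

        destroy_one(sd);
        sd = parent;
    }
}

int main(void)
{
    struct toy_shared *shared = calloc(1, sizeof(*shared));
    struct toy_domain *child  = calloc(1, sizeof(*child));
    struct toy_domain *parent = calloc(1, sizeof(*parent));

    shared->ref = 1;
    child->parent = parent;
    child->shared = shared;

    destroy_chain(child);
    printf("chain destroyed\n");
    return 0;
}
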
681 struct sched_domain *sd;
685 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
686 if (sd) {
687 id = cpumask_first(sched_domain_span(sd));
688 size = cpumask_weight(sched_domain_span(sd));
689 sds = sd->shared;
692 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
697 sd = lowest_flag_domain(cpu, SD_NUMA);
698 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
700 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
701 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
703 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
704 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
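
update_top_cache_domain() caches per-CPU pointers to a few frequently consulted levels: the highest domain still sharing the last-level cache (SD_SHARE_PKG_RESOURCES), the lowest NUMA domain, and the asymmetric-packing/capacity levels, found with highest_flag_domain()/lowest_flag_domain(). A minimal sketch of those two walks over a toy parent chain; the flag bits and the toy_domain type are illustrative:

#include <stdio.h>

struct toy_domain {
    const char *name;
    unsigned long flags;
    struct toy_domain *parent;
};

/* Topmost domain (closest to the root) that still has @flag set.
 * Stops at the first miss, relying on "shared with child" flag semantics. */
static struct toy_domain *highest_flag_domain(struct toy_domain *sd, unsigned long flag)
{
    struct toy_domain *hsd = NULL;

    for (; sd; sd = sd->parent) {
        if (!(sd->flags & flag))
            break;
        hsd = sd;
    }
    return hsd;
}

/* Lowest domain (closest to the CPU) that has @flag set. */
static struct toy_domain *lowest_flag_domain(struct toy_domain *sd, unsigned long flag)
{
    for (; sd; sd = sd->parent)
        if (sd->flags & flag)
            break;
    return sd;
}

int main(void)
{
    struct toy_domain numa = { "NUMA", 0x2, NULL  };
    struct toy_domain pkg  = { "PKG",  0x1, &numa };
    struct toy_domain smt  = { "SMT",  0x1, &pkg  };

    printf("llc:  %s\n", highest_flag_domain(&smt, 0x1)->name);
    printf("numa: %s\n", lowest_flag_domain(&smt, 0x2)->name);
    return 0;
}
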
708 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
711 static void cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
718 for (tmp = sd; tmp;) {
743 if (sd && sd_degenerate(sd)) {
744 tmp = sd;
745 sd = sd->parent;
747 if (sd) {
748 sd->child = NULL;
752 for (tmp = sd; tmp; tmp = tmp->parent) {
756 sched_domain_debug(sd, cpu);
759 tmp = rq->sd;
760 rcu_assign_pointer(rq->sd, sd);
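
cpu_attach_domain() walks the freshly built chain, unlinks levels that sd_degenerate()/sd_parent_degenerate() report as redundant, and only then publishes the base domain into rq->sd with rcu_assign_pointer(). A hedged sketch of just the unlinking step; toy_domain and prune are invented, and degeneracy is reduced to "single-CPU span" for brevity:

#include <stdio.h>

struct toy_domain {
    const char *name;
    unsigned long span;          /* bitmask of CPUs */
    struct toy_domain *parent;
    struct toy_domain *child;
};

static int toy_degenerate(const struct toy_domain *sd)
{
    return __builtin_popcountl(sd->span) == 1;   /* simplified rule */
}

/* Drop degenerate levels, relinking child <-> parent around them. */
static struct toy_domain *prune(struct toy_domain *sd)
{
    struct toy_domain *tmp;

    /* Unlink degenerate parents; stay put after a removal so the
     * new parent gets re-checked, as cpu_attach_domain() does. */
    for (tmp = sd; tmp; ) {
        struct toy_domain *parent = tmp->parent;

        if (!parent)
            break;
        if (toy_degenerate(parent)) {
            tmp->parent = parent->parent;
            if (parent->parent)
                parent->parent->child = tmp;
        } else {
            tmp = tmp->parent;
        }
    }

    /* The base level itself may be degenerate too. */
    if (sd && toy_degenerate(sd)) {
        sd = sd->parent;
        if (sd)
            sd->child = NULL;
    }
    return sd;
}

int main(void)
{
    struct toy_domain pkg = { "PKG", 0xf, NULL, NULL };
    struct toy_domain smt = { "SMT", 0x1, &pkg, NULL };   /* no SMT siblings */

    pkg.child = &smt;
    printf("base after pruning: %s\n", prune(&smt)->name);
    return 0;
}
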
768 struct sched_domain *__percpu *sd;
897 static void build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
900 struct sd_data *sdd = sd->private;
908 sibling = *per_cpu_ptr(sdd->sd, i);
935 static struct sched_group *build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
946 if (sd->child) {
947 cpumask_copy(sg_span, sched_domain_span(sd->child));
949 cpumask_copy(sg_span, sched_domain_span(sd));
956 static void init_overlap_sched_group(struct sched_domain *sd, struct sched_group *sg)
959 struct sd_data *sdd = sd->private;
963 build_balance_mask(sd, sg, mask);
984 static struct sched_domain *find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
988 * of sd
990 while (sibling->child && !cpumask_subset(sched_domain_span(sibling->child), sched_domain_span(sd))) {
1006 static int build_overlap_sched_groups(struct sched_domain *sd, int cpu)
1009 const struct cpumask *span = sched_domain_span(sd);
1011 struct sd_data *sdd = sd->private;
1025 sibling = *per_cpu_ptr(sdd->sd, i);
1032 * iteration early and our sibling sd spans will be empty.
1072 sibling = find_descended_sibling(sd, sibling);
1094 sd->groups = first;
1177 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1178 struct sched_domain *child = sd->child;
1221 static int build_sched_groups(struct sched_domain *sd, int cpu)
1224 struct sd_data *sdd = sd->private;
1225 const struct cpumask *span = sched_domain_span(sd);
1255 sd->groups = first;
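
build_overlap_sched_groups() and build_sched_groups() both turn a domain's span into the ring hung off sd->groups; in the non-overlapping case each group is simply the span of one child domain, and CPUs already covered are skipped. A toy version of that construction (toy_domain, toy_group and build_groups are invented; a per-CPU child_span array stands in for the sdd->sd lookups):

#include <stdio.h>
#include <stdlib.h>

struct toy_group {
    unsigned long span;
    struct toy_group *next;
};

struct toy_domain {
    unsigned long span;           /* CPUs in this domain               */
    unsigned long child_span[8];  /* per CPU: span of that CPU's child */
    struct toy_group *groups;
};

/* Build the non-overlapping group ring: one group per child domain. */
static void build_groups(struct toy_domain *sd)
{
    struct toy_group *first = NULL, *last = NULL;
    unsigned long covered = 0;

    for (int cpu = 0; cpu < 8; cpu++) {
        struct toy_group *sg;

        if (!(sd->span & (1UL << cpu)) || (covered & (1UL << cpu)))
            continue;                       /* not ours, or already grouped */

        sg = calloc(1, sizeof(*sg));
        sg->span = sd->child_span[cpu];
        covered |= sg->span;

        if (!first)
            first = sg;
        else
            last->next = sg;
        last = sg;
    }
    if (!last)
        return;
    last->next = first;                     /* close the ring */
    sd->groups = first;
}

int main(void)
{
    struct toy_domain sd = { 0xf, { 0x3, 0x3, 0xc, 0xc }, NULL };
    struct toy_group *sg;

    build_groups(&sd);
    sg = sd.groups;
    do {
        printf("group span=0x%lx\n", sg->span);
        sg = sg->next;
    } while (sg != sd.groups);
    return 0;
}
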
1270 void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1272 struct sched_group *sg = sd->groups;
1289 if (!(sd->flags & SD_ASYM_PACKING)) {
1305 } while (sg != sd->groups);
1311 update_group_capacity(sd, cpu);
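
init_sched_groups_capacity(), matched above, makes one pass over the group ring to fill in each group's weight (and, for SD_ASYM_PACKING, its preferred CPU) before handing the local group to update_group_capacity(). A minimal sketch of that ring pass on the same toy types, with the asym-packing part omitted:

#include <stdio.h>

struct toy_group {
    unsigned long span;         /* bitmask of CPUs in the group */
    int group_weight;
    struct toy_group *next;     /* circular list                */
};

struct toy_domain { struct toy_group *groups; };

/* One pass over the ring: derive per-group weights from the spans. */
static void init_groups_capacity(struct toy_domain *sd)
{
    struct toy_group *sg = sd->groups;

    do {
        sg->group_weight = __builtin_popcountl(sg->span);
        sg = sg->next;
    } while (sg != sd->groups);
    /* The kernel then calls update_group_capacity(sd, cpu) for the local group. */
}

int main(void)
{
    struct toy_group g1 = { 0xc, 0, NULL }, g0 = { 0x3, 0, &g1 };
    struct toy_domain sd = { &g0 };

    g1.next = &g0;              /* close the ring */
    init_groups_capacity(&sd);
    printf("weights: %d %d\n", g0.group_weight, g1.group_weight);
    return 0;
}
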
1332 static void set_domain_attribute(struct sched_domain *sd, struct sched_domain_attr *attr)
1345 if (sd->level > request) {
1347 sd->flags &= ~(SD_BALANCE_WAKE | SD_BALANCE_NEWIDLE);
1363 free_percpu(d->sd);
1380 d->sd = alloc_percpu(struct sched_domain *);
1381 if (!d->sd) {
1397 static void claim_allocations(int cpu, struct sched_domain *sd)
1399 struct sd_data *sdd = sd->private;
1401 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1402 *per_cpu_ptr(sdd->sd, cpu) = NULL;
1451 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1473 *sd = (struct sched_domain) {
1495 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1496 sd_id = cpumask_first(sched_domain_span(sd));
1503 if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) {
1504 sd->child->flags &= ~SD_PREFER_SIBLING;
1507 if (sd->flags & SD_SHARE_CPUCAPACITY) {
1508 sd->imbalance_pct = IMBALANCE_SD_SHARE_CPUCAPACITY;
1509 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1510 sd->imbalance_pct = IMBALANCE_SD_SHARE_PKG;
1511 sd->cache_nice_tries = 1;
1514 } else if (sd->flags & SD_NUMA) {
1515 sd->cache_nice_tries = 2;
1517 sd->flags &= ~SD_PREFER_SIBLING;
1518 sd->flags |= SD_SERIALIZE;
1520 sd->flags &= ~(SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE);
1525 sd->cache_nice_tries = 1;
1532 if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1533 sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1534 atomic_inc(&sd->shared->ref);
1535 atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1538 sd->private = sdd;
1540 return sd;
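
The sd_init() matches show how per-level tuning falls out of the topology flags, with the branches ordered from the most tightly coupled level outwards: shared CPU capacity (SMT) first, then cache sharing, then NUMA. A hedged sketch of that selection; the TOY_* flags, the tune() helper and the numeric values are illustrative, not the kernel's SD_* constants or exact tunables:

#include <stdio.h>

#define TOY_SHARE_CPUCAPACITY 0x1   /* SMT siblings            */
#define TOY_SHARE_PKG         0x2   /* same last-level cache   */
#define TOY_NUMA              0x4   /* crosses a NUMA boundary */

struct toy_tuning {
    int imbalance_pct;       /* how much imbalance is tolerated   */
    int cache_nice_tries;    /* how sticky cache-hot tasks are    */
    int serialize;           /* serialize balancing at this level */
};

/* Order matters: the most tightly coupled matching flag wins. */
static struct toy_tuning tune(unsigned long flags)
{
    struct toy_tuning t = { 125, 0, 0 };     /* illustrative defaults */

    if (flags & TOY_SHARE_CPUCAPACITY) {
        t.imbalance_pct = 110;               /* siblings: balance eagerly */
    } else if (flags & TOY_SHARE_PKG) {
        t.imbalance_pct = 117;
        t.cache_nice_tries = 1;              /* mildly cache-affine       */
    } else if (flags & TOY_NUMA) {
        t.cache_nice_tries = 2;              /* strongly prefer the node  */
        t.serialize = 1;                     /* mirrors SD_SERIALIZE      */
    } else {
        t.cache_nice_tries = 1;
    }
    return t;
}

int main(void)
{
    struct toy_tuning numa = tune(TOY_NUMA);

    printf("NUMA: pct=%d tries=%d serialize=%d\n",
           numa.imbalance_pct, numa.cache_nice_tries, numa.serialize);
    return 0;
}
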
1878 sdd->sd = alloc_percpu(struct sched_domain *);
1879 if (!sdd->sd) {
1900 struct sched_domain *sd;
1905 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), GFP_KERNEL, cpu_to_node(j));
1906 if (!sd) {
1910 *per_cpu_ptr(sdd->sd, j) = sd;
1953 struct sched_domain *sd;
1955 if (sdd->sd) {
1956 sd = *per_cpu_ptr(sdd->sd, j);
1957 if (sd && (sd->flags & SD_OVERLAP)) {
1958 free_sched_groups(sd->groups, 0);
1960 kfree(*per_cpu_ptr(sdd->sd, j));
1973 free_percpu(sdd->sd);
1974 sdd->sd = NULL;
1988 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
1991 sd->level = child->level + 1;
1992 sched_domain_level_max = max(sched_domain_level_max, sd->level);
1993 child->parent = sd;
1995 if (!cpumask_subset(sched_domain_span(child), sched_domain_span(sd))) {
1998 pr_err(" the %s domain not a subset of the %s domain\n", child->name, sd->name);
2000 /* Fixup, ensure @sd has at least @child CPUs. */
2001 cpumask_or(sched_domain_span(sd), sched_domain_span(sd), sched_domain_span(child));
2004 set_domain_attribute(sd, attr);
2006 return sd;
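
build_sched_domain() stacks one new level on top of its child: it calls sd_init(), records the level number, links child->parent, and checks that the child's span is contained in the new span, widening the parent with cpumask_or() if a broken topology table says otherwise. A sketch of the linking and span fix-up with bitmask spans (toy_domain and link_level are invented names):

#include <stdio.h>

struct toy_domain {
    const char *name;
    unsigned long span;          /* bitmask of CPUs */
    int level;
    struct toy_domain *parent;
    struct toy_domain *child;
};

/* Stack @sd on top of @child, keeping the span hierarchy consistent. */
static void link_level(struct toy_domain *sd, struct toy_domain *child)
{
    if (!child)
        return;

    sd->level = child->level + 1;
    sd->child = child;
    child->parent = sd;

    if ((child->span & sd->span) != child->span) {
        printf("ERROR: %s is not a subset of %s, widening\n",
               child->name, sd->name);
        sd->span |= child->span;     /* mirrors the cpumask_or() fix-up */
    }
}

int main(void)
{
    struct toy_domain smt = { "SMT", 0x3, 0, NULL, NULL };
    struct toy_domain pkg = { "PKG", 0x1, 0, NULL, NULL };  /* broken: too narrow */

    link_level(&pkg, &smt);
    printf("PKG span after fix-up: 0x%lx\n", pkg.span);
    return 0;
}
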
2115 struct sched_domain *sd;
2139 sd = NULL;
2150 sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
2153 *per_cpu_ptr(d.sd, i) = sd;
2156 sd->flags |= SD_OVERLAP;
2158 if (cpumask_equal(cpu_map, sched_domain_span(sd))) {
2167 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2168 sd->span_weight = cpumask_weight(sched_domain_span(sd));
2169 if (sd->flags & SD_OVERLAP) {
2170 if (build_overlap_sched_groups(sd, i)) {
2174 if (build_sched_groups(sd, i)) {
2187 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2188 claim_allocations(i, sd);
2189 init_sched_groups_capacity(i, sd);
2202 sd = *per_cpu_ptr(d.sd, i);
2215 cpu_attach_domain(sd, d.rd, i);
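
The final block of matches is the top-level build_sched_domains() flow: for each CPU in the map, walk the topology levels bottom-up through build_sched_domain(), then walk the resulting chains again to build groups (overlapping or not), initialize capacities, and finally attach the pruned base domain to the runqueue. A compressed, runnable outline of that control flow; the toy_level/toy_domain types, the three hard-coded levels and the static pool are all invented for illustration:

#include <stdio.h>

#define NR_TOY_CPUS 4

struct toy_domain {
    const char *name;
    unsigned long span;
    struct toy_domain *parent;
};

/* One "topology level": a name plus the span it gives each CPU. */
struct toy_level {
    const char *name;
    unsigned long (*mask)(int cpu);
};

static unsigned long smt_mask(int cpu) { return 1UL << cpu; }          /* no SMT       */
static unsigned long pkg_mask(int cpu) { return cpu < 2 ? 0x3 : 0xc; } /* two packages */
static unsigned long sys_mask(int cpu) { (void)cpu; return 0xf; }      /* whole system */

static const struct toy_level levels[] = {
    { "SMT", smt_mask }, { "PKG", pkg_mask }, { "SYS", sys_mask },
};

static struct toy_domain pool[NR_TOY_CPUS][3];  /* static storage instead of kzalloc_node() */
static struct toy_domain *base[NR_TOY_CPUS];    /* what the kernel publishes into rq->sd    */

int main(void)
{
    for (int cpu = 0; cpu < NR_TOY_CPUS; cpu++) {
        struct toy_domain *child = NULL;

        /* Bottom-up: each level becomes the parent of the previous one. */
        for (int l = 0; l < 3; l++) {
            struct toy_domain *sd = &pool[cpu][l];

            sd->name = levels[l].name;
            sd->span = levels[l].mask(cpu);
            if (child)
                child->parent = sd;
            child = sd;
        }
        /* The kernel would now build groups, set capacities, prune
         * degenerate levels and rcu_assign_pointer() the base domain. */
        base[cpu] = &pool[cpu][0];
    }

    for (struct toy_domain *sd = base[3]; sd; sd = sd->parent)
        printf("cpu3: %s span=0x%lx\n", sd->name, sd->span);
    return 0;
}
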