Lines matching references to sg:
604 static void free_sched_groups(struct sched_group *sg, int free_sgc)
608 if (!sg) {
612 first = sg;
614 tmp = sg->next;
616 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) {
617 kfree(sg->sgc);
620 if (atomic_dec_and_test(&sg->ref)) {
621 kfree(sg);
623 sg = tmp;
624 } while (sg != first);
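
The matches above (604-624) come from the teardown path for scheduler groups; in mainline these functions live in kernel/sched/topology.c, though the line numbers here point at a different (likely vendor) tree. A minimal sketch of how those lines fit together, assuming mainline-like code, so the reference counting is visible in context:

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
        struct sched_group *tmp, *first;

        if (!sg)
                return;

        first = sg;
        do {
                tmp = sg->next;

                /* Shared capacity object goes away with its last user. */
                if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
                        kfree(sg->sgc);

                /* The group itself is ref-counted too (overlap domains share it). */
                if (atomic_dec_and_test(&sg->ref))
                        kfree(sg);

                sg = tmp;
        } while (sg != first);        /* walk the circular ->next ring exactly once */
}
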
788 int group_balance_cpu(struct sched_group *sg)
790 return cpumask_first(group_balance_mask(sg));
897 static void build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
899 const struct cpumask *sg_span = sched_group_span(sg);
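
Lines 788-790 and 897-899 are the balance-mask helpers: group_balance_cpu() picks the first CPU of a group's balance mask, and build_balance_mask() fills that mask. A hedged sketch of the mainline logic (comments shortened), which keeps only those CPUs of the group span whose own child domain spans exactly this group:

int group_balance_cpu(struct sched_group *sg)
{
        return cpumask_first(group_balance_mask(sg));
}

static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
        const struct cpumask *sg_span = sched_group_span(sg);
        struct sd_data *sdd = sd->private;
        struct sched_domain *sibling;
        int i;

        cpumask_clear(mask);

        for_each_cpu(i, sg_span) {
                sibling = *per_cpu_ptr(sdd->sd, i);

                /* Unused siblings can occur in asymmetric setups. */
                if (!sibling->child)
                        continue;

                /* Only CPUs whose child domain spans exactly this group count. */
                if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
                        continue;

                cpumask_set_cpu(i, mask);
        }

        /* The balance mask must never end up empty. */
        WARN_ON_ONCE(cpumask_empty(mask));
}
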
937 struct sched_group *sg;
940 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL, cpu_to_node(cpu));
941 if (!sg) {
945 sg_span = sched_group_span(sg);
952 atomic_inc(&sg->ref);
953 return sg;
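
Lines 937-953 allocate a group for an overlapping (NUMA) domain from its child's span. A sketch under the same mainline assumption; the group and its cpumask are a single allocation, and the initial reference is taken here:

static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
        struct sched_group *sg;
        struct cpumask *sg_span;

        /* One allocation carries both the group and its trailing cpumask. */
        sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                          GFP_KERNEL, cpu_to_node(cpu));
        if (!sg)
                return NULL;

        sg_span = sched_group_span(sg);
        if (sd->child)
                cpumask_copy(sg_span, sched_domain_span(sd->child));
        else
                cpumask_copy(sg_span, sched_domain_span(sd));

        atomic_inc(&sg->ref);        /* initial reference, dropped in free_sched_groups() */
        return sg;
}
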
956 static void init_overlap_sched_group(struct sched_domain *sd, struct sched_group *sg)
963 build_balance_mask(sd, sg, mask);
964 cpu = cpumask_first_and(sched_group_span(sg), mask);
966 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
967 if (atomic_inc_return(&sg->sgc->ref) == 1) {
968 cpumask_copy(group_balance_mask(sg), mask);
970 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
978 sg_span = sched_group_span(sg);
979 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
980 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
981 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
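
Lines 956-981 attach a shared sched_group_capacity to an overlap group: the balance mask is computed first, the sgc of the first CPU in that mask is adopted, and the first user of that sgc installs the balance mask while later users only verify it. A sketch assuming mainline code (sched_domains_tmpmask2 is the mainline scratch mask; this tree may differ):

static void init_overlap_sched_group(struct sched_domain *sd,
                                     struct sched_group *sg)
{
        struct cpumask *mask = sched_domains_tmpmask2;
        struct sd_data *sdd = sd->private;
        struct cpumask *sg_span;
        int cpu;

        build_balance_mask(sd, sg, mask);
        cpu = cpumask_first_and(sched_group_span(sg), mask);

        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
        if (atomic_inc_return(&sg->sgc->ref) == 1)
                cpumask_copy(group_balance_mask(sg), mask);
        else
                WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

        /* Seed capacity so a missed update never divides by zero. */
        sg_span = sched_group_span(sg);
        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
        sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}
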
1008 struct sched_group *first = NULL, *last = NULL, *sg;
1075 sg = build_group_from_child_sched_domain(sibling, cpu);
1076 if (!sg) {
1080 sg_span = sched_group_span(sg);
1083 init_overlap_sched_group(sibling, sg);
1086 first = sg;
1089 last->next = sg;
1091 last = sg;
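
Lines 1008-1091 are from the overlap-groups builder, which walks the domain span starting at the build CPU and chains one group per uncovered sibling into a circular list. A condensed sketch (mainline-style; the asymmetric-topology handling between lines 1008 and 1075 is shortened to one test):

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
        struct sched_group *first = NULL, *last = NULL, *sg;
        const struct cpumask *span = sched_domain_span(sd);
        struct cpumask *covered = sched_domains_tmpmask;
        struct sd_data *sdd = sd->private;
        struct sched_domain *sibling;
        int i;

        cpumask_clear(covered);

        for_each_cpu_wrap(i, span, cpu) {
                struct cpumask *sg_span;

                if (cpumask_test_cpu(i, covered))
                        continue;

                sibling = *per_cpu_ptr(sdd->sd, i);

                /* Skip siblings that do not even contain their own CPU (asymmetric setups). */
                if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
                        continue;

                sg = build_group_from_child_sched_domain(sibling, cpu);
                if (!sg)
                        goto fail;

                sg_span = sched_group_span(sg);
                cpumask_or(covered, covered, sg_span);

                init_overlap_sched_group(sibling, sg);

                if (!first)
                        first = sg;
                if (last)
                        last->next = sg;
                last = sg;
                last->next = first;        /* keep the ring closed at every step */
        }
        sd->groups = first;

        return 0;

fail:
        free_sched_groups(first, 0);

        return -ENOMEM;
}
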
1179 struct sched_group *sg;
1186 sg = *per_cpu_ptr(sdd->sg, cpu);
1187 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1190 already_visited = atomic_inc_return(&sg->ref) > 1;
1191 /* sgc visits should follow a similar trend as sg */
1192 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1196 return sg;
1200 cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1201 cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
1203 cpumask_set_cpu(cpu, sched_group_span(sg));
1204 cpumask_set_cpu(cpu, group_balance_mask(sg));
1207 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1208 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1209 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1211 return sg;
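
Lines 1179-1211 are from get_group(), which hands out the pre-allocated per-CPU group for a non-overlapping domain and initializes it exactly once; the paired ref bumps are what claim_allocations() (line 1408 below) later checks. A sketch, again assuming mainline-like code:

static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
        struct sched_domain *child = sd->child;
        struct sched_group *sg;
        bool already_visited;

        if (child)
                cpu = cpumask_first(sched_domain_span(child));

        sg = *per_cpu_ptr(sdd->sg, cpu);
        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

        /* Increase refcounts for claim_allocations(): */
        already_visited = atomic_inc_return(&sg->ref) > 1;
        /* sgc visits should follow a similar trend as sg */
        WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));

        /* An already-visited group is already initialized. */
        if (already_visited)
                return sg;

        if (child) {
                cpumask_copy(sched_group_span(sg), sched_domain_span(child));
                cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
        } else {
                cpumask_set_cpu(cpu, sched_group_span(sg));
                cpumask_set_cpu(cpu, group_balance_mask(sg));
        }

        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
        sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

        return sg;
}
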
1236 struct sched_group *sg;
1242 sg = get_group(i, sdd);
1244 cpumask_or(covered, covered, sched_group_span(sg));
1247 first = sg;
1250 last->next = sg;
1252 last = sg;
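
Lines 1236-1252 chain those per-CPU groups into the circular list of a regular (non-overlapping) domain. A condensed sketch under the same assumption:

static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
        struct sched_group *first = NULL, *last = NULL;
        struct sd_data *sdd = sd->private;
        const struct cpumask *span = sched_domain_span(sd);
        struct cpumask *covered = sched_domains_tmpmask;
        int i;

        cpumask_clear(covered);

        for_each_cpu_wrap(i, span, cpu) {
                struct sched_group *sg;

                if (cpumask_test_cpu(i, covered))
                        continue;

                sg = get_group(i, sdd);

                cpumask_or(covered, covered, sched_group_span(sg));

                if (!first)
                        first = sg;
                if (last)
                        last->next = sg;
                last = sg;
        }
        last->next = first;        /* close the ring after the walk */
        sd->groups = first;

        return 0;
}
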
1272 struct sched_group *sg = sd->groups;
1277 WARN_ON(!sg);
1283 cpumask_andnot(&avail_mask, sched_group_span(sg), cpu_isolated_mask);
1284 sg->group_weight = cpumask_weight(&avail_mask);
1286 sg->group_weight = cpumask_weight(sched_group_span(sg));
1293 for_each_cpu(cpu, sched_group_span(sg))
1301 sg->asym_prefer_cpu = max_cpu;
1304 sg = sg->next;
1305 } while (sg != sd->groups);
1307 if (cpu != group_balance_cpu(sg)) {
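
Lines 1272-1307 compute each group's weight and asym-packing preference, then update capacity only from the group's balance CPU. Lines 1283-1286 show two weight computations: one that discounts isolated CPUs (cpu_isolated_mask is not a mainline symbol) and the plain mainline one; the guard selecting between them is not visible in the matches. A rough sketch keeping the isolation-aware variant, with the mainline line noted in a comment:

static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
        struct sched_group *sg = sd->groups;
        struct cpumask avail_mask;

        WARN_ON(!sg);

        do {
                int cpu, max_cpu = -1;        /* shadows the parameter inside the loop */

                cpumask_andnot(&avail_mask, sched_group_span(sg), cpu_isolated_mask);
                sg->group_weight = cpumask_weight(&avail_mask);
                /* mainline: sg->group_weight = cpumask_weight(sched_group_span(sg)); */

                if (!(sd->flags & SD_ASYM_PACKING))
                        goto next;

                for_each_cpu(cpu, sched_group_span(sg)) {
                        if (max_cpu < 0)
                                max_cpu = cpu;
                        else if (sched_asym_prefer(cpu, max_cpu))
                                max_cpu = cpu;
                }
                sg->asym_prefer_cpu = max_cpu;

next:
                sg = sg->next;
        } while (sg != sd->groups);

        if (cpu != group_balance_cpu(sg))
                return;

        update_group_capacity(sd, cpu);
}
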
1408 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) {
1409 *per_cpu_ptr(sdd->sg, cpu) = NULL;
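
Lines 1408-1409 are the claim step: per-CPU objects whose refcount was raised during group construction are detached from sd_data so the later bulk free leaves them alone. A sketch (the sched_domain_shared clause present in mainline is elided):

static void claim_allocations(int cpu, struct sched_domain *sd)
{
        struct sd_data *sdd = sd->private;

        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
        *per_cpu_ptr(sdd->sd, cpu) = NULL;

        /* Referenced groups stay alive; claiming means "do not free this from sdd". */
        if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;

        if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
                *per_cpu_ptr(sdd->sgc, cpu) = NULL;
}
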
1888 sdd->sg = alloc_percpu(struct sched_group *);
1889 if (!sdd->sg) {
1902 struct sched_group *sg;
1919 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL, cpu_to_node(j));
1920 if (!sg) {
1924 sg->next = sg;
1926 *per_cpu_ptr(sdd->sg, j) = sg;
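
Lines 1888-1926 are from __sdt_alloc(), which sets up the per-CPU pointer arrays for every topology level and pre-allocates one self-linked group per CPU. A condensed sketch showing only the sg side; the sd, sds and sgc allocations follow the same pattern and are elided:

static int __sdt_alloc(const struct cpumask *cpu_map)
{
        struct sched_domain_topology_level *tl;
        int j;

        for_each_sd_topology(tl) {
                struct sd_data *sdd = &tl->data;

                sdd->sg = alloc_percpu(struct sched_group *);
                if (!sdd->sg)
                        return -ENOMEM;

                /* ... sdd->sd, sdd->sds and sdd->sgc are allocated the same way ... */

                for_each_cpu(j, cpu_map) {
                        struct sched_group *sg;

                        sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                          GFP_KERNEL, cpu_to_node(j));
                        if (!sg)
                                return -ENOMEM;

                        sg->next = sg;        /* a lone group is its own ring */

                        *per_cpu_ptr(sdd->sg, j) = sg;

                        /* ... matching per-CPU sched_domain and sched_group_capacity here ... */
                }
        }

        return 0;
}
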
1966 if (sdd->sg) {
1967 kfree(*per_cpu_ptr(sdd->sg, j));
1977 free_percpu(sdd->sg);
1978 sdd->sg = NULL;
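
Lines 1966-1978 are the matching teardown in __sdt_free(): anything still reachable through sd_data (i.e. not claimed above) is freed per CPU, then the per-CPU arrays themselves are released. A condensed, sg-focused sketch:

static void __sdt_free(const struct cpumask *cpu_map)
{
        struct sched_domain_topology_level *tl;
        int j;

        for_each_sd_topology(tl) {
                struct sd_data *sdd = &tl->data;

                for_each_cpu(j, cpu_map) {
                        /* ... per-CPU sched_domain and sched_domain_shared freed here too ... */

                        if (sdd->sg)
                                kfree(*per_cpu_ptr(sdd->sg, j));
                        if (sdd->sgc)
                                kfree(*per_cpu_ptr(sdd->sgc, j));
                }
                free_percpu(sdd->sg);
                sdd->sg = NULL;
                free_percpu(sdd->sgc);
                sdd->sgc = NULL;
                /* ... and the sd/sds per-CPU arrays likewise ... */
        }
}
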