Lines Matching defs:sgc

96 printk(KERN_CONT " %d:{ span=%*pbl", group->sgc->id, cpumask_pr_args(sched_group_span(group)));
102 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
103 printk(KERN_CONT " cap=%lu", group->sgc->capacity);
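Taken together, the matches in this listing imply the shape of the object they all touch. Below is a hedged reconstruction of struct sched_group_capacity from these lines alone; the field order and the model name are guesses of mine, and the authoritative definition lives in kernel/sched/sched.h. SCHED_CAPACITY_SCALE is 1024 (1 << 10) in current kernels:

#include <stdatomic.h>

/* Illustrative stand-in, reconstructed from the matches; not the
 * kernel's definition. */
struct sched_group_capacity_model {
        atomic_int ref;               /* shared across groups; dropped/tested in the frees below */
        unsigned long capacity;       /* group total; at most SCHED_CAPACITY_SCALE (1024) per CPU */
        unsigned long min_capacity;   /* smallest single-CPU capacity in the group */
        unsigned long max_capacity;   /* largest single-CPU capacity in the group */
        int id;                       /* debug id printed in the dump above */
        unsigned long cpumask[];      /* trailing storage, hence the "+ cpumask_size()" allocation below */
};

The debug dump above prints the id and span unconditionally, but the capacity only when it deviates from the single-CPU default, which keeps the output quiet on symmetric systems.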
616 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) {
617 kfree(sg->sgc);
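Several sched_groups can share one sgc, so the free path drops a reference and only frees on the last put. A minimal userspace sketch of the same pattern with C11 atomics; sgc_model, dec_and_test, and put_sgc are my names, not the kernel's:

#include <stdatomic.h>
#include <stdlib.h>

struct sgc_model {
        atomic_int ref;
        /* capacity fields omitted */
};

/* Mirrors atomic_dec_and_test(): true when our decrement took the
 * count to zero, i.e. we were the last holder. */
static int dec_and_test(atomic_int *v)
{
        return atomic_fetch_sub(v, 1) == 1;
}

static void put_sgc(struct sgc_model *sgc, int free_sgc)
{
        /* Only the teardown of the last referencing group frees the
         * shared object. */
        if (free_sgc && dec_and_test(&sgc->ref))
                free(sgc);
}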
966 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
967 if (atomic_inc_return(&sg->sgc->ref) == 1) {
974 * Initialize sgc->capacity such that even if we mess up the
979 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
980 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
981 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
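When a group attaches to the sgc owned by a CPU, atomic_inc_return() == 1 identifies the first referencer, which does any one-time setup; every caller then applies safe capacity defaults (full capacity for every CPU spanned) so that, per the comment above, a botched topology can never divide by zero later. A sketch under the assumption that a plain array stands in for the per-CPU storage; attach_sgc and sgc_model are hypothetical names:

#include <stdatomic.h>

#define SCHED_CAPACITY_SCALE 1024UL

struct sgc_model {
        atomic_int ref;
        unsigned long capacity, min_capacity, max_capacity;
};

static void attach_sgc(struct sgc_model **percpu_sgc, int cpu,
                       struct sgc_model **group_sgc, unsigned span_weight)
{
        struct sgc_model *sgc = percpu_sgc[cpu];

        *group_sgc = sgc;
        /* atomic_fetch_add returns the old value, so old + 1 == 1
         * mirrors atomic_inc_return(&ref) == 1: we are the first user. */
        if (atomic_fetch_add(&sgc->ref, 1) + 1 == 1) {
                /* one-time init (the balance-mask copy, in the kernel) */
        }

        /* Safe defaults: full capacity for every spanned CPU, so no
         * later iteration can trip a division by zero. */
        sgc->capacity = SCHED_CAPACITY_SCALE * span_weight;
        sgc->min_capacity = SCHED_CAPACITY_SCALE;
        sgc->max_capacity = SCHED_CAPACITY_SCALE;
}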
995 * As we are referencing sgc across different topology levels, we need
1187 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1191 /* sgc visits should follow a similar trend as sg */
1192 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1207 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1208 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1209 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
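Here the sg and its sgc come from the same per-CPU slot, so their reference counts must move in lockstep; the WARN_ON fires if a first visit of one pairs with a revisit of the other. A compact model of that invariant check (check_lockstep is my name; the kernel warns rather than asserting):

#include <assert.h>
#include <stdatomic.h>

/* Both objects live in the same per-CPU slot, so a CPU that has
 * already visited the sg must also have visited its sgc. Mirrors:
 * WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); */
static void check_lockstep(atomic_int *sg_ref, atomic_int *sgc_ref)
{
        int already_visited = atomic_fetch_add(sg_ref, 1) + 1 > 1;
        int sgc_visited     = atomic_fetch_add(sgc_ref, 1) + 1 > 1;

        assert(already_visited == sgc_visited);
}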
1217 * and will initialize their ->sgc.
1412 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) {
1413 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
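NULLing the per-CPU slot once the object holds a reference is an ownership handoff: anything still reachable through the slot counts as unclaimed and gets freed by the bulk teardown below, while claimed objects are freed through their refcount instead. A sketch of the claim step, with claim and sgc_model as hypothetical names:

#include <stdatomic.h>
#include <stddef.h>

struct sgc_model { atomic_int ref; };

/* If a group took a reference, hand ownership to the refcount and
 * clear the slot so the bulk teardown below skips this entry. */
static void claim(struct sgc_model **slot)
{
        if (atomic_load(&(*slot)->ref))
                *slot = NULL;
}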
1893 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
1894 if (!sdd->sgc) {
1903 struct sched_group_capacity *sgc;
1928 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), GFP_KERNEL, cpu_to_node(j));
1929 if (!sgc) {
1934 sgc->id = j;
1937 *per_cpu_ptr(sdd->sgc, j) = sgc;
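Allocation is two-level: first a per-CPU array of pointers, then for each CPU a zeroed, node-local object with the balance cpumask tacked on as a flexible tail, which is why the size is sizeof(struct sched_group_capacity) + cpumask_size(). A userspace model using a plain array for the per-CPU part; NR_CPUS, CPUMASK_BYTES, and alloc_sgc_array are stand-ins of mine:

#include <stdlib.h>

#define NR_CPUS 8
#define CPUMASK_BYTES 2              /* stand-in for cpumask_size() */

struct sgc_model {
        int id;
        unsigned long cpumask[];     /* trailing balance-mask storage */
};

static struct sgc_model **alloc_sgc_array(void)
{
        /* Stand-in for alloc_percpu(struct sched_group_capacity *). */
        struct sgc_model **sgc = calloc(NR_CPUS, sizeof(*sgc));
        if (!sgc)
                return NULL;

        for (int j = 0; j < NR_CPUS; j++) {
                /* Stand-in for kzalloc_node(sizeof(*sgc) + cpumask_size(), ...):
                 * object and its cpumask in one zeroed allocation. */
                sgc[j] = calloc(1, sizeof(struct sgc_model) + CPUMASK_BYTES);
                if (!sgc[j])
                        goto fail;
                sgc[j]->id = j;
        }
        return sgc;

fail:
        for (int j = 0; j < NR_CPUS; j++)
                free(sgc[j]);
        free(sgc);
        return NULL;
}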
1969 if (sdd->sgc) {
1970 kfree(*per_cpu_ptr(sdd->sgc, j));
1979 free_percpu(sdd->sgc);
1980 sdd->sgc = NULL;
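Teardown mirrors the allocation: free whatever is still reachable through each slot (claimed entries were NULLed above, and kfree(NULL) is a no-op), then drop the per-CPU array itself and clear the pointer. Continuing the model above, with free_sgc_array as my name:

#include <stdlib.h>

struct sgc_model;                    /* defined in the model above */

static void free_sgc_array(struct sgc_model ***sgcp, int nr_cpus)
{
        struct sgc_model **sgc = *sgcp;

        if (!sgc)
                return;
        for (int j = 0; j < nr_cpus; j++)
                free(sgc[j]);        /* free(NULL) is a no-op, like kfree() */
        free(sgc);
        *sgcp = NULL;                /* mirrors sdd->sgc = NULL */
}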