Lines matching refs:sgs (struct sg_lb_stats, the per-sched_group load-balancing statistics in kernel/sched/fair.c)

9926 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9928 if (sgs->sum_nr_running < sgs->group_weight)
9931 if ((sgs->group_capacity * imbalance_pct) <
9932 (sgs->group_runnable * 100))
9935 if ((sgs->group_capacity * 100) >
9936 (sgs->group_util * imbalance_pct))
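Taken together, the matches at 9926-9936 give essentially the whole predicate. A minimal sketch of group_has_capacity() reconstructed from those lines (return statements and layout are my reading of mainline fair.c, not verified against the exact tree this listing came from): a group "has capacity" if it has fewer tasks than CPUs, and otherwise only if neither its runnable pressure nor its utilization exceeds its capacity by more than the imbalance_pct margin.

static inline bool
group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
	/* Fewer running tasks than CPUs: trivially has capacity. */
	if (sgs->sum_nr_running < sgs->group_weight)
		return true;

	/* Runnable pressure exceeds capacity by more than the imbalance_pct margin. */
	if ((sgs->group_capacity * imbalance_pct) <
			(sgs->group_runnable * 100))
		return false;

	/* Utilization is still clearly below capacity (same margin). */
	if ((sgs->group_capacity * 100) >
			(sgs->group_util * imbalance_pct))
		return true;

	return false;
}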
9951 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9953 if (sgs->sum_nr_running <= sgs->group_weight)
9956 if ((sgs->group_capacity * 100) <
9957 (sgs->group_util * imbalance_pct))
9960 if ((sgs->group_capacity * imbalance_pct) <
9961 (sgs->group_runnable * 100))
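The mirror-image predicate; the matches at 9951-9961 cover all three of its checks. A sketch under the same assumptions as above:

static inline bool
group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
	/* Not more tasks than CPUs: cannot be overloaded. */
	if (sgs->sum_nr_running <= sgs->group_weight)
		return false;

	/* Utilization exceeds capacity by more than the imbalance_pct margin. */
	if ((sgs->group_capacity * 100) <
			(sgs->group_util * imbalance_pct))
		return true;

	/* Runnable pressure exceeds capacity by more than the same margin. */
	if ((sgs->group_capacity * imbalance_pct) <
			(sgs->group_runnable * 100))
		return true;

	return false;
}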
9970 struct sg_lb_stats *sgs)
9972 if (group_is_overloaded(imbalance_pct, sgs))
9978 if (sgs->group_asym_packing)
9981 if (sgs->group_smt_balance)
9984 if (sgs->group_misfit_task_load)
9987 if (!group_has_capacity(imbalance_pct, sgs))
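The matches at 9970-9987 are the body of group_classify(); the one test that does not show up here is the sg_imbalanced(group) check, because it never dereferences sgs. A sketch of the whole classifier under the same assumptions, ordered from most to least constrained group_type:

static inline enum group_type
group_classify(unsigned int imbalance_pct, struct sched_group *group,
	       struct sg_lb_stats *sgs)
{
	if (group_is_overloaded(imbalance_pct, sgs))
		return group_overloaded;

	if (sg_imbalanced(group))		/* not matched above: no sgs use */
		return group_imbalanced;

	if (sgs->group_asym_packing)
		return group_asym_packing;

	if (sgs->group_smt_balance)
		return group_smt_balance;

	if (sgs->group_misfit_task_load)
		return group_misfit_task;

	if (!group_has_capacity(imbalance_pct, sgs))
		return group_fully_busy;

	return group_has_spare;
}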
10016 * @sgs: Load-balancing statistics of the candidate busiest group
10034 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
10046 if (sgs->group_weight - sgs->idle_cpus != 1)
10064 static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
10077 sgs->sum_h_nr_running > 1)
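The matches from 10016 to 10077 fall in sched_asym() and smt_balance(). Sketches of both, assuming a v6.6-v6.8-era fair.c (helper names such as sched_use_asym_prio() and sched_asym_prefer() are from mainline as I recall it, not from this listing): sched_asym() only trusts CPU priority ordering when an SMT candidate group has exactly one busy sibling, and smt_balance() flags a fully-SMT source group with more than one running task so a non-SMT destination can pull from it.

static inline bool
sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
	   struct sched_group *group)
{
	/* Only use asym packing when the whole local core is idle, if applicable. */
	if (!sched_use_asym_prio(env->sd, env->dst_cpu))
		return false;

	/* CPU priority is meaningless for an SMT core with more than one busy sibling. */
	if (group->flags & SD_SHARE_CPUCAPACITY) {
		if (sgs->group_weight - sgs->idle_cpus != 1)
			return false;
	}

	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
}

static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
			       struct sched_group *group)
{
	if (env->idle == CPU_NOT_IDLE)
		return false;

	/*
	 * Prefer moving a task off an SMT core that is sharing its capacity
	 * among several tasks; a group with a single hardware thread per core
	 * does not set SD_SHARE_CPUCAPACITY.
	 */
	if (group->flags & SD_SHARE_CPUCAPACITY &&
	    sgs->sum_h_nr_running > 1)
		return true;

	return false;
}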
10136 * @sgs: variable to hold the statistics for this group.
10142 struct sg_lb_stats *sgs,
10147 memset(sgs, 0, sizeof(*sgs));
10157 sgs->group_load += load;
10158 sgs->group_util += cpu_util_cfs(i);
10159 sgs->group_runnable += cpu_runnable(rq);
10160 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
10163 sgs->sum_nr_running += nr_running;
10172 sgs->nr_numa_running += rq->nr_numa_running;
10173 sgs->nr_preferred_running += rq->nr_preferred_running;
10179 sgs->idle_cpus++;
10189 if (sgs->group_misfit_task_load < rq->misfit_task_load) {
10190 sgs->group_misfit_task_load = rq->misfit_task_load;
10196 if (sgs->group_misfit_task_load < load)
10197 sgs->group_misfit_task_load = load;
10201 sgs->group_capacity = group->sgc->capacity;
10203 sgs->group_weight = group->group_weight;
10207 sgs->group_capacity = 0;
10208 sgs->avg_load = 0;
10209 sgs->group_type = group_has_spare;
10210 sgs->group_weight = group->group_weight;
10216 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
10217 sched_asym(env, sds, sgs, group)) {
10218 sgs->group_asym_packing = 1;
10222 if (!local_group && smt_balance(env, sgs, group))
10223 sgs->group_smt_balance = 1;
10225 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
10228 if (sgs->group_type == group_overloaded)
10229 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
10230 sgs->group_capacity;
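The matches from 10136 to 10230 span update_sg_lb_stats(). A condensed sketch of just the accumulation and finalization steps visible above (the NUMA counters, the misfit handling, the branch around 10207-10210 that resets group_capacity and forces group_has_spare, and the asym/SMT flag setting are omitted or simplified; helper names follow mainline fair.c as I recall it):

	memset(sgs, 0, sizeof(*sgs));

	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
		struct rq *rq = cpu_rq(i);
		unsigned long load = cpu_load(rq);

		sgs->group_load += load;
		sgs->group_util += cpu_util_cfs(i);
		sgs->group_runnable += cpu_runnable(rq);
		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
		sgs->sum_nr_running += rq->nr_running;

		/* A CPU only counts as idle if it has nothing queued at all. */
		if (!rq->nr_running && idle_cpu(i))
			sgs->idle_cpus++;
	}

	sgs->group_capacity = group->sgc->capacity;
	sgs->group_weight = group->group_weight;

	sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);

	/* avg_load is only meaningful (and only computed) for an overloaded group. */
	if (sgs->group_type == group_overloaded)
		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
				sgs->group_capacity;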
10238 * @sgs: sched_group statistics
10249 struct sg_lb_stats *sgs)
10254 if (!sgs->sum_h_nr_running)
10264 (sgs->group_type == group_misfit_task) &&
10269 if (sgs->group_type > busiest->group_type)
10272 if (sgs->group_type < busiest->group_type)
10280 switch (sgs->group_type) {
10283 if (sgs->avg_load <= busiest->avg_load)
10305 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
10314 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
10332 if (sgs->avg_load < busiest->avg_load)
10335 if (sgs->avg_load == busiest->avg_load) {
10353 if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
10367 if (sgs->idle_cpus > busiest->idle_cpus)
10369 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
10370 (sgs->sum_nr_running <= busiest->sum_nr_running))
10383 (sgs->group_type <= group_fully_busy) &&
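The matches from 10238 to 10383 are update_sd_pick_busiest(), which decides whether the candidate group's statistics (sgs) should replace the current busiest (sds->busiest_stat). A condensed sketch of the ordering logic visible above (the misfit early-out near 10264, the SMT special cases near 10314 and 10353, and the trailing capacity check near 10383 are left out):

	struct sg_lb_stats *busiest = &sds->busiest_stat;

	/* A group with no CFS tasks can never be the busiest. */
	if (!sgs->sum_h_nr_running)
		return false;

	/* A more constrained group_type always wins ... */
	if (sgs->group_type > busiest->group_type)
		return true;

	/* ... and a less constrained one always loses. */
	if (sgs->group_type < busiest->group_type)
		return false;

	/* Same group_type: break the tie per type. */
	switch (sgs->group_type) {
	case group_overloaded:
		/* Prefer the group with the higher average load. */
		if (sgs->avg_load <= busiest->avg_load)
			return false;
		break;

	case group_misfit_task:
		/* Prefer the group carrying the bigger misfit load. */
		if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
			return false;
		break;

	case group_has_spare:
		/* Prefer fewer idle CPUs, then more running tasks. */
		if (sgs->idle_cpus > busiest->idle_cpus)
			return false;
		else if ((sgs->idle_cpus == busiest->idle_cpus) &&
			 (sgs->sum_nr_running <= busiest->sum_nr_running))
			return false;
		break;

	default:
		/* imbalanced, asym_packing, smt_balance, fully_busy cases omitted. */
		break;
	}

	return true;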
10391 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
10393 if (sgs->sum_h_nr_running > sgs->nr_numa_running)
10395 if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
10409 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
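The two matches at 10391-10409 are the CONFIG_NUMA_BALANCING definition of fbq_classify_group() and its stub. Reconstructed (the enum values regular/remote/all come from mainline, they are not shown in the listing): a group is "regular" if it runs tasks without NUMA placement, "remote" if some tasks run off their preferred node, and "all" if every task sits on its preferred node.

#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
		return regular;		/* some tasks have no NUMA placement */
	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
		return remote;		/* some tasks are off their preferred node */
	return all;			/* every task is on its preferred node */
}
#else
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
	return all;
}
#endif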
10471 * @sgs: variable to hold the statistics for this group.
10476 struct sg_lb_stats *sgs,
10481 memset(sgs, 0, sizeof(*sgs));
10485 sgs->group_misfit_task_load = 1;
10491 sgs->group_load += cpu_load_without(rq, p);
10492 sgs->group_util += cpu_util_without(i, p);
10493 sgs->group_runnable += cpu_runnable_without(rq, p);
10495 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
10498 sgs->sum_nr_running += nr_running;
10504 sgs->idle_cpus++;
10508 sgs->group_misfit_task_load &&
10510 sgs->group_misfit_task_load = 0;
10514 sgs->group_capacity = group->sgc->capacity;
10516 sgs->group_weight = group->group_weight;
10518 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
10524 if (sgs->group_type == group_fully_busy ||
10525 sgs->group_type == group_overloaded)
10526 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
10527 sgs->group_capacity;
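The matches from 10471 to 10527 are update_sg_wakeup_stats(), the wakeup-time (find_idlest_group) counterpart of update_sg_lb_stats(): the same statistics, but computed as if the waking task p were removed from its current CPU, which is what the *_without() helpers at 10491-10493 provide. It also pre-sets group_misfit_task_load to 1 on asymmetric-capacity domains (10485) and clears it once a CPU that fits p is found (10508-10510). A condensed sketch; helper names such as task_running_on_cpu() and idle_cpu_without() are mainline's as I recall them:

	memset(sgs, 0, sizeof(*sgs));

	for_each_cpu(i, sched_group_span(group)) {
		struct rq *rq = cpu_rq(i);
		unsigned int local = task_running_on_cpu(i, p);	/* 1 if p runs here */

		sgs->group_load += cpu_load_without(rq, p);
		sgs->group_util += cpu_util_without(i, p);
		sgs->group_runnable += cpu_runnable_without(rq, p);
		sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
		sgs->sum_nr_running += rq->nr_running - local;

		if (!(rq->nr_running - local) && idle_cpu_without(i, p))
			sgs->idle_cpus++;
	}

	sgs->group_capacity = group->sgc->capacity;
	sgs->group_weight = group->group_weight;
	sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);

	/* On this path avg_load matters for fully_busy groups too, not only overloaded. */
	if (sgs->group_type == group_fully_busy ||
	    sgs->group_type == group_overloaded)
		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
				sgs->group_capacity;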
10533 struct sg_lb_stats *sgs)
10535 if (sgs->group_type < idlest_sgs->group_type)
10538 if (sgs->group_type > idlest_sgs->group_type)
10546 switch (sgs->group_type) {
10550 if (idlest_sgs->avg_load <= sgs->avg_load)
10568 if (idlest_sgs->idle_cpus > sgs->idle_cpus)
10572 if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
10573 idlest_sgs->group_util <= sgs->group_util)
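The matches from 10533 to 10573 cover update_pick_idlest(), which is update_sd_pick_busiest() turned around: the candidate group replaces the current idlest only if it is less loaded. A sketch reconstructed from those lines plus mainline fair.c as I recall it (the comments and the misfit case are not in the listing):

static bool update_pick_idlest(struct sched_group *idlest,
			       struct sg_lb_stats *idlest_sgs,
			       struct sched_group *group,
			       struct sg_lb_stats *sgs)
{
	/* A lower (less loaded) group_type wins outright, a higher one loses. */
	if (sgs->group_type < idlest_sgs->group_type)
		return true;

	if (sgs->group_type > idlest_sgs->group_type)
		return false;

	/* Same group_type: tie-break per type. */
	switch (sgs->group_type) {
	case group_overloaded:
	case group_fully_busy:
		/* Pick the group with the lowest avg_load. */
		if (idlest_sgs->avg_load <= sgs->avg_load)
			return false;
		break;

	case group_imbalanced:
	case group_asym_packing:
	case group_smt_balance:
		/* These types are not used on the slow wakeup path. */
		return false;

	case group_misfit_task:
		/* Pick the group with the highest max capacity. */
		if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
			return false;
		break;

	case group_has_spare:
		/* Pick the group with the most idle CPUs, then the lowest utilization. */
		if (idlest_sgs->idle_cpus > sgs->idle_cpus)
			return false;

		if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
		    idlest_sgs->group_util <= sgs->group_util)
			return false;
		break;
	}

	return true;
}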
10593 struct sg_lb_stats *sgs;
10625 sgs = &local_sgs;
10628 sgs = &tmp_sgs;
10631 update_sg_wakeup_stats(sd, group, sgs, p);
10633 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
10635 idlest_sgs = *sgs;
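The matches at 10593-10635 come from find_idlest_group(): it walks the sched_domain's groups, fills local_sgs for the group containing this_cpu and tmp_sgs for every other group, and lets update_pick_idlest() keep the best remote candidate. A condensed sketch of that loop (cpumask filtering and the local-versus-idlest comparison that follows are omitted; the idlest_sgs initializer is mainline's as I recall it):

	struct sg_lb_stats local_sgs, tmp_sgs;
	struct sg_lb_stats idlest_sgs = {
		.avg_load = UINT_MAX,
		.group_type = group_overloaded,	/* worst possible initial candidate */
	};
	struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
	struct sg_lb_stats *sgs;

	do {
		int local_group = cpumask_test_cpu(this_cpu,
						   sched_group_span(group));

		if (local_group) {
			sgs = &local_sgs;
			local = group;
		} else {
			sgs = &tmp_sgs;
		}

		update_sg_wakeup_stats(sd, group, sgs, p);

		if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
			idlest = group;
			idlest_sgs = *sgs;
		}
	} while (group = group->next, group != sd->groups);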
10849 struct sg_lb_stats *sgs = &tmp_sgs;
10855 sgs = local;
10862 update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
10868 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
10870 sds->busiest_stat = *sgs;
10875 sds->total_load += sgs->group_load;
10876 sds->total_capacity += sgs->group_capacity;
10878 sum_util += sgs->group_util;
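Finally, the matches at 10849-10878 are the group walk in update_sd_lb_stats(): per-group statistics go either into the local slot of sds or into tmp_sgs, the busiest remote group's statistics are copied into sds->busiest_stat, and every group's load, capacity and utilization are accumulated into the domain-wide totals. A condensed sketch (the prefer_sibling handling, group-capacity refresh and overload/overutilized status propagation are omitted):

	struct sg_lb_stats *local = &sds->local_stat;
	struct sg_lb_stats tmp_sgs;
	struct sched_group *sg = env->sd->groups;
	unsigned long sum_util = 0;
	int sg_status = 0;

	do {
		struct sg_lb_stats *sgs = &tmp_sgs;
		int local_group = cpumask_test_cpu(env->dst_cpu,
						   sched_group_span(sg));

		if (local_group) {
			sds->local = sg;
			sgs = local;
		}

		update_sg_lb_stats(env, sds, sg, sgs, &sg_status);

		if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
			sds->busiest = sg;
			sds->busiest_stat = *sgs;
		}

		/* Domain-wide totals include every group, local or not. */
		sds->total_load += sgs->group_load;
		sds->total_capacity += sgs->group_capacity;
		sum_util += sgs->group_util;

		sg = sg->next;
	} while (sg != env->sd->groups);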