Lines matching defs:sgs
8819 static inline bool group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8821 if (sgs->sum_nr_running < sgs->group_weight) {
8825 if ((sgs->group_capacity * imbalance_pct) < (sgs->group_runnable * FAIR_ONEHUNDRED)) {
8829 if ((sgs->group_capacity * FAIR_ONEHUNDRED) > (sgs->group_util * imbalance_pct)) {
8844 static inline bool group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8846 if (sgs->sum_nr_running <= sgs->group_weight) {
8850 if ((sgs->group_capacity * FAIR_ONEHUNDRED) < (sgs->group_util * imbalance_pct)) {
8854 if ((sgs->group_capacity * imbalance_pct) < (sgs->group_runnable * FAIR_ONEHUNDRED)) {
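The two predicates above (8819-8854 in the listing) are near-duals built on the same percentage arithmetic: group capacity scaled by imbalance_pct is weighed against utilization or runnable load scaled by one hundred. A minimal userspace sketch of that decision logic follows; it assumes FAIR_ONEHUNDRED is simply 100 (mainline uses the literal), strips sg_lb_stats down to the fields used here, and picks an illustrative imbalance_pct of 117. The _sketch names are mine, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Stripped-down stand-in for the kernel's sg_lb_stats (assumption). */
struct sg_lb_stats_min {
    unsigned long group_capacity;
    unsigned long group_util;
    unsigned long group_runnable;
    unsigned int sum_nr_running;
    unsigned int group_weight;
};

/* True if the group still has room: fewer tasks than CPUs, or both
 * runnable time and utilization stay below capacity once the
 * imbalance_pct margin is applied. */
static bool group_has_capacity_sketch(unsigned int imbalance_pct,
                                      const struct sg_lb_stats_min *sgs)
{
    if (sgs->sum_nr_running < sgs->group_weight)
        return true;
    if (sgs->group_capacity * imbalance_pct < sgs->group_runnable * 100)
        return false;
    if (sgs->group_capacity * 100 > sgs->group_util * imbalance_pct)
        return true;
    return false;
}

/* Near-dual of the above: more tasks than CPUs and either utilization
 * or runnable time exceeds capacity by more than the margin. */
static bool group_is_overloaded_sketch(unsigned int imbalance_pct,
                                       const struct sg_lb_stats_min *sgs)
{
    if (sgs->sum_nr_running <= sgs->group_weight)
        return false;
    if (sgs->group_capacity * 100 < sgs->group_util * imbalance_pct)
        return true;
    if (sgs->group_capacity * imbalance_pct < sgs->group_runnable * 100)
        return true;
    return false;
}

int main(void)
{
    /* 4 CPUs, 5 runnable tasks, utilization just above capacity. */
    struct sg_lb_stats_min sgs = {
        .group_capacity = 4096, .group_util = 4300,
        .group_runnable = 4500, .sum_nr_running = 5,
        .group_weight = 4,
    };

    printf("has_capacity=%d overloaded=%d\n",
           group_has_capacity_sketch(117, &sgs),
           group_is_overloaded_sketch(117, &sgs));
    return 0;
}

Checking both group_util and group_runnable means a group only reports spare capacity when neither actual utilization nor time spent runnable (which includes waiting) breaches the margin.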
8880 struct sg_lb_stats *sgs)
8882 if (group_is_overloaded(imbalance_pct, sgs)) {
8890 if (sgs->group_asym_packing) {
8894 if (sgs->group_misfit_task_load) {
8898 if (!group_has_capacity(imbalance_pct, sgs)) {
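The group_classify() matches (8880-8898) show the order of the checks: overloaded wins first, then asym packing, then misfit, and only then does the spare-capacity test separate fully_busy from has_spare. The group_imbalanced test that sits between the first two in the real function reads the sched_group rather than sgs, so it does not show up in this listing. A self-contained sketch of just that priority ladder, with the inputs precomputed as flags and the enum names assumed to mirror, not copy, the kernel's enum group_type:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative group types, lowest to highest pressure; the busiest and
 * idlest comparisons rely on this ordering (assumption). */
enum group_type_sketch {
    GROUP_HAS_SPARE = 0,
    GROUP_FULLY_BUSY,
    GROUP_MISFIT_TASK,
    GROUP_ASYM_PACKING,
    GROUP_IMBALANCED,
    GROUP_OVERLOADED,
};

/* Priority of the checks matches the listing: overloaded first, then
 * asym packing, then misfit, and the spare-capacity test last.  The
 * group_imbalanced case is folded in as a flag for completeness. */
static enum group_type_sketch
classify_sketch(bool overloaded, bool imbalanced, bool asym_packing,
                unsigned long misfit_load, bool has_capacity)
{
    if (overloaded)
        return GROUP_OVERLOADED;
    if (imbalanced)
        return GROUP_IMBALANCED;
    if (asym_packing)
        return GROUP_ASYM_PACKING;
    if (misfit_load)
        return GROUP_MISFIT_TASK;
    if (!has_capacity)
        return GROUP_FULLY_BUSY;
    return GROUP_HAS_SPARE;
}

int main(void)
{
    /* A group with a misfit task but otherwise not overloaded. */
    printf("type=%d\n", classify_sketch(false, false, false, 1024, true));
    return 0;
}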
8934 * @sgs: variable to hold the statistics for this group.
8937 static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, struct sg_lb_stats *sgs,
8942 memset(sgs, 0, sizeof(*sgs));
8958 sgs->group_load += cpu_load(rq);
8959 sgs->group_util += cpu_util(i);
8960 sgs->group_runnable += cpu_runnable(rq);
8961 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
8964 sgs->sum_nr_running += nr_running;
8975 sgs->nr_numa_running += rq->nr_numa_running;
8976 sgs->nr_preferred_running += rq->nr_preferred_running;
8982 sgs->idle_cpus++;
8992 if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) {
8993 sgs->group_misfit_task_load = rq->misfit_task_load;
9000 sgs->group_capacity = 0;
9001 sgs->avg_load = 0;
9002 sgs->group_type = group_has_spare;
9003 sgs->group_weight = group->group_weight;
9008 if (env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9010 sgs->group_asym_packing = 1;
9013 sgs->group_capacity = group->sgc->capacity;
9015 sgs->group_weight = group->group_weight;
9017 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
9020 if (sgs->group_type == group_overloaded) {
9021 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
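The matches from 8942 to 9021 are the body of update_sg_lb_stats(): a per-CPU loop that sums load, utilization, runnable load and task counts, counts idle CPUs, and keeps the largest misfit task load, followed by classification and an avg_load that is only derived for overloaded groups. A compressed userspace sketch of that shape, assuming made-up per-CPU inputs, a capacity scale of 1024 (SCHED_CAPACITY_SCALE), and a crude tasks-versus-CPUs notion of overloaded in place of group_classify():

#include <stdio.h>
#include <string.h>

#define CAP_SCALE 1024UL  /* stand-in for SCHED_CAPACITY_SCALE */

/* Simplified per-CPU snapshot (assumption; the kernel reads these from
 * struct rq via helpers such as cpu_load(), cpu_util(), cpu_runnable()). */
struct cpu_stat {
    unsigned long load, util, runnable, misfit_load;
    unsigned int nr_running;
};

/* Subset of sg_lb_stats that this sketch fills in. */
struct sg_stats_sketch {
    unsigned long group_load, group_util, group_runnable;
    unsigned long group_misfit_task_load, group_capacity, avg_load;
    unsigned int sum_nr_running, idle_cpus, group_weight;
    int overloaded;
};

static void update_stats_sketch(struct sg_stats_sketch *sgs,
                                const struct cpu_stat *cpus, int ncpus,
                                unsigned long group_capacity)
{
    memset(sgs, 0, sizeof(*sgs));

    for (int i = 0; i < ncpus; i++) {
        sgs->group_load += cpus[i].load;
        sgs->group_util += cpus[i].util;
        sgs->group_runnable += cpus[i].runnable;
        sgs->sum_nr_running += cpus[i].nr_running;

        if (!cpus[i].nr_running)
            sgs->idle_cpus++;

        /* Track the largest misfit task load seen in the group. */
        if (sgs->group_misfit_task_load < cpus[i].misfit_load)
            sgs->group_misfit_task_load = cpus[i].misfit_load;
    }

    sgs->group_capacity = group_capacity;
    sgs->group_weight = ncpus;

    /* As in the listing, avg_load is only derived for overloaded groups;
     * here "overloaded" is simply more tasks than CPUs. */
    sgs->overloaded = sgs->sum_nr_running > sgs->group_weight;
    if (sgs->overloaded)
        sgs->avg_load = sgs->group_load * CAP_SCALE / sgs->group_capacity;
}

int main(void)
{
    struct cpu_stat cpus[2] = {
        { .load = 900, .util = 800, .runnable = 950, .nr_running = 2 },
        { .load = 600, .util = 500, .runnable = 650, .nr_running = 1 },
    };
    struct sg_stats_sketch sgs;

    update_stats_sketch(&sgs, cpus, 2, 2048);
    printf("load=%lu idle=%u avg_load=%lu\n",
           sgs.group_load, sgs.idle_cpus, sgs.avg_load);
    return 0;
}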
9030 * @sgs: sched_group statistics
9039 struct sg_lb_stats *sgs)
9044 if (!sgs->sum_h_nr_running) {
9054 if (sgs->group_type == group_misfit_task &&
9059 if (sgs->group_type > busiest->group_type) {
9063 if (sgs->group_type < busiest->group_type) {
9072 switch (sgs->group_type) {
9075 if (sgs->avg_load <= busiest->avg_load) {
9099 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) {
9115 if (sgs->avg_load <= busiest->avg_load) {
9128 if (sgs->idle_cpus > busiest->idle_cpus) {
9130 } else if ((sgs->idle_cpus == busiest->idle_cpus) && (sgs->sum_nr_running <= busiest->sum_nr_running)) {
9143 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && (sgs->group_type <= group_fully_busy) &&
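The update_sd_pick_busiest() matches (9044-9143) form the tie-breaking ladder for choosing the busiest group: an empty group never wins, a higher group_type always wins, and within the same type the deciding metric varies: avg_load for overloaded or fully busy groups, the misfit task load for misfit groups, and idle CPUs then task count for groups with spare capacity. A reduced sketch of that ordering; the struct carries only the compared fields, a few group types (imbalanced, asym packing) are omitted, and the final SD_ASYM_CPUCAPACITY escape at 9143 is left out:

#include <stdbool.h>
#include <stdio.h>

enum gtype { HAS_SPARE = 0, FULLY_BUSY, MISFIT_TASK, OVERLOADED };

/* Only the fields the comparison needs (assumption). */
struct grp {
    enum gtype type;
    unsigned long avg_load, misfit_load;
    unsigned int idle_cpus, sum_nr_running, sum_h_nr_running;
};

/* Return true if @cand should replace @busiest, following the same
 * shape as the listing: empty groups never win, type decides first,
 * then a per-type tie-break. */
static bool pick_busier_sketch(const struct grp *cand, const struct grp *busiest)
{
    if (!cand->sum_h_nr_running)
        return false;
    if (cand->type > busiest->type)
        return true;
    if (cand->type < busiest->type)
        return false;

    switch (cand->type) {
    case OVERLOADED:
    case FULLY_BUSY:
        /* Higher average load wins. */
        return cand->avg_load > busiest->avg_load;
    case MISFIT_TASK:
        /* Larger misfit task load wins. */
        return cand->misfit_load > busiest->misfit_load;
    case HAS_SPARE:
    default:
        /* Fewer idle CPUs wins; on a tie, more runnable tasks wins. */
        if (cand->idle_cpus < busiest->idle_cpus)
            return true;
        return cand->idle_cpus == busiest->idle_cpus &&
               cand->sum_nr_running > busiest->sum_nr_running;
    }
}

int main(void)
{
    struct grp a = { .type = OVERLOADED, .avg_load = 900, .sum_h_nr_running = 4 };
    struct grp b = { .type = FULLY_BUSY, .avg_load = 1200, .sum_h_nr_running = 3 };

    /* Overloaded beats fully busy regardless of avg_load. */
    printf("a busier than b: %d\n", pick_busier_sketch(&a, &b));
    return 0;
}

This prints 1: the overloaded group is preferred over the fully busy one even though the latter has the higher avg_load, which is exactly the property the group_type ordering exists to enforce.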
9152 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9154 if (sgs->sum_h_nr_running > sgs->nr_numa_running) {
9157 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) {
9174 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
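The two fbq_classify_group() definitions at 9152 and 9174 are the CONFIG_NUMA_BALANCING and stub variants; the real one classifies a group by how many of its tasks are NUMA-aware and how many already run on their preferred node. A sketch of that decision, with the fbq_type names assumed:

#include <stdio.h>

/* fbq_type in the kernel: regular, remote, all (names assumed here). */
enum fbq_type_sketch { FBQ_REGULAR, FBQ_REMOTE, FBQ_ALL };

/* If some tasks are not NUMA-aware at all, the group is "regular";
 * if all are NUMA-aware but some run off their preferred node it is
 * "remote"; otherwise every task already sits where it wants ("all"). */
static enum fbq_type_sketch
fbq_classify_sketch(unsigned int sum_h_nr_running,
                    unsigned int nr_numa_running,
                    unsigned int nr_preferred_running)
{
    if (sum_h_nr_running > nr_numa_running)
        return FBQ_REGULAR;
    if (sum_h_nr_running > nr_preferred_running)
        return FBQ_REMOTE;
    return FBQ_ALL;
}

int main(void)
{
    /* 4 tasks, all NUMA-aware, 3 on their preferred node -> remote. */
    printf("%d\n", fbq_classify_sketch(4, 4, 3));
    return 0;
}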
9239 * @sgs: variable to hold the statistics for this group.
9242 static inline void update_sg_wakeup_stats(struct sched_domain *sd, struct sched_group *group, struct sg_lb_stats *sgs,
9247 memset(sgs, 0, sizeof(*sgs));
9254 sgs->group_load += cpu_load_without(rq, p);
9255 sgs->group_util += cpu_util_without(i, p);
9256 sgs->group_runnable += cpu_runnable_without(rq, p);
9258 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9261 sgs->sum_nr_running += nr_running;
9267 sgs->idle_cpus++;
9273 sgs->group_misfit_task_load = 1;
9276 sgs->group_capacity = group->sgc->capacity;
9278 sgs->group_weight = group->group_weight;
9280 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
9286 if (sgs->group_type == group_fully_busy || sgs->group_type == group_overloaded) {
9287 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
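update_sg_wakeup_stats() (9242-9287) mirrors the load-balance version but uses the *_without() helpers so the waking task p is not counted against the group it might move to, and the local group drops p from its h_nr_running; avg_load is then computed for fully busy as well as overloaded groups. A sketch of the "without" idea alone, assuming the subtraction is a simple clamped difference (the kernel's cpu_util_without() handles more cases around blocked and estimated utilization):

#include <stdio.h>

/* Clamped subtraction: never let the estimate go negative
 * (assumption; a simplified stand-in for cpu_util_without()). */
static unsigned long util_without_sketch(unsigned long cpu_util,
                                         unsigned long task_util,
                                         int task_is_on_this_cpu)
{
    if (!task_is_on_this_cpu)
        return cpu_util;
    return cpu_util > task_util ? cpu_util - task_util : 0;
}

int main(void)
{
    /* The waking task currently contributes 300 to this CPU. */
    printf("%lu\n", util_without_sketch(700, 300, 1));  /* 400 */
    printf("%lu\n", util_without_sketch(700, 300, 0));  /* 700 */
    return 0;
}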
9292 struct sg_lb_stats *sgs)
9294 if (sgs->group_type < idlest_sgs->group_type) {
9298 if (sgs->group_type > idlest_sgs->group_type) {
9307 switch (sgs->group_type) {
9311 if (idlest_sgs->avg_load <= sgs->avg_load) {
9330 if (idlest_sgs->idle_cpus > sgs->idle_cpus) {
9335 if (idlest_sgs->idle_cpus == sgs->idle_cpus && idlest_sgs->group_util <= sgs->group_util) {
9355 struct sg_lb_stats *sgs;
9382 sgs = &local_sgs;
9385 sgs = &tmp_sgs;
9388 update_sg_wakeup_stats(sd, group, sgs, p);
9390 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
9392 idlest_sgs = *sgs;
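update_pick_idlest() and the find_idlest_group() loop (9292-9392) are the mirror image of the busiest pick: a lower group_type wins outright, ties go to the lower avg_load for busy groups, and to more idle CPUs then lower utilization for groups with spare capacity. A reduced sketch with only the compared fields (the misfit and imbalanced cases are omitted):

#include <stdbool.h>
#include <stdio.h>

enum gtype { HAS_SPARE = 0, FULLY_BUSY, OVERLOADED };

/* Only the fields compared when choosing the idlest group (assumption). */
struct grp {
    enum gtype type;
    unsigned long avg_load, group_util;
    unsigned int idle_cpus;
};

/* Return true if @cand is a better "idlest" choice than @idlest. */
static bool pick_idler_sketch(const struct grp *cand, const struct grp *idlest)
{
    if (cand->type < idlest->type)
        return true;
    if (cand->type > idlest->type)
        return false;

    switch (cand->type) {
    case OVERLOADED:
    case FULLY_BUSY:
        /* Lower average load wins. */
        return cand->avg_load < idlest->avg_load;
    case HAS_SPARE:
    default:
        /* More idle CPUs wins; on a tie, lower utilization wins. */
        if (cand->idle_cpus > idlest->idle_cpus)
            return true;
        return cand->idle_cpus == idlest->idle_cpus &&
               cand->group_util < idlest->group_util;
    }
}

int main(void)
{
    struct grp a = { .type = HAS_SPARE, .idle_cpus = 2, .group_util = 600 };
    struct grp b = { .type = HAS_SPARE, .idle_cpus = 2, .group_util = 500 };

    printf("a idler than b: %d\n", pick_idler_sketch(&a, &b));  /* 0 */
    return 0;
}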
9527 struct sg_lb_stats *sgs = &tmp_sgs;
9533 sgs = local;
9540 update_sg_lb_stats(env, sg, sgs, &sg_status);
9546 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
9548 sds->busiest_stat = *sgs;
9553 sds->total_load += sgs->group_load;
9554 sds->total_capacity += sgs->group_capacity;
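The final matches (9527-9554) are the outer loop of update_sd_lb_stats(): each group's statistics land either in the local slot or in a scratch buffer, the busiest candidate is replaced whenever update_sd_pick_busiest() says so, and every group's load and capacity feed the domain-wide totals used later by the imbalance calculation. A sketch of that accumulation shape; the "busier" rule here is a deliberately simple load-per-capacity stand-in for the real ladder sketched earlier:

#include <stdio.h>

/* Minimal per-group and per-domain stats for the accumulation sketch. */
struct grp_stats {
    unsigned long group_load, group_capacity;
};

struct dom_stats {
    unsigned long total_load, total_capacity;
    struct grp_stats busiest;
};

/* Illustrative "busier" test: more load per unit of capacity.  A zero
 * capacity marks "no candidate yet". */
static int busier_sketch(const struct grp_stats *a, const struct grp_stats *b)
{
    if (!b->group_capacity)
        return 1;
    return a->group_load * b->group_capacity >
           b->group_load * a->group_capacity;
}

static void update_domain_sketch(struct dom_stats *sds,
                                 const struct grp_stats *groups, int n,
                                 int local_idx)
{
    for (int i = 0; i < n; i++) {
        /* The local group is never a candidate for "busiest". */
        if (i != local_idx && busier_sketch(&groups[i], &sds->busiest))
            sds->busiest = groups[i];

        /* Domain-wide totals feed the later imbalance calculation. */
        sds->total_load += groups[i].group_load;
        sds->total_capacity += groups[i].group_capacity;
    }
}

int main(void)
{
    struct grp_stats groups[3] = {
        { .group_load = 400,  .group_capacity = 1024 },   /* local */
        { .group_load = 1600, .group_capacity = 1024 },
        { .group_load = 900,  .group_capacity = 1024 },
    };
    struct dom_stats sds = { 0 };

    update_domain_sketch(&sds, groups, 3, 0);
    printf("total_load=%lu busiest_load=%lu\n",
           sds.total_load, sds.busiest.group_load);
    return 0;
}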