Lines Matching defs:sds
7309 struct sched_domain_shared *sds;
7311 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
7312 if (sds)
7313 WRITE_ONCE(sds->has_idle_cores, val);
7318 struct sched_domain_shared *sds;
7320 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
7321 if (sds)
7322 return READ_ONCE(sds->has_idle_cores);
9722 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
9731 *sds = (struct sd_lb_stats){
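
The init_sd_lb_stats() hits at 9722/9731 reset the aggregate with a C compound literal. Below is a stand-alone illustration of that idiom with a made-up stats struct: members not named in the initializer are implicitly zeroed, and the embedded "current busiest" slot is seeded with a worst-case sentinel so the first real candidate compared against it wins. The sentinel detail is an assumption about intent; only the assignment itself is visible in the listing.

/*
 * Illustration of the compound-literal reset seen at line 9731.
 * The struct here is a made-up stand-in, not the kernel's sd_lb_stats.
 */
#include <limits.h>
#include <stdio.h>

struct group_stats {
        unsigned int idle_cpus;
        unsigned long group_load;
};

struct domain_stats {
        struct group_stats busiest_stat;
        unsigned long total_load;
        unsigned long total_capacity;
};

static void init_domain_stats(struct domain_stats *sds)
{
        /* everything not listed (total_load, total_capacity, ...) becomes 0 */
        *sds = (struct domain_stats){
                .busiest_stat = {
                        .idle_cpus = UINT_MAX, /* worst case: any real group beats it */
                },
        };
}

int main(void)
{
        struct domain_stats sds;

        init_domain_stats(&sds);
        printf("total_load=%lu sentinel idle_cpus=%u\n",
               sds.total_load, sds.busiest_stat.idle_cpus);
        return 0;
}
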
10015 * @sds: Load-balancing data with statistics of the local group
10034 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
10084 struct sd_lb_stats *sds,
10094 ncores_busiest = sds->busiest->cores;
10095 ncores_local = sds->local->cores;
10134 * @sds: Load-balancing data with statistics of the local group.
10140 struct sd_lb_stats *sds,
10149 local_group = group == sds->local;
10217 sched_asym(env, sds, sgs, group)) {
10236 * @sds: sched_domain statistics
10247 struct sd_lb_stats *sds,
10251 struct sg_lb_stats *busiest = &sds->busiest_stat;
10266 sds->local_stat.group_type != group_has_spare))
10296 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
10340 if (sds->busiest->flags & SD_SHARE_CPUCAPACITY)
10352 if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
10837 * @sds: variable to hold the statistics for this sched_domain.
10840 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
10843 struct sg_lb_stats *local = &sds->local_stat;
10854 sds->local = sg;
10862 update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
10868 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
10869 sds->busiest = sg;
10870 sds->busiest_stat = *sgs;
10875 sds->total_load += sgs->group_load;
10876 sds->total_capacity += sgs->group_capacity;
10887 if (sds->busiest)
10888 sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);
10892 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
10917 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
10919 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
10923 local = &sds->local_stat;
10924 busiest = &sds->busiest_stat;
11005 if (busiest->group_weight == 1 || sds->prefer_sibling) {
11011 env->imbalance = sibling_imbalance(env, sds, busiest, local);
11060 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
11061 sds->total_capacity;
11067 if (local->avg_load >= sds->avg_load) {
11084 (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
11085 (sds->avg_load - local->avg_load) * local->group_capacity
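
The calculate_imbalance() hits at 11060-11085 show the avg_load path: the domain average is total_load scaled by SCHED_CAPACITY_SCALE and divided by total_capacity, the balanced case is detected when the local group already sits at or above that average (11067), and otherwise the imbalance is the smaller of two capacity-weighted deltas, so pulling load never pushes the local group past the average. The stand-alone sketch below just evaluates that arithmetic on invented sample numbers; it is a model of the formula, not the kernel function.

/*
 * Model of the avg_load-based imbalance seen at lines 11060-11085.
 * Group loads and capacities are invented sample values;
 * SCHED_CAPACITY_SCALE is 1024 as in the kernel.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* per-group statistics: load and capacity (sample values) */
        unsigned long busiest_load = 3072, busiest_cap = 1024;
        unsigned long local_load   = 1024, local_cap   = 2048;

        unsigned long total_load = busiest_load + local_load;
        unsigned long total_cap  = busiest_cap + local_cap;

        /* avg_load is load normalized by capacity, scaled by 1024 */
        unsigned long sds_avg   = total_load   * SCHED_CAPACITY_SCALE / total_cap;
        unsigned long busy_avg  = busiest_load * SCHED_CAPACITY_SCALE / busiest_cap;
        unsigned long local_avg = local_load   * SCHED_CAPACITY_SCALE / local_cap;

        if (local_avg >= sds_avg) {
                /* local group already at/above the domain average: balanced */
                printf("balanced, imbalance=0\n");
                return 0;
        }

        /*
         * Imbalance is capped by both how far the busiest group sits above
         * the domain average and how far the local group sits below it.
         */
        unsigned long imbalance = min_ul(
                (busy_avg - sds_avg)  * busiest_cap,
                (sds_avg - local_avg) * local_cap) / SCHED_CAPACITY_SCALE;

        printf("avg=%lu busiest=%lu local=%lu -> imbalance=%lu\n",
               sds_avg, busy_avg, local_avg, imbalance);
        return 0;
}
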
11124 struct sd_lb_stats sds;
11126 init_sd_lb_stats(&sds);
11132 update_sd_lb_stats(env, &sds);
11135 if (!sds.busiest)
11138 busiest = &sds.busiest_stat;
11163 local = &sds.local_stat;
11184 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
11185 sds.total_capacity;
11191 if (local->avg_load >= sds.avg_load)
11207 if (sds.prefer_sibling && local->group_type == group_has_spare &&
11208 sibling_imbalance(env, &sds, busiest, local) > 1)
11222 smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
11251 calculate_imbalance(env, &sds);
11252 return env->imbalance ? sds.busiest : NULL;
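
Taken together, the hits at 11124-11252 outline the top-level flow of the busiest-group search: stack-allocated sd_lb_stats, one statistics pass that also records the busiest and local groups, an early exit when no busiest group exists, a series of "already balanced" checks, then calculate_imbalance() and a final return of the busiest group only if an imbalance remains. The skeleton below condenses that control flow; every type and helper in it is a stub, and all of the balance heuristics are collapsed into one placeholder.

/*
 * Condensed control skeleton of the fragments at lines 11124-11252.
 * Stand-in types and helpers only; illustrative, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct group       { int id; };
struct sd_stats    { struct group *busiest, *local; };
struct balance_env { unsigned long imbalance; };

static void init_stats(struct sd_stats *sds)
{
        *sds = (struct sd_stats){ 0 };
}

static void update_stats(struct balance_env *env, struct sd_stats *sds)
{
        /* the real pass walks every group, accumulates totals and
         * remembers the busiest / local group; stubbed out here */
        (void)env;
        (void)sds;
}

static int looks_balanced(const struct sd_stats *sds)
{
        (void)sds;
        return 0;               /* stands in for the many early-out checks */
}

static void calc_imbalance(struct balance_env *env, struct sd_stats *sds)
{
        (void)sds;
        env->imbalance = 0;     /* the real code derives how much load to pull */
}

static struct group *find_src_group(struct balance_env *env)
{
        struct sd_stats sds;

        init_stats(&sds);
        update_stats(env, &sds);        /* fills totals, picks sds.busiest */

        if (!sds.busiest)               /* no overloaded group in this domain */
                return NULL;

        if (looks_balanced(&sds))       /* the "out_balanced" exits */
                return NULL;

        calc_imbalance(env, &sds);      /* decide how much load to move */
        return env->imbalance ? sds.busiest : NULL;
}

int main(void)
{
        struct balance_env env = { 0 };

        printf("src group: %p\n", (void *)find_src_group(&env));
        return 0;
}
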
12207 struct sched_domain_shared *sds;
12303 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
12304 if (sds) {
12314 nr_busy = atomic_read(&sds->nr_busy_cpus);
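
The last three hits sit in the nohz balancer kick path: the same RCU-dereferenced per-CPU LLC state, this time read for its nr_busy_cpus atomic counter. The sketch below models that counter with user-space C11 atomics and invented helper names; the idea that more than one busy CPU sharing the LLC justifies kicking idle load balancing is an assumption about the surrounding (unlisted) code, not something shown in the fragments.

/*
 * User-space model of the nr_busy_cpus read at line 12314: an atomic
 * per-LLC counter updated on busy/idle transitions and read locklessly
 * to decide whether to kick rebalancing.  All names are stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct llc_shared {
        atomic_int nr_busy_cpus;        /* stands in for sds->nr_busy_cpus */
};

static void cpu_becomes_busy(struct llc_shared *sds)
{
        atomic_fetch_add_explicit(&sds->nr_busy_cpus, 1, memory_order_relaxed);
}

static void cpu_goes_idle(struct llc_shared *sds)
{
        atomic_fetch_sub_explicit(&sds->nr_busy_cpus, 1, memory_order_relaxed);
}

/* decide whether to kick idle-load balancing for this LLC (assumed threshold) */
static bool should_kick(struct llc_shared *sds)
{
        int nr_busy = atomic_load_explicit(&sds->nr_busy_cpus,
                                           memory_order_relaxed);

        return nr_busy > 1;     /* more than one busy CPU sharing the LLC */
}

int main(void)
{
        struct llc_shared llc = { .nr_busy_cpus = 0 };

        cpu_becomes_busy(&llc);
        cpu_becomes_busy(&llc);
        printf("kick: %d\n", should_kick(&llc));
        cpu_goes_idle(&llc);
        printf("kick: %d\n", should_kick(&llc));
        return 0;
}
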