Lines Matching refs:sds (identifier cross-reference for sds; the hits below are from kernel/sched/fair.c)
6339 struct sched_domain_shared *sds;
6341 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6342 if (sds) {
6343 WRITE_ONCE(sds->has_idle_cores, val);
6349 struct sched_domain_shared *sds;
6351 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6352 if (sds) {
6353 return READ_ONCE(sds->has_idle_cores);
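
The first two clusters (6339-6343 and 6349-6353) are the has_idle_cores accessor pair, set_idle_cores() and test_idle_cores(), which publish a per-LLC hint for idle-core search. A minimal sketch of the pattern, reconstructed from the matched lines (the exact prototypes are an assumption based on the kernel sources these line numbers suggest):

static inline void set_idle_cores(int cpu, int val)
{
	struct sched_domain_shared *sds;

	/* The shared LLC state is published via an RCU-protected per-CPU pointer. */
	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		WRITE_ONCE(sds->has_idle_cores, val);
}

static inline bool test_idle_cores(int cpu, bool def)
{
	struct sched_domain_shared *sds;

	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		return READ_ONCE(sds->has_idle_cores);

	return def;
}

WRITE_ONCE()/READ_ONCE() are used because every CPU sharing the LLC reads and writes this flag locklessly; they keep the compiler from tearing or caching the access.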
8619 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
8628 *sds = (struct sd_lb_stats) {
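
Line 8628 resets the per-balance-pass statistics by assigning a compound literal, which zeroes every field not named explicitly. A sketch of the initializer (the exact field list is an assumption; only busiest, local, the totals, and busiest_stat are implied by the later matches):

static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
	/*
	 * One assignment resets the whole structure; unnamed fields
	 * are implicitly zeroed by the compound literal.
	 */
	*sds = (struct sd_lb_stats){
		.busiest = NULL,
		.local = NULL,
		.total_load = 0UL,
		.total_capacity = 0UL,
		.busiest_stat = {
			.idle_cpus = UINT_MAX,
			.group_type = group_has_spare,
		},
	};
}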
9028 * @sds: sched_domain statistics
9038 static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg,
9041 struct sg_lb_stats *busiest = &sds->busiest_stat;
9055 (!group_smaller_max_cpu_capacity(sg, sds->local) || sds->local_stat.group_type != group_has_spare)) {
9089 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) {
9144 (group_smaller_min_cpu_capacity(sds->local, sg))) {
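
Lines 9028-9144 all fall inside update_sd_pick_busiest(), which decides whether candidate group sg should replace the current sds->busiest. A condensed sketch of the decision order the matches trace; several per-type tie-breaks are elided:

static bool update_sd_pick_busiest(struct lb_env *env,
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
				   struct sg_lb_stats *sgs)
{
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	/* Don't pull misfit tasks the local group cannot absorb. */
	if (sgs->group_type == group_misfit_task &&
	    (!group_smaller_max_cpu_capacity(sg, sds->local) ||
	     sds->local_stat.group_type != group_has_spare))
		return false;

	/* A strictly higher group_type is always busier; a lower one never is. */
	if (sgs->group_type > busiest->group_type)
		return true;
	if (sgs->group_type < busiest->group_type)
		return false;

	/* Equal type: per-type tie-breaks, e.g. asym packing preference. */
	if (sgs->group_type == group_asym_packing &&
	    sched_asym_prefer(sg->asym_prefer_cpu,
			      sds->busiest->asym_prefer_cpu))
		return false;

	/*
	 * Don't pick a not-overloaded group whose minimum CPU capacity
	 * exceeds the local group's: migrating there could hurt throughput.
	 */
	if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
	    sgs->group_type <= group_fully_busy &&
	    group_smaller_min_cpu_capacity(sds->local, sg))
		return false;

	return true;
}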
9509 * @sds: variable to hold the statistics for this sched_domain.
9512 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
9516 struct sg_lb_stats *local = &sds->local_stat;
9532 sds->local = sg;
9546 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
9547 sds->busiest = sg;
9548 sds->busiest_stat = *sgs;
9553 sds->total_load += sgs->group_load;
9554 sds->total_capacity += sgs->group_capacity;
9560 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
9569 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
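
Lines 9509-9569 are the aggregation loop of update_sd_lb_stats(): each group in the domain gets its statistics computed, the busiest non-local candidate is remembered, and domain-wide totals are accumulated. A condensed sketch of the loop's shape (capacity refresh and sg_status handling are elided):

static inline void update_sd_lb_stats(struct lb_env *env,
				      struct sd_lb_stats *sds)
{
	struct sched_domain *child = env->sd->child;
	struct sched_group *sg = env->sd->groups;
	struct sg_lb_stats *local = &sds->local_stat;
	struct sg_lb_stats tmp_sgs;
	int sg_status = 0;

	do {
		struct sg_lb_stats *sgs = &tmp_sgs;

		/* The group containing dst_cpu is the "local" group. */
		if (cpumask_test_cpu(env->dst_cpu, sched_group_span(sg))) {
			sds->local = sg;
			sgs = local;	/* fill local_stat directly */
		}

		update_sg_lb_stats(env, sg, sgs, &sg_status);

		/* Remember the busiest non-local group seen so far. */
		if (sg != sds->local &&
		    update_sd_pick_busiest(env, sds, sg, sgs)) {
			sds->busiest = sg;
			sds->busiest_stat = *sgs;
		}

		/* Domain-wide totals feed the avg_load computation later. */
		sds->total_load += sgs->group_load;
		sds->total_capacity += sgs->group_capacity;

		sg = sg->next;
	} while (sg != env->sd->groups);

	/* SD_PREFER_SIBLING comes from the child domain's flags. */
	sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;

	if (env->sd->flags & SD_NUMA)
		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
}

Note that sds->busiest_stat is copied by value at line 9548, so the winner's statistics survive later iterations that reuse the temporary sgs buffer.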
9609 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
9611 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
9615 local = &sds->local_stat;
9616 busiest = &sds->busiest_stat;
9679 if (busiest->group_weight == 1 || sds->prefer_sibling) {
9717 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / sds->total_capacity;
9737 env->imbalance = min((busiest->avg_load - sds->avg_load) * busiest->group_capacity,
9738 (sds->avg_load - local->avg_load) * local->group_capacity) /
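
Lines 9609-9738 belong to calculate_imbalance(). The matched lines show the final, load-based branch: the domain average load is derived from the totals accumulated above, and the imbalance is the smaller of the two deltas so that pulling cannot overshoot the average in either group. A sketch of just that branch (the earlier misfit, asym, spare-capacity, and prefer_sibling cases around line 9679 are elided):

static inline void calculate_imbalance(struct lb_env *env,
				       struct sd_lb_stats *sds)
{
	struct sg_lb_stats *local = &sds->local_stat;
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	/* Capacity-weighted average load across the whole domain. */
	sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
			sds->total_capacity;

	/*
	 * Move just enough load to bring busiest down to, and local up
	 * to, the domain average; min() of the two deltas avoids
	 * overshooting in either direction.
	 */
	env->imbalance = min((busiest->avg_load - sds->avg_load) *
			     busiest->group_capacity,
			     (sds->avg_load - local->avg_load) *
			     local->group_capacity) / SCHED_CAPACITY_SCALE;
}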
9778 struct sd_lb_stats sds;
9780 init_sd_lb_stats(&sds);
9786 update_sd_lb_stats(env, &sds);
9796 local = &sds.local_stat;
9797 busiest = &sds.busiest_stat;
9800 if (!sds.busiest) {
9845 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / sds.total_capacity;
9851 if (local->avg_load >= sds.avg_load) {
9865 if (sds.prefer_sibling && local->group_type == group_has_spare &&
9903 calculate_imbalance(env, &sds);
9904 return env->imbalance ? sds.busiest : NULL;
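
Lines 9778-9904 come from find_busiest_group(), which ties the pieces together: initialize the statistics, fill them, run a series of already-balanced bail-outs, then compute the imbalance. A heavily condensed sketch of that control flow (the bail-out conditions between lines 9800 and 9865 are abbreviated to the two the matches show):

static struct sched_group *find_busiest_group(struct lb_env *env)
{
	struct sg_lb_stats *local, *busiest;
	struct sd_lb_stats sds;

	init_sd_lb_stats(&sds);
	update_sd_lb_stats(env, &sds);

	local = &sds.local_stat;
	busiest = &sds.busiest_stat;

	/* No busiest candidate: nothing to pull. */
	if (!sds.busiest)
		goto out_balanced;

	/* ... misfit / imbalanced / prefer_sibling decisions elided ... */

	sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
		       sds.total_capacity;

	/* Local group already at or above the domain average: balanced. */
	if (local->avg_load >= sds.avg_load)
		goto out_balanced;

	calculate_imbalance(env, &sds);
	return env->imbalance ? sds.busiest : NULL;

out_balanced:
	env->imbalance = 0;
	return NULL;
}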
10813 struct sched_domain_shared *sds;
10910 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10911 if (sds) {
10921 nr_busy = atomic_read(&sds->nr_busy_cpus);
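
The final cluster (10813-10921) is in the nohz balancer kick path: the same RCU-published per-LLC structure is consulted again, this time for its nr_busy_cpus counter, to decide whether an idle CPU should be kicked into a balance pass. The wrapper below is hypothetical (in fair.c the check is open-coded inside nohz_balancer_kick()); it only illustrates the access pattern:

static bool llc_has_multiple_busy_cpus(int cpu)
{
	struct sched_domain_shared *sds;
	bool ret = false;

	rcu_read_lock();
	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds) {
		/*
		 * nr_busy_cpus is an atomic_t maintained as CPUs enter
		 * and leave the tick; more than one busy CPU in this
		 * LLC suggests an imbalance worth a nohz kick.
		 */
		ret = atomic_read(&sds->nr_busy_cpus) > 1;
	}
	rcu_read_unlock();

	return ret;
}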