Lines Matching refs:busiest
8935 * first so the group_type can simply be compared when selecting the busiest
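The comment fragment above refers to the ordering of enum group_type: the states are declared from least to most busy, so update_sd_pick_busiest() can decide which candidate group needs attention with a plain integer comparison. A minimal sketch of that idea (illustrative names; the real enum also contains asym-packing and SMT-balance states whose exact positions are not shown here):

/*
 * Sketch only, not the kernel's exact enum: group types are declared in
 * ascending order of "badness", so comparing the enumerators directly
 * tells which candidate group is busier.
 */
enum group_type_sketch {
	group_has_spare = 0,	/* room to run more tasks */
	group_fully_busy,	/* no spare cycles, but no contention */
	group_misfit_task,	/* a task needs a more capable CPU */
	group_imbalanced,	/* affinity constraints skewed placement */
	group_overloaded	/* tasks do not get the cycles they need */
};

static inline int busier_type(enum group_type_sketch a, enum group_type_sketch b)
{
	return a > b;	/* later enumerators describe busier groups */
}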
9711 struct sched_group *busiest; /* Busiest group in this sd */
9718 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
9728 * busiest_stat::idle_cpus to the worst busiest group because
9732 .busiest = NULL,
9898 * When this is detected, this group becomes a candidate for busiest; see
10016 * @sgs: Load-balancing statistics of the candidate busiest group
10017 * @group: The candidate busiest group
10085 struct sg_lb_stats *busiest,
10091 if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running)
10094 ncores_busiest = sds->busiest->cores;
10098 imbalance = busiest->sum_nr_running;
10104 imbalance = ncores_local * busiest->sum_nr_running;
10112 busiest->sum_nr_running > 1)
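The arithmetic in the fragments above (ncores_local * busiest->sum_nr_running and its counterpart) compares tasks-per-core across groups of different widths without dividing. A stand-alone model of that cross-multiplication, assuming the goal is an equal nr_running/ncores ratio on both sides; the function name is mine, and the kernel's sibling_imbalance() applies its own normalization and rounding on top:

#include <stdio.h>

/* > 0 only when the busiest group has more tasks per core than local. */
static long tasks_to_equalize_ratio(int ncores_local, long nr_local,
				    int ncores_busiest, long nr_busiest)
{
	long diff = (long)ncores_local * nr_busiest -
		    (long)ncores_busiest * nr_local;

	if (nr_busiest == 0 || diff <= 0)
		return 0;

	/* Tasks to move so both groups end at the same tasks-per-core. */
	return diff / (ncores_local + ncores_busiest);
}

int main(void)
{
	/* A 2-core group running 6 tasks vs. an idle 4-core group: move 4. */
	printf("%ld\n", tasks_to_equalize_ratio(4, 0, 2, 6));
	return 0;
}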
10234 * update_sd_pick_busiest - return true on busiest group
10237 * @sg: sched_group candidate to be checked for being the busiest
10241 * busiest group.
10244 * busiest group. %false otherwise.
10251 struct sg_lb_stats *busiest = &sds->busiest_stat;
10269 if (sgs->group_type > busiest->group_type)
10272 if (sgs->group_type < busiest->group_type)
10276 * The candidate and the current busiest group are the same type of
10277 * group. Let's check which one is the busiest according to the type.
10283 if (sgs->avg_load <= busiest->avg_load)
10296 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
10305 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
10314 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
10332 if (sgs->avg_load < busiest->avg_load)
10335 if (sgs->avg_load == busiest->avg_load) {
10340 if (sds->busiest->flags & SD_SHARE_CPUCAPACITY)
10352 if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
10367 if (sgs->idle_cpus > busiest->idle_cpus)
10369 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
10370 (sgs->sum_nr_running <= busiest->sum_nr_running))
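The fragments above are the tie-breaking half of update_sd_pick_busiest(). A condensed sketch of its shape, with hypothetical types and only three of the per-type tie-breaks kept (the real function handles more cases): compare the ordered group_type first, and only groups of the same type fall through to a type-specific comparison.

enum gtype { SPARE = 0, FULLY_BUSY, MISFIT, OVERLOADED };	/* illustrative */

struct sg_stats {
	enum gtype type;
	unsigned long avg_load;
	unsigned long misfit_load;
	unsigned int idle_cpus;
	unsigned int nr_running;
};

/* Returns 1 when the candidate should replace the current busiest pick. */
static int pick_busiest(const struct sg_stats *sgs,	/* candidate */
			const struct sg_stats *busiest)	/* current pick */
{
	if (sgs->type > busiest->type)
		return 1;
	if (sgs->type < busiest->type)
		return 0;

	switch (sgs->type) {
	case OVERLOADED:	/* higher average load wins */
		return sgs->avg_load > busiest->avg_load;
	case MISFIT:		/* the larger stranded task wins */
		return sgs->misfit_load > busiest->misfit_load;
	default:		/* has spare: fewer idle CPUs means busier */
		return sgs->idle_cpus < busiest->idle_cpus ||
		       (sgs->idle_cpus == busiest->idle_cpus &&
			sgs->nr_running > busiest->nr_running);
	}
}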
10869 sds->busiest = sg;
10883 * Indicate that the child domain of the busiest group prefers tasks
10887 if (sds->busiest)
10888 sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);
10921 struct sg_lb_stats *local, *busiest;
10924 busiest = &sds->busiest_stat;
10926 if (busiest->group_type == group_misfit_task) {
10937 env->imbalance = busiest->group_misfit_task_load;
10942 if (busiest->group_type == group_asym_packing) {
10948 env->imbalance = busiest->sum_h_nr_running;
10952 if (busiest->group_type == group_smt_balance) {
10959 if (busiest->group_type == group_imbalanced) {
10973 * emptying busiest.
10976 if ((busiest->group_type > group_fully_busy) &&
10979 * If busiest is overloaded, try to fill spare
10981 * in busiest or busiest still being overloaded but
10994 * waiting task in this overloaded busiest group. Let's
11005 if (busiest->group_weight == 1 || sds->prefer_sibling) {
11011 env->imbalance = sibling_imbalance(env, sds, busiest, local);
11020 (local->idle_cpus - busiest->idle_cpus));
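The two fragments above are the "busiest still has room" branch of calculate_imbalance(). A small sketch of that decision (illustrative struct and function name): single-CPU groups and SD_PREFER_SIBLING domains get a task count from sibling_imbalance(), everything else only evens out idle CPUs.

struct grp_stats {
	unsigned int weight;	/* CPUs in the group */
	unsigned int idle_cpus;
};

static long spare_capacity_imbalance(const struct grp_stats *local,
				     const struct grp_stats *busiest,
				     int prefer_sibling,
				     long sibling_imb)	/* sibling_imbalance() result */
{
	if (busiest->weight == 1 || prefer_sibling)
		return sibling_imb;	/* spread running tasks evenly */

	/* Otherwise only even out the number of idle CPUs. */
	if (local->idle_cpus > busiest->idle_cpus)
		return local->idle_cpus - busiest->idle_cpus;
	return 0;
}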
11040 * busiest group
11053 * busiest group don't try to pull any tasks.
11055 if (local->avg_load >= busiest->avg_load) {
11084 (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
11092 * Decision matrix according to the local and busiest group type:
11094 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
11112 * find_busiest_group - Returns the busiest group within the sched_domain
11119 * Return: - The busiest group if imbalance exists.
11123 struct sg_lb_stats *local, *busiest;
11135 if (!sds.busiest)
11138 busiest = &sds.busiest_stat;
11141 if (busiest->group_type == group_misfit_task)
11152 if (busiest->group_type == group_asym_packing)
11156 * If the busiest group is imbalanced the below checks don't
11160 if (busiest->group_type == group_imbalanced)
11165 * If the local group is busier than the selected busiest group
11168 if (local->group_type > busiest->group_type)
11178 * busiest group don't try to pull any tasks.
11180 if (local->avg_load >= busiest->avg_load)
11195 * If the busiest group is more loaded, use imbalance_pct to be
11198 if (100 * busiest->avg_load <=
11204 * Try to move all excess tasks to a sibling domain of the busiest
11208 sibling_imbalance(env, &sds, busiest, local) > 1)
11211 if (busiest->group_type != group_overloaded) {
11214 * If the busiest group is not overloaded (and as a
11221 if (busiest->group_type == group_smt_balance &&
11222 smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
11227 if (busiest->group_weight > 1 &&
11228 local->idle_cpus <= (busiest->idle_cpus + 1)) {
11230 * If the busiest group is not overloaded
11231 * and there is no imbalance between this and busiest
11241 if (busiest->sum_h_nr_running == 1) {
11243 * busiest doesn't have any tasks waiting to run
11252 return env->imbalance ? sds.busiest : NULL;
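The fragments above trace find_busiest_group()'s funnel of early-out checks. A sketch of that order of decisions, with illustrative types and the SMT-balance and single-running-task special cases elided; it returns nonzero when an imbalance should be computed and balanced:

enum gkind { HAS_SPARE = 0, FULLY_BUSY, MISFIT, ASYM, IMBALANCED, OVERLOADED };

struct gstat {
	enum gkind type;
	unsigned long avg_load;
	unsigned int idle_cpus;
};

static int should_balance(const struct gstat *local, const struct gstat *busiest,
			  unsigned int imbalance_pct,	/* e.g. 117 */
			  int prefer_sibling, long sibling_imb)
{
	if (!busiest)
		return 0;		/* nothing to pull from */

	/* These states always justify a balance attempt. */
	if (busiest->type == MISFIT || busiest->type == ASYM ||
	    busiest->type == IMBALANCED)
		return 1;

	if (local->type > busiest->type)
		return 0;		/* we are the busier side ourselves */

	/* Once everything is loaded, fall back to comparing average load. */
	if (local->type == OVERLOADED) {
		if (local->avg_load >= busiest->avg_load)
			return 0;
		if (100 * busiest->avg_load <= imbalance_pct * local->avg_load)
			return 0;	/* gap too small to be worth it */
	}

	if (prefer_sibling && sibling_imb > 1)
		return 1;		/* push excess toward a sibling domain */

	if (busiest->type != OVERLOADED &&
	    local->idle_cpus <= busiest->idle_cpus + 1)
		return 0;		/* idle CPUs already roughly even */

	return 1;			/* go on to calculate_imbalance() */
}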
11260 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
11265 struct rq *busiest = NULL, *rq;
11287 * If we ignore the actual busiest queue to migrate another
11288 * task, the next balance pass can still reduce the busiest
11361 busiest = rq;
11378 busiest = rq;
11385 busiest = rq;
11396 busiest = rq;
11404 return busiest;
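The repeated "busiest = rq;" assignments above are find_busiest_queue() keeping the best runqueue seen so far while walking the group. A generic sketch of that selection loop (hypothetical types; the kernel ranks queues by load, utilization, task count, or misfit load depending on migration_type):

struct rq_metric {
	int cpu;
	unsigned long metric;	/* already scaled for the chosen migration_type */
};

static int pick_busiest_cpu(const struct rq_metric *rqs, int nr)
{
	unsigned long best = 0;
	int i, busiest_cpu = -1;

	for (i = 0; i < nr; i++) {
		if (rqs[i].metric > best) {
			best = rqs[i].metric;
			busiest_cpu = rqs[i].cpu;
		}
	}
	return busiest_cpu;	/* -1 when no queue has anything to give */
}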
11567 struct rq *busiest;
11598 busiest = find_busiest_queue(&env, group);
11599 if (!busiest) {
11604 WARN_ON_ONCE(busiest == env.dst_rq);
11608 env.src_cpu = busiest->cpu;
11609 env.src_rq = busiest;
11614 if (busiest->nr_running > 1) {
11617 * an imbalance but busiest->nr_running <= 1, the group is
11621 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
11624 rq_lock_irqsave(busiest, &rf);
11625 update_rq_clock(busiest);
11636 * unlock busiest->lock, and we are able to be sure
11641 rq_unlock(busiest, &rf);
11653 if (env.loop < busiest->nr_running)
11706 __cpumask_clear_cpu(cpu_of(busiest), cpus);
11710 * active CPUs remaining as possible busiest CPUs to
11738 raw_spin_rq_lock_irqsave(busiest, flags);
11742 * if the curr task on busiest CPU can't be
11745 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
11746 raw_spin_rq_unlock_irqrestore(busiest, flags);
11758 if (!busiest->active_balance &&
11759 !cpu_isolated(cpu_of(busiest))) {
11760 busiest->active_balance = 1;
11761 busiest->push_cpu = this_cpu;
11766 raw_spin_rq_unlock_irqrestore(busiest, flags);
11768 stop_one_cpu_nowait(cpu_of(busiest),
11769 active_load_balance_cpu_stop, busiest,
11770 &busiest->active_balance_work);
11868 * running tasks off the busiest CPU onto idle CPUs. It requires at
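The last fragments show load_balance() falling back to active balancing: when no queued task could be pulled, it flags the busiest runqueue and asks the stopper thread on that CPU to push its currently running task. A sketch of that handshake (fields and helper are illustrative, not the kernel structures):

struct rq_sketch {
	int active_balance;	/* a push is already in flight */
	int push_cpu;		/* destination CPU for the running task */
};

/* Returns 1 if a push was queued, 0 if one was already pending. */
static int kick_active_balance(struct rq_sketch *busiest, int this_cpu)
{
	if (busiest->active_balance)
		return 0;

	busiest->active_balance = 1;	/* cleared by the stopper callback */
	busiest->push_cpu = this_cpu;
	/*
	 * After dropping the runqueue lock, the kernel then calls
	 * stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop,
	 * busiest, ...), as in the lines above.
	 */
	return 1;
}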