Lines Matching defs:busiest
7812 * first so the group_type can simply be compared when selecting the busiest
8608 struct sched_group *busiest; /* Busiest group in this sd */
8615 struct sg_lb_stats busiest_stat; /* Statistics of the busiest group */
8625 * busiest_stat::idle_cpus to the worst busiest group because
8629 .busiest = NULL,
8792 * When this is so detected; this group becomes a candidate for busiest; see
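
For orientation, here is a minimal userspace sketch of the bookkeeping the matches above (8608-8629) come from: the per-domain stats keep a pointer to the busiest group plus a copy of its statistics, and the initializer seeds busiest_stat with the weakest possible values so the first real group wins the comparison. Field names mirror kernel/sched/fair.c, but the types and the init helper are simplified stand-ins, not the kernel definitions.

#include <limits.h>
#include <stddef.h>

/* Ordered by pulling priority, lowest first, so types can be compared directly. */
enum group_type {
        group_has_spare = 0,
        group_fully_busy,
        group_misfit_task,
        group_asym_packing,
        group_imbalanced,
        group_overloaded,
};

struct sg_lb_stats {                     /* per-group statistics (subset) */
        unsigned long avg_load;
        unsigned int idle_cpus;
        unsigned int sum_nr_running;
        enum group_type group_type;
};

struct sched_group;                      /* opaque in this sketch */

struct sd_lb_stats {                     /* per-domain bookkeeping (subset) */
        struct sched_group *busiest;     /* busiest group in this sd */
        struct sg_lb_stats busiest_stat; /* statistics of the busiest group */
};

/* Mirrors the ".busiest = NULL" line above: no candidate yet, and busiest_stat
 * set to the "worst" busiest group (all CPUs idle, lowest group_type) so that
 * the first real group compared against it is preferred. */
static void init_sd_lb_stats_sketch(struct sd_lb_stats *sds)
{
        sds->busiest = NULL;
        sds->busiest_stat.avg_load = 0;
        sds->busiest_stat.idle_cpus = UINT_MAX;
        sds->busiest_stat.sum_nr_running = 0;
        sds->busiest_stat.group_type = group_has_spare;
}
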
9026 * update_sd_pick_busiest - return 1 on busiest group
9029 * @sg: sched_group candidate to be checked for being the busiest
9033 * busiest group.
9036 * busiest group. %false otherwise.
9041 struct sg_lb_stats *busiest = &sds->busiest_stat;
9059 if (sgs->group_type > busiest->group_type) {
9063 if (sgs->group_type < busiest->group_type) {
9068 * The candidate and the current busiest group are the same type of
9069 * group. Let's check which one is the busiest according to the type.
9075 if (sgs->avg_load <= busiest->avg_load) {
9089 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) {
9099 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) {
9115 if (sgs->avg_load <= busiest->avg_load) {
9128 if (sgs->idle_cpus > busiest->idle_cpus) {
9130 } else if ((sgs->idle_cpus == busiest->idle_cpus) && (sgs->sum_nr_running <= busiest->sum_nr_running)) {
9547 sds->busiest = sg;
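
The update_sd_pick_busiest() matches above (9059-9130) boil down to: compare group_type first, and only break ties between groups of the same type. Below is a compact, self-contained approximation of that ordering; the empty-group and asymmetric-capacity early exits, and the asym_packing/imbalanced tie-breaks (which need per-CPU priority data), are deliberately left out, and the type names are stand-ins.

enum group_type {
        group_has_spare = 0, group_fully_busy, group_misfit_task,
        group_asym_packing, group_imbalanced, group_overloaded,
};

struct sg_lb_stats {
        unsigned long avg_load;
        unsigned long group_misfit_task_load;
        unsigned int idle_cpus;
        unsigned int sum_nr_running;
        enum group_type group_type;
};

/* Return 1 if the candidate @sgs should replace the current @busiest pick. */
static int pick_busiest_sketch(const struct sg_lb_stats *sgs,
                               const struct sg_lb_stats *busiest)
{
        /* A higher group_type always wins; a lower one always loses. */
        if (sgs->group_type > busiest->group_type)
                return 1;
        if (sgs->group_type < busiest->group_type)
                return 0;

        /* Same type: tie-break according to the type. */
        switch (sgs->group_type) {
        case group_overloaded:
        case group_fully_busy:
                /* Prefer the higher average load. */
                return sgs->avg_load > busiest->avg_load;
        case group_misfit_task:
                /* Prefer the bigger misfit task load. */
                return sgs->group_misfit_task_load >= busiest->group_misfit_task_load;
        case group_has_spare:
                /* Prefer fewer idle CPUs, then more runnable tasks. */
                if (sgs->idle_cpus > busiest->idle_cpus)
                        return 0;
                if (sgs->idle_cpus == busiest->idle_cpus &&
                    sgs->sum_nr_running <= busiest->sum_nr_running)
                        return 0;
                return 1;
        default:
                /* asym_packing / imbalanced: needs CPU priorities, omitted. */
                return 1;
        }
}
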
9613 struct sg_lb_stats *local, *busiest;
9616 busiest = &sds->busiest_stat;
9618 if (busiest->group_type == group_misfit_task) {
9625 if (busiest->group_type == group_asym_packing) {
9631 env->imbalance = busiest->sum_h_nr_running;
9635 if (busiest->group_type == group_imbalanced) {
9649 * emptying busiest.
9652 if ((busiest->group_type > group_fully_busy) && !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
9654 * If busiest is overloaded, try to fill spare
9656 * in busiest or busiest still being overloaded but
9668 * waiting task in this overloaded busiest group. Let's
9679 if (busiest->group_weight == 1 || sds->prefer_sibling) {
9680 unsigned int nr_diff = busiest->sum_nr_running;
9694 env->imbalance = max_t(long, 0, (local->idle_cpus - busiest->idle_cpus) >> 1);
9699 env->imbalance = adjust_numa_imbalance(env->imbalance, busiest->sum_nr_running);
9707 * busiest group
9720 * busiest group don't try to pull any tasks.
9722 if (local->avg_load >= busiest->avg_load) {
9737 env->imbalance = min((busiest->avg_load - sds->avg_load) * busiest->group_capacity,
9745 * Decision matrix according to the local and busiest group type:
9747 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
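
The overloaded-vs-overloaded path that the matches around 9720-9737 come from picks the smaller of two quantities: how far the busiest group sits above the domain average and how far the local group sits below it, each scaled by the group's capacity. A worked, self-contained approximation follows; CAPACITY_SCALE stands in for SCHED_CAPACITY_SCALE (assumed 1024 here), the guards mirror the "local is at least as loaded, pull nothing" checks above, and the function assumes the busiest group's avg_load is not below the domain average.

#include <stdio.h>

#define CAPACITY_SCALE 1024UL   /* stand-in for SCHED_CAPACITY_SCALE */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static unsigned long overloaded_imbalance_sketch(unsigned long busiest_avg_load,
                                                 unsigned long busiest_capacity,
                                                 unsigned long local_avg_load,
                                                 unsigned long local_capacity,
                                                 unsigned long domain_avg_load)
{
        /* If the local group is already the more loaded side, pull nothing. */
        if (local_avg_load >= busiest_avg_load ||
            local_avg_load >= domain_avg_load)
                return 0;

        /* Don't push local above the average, don't drop busiest below it.
         * Assumes busiest_avg_load >= domain_avg_load. */
        return min_ul((busiest_avg_load - domain_avg_load) * busiest_capacity,
                      (domain_avg_load - local_avg_load) * local_capacity) /
               CAPACITY_SCALE;
}

int main(void)
{
        /* Illustrative numbers only: busiest at 1400 load per unit capacity,
         * local at 600, domain average 1000, both groups with capacity 2048:
         * min(400, 400) * 2048 / 1024 = 800. */
        printf("imbalance = %lu\n",
               overloaded_imbalance_sketch(1400, 2048, 600, 2048, 1000));
        return 0;
}
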
9765 * find_busiest_group - Returns the busiest group within the sched_domain
9773 * Return: - The busiest group if imbalance exists.
9777 struct sg_lb_stats *local, *busiest;
9797 busiest = &sds.busiest_stat;
9800 if (!sds.busiest) {
9805 if (busiest->group_type == group_misfit_task) {
9810 if (busiest->group_type == group_asym_packing) {
9815 * If the busiest group is imbalanced the below checks don't
9819 if (busiest->group_type == group_imbalanced) {
9824 * If the local group is busier than the selected busiest group
9827 if (local->group_type > busiest->group_type) {
9838 * busiest group don't try to pull any tasks.
9840 if (local->avg_load >= busiest->avg_load) {
9856 * If the busiest group is more loaded, use imbalance_pct to be
9859 if (100 * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) {
9866 busiest->sum_nr_running > local->sum_nr_running + 1) {
9870 if (busiest->group_type != group_overloaded) {
9873 * If the busiest group is not overloaded (and as a
9880 if (busiest->group_weight > 1 && local->idle_cpus <= (busiest->idle_cpus + 1)) {
9882 * If the busiest group is not overloaded
9883 * and there is no imbalance between this and busiest
9893 if (busiest->sum_h_nr_running == 1) {
9895 * busiest doesn't have any tasks waiting to run
9904 return env->imbalance ? sds.busiest : NULL;
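
Two of the out_balanced checks in find_busiest_group() above are easy to miss in the line list: the local group must not be at least as loaded as the busiest one (9840), and even then the busiest group must exceed the local one by the domain's imbalance_pct margin (9859), written without a division. A small self-contained sketch of that threshold with illustrative numbers; 117 is only an example value of imbalance_pct.

#include <stdio.h>

/* Balanced unless busiest_avg_load / local_avg_load > imbalance_pct / 100. */
static int busiest_exceeds_margin(unsigned long busiest_avg_load,
                                  unsigned long local_avg_load,
                                  unsigned int imbalance_pct)
{
        return 100UL * busiest_avg_load >
               (unsigned long)imbalance_pct * local_avg_load;
}

int main(void)
{
        /* With imbalance_pct = 117, a 15% gap is still tolerated ... */
        printf("%d\n", busiest_exceeds_margin(1150, 1000, 117));  /* 0: balanced */
        /* ... but a 25% gap triggers balancing. */
        printf("%d\n", busiest_exceeds_margin(1250, 1000, 117));  /* 1: imbalanced */
        return 0;
}
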
9912 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
9916 struct rq *busiest = NULL, *rq;
9938 * If we ignore the actual busiest queue to migrate another
9939 * task, the next balance pass can still reduce the busiest
9996 busiest = rq;
10014 busiest = rq;
10021 busiest = rq;
10032 busiest = rq;
10039 return busiest;
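
The four "busiest = rq" assignments above sit in different migration-type branches of find_busiest_queue(); for the load-based one, the queue with the highest load/capacity ratio wins, and the ratio comparison is done by cross-multiplication so no division is needed. A self-contained approximation of that selection loop, with stand-in types for the runqueue and its load/capacity accessors:

#include <stddef.h>

struct rq_sketch {
        unsigned long load;     /* stand-in for cpu_load(rq) */
        unsigned long capacity; /* stand-in for capacity_of(cpu) */
};

/* Return the queue maximizing load/capacity: keep the running maximum as a
 * (load, capacity) pair and test load_i * cap_max > load_max * cap_i. */
static struct rq_sketch *pick_busiest_rq_sketch(struct rq_sketch *rqs, int nr)
{
        unsigned long busiest_load = 0, busiest_capacity = 1;
        struct rq_sketch *busiest = NULL;
        int i;

        for (i = 0; i < nr; i++) {
                struct rq_sketch *rq = &rqs[i];

                if (rq->load * busiest_capacity > busiest_load * rq->capacity) {
                        busiest_load = rq->load;
                        busiest_capacity = rq->capacity;
                        busiest = rq;
                }
        }
        return busiest;
}
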
10157 struct rq *busiest;
10189 busiest = find_busiest_queue(&env, group);
10190 if (!busiest) {
10195 BUG_ON(busiest == env.dst_rq);
10199 env.src_cpu = busiest->cpu;
10200 env.src_rq = busiest;
10203 if (busiest->nr_running > 1) {
10206 * an imbalance but busiest->nr_running <= 1, the group is
10211 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
10214 rq_lock_irqsave(busiest, &rf);
10215 update_rq_clock(busiest);
10226 * unlock busiest->lock, and we are able to be sure
10231 rq_unlock(busiest, &rf);
10294 __cpumask_clear_cpu(cpu_of(busiest), cpus);
10298 * active CPUs remaining as possible busiest CPUs to
10327 raw_spin_lock_irqsave(&busiest->lock, flags);
10331 * if the curr task on busiest CPU can't be
10334 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
10335 raw_spin_unlock_irqrestore(&busiest->lock, flags);
10345 if (!busiest->active_balance && !cpu_isolated(cpu_of(busiest))) {
10346 busiest->active_balance = 1;
10347 busiest->push_cpu = this_cpu;
10350 raw_spin_unlock_irqrestore(&busiest->lock, flags);
10353 stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop, busiest,
10354 &busiest->active_balance_work);
10467 * running tasks off the busiest CPU onto idle CPUs. It requires at
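
Finally, the matches from 10327-10354 are the active-balance fallback: when nothing could be pulled (for example the busiest CPU only has its current task, or affinity ruled every candidate out), load_balance() marks the busiest runqueue, records the destination in push_cpu, and kicks the CPU stopper, which runs active_load_balance_cpu_stop to push the running task over; the vendor tree listed here additionally skips isolated CPUs via cpu_isolated(). Below is a simplified stand-in for that decision only, with hypothetical types; the real code does this under the busiest rq's lock and then calls stop_one_cpu_nowait().

#include <stdbool.h>

struct rq_sketch {
        int cpu;
        bool active_balance;    /* a push from this rq is already queued */
        int push_cpu;           /* where the pushed task should go */
};

/*
 * Decide whether to queue an active balance toward @this_cpu.  Mirrors the
 * matched checks: give up if the running task is not allowed on the
 * destination or the busiest CPU is isolated, and don't double-queue if a
 * push is already in flight.
 */
static bool should_kick_active_balance(struct rq_sketch *busiest, int this_cpu,
                                       bool curr_allowed_on_dst,
                                       bool busiest_isolated)
{
        if (!curr_allowed_on_dst || busiest_isolated)
                return false;

        if (busiest->active_balance)
                return false;

        busiest->active_balance = true;
        busiest->push_cpu = this_cpu;
        return true;    /* caller then kicks the stopper on busiest->cpu */
}
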