Lines Matching refs:imbalance
1411 adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
1414 * Allow a NUMA imbalance if the number of busy CPUs is less than the maximum
1421 return imbalance;
1424 * Allow a small imbalance based on a simple pair of communicating
1427 if (imbalance <= NUMA_IMBALANCE_MIN)
1430 return imbalance;
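The block at 1411-1430 is adjust_numa_imbalance(), the helper that decides whether a small NUMA imbalance should be tolerated rather than corrected. These fragments appear to come from kernel/sched/fair.c on a recent tree (sibling_imbalance() further down suggests v6.5 or later). Below is a standalone sketch of the logic the matched lines show; NUMA_IMBALANCE_MIN = 2 and the exact threshold test are my reading of fair.c, not verbatim source:

    #define NUMA_IMBALANCE_MIN 2

    static int adjust_numa_imbalance(int imbalance, int dst_running,
                                     int imb_numa_nr)
    {
            /*
             * Allow a NUMA imbalance only while the number of busy CPUs
             * stays below the per-domain threshold; beyond it, tasks
             * start competing for memory bandwidth and shared resources.
             */
            if (dst_running > imb_numa_nr)
                    return imbalance;

            /*
             * Tolerate a small imbalance, e.g. a pair of communicating
             * tasks that benefit from staying on one node.
             */
            if (imbalance <= NUMA_IMBALANCE_MIN)
                    return 0;

            return imbalance;
    }

With imb_numa_nr = 4, a destination with at most four busy CPUs absorbs an imbalance of up to two tasks (the function returns 0, meaning "ignore it"); past either limit the raw imbalance is reported unchanged.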
2305 * not address the load imbalance and helps one task at
2435 * imbalance that would be overruled by the load balancer.
2438 unsigned int imbalance;
2442 * Would movement cause an imbalance? Note that if src has
2443 * more running tasks, the imbalance is ignored, as the
2444 * move improves the imbalance from the perspective of the
2449 imbalance = max(0, dst_running - src_running);
2450 imbalance = adjust_numa_imbalance(imbalance, dst_running,
2453 /* Use idle CPU if there is no imbalance */
2454 if (!imbalance) {
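Lines 2435-2454 are from the NUMA placement path (this reads like task_numa_find_cpu()): before picking an idle CPU on the destination node, the code simulates the move and asks whether the resulting imbalance would just be undone by the load balancer. A condensed model, reusing the adjust_numa_imbalance() sketch above; plain ints stand in for the kernel's task_numa_env statistics:

    /*
     * Returns 1 when moving one task from src to dst leaves no NUMA
     * imbalance, so an idle destination CPU may be used.
     */
    static int numa_move_leaves_balance(int src_running, int dst_running,
                                        int imb_numa_nr)
    {
            int imbalance;

            /* Account for the task being moved. */
            src_running -= 1;
            dst_running += 1;

            /*
             * If src would still run more tasks than dst, the move only
             * improves things: clamp the imbalance to zero.
             */
            imbalance = dst_running - src_running;
            if (imbalance < 0)
                    imbalance = 0;

            imbalance = adjust_numa_imbalance(imbalance, dst_running,
                                              imb_numa_nr);

            return imbalance == 0;
    }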
2508 * imbalance and would be the first to start moving tasks about.
8837 * To achieve this balance we define a measure of imbalance which follows
8842 * We then move tasks around to minimize the imbalance. In the continuous
8853 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
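Lines 8837-8853 belong to the long mathematical preamble above the load balancer, which defines the imbalance measure the rest of this listing implements. Reconstructed from that comment block, with the caveat that the notation here is from memory rather than a verbatim quote:

    % Equation (4) of the fair.c preamble, approximately:
    \[
      \mathrm{imb}_{i,j} \;=\;
        \max\{\, \overline{W/C},\; W_i/C_i \,\}
        \;-\;
        \min\{\, \overline{W/C},\; W_j/C_j \,\}
    \]
    % W_i, C_i: runnable weight and compute capacity of CPU i;
    % \overline{W/C}: the average weight/capacity ratio over all CPUs.

Minimizing this pulls every CPU's weight-to-capacity ratio toward the average. The O(n^2) remark at 8853 refers to evaluating such a measure naively over all CPU pairs; the implementation avoids that by balancing pairwise along the sched_domain hierarchy instead.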
9001 long imbalance;
9270 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
9296 if (env->imbalance <= 0)
9339 * value. Make sure that env->imbalance decreases
9355 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
9358 env->imbalance -= load;
9364 if (util > env->imbalance)
9367 env->imbalance -= util;
9371 env->imbalance--;
9379 env->imbalance = 0;
9402 if (env->imbalance <= 0)
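Lines 9270-9402 are detach_tasks(), which pulls tasks off the busiest runqueue until env->imbalance is spent (the checks at 9296 and 9402 stop the loop once it reaches zero). What each detached task subtracts depends on the migration type; note 9355, where shr_bound() (a bounded right shift) shrinks the load cutoff as nr_balance_failed grows, so repeated failed attempts gradually accept bigger tasks. A condensed model of the bookkeeping, with simplified types and an unbounded shift standing in for shr_bound():

    enum migration_type {
            migrate_load, migrate_util, migrate_task, migrate_misfit
    };

    struct lb_env_model {
            enum migration_type migration_type;
            long imbalance;         /* load/util/tasks still to move */
            int nr_balance_failed;  /* relaxes the load cutoff below */
    };

    /* Returns 1 if a task with this load/util should be detached. */
    static int try_detach(struct lb_env_model *env, long load, long util)
    {
            switch (env->migration_type) {
            case migrate_load:
                    /* After repeated failures, accept bigger tasks. */
                    if ((load >> env->nr_balance_failed) > env->imbalance)
                            return 0;
                    env->imbalance -= load;
                    break;
            case migrate_util:
                    if (util > env->imbalance)
                            return 0;
                    env->imbalance -= util;
                    break;
            case migrate_task:
                    env->imbalance--;       /* counting whole tasks */
                    break;
            case migrate_misfit:
                    env->imbalance = 0;     /* one misfit task suffices */
                    break;
            }
            return 1;
    }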
9880 * Group imbalance indicates (and tries to solve) the problem where balancing
9901 * to create an effective group imbalance.
9904 * group imbalance and decide the groups need to be balanced again. A most
9910 return group->sgc->imbalance;
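Lines 9880-9910 describe the group-imbalance escape hatch for affinity-constrained balancing: when cpumask restrictions keep a lower domain from reaching balance, a flag on the group's shared capacity state marks it as a balancing candidate for the parent domain. The accessor at 9910 shows the flag is just an int on sched_group_capacity; a minimal model (the set/clear sides appear in the load_balance() fragments near the end of this listing):

    /* Capacity state shared by all CPUs of a sched_group. */
    struct sgc_model {
            int imbalance;  /* set when affinity blocked a lower level */
    };

    static int sg_imbalanced_model(const struct sgc_model *sgc)
    {
            return sgc->imbalance;
    }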
10089 long imbalance;
10098 imbalance = busiest->sum_nr_running;
10099 lsub_positive(&imbalance, local->sum_nr_running);
10100 return imbalance;
10104 imbalance = ncores_local * busiest->sum_nr_running;
10105 lsub_positive(&imbalance, ncores_busiest * local->sum_nr_running);
10106 /* Normalize the imbalance, rounding as part of the division */
10107 imbalance = 2 * imbalance + ncores_local + ncores_busiest;
10108 imbalance /= ncores_local + ncores_busiest;
10111 if (imbalance <= 1 && local->sum_nr_running == 0 &&
10113 imbalance = 2;
10115 return imbalance;
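Lines 10089-10115 are sibling_imbalance(), which sizes the pull between two groups that may have different core counts, aiming for equal nr_running per core. lsub_positive() is a subtract-that-clamps-at-zero helper; here it is inlined. A standalone model with the same arithmetic:

    static long sibling_imbalance_model(long busiest_running,
                                        int ncores_busiest,
                                        long local_running,
                                        int ncores_local)
    {
            long imbalance;

            /* Equal core counts: plain task-count difference. */
            if (ncores_busiest == ncores_local) {
                    imbalance = busiest_running - local_running;
                    return imbalance > 0 ? imbalance : 0;
            }

            /* Cross-multiplied difference, clamped at zero. */
            imbalance = ncores_local * busiest_running -
                        ncores_busiest * local_running;
            if (imbalance < 0)
                    imbalance = 0;

            /* Normalize by the mean core count, with rounding. */
            imbalance = 2 * imbalance + ncores_local + ncores_busiest;
            imbalance /= ncores_local + ncores_busiest;

            /* Pull at least two tasks toward a fully idle group. */
            if (imbalance <= 1 && local_running == 0 &&
                busiest_running > 1)
                    imbalance = 2;

            return imbalance;
    }

The result is on a doubled scale: calculate_imbalance() halves it afterwards (the >>= 1 at 11033). For example, six tasks on a 4-core busiest group versus an idle 2-core local group gives 2*6 - 4*0 = 12, normalized to (2*12 + 6)/6 = 5, halved to 2 tasks, after which both groups run one task per core.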
10261 * internally or be covered by avg_load imbalance (eventually).
10594 unsigned long imbalance;
10667 /* Calculate allowed imbalance based on load */
10668 imbalance = scale_load_down(NICE_0_LOAD) *
10674 * remote domains but "imbalance" skews the comparison making
10676 * cross-domain, add imbalance to the load on the remote node
10681 ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
10688 if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
10727 * would remain below the threshold where an imbalance is
10740 imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
10741 if (!adjust_numa_imbalance(imbalance,
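Lines 10594-10741 are from find_idlest_group(), the fork/exec placement path. Two uses of an allowed imbalance show up: a remote group must beat the local one by a margin derived from the domain's imbalance_pct before the task leaves the wakeup node (10667-10688), and across NUMA nodes the idle-CPU difference is additionally filtered through adjust_numa_imbalance() (10740-10741). A model of the margin test; NICE_0_LOAD = 1024 and imbalance_pct = 117 are the values I'd expect on a NUMA domain, but treat them as assumptions:

    #define NICE_0_LOAD_MODEL 1024L

    /*
     * Stay local unless the remote (idlest) group wins by more than
     * the allowed slack; with imbalance_pct = 117 the slack is 17%
     * of one nice-0 task's load, i.e. 174.
     */
    static int prefer_remote_group(long local_avg_load,
                                   long remote_avg_load,
                                   int imbalance_pct)
    {
            long slack = NICE_0_LOAD_MODEL * (imbalance_pct - 100) / 100;

            return remote_avg_load + slack < local_avg_load;
    }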
10914 * calculate_imbalance - Calculate the amount of imbalance present within the
10917 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
10928 /* Set imbalance to allow misfit tasks to be balanced. */
10930 env->imbalance = 1;
10933 * Set the load imbalance to allow moving a task from a cpu
10937 env->imbalance = busiest->group_misfit_task_load;
10948 env->imbalance = busiest->sum_h_nr_running;
10955 env->imbalance = 1;
10963 * the imbalance. The next load balance will take care of
10967 env->imbalance = 1;
10987 env->imbalance = max(local->group_capacity, local->group_util) -
10997 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
10999 env->imbalance = 1;
11011 env->imbalance = sibling_imbalance(env, sds, busiest, local);
11019 env->imbalance = max_t(long, 0,
11024 /* Consider allowing a small imbalance between NUMA groups */
11026 env->imbalance = adjust_numa_imbalance(env->imbalance,
11033 env->imbalance >>= 1;
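Lines 10914-11033 are calculate_imbalance(). The fragments show one output per imbalance class: a nudge of 1 for misfit and forced cases, group_misfit_task_load for real misfit moves, sum_h_nr_running for asym packing, spare capacity minus utilization for migrate_util, sibling_imbalance() for the prefer-sibling spread, and otherwise the idle-CPU difference. The tail of that last path is worth spelling out: the NUMA allowance (11026) applies before the halving (11033), and the halving exists because each migration changes the idle-CPU difference by two. A model, reusing the adjust_numa_imbalance() sketch; the local_running + 1 argument follows my reading of fair.c:

    static int tasks_to_move(int local_idle_cpus, int busiest_idle_cpus,
                             int local_running, int imb_numa_nr,
                             int numa_domain)
    {
            int imbalance = local_idle_cpus - busiest_idle_cpus;

            if (imbalance < 0)
                    imbalance = 0;

            /* Small NUMA imbalances are tolerated, not corrected. */
            if (numa_domain)
                    imbalance = adjust_numa_imbalance(imbalance,
                                                      local_running + 1,
                                                      imb_numa_nr);

            /* Number of tasks to move to restore balance. */
            return imbalance >> 1;
    }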
11056 env->imbalance = 0;
11068 env->imbalance = 0;
11080 * the minimum possible imbalance.
11083 env->imbalance = min(
11105 * force : Calculate the imbalance as load migration is probably needed.
11106 * avg_load : Only if imbalance is significant enough.
11113 * if there is an imbalance.
11119 * Return: - The busiest group if imbalance exists.
11231 * and there is no imbalance between this and busiest
11232 * group wrt idle CPUs, it is balanced. The imbalance
11234 * otherwise we might end up just moving the imbalance
11250 /* Looks like there is an imbalance. Compute it */
11252 return env->imbalance ? sds.busiest : NULL;
11255 env->imbalance = 0;
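Lines 11105-11255 cover find_busiest_group(): either an imbalance is computed and the busiest group returned (11250-11252), or env->imbalance is cleared and no balancing happens (11255). The tie-break quoted at 11231-11234 is the subtle part: with the busiest group not overloaded, an idle-CPU difference of one is left alone, since migrating would merely shift the imbalance onto the other group. As a predicate (the group_weight > 1 guard is my reading of the surrounding code):

    /*
     * Balanced when the busiest group is not overloaded and local
     * has at most one more idle CPU than busiest.
     */
    static int balanced_wrt_idle_cpus(int local_idle_cpus,
                                      int busiest_idle_cpus,
                                      int busiest_group_weight)
    {
            return busiest_group_weight > 1 &&
                   local_idle_cpus <= busiest_idle_cpus + 1;
    }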
11336 * When comparing with load imbalance, use cpu_load()
11341 if (nr_running == 1 && load > env->imbalance &&
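Lines 11336-11341 are from the busiest-queue scan (find_busiest_queue() by the shape of it): a runqueue whose single task already outweighs the wanted imbalance is skipped, because moving that task would overshoot rather than balance, unless the CPU's capacity is reduced. Modeled as a guard, with a flag standing in for the kernel's check_cpu_capacity():

    /* Skip a one-task queue whose load exceeds the wanted imbalance. */
    static int skip_queue(unsigned int nr_running, unsigned long cpu_load,
                          unsigned long imbalance, int capacity_reduced)
    {
            return nr_running == 1 && cpu_load > imbalance &&
                   !capacity_reduced;
    }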
11558 * tasks if there is an imbalance.
11606 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
11617 * an imbalance but busiest->nr_running <= 1, the group is
11619 * correctly treated as an imbalance.
11676 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
11698 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
11700 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
11788 * constraints. Clear the imbalance flag only if other tasks got
11789 * a chance to move and fix the imbalance.
11792 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
11801 * we can't migrate them. Leave the imbalance flag set so the parent level
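Lines 11558-11801 are load_balance() itself, and they close the loop on the group-imbalance flag introduced around 9880: when pinned tasks leave an imbalance unresolved (the LBF_SOME_PINNED / LBF_DST_PINNED paths at 11676-11700), the parent domain's sgc->imbalance is set so the next level up takes over, and it is cleared only once tasks actually got a chance to move (11788-11792). A model of that propagation, reusing sgc_model from above; the flag value is illustrative, not the kernel's:

    #define LBF_SOME_PINNED_MODEL 0x1  /* some tasks could not move */

    static void update_parent_imbalance_flag(struct sgc_model *parent_sgc,
                                             unsigned int lb_flags,
                                             long remaining_imbalance,
                                             int tasks_moved)
    {
            /* Pinned tasks blocked this level: ask the parent to help. */
            if ((lb_flags & LBF_SOME_PINNED_MODEL) &&
                remaining_imbalance > 0)
                    parent_sgc->imbalance = 1;

            /* Clear only after other tasks had a chance to move. */
            if (tasks_moved && parent_sgc->imbalance)
                    parent_sgc->imbalance = 0;
    }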
12306 * If there is an imbalance between LLC domains (IOW we could