Lines Matching refs:load

710     if (unlikely(se->load.weight != NICE_0_LOAD)) {
711 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
752 struct load_weight *load;
756 load = &cfs_rq->load;
759 lw = cfs_rq->load;
761 update_load_add(&lw, se->load.weight);
762 load = &lw;
764 slice = __calc_delta(slice, se->load.weight, load);
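
The __calc_delta() calls at 711 and 764 scale a time delta by a weight ratio, delta * weight / lw->weight, using the precomputed inverse weight (lw->inv_weight, roughly 2^32 / weight) so no division is needed on the hot path. Below is a minimal userspace sketch of that arithmetic with invented numbers; the real __calc_delta() additionally guards against multiplication overflow.

#include <stdint.h>
#include <stdio.h>

#define WMULT_SHIFT 32                    /* fixed-point shift used for inv_weight */
#define NICE_0_LOAD 1024UL                /* weight of a nice-0 task               */

struct load_weight {
        unsigned long weight;
        uint32_t inv_weight;              /* roughly 2^32 / weight                 */
};

/* delta * weight / lw->weight, computed with the inverse weight (sketch only). */
static uint64_t calc_delta_sketch(uint64_t delta, unsigned long weight,
                                  const struct load_weight *lw)
{
        return (delta * weight * lw->inv_weight) >> WMULT_SHIFT;
}

int main(void)
{
        /* A runqueue holding three nice-0 tasks: total weight 3072. */
        struct load_weight rq_load = {
                .weight     = 3072,
                .inv_weight = (uint32_t)((1ULL << 32) / 3072),
        };
        uint64_t slice = calc_delta_sketch(12000000ULL /* 12 ms period */,
                                           NICE_0_LOAD, &rq_load);

        printf("slice ~= %llu ns\n", (unsigned long long)slice); /* ~4 ms: one third */
        return 0;
}
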
790 /* Give a new sched_entity initial runnable values so it is seen as heavy while it is young */
798 * Tasks are initialized with full load to be seen as heavy tasks until
799 * they get a chance to stabilize to their real load level.
800 * Group entities are initialized with zero load to reflect the fact that
804 sa->load_avg = scale_load_down(se->load.weight);
816 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
848 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
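
The comment at 816 and the assignment at 848 initialize a new task's utilization as a weight-proportional share of its cfs_rq's utilization (post_init_entity_util_avg() additionally caps the result based on the CPU's spare capacity, which this sketch omits). A toy computation of that formula with invented values:

#include <stdio.h>

int main(void)
{
        /* Hypothetical parent cfs_rq state at fork time (invented numbers). */
        unsigned long cfs_util_avg = 600;   /* cfs_rq->avg.util_avg            */
        unsigned long cfs_load_avg = 2048;  /* cfs_rq->avg.load_avg            */
        unsigned long se_weight    = 1024;  /* nice-0 weight of the new entity */

        /* util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight */
        unsigned long util_avg = cfs_util_avg * se_weight / (cfs_load_avg + 1);

        printf("initial util_avg ~= %lu\n", util_avg); /* ~299 */
        return 0;
}
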
1549 * 'numa_type' describes the node at the moment of load balancing.
1568 unsigned long load;
1647 * and triggering future load balancing.
1664 * decisions that are compatible with standard load balancer. This
1680 ns->load += cpu_load(rq);
1765 * The load is corrected for the CPU capacity available on each node.
1776 orig_src_load = env->src_stats.load;
1777 orig_dst_load = env->dst_stats.load;
1807 long load;
1924 * In the overloaded case, try and keep the load balanced.
1926 load = task_h_load(env->p) - task_h_load(cur);
1927 if (!load) {
1931 dst_load = env->dst_stats.load + load;
1932 src_load = env->src_stats.load - load;
1962 * If a move to idle is allowed because there is capacity or load
1990 * imbalance that would be overruled by the load balancer.
2000 * CPU load balancer.
2016 long src_load, dst_load, load;
2021 load = task_h_load(env->p);
2022 dst_load = env->dst_stats.load + load;
2023 src_load = env->src_stats.load - load;
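
Lines 1926-2023 show the recurring pattern in task_numa_compare()/task_numa_find_cpu(): hypothetically move the task's hierarchical load from the source node's stats to the destination node's stats, then test whether the resulting imbalance is acceptable. The following is a condensed sketch of that bookkeeping; the 25% threshold and the helper names are invented stand-ins for the kernel's capacity-corrected load_too_imbalanced() check.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for env->src_stats / env->dst_stats. */
struct numa_stats_sketch {
        long load;
};

/*
 * Would moving a task of hierarchical load 'task_load' from src to dst
 * leave the two nodes acceptably balanced?  The real kernel compares the
 * capacity-corrected imbalance before and after the move; this sketch
 * simply refuses moves that push dst more than 25% above src.
 */
static bool move_keeps_balance(const struct numa_stats_sketch *src,
                               const struct numa_stats_sketch *dst,
                               long task_load)
{
        long src_load = src->load - task_load;  /* load leaves the source node     */
        long dst_load = dst->load + task_load;  /* ...and arrives on the dest node */

        return dst_load * 4 <= src_load * 5;    /* allow ~25% skew (invented bound) */
}

int main(void)
{
        struct numa_stats_sketch src = { .load = 3000 }, dst = { .load = 1000 };

        printf("move 512:  %s\n", move_keeps_balance(&src, &dst, 512)  ? "ok" : "too imbalanced");
        printf("move 1500: %s\n", move_keeps_balance(&src, &dst, 1500) ? "ok" : "too imbalanced");
        return 0;
}
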
2688 * concurrent reads from places like load balancing and procfs, and we should
3071 * is pulled cross-node due to wakeups or load balancing.
3109 update_load_add(&cfs_rq->load, se->load.weight);
3123 update_load_sub(&cfs_rq->load, se->load.weight);
3136 * Explicitly do a load-store to ensure the intermediate value never hits
3157 * Explicitly do a load-store to ensure the intermediate value never hits
3175 * A variant of sub_positive(), which does not use explicit load-store
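
The comments at 3136-3175 describe the sub_positive()/lsub_positive() helpers: subtract with a clamp at zero, and, in the sub_positive() case, perform exactly one load and one store so a lockless reader never observes a wrapped-around intermediate value. A userspace approximation of that pattern follows; READ_ONCE()/WRITE_ONCE() are imitated with volatile accesses, which only illustrates the intent and is not the kernel macro.

#include <stdio.h>

/*
 * Sketch of sub_positive(): read the value once, compute the result clamped
 * at zero, write it back once, so a lockless reader never sees a transient
 * wrapped-around (huge unsigned) value.
 */
static void sub_positive_sketch(unsigned long *ptr, unsigned long val)
{
        unsigned long old = *(volatile unsigned long *)ptr; /* single load  (READ_ONCE-like)  */
        unsigned long res = old > val ? old - val : 0;      /* clamp at zero                  */

        *(volatile unsigned long *)ptr = res;               /* single store (WRITE_ONCE-like) */
}

int main(void)
{
        unsigned long load_avg = 100;

        sub_positive_sketch(&load_avg, 30);   /* 100 -> 70               */
        sub_positive_sketch(&load_avg, 500);  /* would wrap; clamps to 0 */
        printf("load_avg = %lu\n", load_avg);
        return 0;
}

The kernel helpers are macros, so the same pattern applies to both unsigned long and u64 fields; lsub_positive() skips the explicit load-store because it operates on local variables.
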
3212 update_load_sub(&cfs_rq->load, se->load.weight);
3216 update_load_set(&se->load, weight);
3228 update_load_add(&cfs_rq->load, se->load.weight);
3236 struct load_weight *load = &se->load;
3240 load->inv_weight = sched_prio_to_wmult[prio];
3252 * tg->weight * grq->load.weight
3253 * ge->load.weight = ----------------------------- (1)
3254 * \Sum grq->load.weight
3262 * grq->load.weight -> grq->avg.load_avg (2)
3267 * ge->load.weight = ------------------------------ (3)
3282 * tg->weight * grq->load.weight
3283 * ge->load.weight = ----------------------------- = tg->weight (4)
3284 * grq->load.weight
3291 * ge->load.weight =
3293 * tg->weight * grq->load.weight
3295 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3297 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3301 * tg->weight * grq->load.weight
3302 * ge->load.weight = ----------------------------- (6)
3308 * max(grq->load.weight, grq->avg.load_avg)
3312 * overestimates the ge->load.weight and therefore:
3314 * \Sum ge->load.weight >= tg->weight
3320 long tg_weight, tg_shares, load, shares;
3325 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3329 /* Ensure tg_weight >= load */
3331 tg_weight += load;
3333 shares = (tg_shares * load);
3341 * assigned as a minimum load.weight to the sched_entity representing
3375 if (likely(se->load.weight == shares)) {
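
Lines 3320-3333 implement approximation (6) above: shares = tg->shares * grq_load / tg_weight, where tg_weight swaps this runqueue's stale contribution for its current load so that summing over all CPUs still approaches tg->weight. A standalone numeric walk-through with invented values; the real calc_group_shares() also clamps the result to the [MIN_SHARES, tg_shares] range.

#include <stdio.h>

int main(void)
{
        /* Hypothetical task group seen from one CPU (invented numbers). */
        long tg_shares       = 1024;  /* tg->shares                                     */
        long tg_load_avg     = 1500;  /* sum of all per-CPU contributions               */
        long this_rq_contrib = 700;   /* what this cfs_rq last contributed              */
        long this_rq_load    = 900;   /* max(cfs_rq->load.weight, cfs_rq->avg.load_avg) */

        /* Swap the stale contribution for the current load: ensures tg_weight >= load. */
        long tg_weight = tg_load_avg - this_rq_contrib + this_rq_load;

        long shares = tg_shares * this_rq_load / tg_weight;

        printf("group entity weight on this CPU ~= %ld of %ld\n", shares, tg_shares); /* ~542 */
        return 0;
}

Because the denominator substitutes the current load for the stale contribution, the per-CPU shares can momentarily sum to more than tg->weight, which is the conservative direction noted at 3312-3314.
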
3417 * update_tg_load_avg - update the tg's load avg
3420 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3466 * the wakee more load sounds not bad.
3511 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3516 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
3524 * ge->load.weight * grq->avg.load_avg
3526 * grq->load.weight
3535 * historical weights. Those represent the blocked load, the load we expect
3557 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3644 if (scale_load_down(gcfs_rq->load.weight)) {
3645 load_sum = div_s64(gcfs_rq->avg.load_sum, scale_load_down(gcfs_rq->load.weight));
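
The division at 3645 recovers an unweighted runnable_sum from the group runqueue's weighted load_sum, following the relation quoted at 3557 and assuming all tasks on the group runqueue are equally runnable. A toy computation of that estimate with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical group runqueue state (invented numbers). */
        uint64_t grq_load_sum    = 47000ULL * 1024;  /* gcfs_rq->avg.load_sum          */
        unsigned long grq_weight = 2048;             /* scale_load_down(load.weight):
                                                        two nice-0 tasks' worth        */

        /* Unweighted runnable time estimate, as in update_tg_cfs_load(). */
        uint64_t runnable_sum = grq_weight ? grq_load_sum / grq_weight : 0;

        printf("estimated runnable_sum = %llu\n", (unsigned long long)runnable_sum); /* 23500 */
        return 0;
}
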
3679 /* Update task and its cfs_rq load average */
3710 * Check if we need to update the load and the utilization of a blocked
3718 * If the sched_entity still has non-zero load or utilization, we have to
3726 * If there is a pending propagation, we have to update the load and
3734 * Otherwise, the load and the utilization of the sched_entity are
3759 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3769 * Returns true if the load decayed or we removed load.
3771 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3835 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3890 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3919 * Optional action to be done while updating the load average
3925 /* Update task and its cfs_rq load average */
3932 * Track task load average for carrying it to the new CPU after migration, and
3933 * track group sched_entity load average for the task_h_load calculation in migration
3983 * Synchronize entity load avg of dequeued entity without locking
4444 * - Add its load to cfs_rq->runnable_avg
4447 * - Add its new weight to cfs_rq->load.weight
4543 * - Subtract its load from the cfs_rq->runnable_avg.
4544 * - Subtract its previous weight from cfs_rq->load.weight.
4611 * This also mitigates buddy induced latencies under load.
4647 * Track our maximum slice length, if the CPU's load is at
4651 if (schedstat_enabled() && rq_of(cfs_rq)->cfs.load.weight >= 2 * se->load.weight) {
4937 * load-balance operations.
5037 if (qcfs_rq->load.weight) {
5078 if (!cfs_rq->load.weight) {
5867 * result in the load balancer ruining all the task placement
5936 if (cfs_rq->load.weight) {
5937 /* Avoid re-evaluating load for this entity: */
5993 int has_blocked; /* Idle CPUs have blocked load */
5995 unsigned long next_blocked; /* Next update of blocked load in jiffies */
6006 * cpu_load_without - compute CPU load without any contributions from *p
6007 * @cpu: the CPU which load is requested
6008 * @p: the task which load should be discounted
6010 * The load of a CPU is defined by the load of tasks currently enqueued on that
6014 * This method returns the load of the specified CPU by discounting the load of
6016 * load.
6021 unsigned int load;
6029 load = READ_ONCE(cfs_rq->avg.load_avg);
6032 lsub_positive(&load, task_h_load(p));
6034 return load;
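
cpu_load_without() (6006-6034) discounts the waking task's own contribution when the task is still enqueued on that CPU, so wake-up placement does not count it twice; lsub_positive() keeps the subtraction from wrapping below zero. A userspace sketch of the same discounting, with cpu_load() and task_h_load() replaced by plain numbers:

#include <stdio.h>

/* Clamp-at-zero subtraction, mirroring the intent of lsub_positive(). */
static unsigned long lsub_positive_sketch(unsigned long a, unsigned long b)
{
        return a > b ? a - b : 0;
}

/*
 * Load of a CPU with one task's contribution removed, assuming the task is
 * currently enqueued there (otherwise its load is not included to begin with).
 */
static unsigned long cpu_load_without_sketch(unsigned long cpu_load_avg,
                                             unsigned long task_h_load,
                                             int task_on_this_cpu)
{
        if (!task_on_this_cpu)
                return cpu_load_avg;

        return lsub_positive_sketch(cpu_load_avg, task_h_load);
}

int main(void)
{
        printf("%lu\n", cpu_load_without_sketch(800, 300, 1)); /* 500                      */
        printf("%lu\n", cpu_load_without_sketch(800, 300, 0)); /* 800: task runs elsewhere */
        return 0;
}
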
6084 * In order to determine whether we should let the load spread vs consolidating
6220 unsigned long load, min_load = ULONG_MAX;
6265 load = cpu_load(cpu_rq(i));
6266 if (load < min_load) {
6267 min_load = load;
7109 * Balances load by selecting the idlest CPU in the idlest group, or under
7233 * wakee task is less decayed, but giving the wakee more load
7687 * Fair scheduling class load-balancing methods.
7691 * The purpose of load-balancing is to achieve the same basic fairness the
7735 * of load-balance at each level inv. proportional to the number of CPUs in
7744 * | | `- number of CPUs doing load-balance
7809 * 'group_type' describes the group of CPUs at the moment of load balancing.
7836 * from balancing the load across the system.
7869 /* The set of CPUs under consideration for load-balancing */
8024 * meet load balance goals by pulling other tasks on src_cpu.
8138 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
8146 unsigned long util, load;
8203 load = max_t(unsigned long, task_h_load(p), 1);
8205 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) {
8210 * Make sure that we don't migrate too much load.
8215 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) {
8219 env->imbalance -= load;
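
In detach_tasks() (8203-8219), each candidate's hierarchical load is clamped to at least 1, tiny loads are skipped under LB_MIN unless balancing has already failed, and the load is compared against the remaining imbalance after a bounded right shift by nr_balance_failed, so repeated failures progressively allow larger pulls. A compact sketch of that filtering; the helpers are simplified stand-ins for task_h_load() and shr_bound().

#include <stdbool.h>
#include <stdio.h>

/* Right shift bounded to the type width, like the kernel's shr_bound(). */
static unsigned long shr_bound_sketch(unsigned long val, unsigned int shift)
{
        unsigned int max_shift = 8 * sizeof(val) - 1;

        return val >> (shift < max_shift ? shift : max_shift);
}

/* Would the detach loop skip a task of this hierarchical load? */
static bool skip_task_sketch(unsigned long task_load, unsigned long imbalance,
                             unsigned int nr_balance_failed, bool lb_min)
{
        if (task_load < 1)
                task_load = 1;                           /* max_t(unsigned long, load, 1) */

        if (lb_min && task_load < 16 && !nr_balance_failed)
                return true;                             /* too small to bother with      */

        return shr_bound_sketch(task_load, nr_balance_failed) > imbalance;
}

int main(void)
{
        /* A 200-unit task against a 150-unit imbalance: skipped at first... */
        printf("%d\n", skip_task_sketch(200, 150, 0, true)); /* 1: would overshoot      */
        /* ...but allowed once balancing has already failed twice.           */
        printf("%d\n", skip_task_sketch(200, 150, 2, true)); /* 0: 200 >> 2 = 50 <= 150 */
        return 0;
}
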
8263 * load/util/tasks.
8431 if (cfs_rq->load.weight) {
8471 /* Propagate pending load changes to the parent, if any: */
8485 /* Don't need periodic decay once load/util_avg are null */
8495 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
8496 * This needs to be done in a top-down fashion because the load of a child
8497 * group is a fraction of its parents load.
8504 unsigned long load;
8525 load = cfs_rq->h_load;
8526 load = div64_ul(load * se->avg.load_avg, cfs_rq_load_avg(cfs_rq) + 1);
8528 cfs_rq->h_load = load;
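
update_cfs_rq_h_load() (8495-8528) walks the group hierarchy top-down, scaling each level's h_load by the child entity's share of its parent runqueue's load, se->avg.load_avg / (cfs_rq_load_avg(cfs_rq) + 1), so a task's effective load is a fraction of a fraction of the root load. A minimal iterative sketch for a task nested two group levels deep, with invented numbers:

#include <stdio.h>

int main(void)
{
        /* Root cfs_rq h_load, then one group level and finally the task (invented). */
        unsigned long h_load        = 2048;
        unsigned long se_load_avg[] = { 512, 256 };   /* group se, then the task         */
        unsigned long rq_load_avg[] = { 2048, 1024 }; /* load of the cfs_rq each sits on */

        for (int i = 0; i < 2; i++) {
                /* h_load = h_load * se->avg.load_avg / (cfs_rq_load_avg(cfs_rq) + 1) */
                h_load = h_load * se_load_avg[i] / (rq_load_avg[i] + 1);
        }

        printf("task_h_load ~= %lu\n", h_load); /* ~127: a quarter of a quarter of 2048 */
        return 0;
}
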
8585 unsigned long avg_load; /* Avg load across the CPUs of the group */
8586 unsigned long group_load; /* Total load over the CPUs of the group */
8605 * during load balancing.
8610 unsigned long total_load; /* Total load of all groups in sd */
8612 unsigned long avg_load; /* Average load across all groups in sd */
8657 * average uses the actual delta max capacity(load).
8814 * account the variance of the tasks' load and to return true if the available
8815 * capacity is meaningful for the load balancer.
8817 * any benefit for the load balance.
8931 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
8932 * @env: The load balancing environment.
9027 * @env: The load balancing environment.
9430 * cross-domain, add imbalance to the load on the remote node
9483 * a real need of migration, periodic load balance will
9507 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
9508 * @env: The load balancing environment.
9607 * groups of a given sched_domain during load balance.
9608 * @env: load balance environment
9627 * In case of asym capacity, we will try to migrate all load to
9638 * to ensure CPU-load equilibrium, try to move any task to fix
9639 * the imbalance. The next load balance will take care of
9658 * amount of load to migrate in order to balance the
9706 * Local is fully busy but has to take more load to relieve the
9731 * above the average load, nor do we wish to reduce the max loaded CPU
9732 * below the average load. At the same time, we also don't want to
9733 * reduce the group load below the group capacity. Thus we look for
9758 * force : Calculate the imbalance as load migration is probably needed.
9768 * Also calculates the amount of runnable load which should be moved
9771 * @env: The load balancing environment.
9783 * Compute the various statistics relevant for load balancing at
9804 /* Misfit tasks should be dealt with regardless of the avg load */
9809 /* ASYM feature bypasses nice load balance check */
9849 * domain average load.
9923 unsigned long capacity, load, util;
9942 * If we cannot move enough load due to this classification
9963 * average load.
9972 * When comparing with load imbalance, use cpu_load()
9975 load = cpu_load(rq);
9976 if (nr_running == 1 && load > env->imbalance && !check_cpu_capacity(rq, env->sd)) {
9981 * For the load comparisons with the other CPUs,
9983 * capacity, so that the load can be moved away
9993 if (load * busiest_capacity > busiest_load * capacity) {
9994 busiest_load = load;
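
The comparison at 9993 picks the busiest runqueue by cross-multiplying instead of dividing: CPU i is busier than the current candidate when load_i / capacity_i > busiest_load / busiest_capacity, i.e. load_i * busiest_capacity > busiest_load * capacity_i. A small standalone sketch of that selection over a few hypothetical CPUs:

#include <stdio.h>

struct cpu_sketch {
        unsigned long load;      /* cpu_load(rq)     */
        unsigned long capacity;  /* capacity_of(cpu) */
};

int main(void)
{
        /* A big core with more absolute load vs. a little core that is relatively fuller. */
        struct cpu_sketch cpus[] = { { 900, 1024 }, { 500, 430 }, { 300, 1024 } };
        unsigned long busiest_load = 0, busiest_capacity = 1;
        int busiest = -1;

        for (int i = 0; i < 3; i++) {
                /* load/capacity ratios compared without a division */
                if (cpus[i].load * busiest_capacity > busiest_load * cpus[i].capacity) {
                        busiest_load     = cpus[i].load;
                        busiest_capacity = cpus[i].capacity;
                        busiest          = i;
                }
        }

        printf("busiest cpu = %d\n", busiest); /* 1: 500/430 exceeds 900/1024 */
        return 0;
}

Comparing cross-products keeps the decision in integer arithmetic and avoids the rounding a per-CPU division would introduce.
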
10126 * to do the newly idle load balance.
10218 * cur_ld_moved - load moved in current iteration
10219 * ld_moved - cumulative load moved across iterations
10252 * This changes load balance semantics a bit on who can move
10253 * load to a given_cpu. In addition to the given_cpu itself
10256 * load to given_cpu. In rare situations, this may cause
10258 * _independently_ and at _same_ time to move some load to
10259 * given_cpu) causing excess load to be moved to given_cpu.
10261 * moreover subsequent load balance cycles should correct the
10262 * excess load moved.
10296 * Attempting to continue load balancing at the current
10299 * pull load from which are not contained within the
10301 * load.
10343 * only after active load balance is finished.
10609 * This trades load-balance latency on larger machines for less cross talk.
10660 * Stop the load balance at this level. There is another
10661 * CPU in our sched group which is doing load balancing more
10724 * updated accordingly. This CPU is now running the idle load
10742 * idle load balancing details
10744 * needed, they will kick the idle load balancer, which then does idle
10745 * load balancing for all the idle CPUs.
10800 * is idle. And the softirq performing nohz idle load balance
10807 * Current decision point for kicking the idle load balancer in the presence
10830 * None are in tickless mode and hence no need for NOHZ idle load
10915 * domain to pull some load. Likewise, we may need to spread
10916 * load within the current LLC domain (e.g. packed SMT cores but
10984 * This info will be used in performing idle load balancing in the future.
11007 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
11020 * The tick is still stopped but load could have been added in the
11023 * of nohz.has_blocked can only happen after checking the new load
11050 * Each time a cpu enters idle, we assume that it has blocked load and
11051 * enable the periodic update of the load of idle cpus
11057 * Internal function that runs load balance for all idle cpus. The load balance
11058 * can be a simple update of blocked load or a complete load balance with
11079 * We assume there will be no idle load after this update and clear
11081 * set the has_blocked flag and trigger another update of idle load.
11084 * check the load of an idle cpu.
11107 * If this CPU gets work to do, stop the load balancing
11108 * work being done for other CPUs. Next load
11167 /* There is still blocked load, enable periodic update */
11215 /* Don't need to update blocked load of idle CPUs */
11222 * This CPU is going to be idle and blocked load of idle CPUs
11285 * for load-balance and preemption/IRQs are still disabled avoiding
11400 * give the idle CPUs a chance to load balance. Else we may
11401 * load balance only within the local sched_domain hierarchy
11402 * and abort nohz_idle_balance altogether if we pull some load.
11408 /* normal load balance */
11414 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
11692 /* Catch up with the cfs_rq and remove our load when we leave */
11982 update_load_set(&se->load, NICE_0_LOAD);
12056 if (rq->cfs.load.weight) {