Lines Matching defs:load_avg

1135 		sa->load_avg = scale_load_down(se->load.weight);
1137 /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
1144 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1192 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
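A minimal standalone sketch (not kernel code) of the extrapolation quoted at lines 1144 and 1192: a new task's initial util_avg is taken from its cfs_rq's current util_avg, scaled by the task's weight against the cfs_rq's load_avg; the +1 keeps the division safe on an idle cfs_rq. The function name, parameters, and example values below are illustrative stand-ins only.

#include <stdio.h>

/* util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight */
static unsigned long init_task_util_avg(unsigned long cfs_util_avg,
					unsigned long cfs_load_avg,
					unsigned long se_weight)
{
	/* multiply first, divide last, as the quoted code at line 1192 does */
	return cfs_util_avg * se_weight / (cfs_load_avg + 1);
}

int main(void)
{
	/* e.g. a half-utilized cfs_rq carrying one nice-0 task (weight 1024) */
	printf("%lu\n", init_task_util_avg(512, 1024, 1024));
	return 0;
}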
3678 cfs_rq->avg.load_avg += se->avg.load_avg;
3685 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3689 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3836 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3889 * grq->load.weight -> grq->avg.load_avg (2)
3893 * tg->weight * grq->avg.load_avg
3895 * tg->load_avg
3897 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3904 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3922 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3925 * we need to use grq->avg.load_avg as its lower bound, which then gives:
3934 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3935 * max(grq->load.weight, grq->avg.load_avg)
3952 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3954 tg_weight = atomic_long_read(&tg->load_avg);
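The comment fragments at lines 3889-3935 and the code at 3952-3954 describe how a group's per-CPU share is approximated: this runqueue's load, bounded below by grq->avg.load_avg, divided into an adjusted group total. A hedged sketch of that arithmetic follows; the function name and parameters are local stand-ins, not the kernel's calc_group_shares().

/*
 *                      tg->shares * max(grq->load.weight, grq->avg.load_avg)
 * shares ~= ---------------------------------------------------------------------------
 *           tg->load_avg - grq->avg.load_avg + max(grq->load.weight, grq->avg.load_avg)
 */
long group_shares_estimate(long tg_shares, long tg_load_avg,
			   long grq_weight, long grq_load_avg)
{
	long load = grq_weight > grq_load_avg ? grq_weight : grq_load_avg;
	long tg_weight = tg_load_avg - grq_load_avg + load;

	return tg_weight ? tg_shares * load / tg_weight : tg_shares;
}

Using grq->load.weight as the lower bound (rather than the still-ramping load_avg) is what lets a freshly enqueued task pull a sensible share immediately, as the comment at line 3904 notes.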
4050 SCHED_WARN_ON(sa->load_avg ||
4107 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
4108 * However, because tg->load_avg is a global value there are performance
4115 * Updating tg's load_avg is necessary before update_cfs_share().
4119 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
4122 * No need to update load_avg for root_task_group as it is not used.
4128 atomic_long_add(delta, &cfs_rq->tg->load_avg);
4129 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
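Lines 4119-4129 show why tg_load_avg_contrib exists: each cfs_rq publishes only the change in its load_avg since its last update, so the shared tg->load_avg is written less often. A toy sketch of that delta accumulation, with plain longs standing in for the kernel's atomic and per-cfs_rq fields (names here are illustrative):

struct grq_sketch {
	long load_avg;		/* stands in for cfs_rq->avg.load_avg */
	long tg_contrib;	/* stands in for cfs_rq->tg_load_avg_contrib */
};

/* Publish only the change since the last update into the shared sum. */
void tg_load_avg_update(long *tg_load_avg, struct grq_sketch *grq)
{
	long delta = grq->load_avg - grq->tg_contrib;

	if (delta) {
		*tg_load_avg += delta;	/* atomic_long_add() in the kernel */
		grq->tg_contrib = grq->load_avg;
	}
}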
4180 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
4185 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
4193 * ge->load.weight * grq->avg.load_avg
4194 * ge->avg.load_avg = ----------------------------------- (4)
4203 * We specifically want runqueues to have a load_avg that includes
4297 unsigned long load_avg;
4344 load_avg = div_u64(load_sum, divider);
4346 delta_avg = load_avg - se->avg.load_avg;
4353 se->avg.load_avg = load_avg;
4354 add_positive(&cfs_rq->avg.load_avg, delta_avg);
4358 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
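Lines 4297-4358 (together with line 3836) carry the propagation step: the group entity's load_avg is recomputed from its weight and load_sum, and only the signed delta is folded into the parent cfs_rq. A hedged sketch, assuming a non-zero PELT divider; the name and signature are stand-ins, not the kernel helpers:

#include <stdint.h>

void propagate_load_sketch(uint64_t se_weight, uint64_t load_sum,
			   uint64_t divider,	/* PELT window, assumed non-zero */
			   unsigned long *se_load_avg,
			   unsigned long *cfs_load_avg)
{
	unsigned long load_avg = se_weight * load_sum / divider;
	long delta = (long)load_avg - (long)*se_load_avg;

	*se_load_avg = load_avg;

	/* add_positive()/sub_positive() semantics: apply the delta, never underflow */
	if (delta < 0 && (unsigned long)(-delta) > *cfs_load_avg)
		*cfs_load_avg = 0;
	else
		*cfs_load_avg += delta;
}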
4407 if (se->avg.load_avg || se->avg.util_avg)
4549 swap(cfs_rq->removed.load_avg, removed_load);
4555 sub_positive(&sa->load_avg, r);
4558 sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
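Lines 4549-4558 handle load left behind by tasks that migrated away: the accumulated removed.load_avg is subtracted without underflowing, and load_sum is clamped so it never drops below load_avg * PELT_MIN_DIVIDER. A compact sketch of that sync, with min_divider passed in rather than hard-coded and all names illustrative:

#include <stdint.h>

void sync_removed_load(unsigned long *load_avg, uint32_t *load_sum,
		       unsigned long removed_load, uint32_t min_divider)
{
	/* sub_positive(): subtract, but never go below zero */
	*load_avg = (*load_avg > removed_load) ? *load_avg - removed_load : 0;

	/* keep load_sum consistent with load_avg, as line 4558 does */
	if (*load_sum < *load_avg * min_divider)
		*load_sum = *load_avg * min_divider;
}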
4636 se->avg.load_sum = se->avg.load_avg * divider;
4769 cfs_rq->removed.load_avg += se->avg.load_avg;
4781 return cfs_rq->avg.load_avg;
6990 load = READ_ONCE(cfs_rq->avg.load_avg);
9481 if (cfs_rq->avg.load_avg)
9627 load = div64_ul(load * se->avg.load_avg,
9640 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9658 return p->se.avg.load_avg;
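Lines 9627-9658 show the two task_h_load() flavours: without group scheduling a task's contribution is just its own load_avg (line 9658); with groups it is scaled by the cfs_rq's hierarchical load (h_load) relative to that cfs_rq's total load_avg, with +1 guarding the division. A small sketch of the grouped case; names and the example values are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Scale a task's own load_avg by its cfs_rq's hierarchical load share. */
uint64_t task_h_load_sketch(uint64_t task_load_avg, uint64_t cfs_h_load,
			    uint64_t cfs_load_avg)
{
	return task_load_avg * cfs_h_load / (cfs_load_avg + 1);
}

int main(void)
{
	/* illustrative values only */
	printf("%llu\n",
	       (unsigned long long)task_h_load_sketch(300, 512, 1024));
	return 0;
}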
9758 * avg_thermal.load_avg tracks thermal pressure and the weighted