Lines matching refs: avg (kernel/sched/fair.c)

793 struct sched_avg *sa = &se->avg;
825 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
842 struct sched_avg *sa = &se->avg;
844 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
847 if (cfs_rq->avg.util_avg != 0) {
848 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
849 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
872 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
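
The fragments at 793-872 come from init_entity_runnable_average() and post_init_entity_util_avg(): a freshly forked task inherits a share of its cfs_rq's util_avg, weighted by the task's load weight and capped at half of the CPU capacity that is still unused. A minimal userspace sketch of that arithmetic follows; the concrete numbers are invented for illustration, only the formula mirrors the lines above.

    #include <stdio.h>

    int main(void)
    {
            long cpu_scale = 1024;  /* SCHED_CAPACITY_SCALE */
            long cfs_util  = 600;   /* assumed cfs_rq->avg.util_avg */
            long cfs_load  = 900;   /* assumed cfs_rq->avg.load_avg */
            long weight    = 1024;  /* assumed se->load.weight (nice 0) */

            long cap  = (cpu_scale - cfs_util) / 2;         /* line 844 */
            long util = cfs_util * weight / (cfs_load + 1); /* lines 848-849 */

            if (util > cap)         /* the function clamps the result to cap */
                    util = cap;

            printf("initial util_avg = %ld (cap = %ld)\n", util, cap);
            return 0;
    }
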
2343 delta = p->se.avg.load_sum;
3187 cfs_rq->avg.load_avg += se->avg.load_avg;
3188 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3193 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3194 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3220 u32 divider = get_pelt_divider(&se->avg);
3222 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
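
The enqueue/dequeue and reweight fragments at 3187-3222 all rely on the same PELT identity: an average is its decayed sum divided by the maximum sum attainable in the current PELT window. Assuming get_pelt_divider() still returns LOAD_AVG_MAX - 1024 + period_contrib (true for the kernels this listing appears to be taken from), line 3222 computes:

    \[
    \texttt{se->avg.load\_avg} \;=\;
      \frac{\texttt{se\_weight(se)} \times \texttt{se->avg.load\_sum}}
           {\texttt{LOAD\_AVG\_MAX} - 1024 + \texttt{se->avg.period\_contrib}}
    \]
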
3262 * grq->load.weight -> grq->avg.load_avg (2)
3266 * tg->weight * grq->avg.load_avg
3270 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3277 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3295 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3298 * we need to use grq->avg.load_avg as its lower bound, which then gives:
3307 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3308 * max(grq->load.weight, grq->avg.load_avg)
3325 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
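
Lines 3262-3325 are fragments of the calc_group_shares() derivation; the search only kept the lines that mention avg, so the fractions above lost their denominators. A hedged userspace sketch of the approximation the function ends up computing (the numbers are invented; only the formula follows the fragments at 3295-3325, and the final clamping to MIN_SHARES/tg->shares is omitted):

    #include <stdio.h>

    static long max_l(long a, long b) { return a > b ? a : b; }

    int main(void)
    {
            long tg_shares   = 1024; /* tg->weight (tg->shares), scaled down */
            long grq_weight  = 2048; /* grq->load.weight, scaled down */
            long grq_avg     = 300;  /* grq->avg.load_avg, still ramping up */
            long tg_load_avg = 900;  /* \Sum grq->avg.load_avg over all CPUs */
            long contrib     = grq_avg; /* this grq's tg_load_avg_contrib,
                                           assumed up to date */

            /* lower-bound the local contribution by the instantaneous weight */
            long load = max_l(grq_weight, grq_avg);

            /* swap this grq's published contribution for the bounded one */
            long tg_weight = tg_load_avg - contrib + load;

            printf("ge->load.weight ~= %ld\n", tg_shares * load / tg_weight);
            return 0;
    }
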
3417 * update_tg_load_avg - update the tg's load avg
3418 * @cfs_rq: the cfs_rq whose avg changed
3420 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3432 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3443 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
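
Lines 3417-3443 show update_tg_load_avg(): each cfs_rq remembers the last value it folded into the group-wide tg->load_avg and only publishes the difference. A toy illustration of that bookkeeping (all values invented):

    #include <stdio.h>

    int main(void)
    {
            long tg_load_avg = 2000; /* \Sum of all per-CPU contributions */
            long contrib     = 300;  /* this cfs_rq's tg_load_avg_contrib */
            long cfs_load    = 340;  /* freshly updated cfs_rq->avg.load_avg */

            long delta = cfs_load - contrib; /* line 3432 */
            tg_load_avg += delta;            /* atomic_long_add() in the kernel */
            contrib = cfs_load;              /* line 3443 */

            printf("tg->load_avg = %ld, contrib = %ld\n", tg_load_avg, contrib);
            return 0;
    }
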
3468 if (!(se->avg.last_update_time && prev)) {
3483 p_last_update_time = prev->avg.last_update_time;
3484 n_last_update_time = next->avg.last_update_time;
3488 p_last_update_time = prev->avg.last_update_time;
3489 n_last_update_time = next->avg.last_update_time;
3492 se->avg.last_update_time = n_last_update_time;
3500 * ge->avg == grq->avg (1)
3511 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3516 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
3520 * ge->avg.runnable_avg == grq->avg.runnable_avg
3524 * ge->load.weight * grq->avg.load_avg
3525 * ge->avg.load_avg = ----------------------------------- (4)
3550 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
3557 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
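
Lines 3500-3557 are the load-propagation derivation with its denominators stripped by the search. Reassembled, approximation (4) that the propagation code below implements, together with the runnable_sum estimate from line 3557, reads (reconstruction, not a verbatim quote):

    \[
    \texttt{ge->avg.load\_avg} \;=\;
      \frac{\texttt{ge->load.weight} \times \texttt{grq->avg.load\_avg}}
           {\texttt{grq->load.weight}} \qquad (4)
    \]
    \[
    \texttt{grq->avg.runnable\_sum} \;\approx\;
      \frac{\texttt{grq->avg.load\_sum}}{\texttt{grq->load.weight}}
    \]
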
3565 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3574 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3577 divider = get_pelt_divider(&cfs_rq->avg);
3580 se->avg.util_avg = gcfs_rq->avg.util_avg;
3581 se->avg.util_sum = se->avg.util_avg * divider;
3584 add_positive(&cfs_rq->avg.util_avg, delta);
3585 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3590 long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
3599 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3602 divider = get_pelt_divider(&cfs_rq->avg);
3605 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
3606 se->avg.runnable_sum = se->avg.runnable_avg * divider;
3609 add_positive(&cfs_rq->avg.runnable_avg, delta);
3610 cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3627 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3630 divider = get_pelt_divider(&cfs_rq->avg);
3637 runnable_sum += se->avg.load_sum;
3645 load_sum = div_s64(gcfs_rq->avg.load_sum, scale_load_down(gcfs_rq->load.weight));
3649 runnable_sum = min(se->avg.load_sum, load_sum);
3658 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
3664 delta = load_avg - se->avg.load_avg;
3666 se->avg.load_sum = runnable_sum;
3667 se->avg.load_avg = load_avg;
3669 add_positive(&cfs_rq->avg.load_avg, delta);
3670 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
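
Lines 3565-3670 implement that propagation for util, runnable and load: the group entity is made to mirror its group cfs_rq, only the difference is pushed into the parent cfs_rq, and the *_sum members are resynchronised from the new averages. A hedged sketch of the util step (lines 3565-3585), with invented numbers and an assumed PELT divider:

    #include <stdio.h>

    int main(void)
    {
            long gcfs_util   = 500; /* gcfs_rq->avg.util_avg (child) */
            long se_util     = 420; /* se->avg.util_avg, now stale   */
            long parent_util = 700; /* cfs_rq->avg.util_avg (parent) */
            long divider     = 47742 - 1024 + 512; /* assumed pelt divider */

            long delta = gcfs_util - se_util;        /* line 3565 */

            se_util = gcfs_util;                     /* line 3580 */
            long se_util_sum = se_util * divider;    /* line 3581 */

            parent_util += delta;                    /* line 3584 */
            long parent_util_sum = parent_util * divider; /* line 3585 */

            printf("se: %ld/%ld  parent: %ld/%ld\n",
                   se_util, se_util_sum, parent_util, parent_util_sum);
            return 0;
    }
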
3721 if (se->avg.load_avg || se->avg.util_avg) {
3763 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3764 * avg. The immediate corollary is that all (fair) tasks must be attached, see
3767 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3771 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3777 struct sched_avg *sa = &cfs_rq->avg;
3782 u32 divider = get_pelt_divider(&cfs_rq->avg);
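
Lines 3763-3782 belong to update_cfs_rq_load_avg() and restate its invariant: once every fair task is attached, the cfs_rq signal is simply the sum over its entities, blocked and runnable alike; summing over the attached entities se:

    \[
    \texttt{cfs\_rq->avg.load\_avg} \;\approx\; \sum_{se} \texttt{se->avg.load\_avg}
    \]
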
3835 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3840 * cfs_rq->avg.last_update_time being current.
3845 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3848 u32 divider = get_pelt_divider(&cfs_rq->avg);
3857 se->avg.last_update_time = cfs_rq->avg.last_update_time;
3858 se->avg.period_contrib = cfs_rq->avg.period_contrib;
3866 se->avg.util_sum = se->avg.util_avg * divider;
3868 se->avg.runnable_sum = se->avg.runnable_avg * divider;
3870 se->avg.load_sum = se->avg.load_avg * divider;
3871 if (se_weight(se) < se->avg.load_sum)
3872 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3874 se->avg.load_sum = 1;
3877 cfs_rq->avg.util_avg += se->avg.util_avg;
3878 cfs_rq->avg.util_sum += se->avg.util_sum;
3879 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
3880 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
3882 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
3890 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3895 * cfs_rq->avg.last_update_time being current.
3900 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3903 u32 divider = get_pelt_divider(&cfs_rq->avg);
3906 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3907 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3908 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3909 cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3911 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
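
Lines 3835-3911 are attach_entity_load_avg() and detach_entity_load_avg(): on attach the entity adopts the cfs_rq's PELT window (last_update_time, period_contrib), its *_sum fields are rebuilt from its averages, and those averages are folded into the cfs_rq; detach subtracts them again. A sketch of the attach-side rescaling (lines 3866-3874), with invented values and an assumed divider:

    #include <stdio.h>

    int main(void)
    {
            unsigned long weight   = 1024;  /* se_weight(se), nice 0 */
            unsigned long divider  = 47742 - 1024 + 200; /* assumed pelt divider */
            unsigned long util_avg = 300, runnable_avg = 400, load_avg = 350;

            /* rebuild the sums from the averages */
            unsigned long util_sum     = util_avg * divider;
            unsigned long runnable_sum = runnable_avg * divider;
            unsigned long load_sum     = load_avg * divider;

            if (weight < load_sum)          /* lines 3871-3874 */
                    load_sum = load_sum / weight;
            else
                    load_sum = 1;

            /* the cfs_rq then accumulates these (lines 3877-3880); detach
             * performs the mirror-image subtraction (lines 3906-3909) */
            printf("util_sum=%lu runnable_sum=%lu load_sum=%lu\n",
                   util_sum, runnable_sum, load_sum);
            return 0;
    }
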
3935 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
3942 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3970 last_update_time = cfs_rq->avg.last_update_time;
3978 return cfs_rq->avg.last_update_time;
3983 * Synchronize entity load avg of dequeued entity without locking
4014 cfs_rq->removed.util_avg += se->avg.util_avg;
4015 cfs_rq->removed.load_avg += se->avg.load_avg;
4016 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4022 return cfs_rq->avg.runnable_avg;
4027 return cfs_rq->avg.load_avg;
4039 return READ_ONCE(p->se.avg.util_avg);
4044 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4088 enqueued = cfs_rq->avg.util_est.enqueued;
4090 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4104 enqueued = cfs_rq->avg.util_est.enqueued;
4106 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4147 ue = p->se.avg.util_est;
4210 WRITE_ONCE(p->se.avg.util_est, ue);
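
Lines 4039-4210 track util_est: a per-task snapshot of utilization taken at dequeue (enqueued) plus an EWMA of those snapshots, later used as a floor for frequency and placement decisions. The read at 4147 and the write-back at 4210 bracket an EWMA update which, assuming the usual UTIL_EST_WEIGHT_SHIFT of 2 (an assumption, the constant is not part of this listing), boils down to:

    #include <stdio.h>

    #define UTIL_EST_WEIGHT_SHIFT 2 /* assumed: a new sample weighs 1/4 */

    int main(void)
    {
            long ewma     = 200; /* previous p->se.avg.util_est.ewma */
            long enqueued = 320; /* utilization sampled at dequeue   */

            /* ewma += (enqueued - ewma) / 4, done with shifts in the kernel */
            long diff = enqueued - ewma;
            ewma = ((ewma << UTIL_EST_WEIGHT_SHIFT) + diff) >> UTIL_EST_WEIGHT_SHIFT;

            printf("util_est.ewma = %ld\n", ewma);
            return 0;
    }
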
6024 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6029 load = READ_ONCE(cfs_rq->avg.load_avg);
6048 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6053 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
6056 lsub_positive(&runnable, p->se.avg.runnable_avg);
6681 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
6691 * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
6698 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
6700 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
6725 util = READ_ONCE(cfs_rq->avg.util_avg);
6728 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
6765 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6777 util = READ_ONCE(cfs_rq->avg.util_avg);
6809 unsigned int estimated = READ_ONCE(cfs_rq->avg.util_est.enqueued);
6856 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
6871 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
6875 * appear in the cfs_rq->avg.util_est.enqueued of any rq,
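
Lines 6681-6875 document cpu_util() and its variants: a CPU's utilization is its cfs_rq's util_avg, optionally raised to the util_est sum of the enqueued tasks, and finally clamped to the CPU's capacity because PELT can transiently overshoot it. A minimal sketch of that combination (the capacity value is invented):

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long capacity = 1024; /* assumed capacity_orig of the CPU */
            unsigned long util_avg = 730;  /* cfs_rq->avg.util_avg */
            unsigned long util_est = 810;  /* cfs_rq->avg.util_est.enqueued */

            unsigned long util = util_avg;
            if (util_est > util)           /* util = max(util, util_est) */
                    util = util_est;

            printf("cpu_util = %lu\n", min_ul(util, capacity));
            return 0;
    }
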
7240 p->se.avg.last_update_time = 0;
7717 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
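
Line 7717 is equation (4) of the load-balancing comment: each group's weight-per-capacity W_i/C_i is compared against the overall average avg(W/C). Purely to illustrate the arithmetic with invented figures: two single-CPU groups with C_1 = C_2 = 1024 and W_1 = 3072, W_2 = 1024 give avg(W/C) = 2, so imb_1,2 = max{2, 3} - min{2, 1} = 2.
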
8345 if (cfs_rq->avg.load_avg) {
8349 if (cfs_rq->avg.util_avg) {
8435 if (cfs_rq->avg.load_sum) {
8439 if (cfs_rq->avg.util_sum) {
8443 if (cfs_rq->avg.runnable_sum) {
8526 load = div64_ul(load * se->avg.load_avg, cfs_rq_load_avg(cfs_rq) + 1);
8538 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, cfs_rq_load_avg(cfs_rq) + 1);
8556 return p->se.avg.load_avg;
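
Lines 8526-8556 compute the hierarchical load of a task: h_load is pushed down the cgroup hierarchy by scaling the parent's h_load with each level's share of it (line 8526), a task's contribution is then its own load_avg scaled by its cfs_rq's h_load (line 8538), and for a task in the root cfs_rq this degenerates to p->se.avg.load_avg (line 8556). A sketch with one cgroup level and invented numbers:

    #include <stdio.h>

    int main(void)
    {
            /* root cfs_rq -> group se -> group cfs_rq -> task */
            unsigned long root_h_load   = 1024; /* root cfs_rq load_avg = its h_load */
            unsigned long gse_load_avg  = 512;  /* group se->avg.load_avg */
            unsigned long root_load_avg = 1024; /* cfs_rq_load_avg(root)  */
            unsigned long task_load_avg = 300;  /* p->se.avg.load_avg     */
            unsigned long grq_load_avg  = 600;  /* cfs_rq_load_avg(group cfs_rq) */
            unsigned long grq_h_load;

            /* line 8526: propagate h_load one level down */
            grq_h_load = root_h_load * gse_load_avg / (root_load_avg + 1);

            /* line 8538: scale the task's own signal by its cfs_rq's h_load */
            printf("task_h_load = %lu\n",
                   task_load_avg * grq_h_load / (grq_load_avg + 1));
            return 0;
    }
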
9194 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
9804 /* Misfit tasks should be dealt with regardless of the avg load */
11826 p->se.avg.last_update_time = 0;
12248 return cfs_rq ? &cfs_rq->avg : NULL;