Lines matching refs: util_avg
1142 * based on the cfs_rq's current util_avg:
1144 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1146 * However, in many cases, the above util_avg does not give a desired
1150 * To solve this problem, we also cap the util_avg of successive tasks to
1153 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
1160 *   task util_avg: 512, 256, 128,  64,  32,   16,    8, ...
1161 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
1163 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
1164 * if util_avg > util_avg_cap.
1172 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
1190 if (cfs_rq->avg.util_avg != 0) {
1191 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
1192 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
1194 if (sa->util_avg > cap)
1195 sa->util_avg = cap;
1197 sa->util_avg = cap;
1201 sa->runnable_avg = sa->util_avg;
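The 1142-1201 fragments above describe how a newly forked task's utilization is seeded (this reads like post_init_entity_util_avg() from kernel/sched/fair.c, though the file is not named in this listing). A minimal sketch of that capping logic, with a stripped-down stand-in for the kernel's struct sched_avg and 1024 assumed for cpu_scale:

    /* Stand-in for the kernel's per-entity average state; reused by the
     * later sketches in this listing. */
    struct avg {
            unsigned long util_avg, runnable_avg, load_avg;
            unsigned int util_sum;
    };

    /* Seed a new task's util_avg from its cfs_rq: extrapolate from the
     * runqueue's current utilization, but never hand out more than half
     * of the remaining capacity budget. */
    static void seed_util_avg(struct avg *sa, const struct avg *cfs_rq,
                              unsigned long se_weight)
    {
            long cap = (long)(1024 - cfs_rq->util_avg) / 2; /* cpu_scale assumed 1024 */

            if (cap > 0) {
                    if (cfs_rq->util_avg != 0) {
                            /* util_avg = rq util * weight / (rq load + 1) */
                            sa->util_avg = cfs_rq->util_avg * se_weight /
                                           (cfs_rq->load_avg + 1);
                            if ((long)sa->util_avg > cap)
                                    sa->util_avg = cap;
                    } else {
                            /* Idle runqueue: give the task the whole cap. */
                            sa->util_avg = cap;
                    }
            }
            sa->runnable_avg = sa->util_avg;
    }

Halving the remaining budget per successive fork is what keeps the series at 1160-1161 convergent: each new task gets at most half of what is left, so the cfs_rq util_avg approaches but never exceeds the CPU's capacity.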
4051 sa->util_avg ||
4234 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
4249 se->avg.util_avg = gcfs_rq->avg.util_avg;
4250 new_sum = se->avg.util_avg * divider;
4255 add_positive(&cfs_rq->avg.util_avg, delta_avg);
4260 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
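The 4234-4260 fragments are the group-hierarchy propagation step (assumed to be update_tg_cfs_util() in current kernels): a group entity adopts its child runqueue's util_avg, and the parent runqueue absorbs the resulting deltas. A hedged sketch reusing struct avg from above, with the kernel's add_positive() clamp and util_sum floor written out:

    /* Assumed value; the kernel derives PELT_MIN_DIVIDER from
     * LOAD_AVG_MAX in pelt.h. */
    #define PELT_MIN_DIVIDER 46718

    static void propagate_util(struct avg *cfs_rq, struct avg *se,
                               const struct avg *gcfs_rq, unsigned int divider)
    {
            long delta_avg = (long)gcfs_rq->util_avg - (long)se->util_avg;
            long delta_sum, sum;
            unsigned int new_sum;

            if (!delta_avg)
                    return;

            /* The group entity mirrors its child runqueue's utilization. */
            se->util_avg = gcfs_rq->util_avg;
            new_sum = se->util_avg * divider;
            delta_sum = (long)new_sum - (long)se->util_sum;
            se->util_sum = new_sum;

            /* Fold the deltas into the parent; clamp at zero rather than
             * letting an unsigned subtraction wrap (add_positive()). */
            sum = (long)cfs_rq->util_avg + delta_avg;
            cfs_rq->util_avg = sum > 0 ? sum : 0;

            /* Keep util_sum at or above its lower bound for the new
             * util_avg (see the 4567-4574 comment below). */
            sum = (long)cfs_rq->util_sum + delta_sum;
            if (sum < (long)(cfs_rq->util_avg * PELT_MIN_DIVIDER))
                    sum = cfs_rq->util_avg * PELT_MIN_DIVIDER;
            cfs_rq->util_sum = sum;
    }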
4407 if (se->avg.load_avg || se->avg.util_avg)
4548 swap(cfs_rq->removed.util_avg, removed_util);
4561 sub_positive(&sa->util_avg, r);
4567 * util_avg (~1ms) can make cfs->util_sum become null whereas
4570 * util_avg. Given that period_contrib might have moved since the last
4572 * util_avg * minimum possible divider
4574 sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
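Lines 4548-4574 are the removal sync path: utilization queued by departing tasks is swapped out of cfs_rq->removed, subtracted with sub_positive(), and util_sum is then re-floored so that accumulated rounding cannot drive it to zero while util_avg is still non-zero. The same pattern in isolation (struct avg and PELT_MIN_DIVIDER as in the sketches above):

    static void remove_util(struct avg *sa, unsigned long removed,
                            unsigned int divider)
    {
            unsigned long sub_sum = removed * divider;

            /* sub_positive(): saturate at zero instead of wrapping. */
            sa->util_avg = sa->util_avg > removed ? sa->util_avg - removed : 0;
            sa->util_sum = sa->util_sum > sub_sum ? sa->util_sum - sub_sum : 0;

            /* Rounding across many detaches can null util_sum while
             * util_avg survives; restore the minimum-divider floor. */
            if (sa->util_sum < sa->util_avg * PELT_MIN_DIVIDER)
                    sa->util_sum = sa->util_avg * PELT_MIN_DIVIDER;
    }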
4632 se->avg.util_sum = se->avg.util_avg * divider;
4643 cfs_rq->avg.util_avg += se->avg.util_avg;
4666 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4670 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
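Lines 4632-4670 are the attach/detach pair for a single entity (assumed attach_entity_load_avg() and detach_entity_load_avg()): attach re-derives the entity's util_sum against the runqueue's current PELT divider and adds both fields in; detach subtracts them and re-floors util_sum exactly as in the removal path. A sketch:

    static void attach_util(struct avg *cfs_rq, struct avg *se,
                            unsigned int divider)
    {
            /* Align the entity's sum with the rq's current PELT window. */
            se->util_sum = se->util_avg * divider;
            cfs_rq->util_avg += se->util_avg;
            cfs_rq->util_sum += se->util_sum;
    }

    static void detach_util(struct avg *cfs_rq, const struct avg *se)
    {
            cfs_rq->util_avg = cfs_rq->util_avg > se->util_avg ?
                               cfs_rq->util_avg - se->util_avg : 0;
            cfs_rq->util_sum = cfs_rq->util_sum > se->util_sum ?
                               cfs_rq->util_sum - se->util_sum : 0;
            /* Same lower-bound re-floor as remove_util() above. */
            if (cfs_rq->util_sum < cfs_rq->util_avg * PELT_MIN_DIVIDER)
                    cfs_rq->util_sum = cfs_rq->util_avg * PELT_MIN_DIVIDER;
    }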
4768 cfs_rq->removed.util_avg += se->avg.util_avg;
4792 return READ_ONCE(p->se.avg.util_avg);
6837 * Since new tasks are assigned an initial util_avg equal to
7759 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
7781 * migration so its util_avg is already correct.
7939 * (util_avg or util_est).
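The 7759-7939 fragments read a CPU's CFS utilization: start from cfs_rq->avg.util_avg and, when util_est is in play, take the max with the estimated utilization so recently slept tasks still count. A hedged sketch of that combination (the parameter split is an assumption; the kernel reads both values off the cfs_rq):

    static unsigned long cpu_util_sketch(const struct avg *cfs_avg,
                                         unsigned long util_est,
                                         unsigned long capacity)
    {
            unsigned long util = cfs_avg->util_avg; /* READ_ONCE() in-kernel */

            if (util_est > util)
                    util = util_est; /* cover tasks that just went to sleep */

            return util < capacity ? util : capacity; /* clamp to capacity */
    }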
8058 * their util_avg from the parent task, but those heuristics could hurt
9484 if (cfs_rq->avg.util_avg)
9492 if (READ_ONCE(rq->avg_rt.util_avg))
9495 if (READ_ONCE(rq->avg_dl.util_avg))
9502 if (READ_ONCE(rq->avg_irq.util_avg))
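Lines 9484-9502 are the blocked-averages check from the load-balancing path: a CPU still needs periodic PELT updates while any of its CFS, RT, DL, or IRQ averages remain non-zero, and can stop once they all decay to null (the comment at 9589). A sketch with an assumed rq layout, reusing struct avg:

    struct rq_sketch {
            struct avg cfs, avg_rt, avg_dl, avg_irq;
    };

    static int has_blocked_util(const struct rq_sketch *rq)
    {
            if (rq->cfs.util_avg)
                    return 1;
            if (rq->avg_rt.util_avg)   /* READ_ONCE() in the kernel */
                    return 1;
            if (rq->avg_dl.util_avg)
                    return 1;
            if (rq->avg_irq.util_avg)  /* only with IRQ time accounting */
                    return 1;
            return 0;
    }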
9589 /* Don't need periodic decay once load/util_avg are null */
9756 * avg_rt.util_avg and avg_dl.util_avg track binary signals
9761 used = READ_ONCE(rq->avg_rt.util_avg);
9762 used += READ_ONCE(rq->avg_dl.util_avg);
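Finally, 9756-9762 sum the RT and DL utilization when scaling CPU capacity (assumed to be scale_rt_capacity() in current kernels): because those classes run either flat out or not at all, their util_avg acts as a binary running/not-running signal, so the two values can simply be added and subtracted from the CPU's capacity to find what is left for CFS. A sketch, omitting the kernel's additional IRQ scaling:

    static unsigned long cfs_capacity(const struct rq_sketch *rq,
                                      unsigned long max_capacity)
    {
            unsigned long used = rq->avg_rt.util_avg + rq->avg_dl.util_avg;

            if (used >= max_capacity)
                    return 1; /* never report zero capacity */

            return max_capacity - used;
    }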