Lines matching refs:avg in kernel/sched/fair.c

741 s64 avg = cfs_rq->avg_vruntime;
747 avg += entity_key(cfs_rq, curr) * weight;
753 if (avg < 0)
754 avg -= (load - 1);
755 avg = div_s64(avg, load);
758 return cfs_rq->min_vruntime + avg;
808 s64 avg = cfs_rq->avg_vruntime;
814 avg += entity_key(cfs_rq, curr) * weight;
818 return avg >= entity_key(cfs_rq, se) * load;
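The avg_vruntime() and entity_eligible() matches above (lines 741-818) rest on two pieces of arithmetic: the weighted sum of entity keys is divided with floor semantics (a negative sum is biased by load - 1 before the truncating division), and eligibility is tested without dividing at all by comparing the sum against key * load. Below is a minimal userspace sketch of that arithmetic with invented keys and weights; plain C division stands in for div_s64() and no kernel data structures are used.

/*
 * Weighted average of per-entity keys (vruntime - min_vruntime), divided with
 * floor semantics so negative sums round toward -infinity, plus the
 * division-free eligibility check from the matched line 818.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Three runnable entities: key = vruntime - min_vruntime, and weight. */
	int64_t key[]    = { -10,  4,  3 };
	int64_t weight[] = {   2,  1,  1 };
	int64_t sum = 0, load = 0;

	for (int i = 0; i < 3; i++) {
		sum  += key[i] * weight[i];	/* cfs_rq->avg_vruntime analogue */
		load += weight[i];		/* cfs_rq->avg_load analogue */
	}

	/* Floor division: bias a negative sum before the truncating divide. */
	int64_t avg = sum;
	if (avg < 0)
		avg -= (load - 1);
	avg /= load;

	printf("weighted sum=%lld load=%lld floored avg=%lld\n",
	       (long long)sum, (long long)load, (long long)avg);

	/*
	 * Division-free eligibility: entity i is eligible iff key[i] <= avg,
	 * i.e. sum >= key[i] * load, the comparison on the matched line 818.
	 */
	for (int i = 0; i < 3; i++)
		printf("entity %d eligible: %d\n", i, sum >= key[i] * load);

	return 0;
}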
1124 struct sched_avg *sa = &se->avg;
1153 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
1170 struct sched_avg *sa = &se->avg;
1172 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
1185 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
1190 if (cfs_rq->avg.util_avg != 0) {
1191 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
1192 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
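The init_entity_runnable_average()/post_init_entity_util_avg() matches (lines 1124-1192) seed a new task's util_avg from its runqueue: a weight-proportional share of cfs_rq->avg.util_avg, capped at half the spare capacity per the comment on line 1153. A hedged worked example with invented numbers follows; it keeps the task weight and load_avg on a single scale and ignores the kernel's higher-resolution load fixed point.

/*
 * Initial util_avg for a freshly forked task: a proportional share of the
 * runqueue's utilization, never more than half of the spare capacity.
 */
#include <stdio.h>

int main(void)
{
	long cpu_scale    = 1024;	/* arch_scale_cpu_capacity() analogue */
	long cfs_util_avg =  600;	/* cfs_rq->avg.util_avg at fork time */
	long cfs_load_avg = 2048;	/* cfs_rq->avg.load_avg at fork time */
	long task_weight  = 1024;	/* new task's weight, nice 0 */

	long cap = (cpu_scale - cfs_util_avg) / 2;	/* half the spare capacity */
	long util_avg = 0;

	if (cap > 0) {
		if (cfs_util_avg != 0) {
			/* Proportional share of the rq utilization; +1 avoids /0. */
			util_avg = cfs_util_avg * task_weight / (cfs_load_avg + 1);
			if (util_avg > cap)
				util_avg = cap;
		} else {
			util_avg = cap;
		}
	}

	printf("util_avg_cap=%ld initial util_avg=%ld\n", cap, util_avg);
	return 0;
}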
2777 delta = p->se.avg.load_sum;
3678 cfs_rq->avg.load_avg += se->avg.load_avg;
3679 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3685 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3686 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3688 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3689 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3834 u32 divider = get_pelt_divider(&se->avg);
3836 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
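The enqueue_load_avg()/dequeue_load_avg() matches (lines 3678-3689) lean on two defensive patterns: sub_positive() clamps at zero rather than letting an unsigned field wrap when rounding makes the subtrahend slightly too large, and after a removal the _sum is floored at _avg * PELT_MIN_DIVIDER so the pair stays mutually consistent. The sketch below uses simplified userspace stand-ins for the kernel helpers and invented values.

/*
 * Clamp-at-zero subtraction in the spirit of the kernel's sub_positive(),
 * followed by the load_sum >= load_avg * PELT_MIN_DIVIDER floor.
 */
#include <stdio.h>

#define PELT_MIN_DIVIDER 46718UL	/* kernel: LOAD_AVG_MAX - 1024 */

static void sub_positive(unsigned long *ptr, unsigned long val)
{
	*ptr = (*ptr > val) ? *ptr - val : 0;
}

int main(void)
{
	unsigned long load_avg = 512, load_sum = 512 * PELT_MIN_DIVIDER;

	/* Detach an entity whose tracked sum has drifted slightly high. */
	sub_positive(&load_avg, 300);
	sub_positive(&load_sum, 320UL * PELT_MIN_DIVIDER);

	/* Restore the invariant load_sum >= load_avg * PELT_MIN_DIVIDER. */
	if (load_sum < load_avg * PELT_MIN_DIVIDER)
		load_sum = load_avg * PELT_MIN_DIVIDER;

	printf("load_avg=%lu load_sum=%lu\n", load_avg, load_sum);
	return 0;
}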
3889 * grq->load.weight -> grq->avg.load_avg (2)
3893 * tg->weight * grq->avg.load_avg
3897 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3904 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3922 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3925 * we need to use grq->avg.load_avg as its lower bound, which then gives:
3934 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3935 * max(grq->load.weight, grq->avg.load_avg)
3952 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
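Lines 3889-3952 belong to the calc_group_shares() comment and code: because grq->avg.load_avg lags behind grq->load.weight right after a wakeup, the numerator uses max(grq->load.weight, grq->avg.load_avg) and the global sum is corrected with that same value. A worked example with invented numbers, omitting the MIN_SHARES/tg->shares clamping present in the real function:

/*
 * shares = tg->shares * max(grq->load.weight, grq->avg.load_avg)
 *          divided by the correspondingly corrected tg_load_avg'.
 */
#include <stdio.h>

static long max_long(long a, long b) { return a > b ? a : b; }

int main(void)
{
	long tg_shares       = 1024;	/* tg->weight (tg->shares) */
	long tg_load_avg     = 3072;	/* \Sum grq->avg.load_avg over CPUs */
	long grq_contrib     =    0;	/* this rq's stale contribution */
	long grq_load_avg    =    0;	/* grq->avg.load_avg, not built up yet */
	long grq_load_weight = 1024;	/* grq->load.weight, one new nice-0 task */

	long load      = max_long(grq_load_weight, grq_load_avg);
	long tg_weight = tg_load_avg - grq_contrib + load;	/* corrected sum */
	long shares    = tg_shares * load / tg_weight;

	printf("effective load=%ld corrected tg load=%ld shares=%ld\n",
	       load, tg_weight, shares);
	return 0;
}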
4059 return u64_u32_load_copy(cfs_rq->avg.last_update_time,
4094 if (!load_avg_is_decayed(&cfs_rq->avg))
4104 * update_tg_load_avg - update the tg's load avg
4105 * @cfs_rq: the cfs_rq whose avg changed
4107 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
4119 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
4129 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
4154 if (!(se->avg.last_update_time && prev))
4161 se->avg.last_update_time = n_last_update_time;
4169 * ge->avg == grq->avg (1)
4180 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
4185 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
4189 * ge->avg.runnable_avg == grq->avg.runnable_avg
4193 * ge->load.weight * grq->avg.load_avg
4194 * ge->avg.load_avg = ----------------------------------- (4)
4219 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
4226 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
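Lines 4169-4226 are the propagation comment: a group entity must not copy its group runqueue's load_avg verbatim but rescale it by its own weight, rule (4), ge->avg.load_avg = ge->load.weight * grq->avg.load_avg / grq->load.weight. A small worked example with invented values:

/* Rescale a child runqueue's load_avg by the group entity's own weight. */
#include <stdio.h>

int main(void)
{
	unsigned long ge_weight       =  512;	/* ge->load.weight */
	unsigned long grq_load_avg    = 2048;	/* grq->avg.load_avg */
	unsigned long grq_load_weight = 3072;	/* grq->load.weight */

	unsigned long ge_load_avg = ge_weight * grq_load_avg / grq_load_weight;

	printf("propagated ge->avg.load_avg = %lu\n", ge_load_avg);
	return 0;
}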
4234 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
4242 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4245 divider = get_pelt_divider(&cfs_rq->avg);
4249 se->avg.util_avg = gcfs_rq->avg.util_avg;
4250 new_sum = se->avg.util_avg * divider;
4251 delta_sum = (long)new_sum - (long)se->avg.util_sum;
4252 se->avg.util_sum = new_sum;
4255 add_positive(&cfs_rq->avg.util_avg, delta_avg);
4256 add_positive(&cfs_rq->avg.util_sum, delta_sum);
4259 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4260 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4266 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
4274 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4277 divider = get_pelt_divider(&cfs_rq->avg);
4280 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
4281 new_sum = se->avg.runnable_avg * divider;
4282 delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
4283 se->avg.runnable_sum = new_sum;
4286 add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
4287 add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
4289 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4290 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4308 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4311 divider = get_pelt_divider(&cfs_rq->avg);
4318 runnable_sum += se->avg.load_sum;
4326 load_sum = div_u64(gcfs_rq->avg.load_sum,
4331 runnable_sum = min(se->avg.load_sum, load_sum);
4340 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
4346 delta_avg = load_avg - se->avg.load_avg;
4350 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
4352 se->avg.load_sum = runnable_sum;
4353 se->avg.load_avg = load_avg;
4354 add_positive(&cfs_rq->avg.load_avg, delta_avg);
4355 add_positive(&cfs_rq->avg.load_sum, delta_sum);
4357 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
4358 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
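The update_tg_cfs_util()/update_tg_cfs_runnable()/update_tg_cfs_load() matches (lines 4234-4358) all follow one pattern: the group entity adopts the child runqueue's _avg, rebuilds its _sum as _avg * divider, and only the resulting deltas are folded into the parent runqueue, whose _sum is then floored at _avg * PELT_MIN_DIVIDER. The sketch below shows the util case with invented numbers; add_positive() and the divider are local approximations of the kernel helpers.

/* Delta propagation of a utilization change from a child group runqueue. */
#include <stdio.h>

#define PELT_MIN_DIVIDER 46718L	/* kernel: LOAD_AVG_MAX - 1024 */

static void add_positive(long *ptr, long val)
{
	/* Apply a signed delta, never letting the target go below zero. */
	*ptr += val;
	if (*ptr < 0)
		*ptr = 0;
}

int main(void)
{
	long divider = PELT_MIN_DIVIDER;	/* get_pelt_divider() analogue */

	/* Group entity (se) and its parent runqueue (cfs_rq). */
	long se_util_avg  = 300, se_util_sum  = 300 * divider;
	long cfs_util_avg = 700, cfs_util_sum = 700 * divider;

	/* The child group runqueue's utilization dropped to 180. */
	long gcfs_util_avg = 180;

	long delta_avg = gcfs_util_avg - se_util_avg;
	long new_sum   = gcfs_util_avg * divider;
	long delta_sum = new_sum - se_util_sum;

	se_util_avg = gcfs_util_avg;		/* entity adopts the child avg */
	se_util_sum = new_sum;

	add_positive(&cfs_util_avg, delta_avg);	/* parent gets only the deltas */
	add_positive(&cfs_util_sum, delta_sum);
	if (cfs_util_sum < cfs_util_avg * PELT_MIN_DIVIDER)
		cfs_util_sum = cfs_util_avg * PELT_MIN_DIVIDER;

	printf("se: avg=%ld  parent: avg=%ld sum=%ld\n",
	       se_util_avg, cfs_util_avg, cfs_util_sum);
	return 0;
}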
4407 if (se->avg.load_avg || se->avg.util_avg)
4446 if (load_avg_is_decayed(&se->avg))
4508 * cfs_rq->avg.last_update_time is more recent than our
4526 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
4527 * avg. The immediate corollary is that all (fair) tasks must be attached.
4529 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
4533 * Since both these conditions indicate a changed cfs_rq->avg.load we should
4540 struct sched_avg *sa = &cfs_rq->avg;
4545 u32 divider = get_pelt_divider(&cfs_rq->avg);
4601 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
4606 * cfs_rq->avg.last_update_time being current.
4611 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4614 u32 divider = get_pelt_divider(&cfs_rq->avg);
4623 se->avg.last_update_time = cfs_rq->avg.last_update_time;
4624 se->avg.period_contrib = cfs_rq->avg.period_contrib;
4632 se->avg.util_sum = se->avg.util_avg * divider;
4634 se->avg.runnable_sum = se->avg.runnable_avg * divider;
4636 se->avg.load_sum = se->avg.load_avg * divider;
4637 if (se_weight(se) < se->avg.load_sum)
4638 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
4640 se->avg.load_sum = 1;
4643 cfs_rq->avg.util_avg += se->avg.util_avg;
4644 cfs_rq->avg.util_sum += se->avg.util_sum;
4645 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4646 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4648 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4656 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
4661 * cfs_rq->avg.last_update_time being current.
4666 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4667 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4669 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4670 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4672 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4673 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4675 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4676 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4678 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
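Lines 4601-4678 are attach_entity_load_avg()/detach_entity_load_avg(): on attach the entity is synced to the runqueue's PELT clock and its *_sum fields are rebuilt from *_avg using the runqueue's divider before being added in; detach is the clamped mirror image. A minimal userspace sketch of the attach side, util signal only, with invented values:

/* Attach an entity's utilization signal to a runqueue it just joined. */
#include <stdio.h>

#define PELT_MIN_DIVIDER 46718UL	/* kernel: LOAD_AVG_MAX - 1024 */

struct pelt { unsigned long util_avg, util_sum, last_update_time; };

static void attach(struct pelt *rq, struct pelt *se)
{
	unsigned long divider = PELT_MIN_DIVIDER;	/* get_pelt_divider() analogue */

	se->last_update_time = rq->last_update_time;	/* sync PELT clocks */
	se->util_sum = se->util_avg * divider;		/* rebuild sum from avg */

	rq->util_avg += se->util_avg;			/* fold into the rq */
	rq->util_sum += se->util_sum;
}

int main(void)
{
	struct pelt rq = { .util_avg = 400, .util_sum = 400 * PELT_MIN_DIVIDER,
			   .last_update_time = 123456789 };
	struct pelt se = { .util_avg = 150 };		/* freshly migrated task */

	attach(&rq, &se);
	printf("rq util_avg=%lu util_sum=%lu, se synced to t=%lu\n",
	       rq.util_avg, rq.util_sum, se.last_update_time);
	return 0;
}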
4703 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4709 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4737 * Synchronize entity load avg of dequeued entity without locking
4768 cfs_rq->removed.util_avg += se->avg.util_avg;
4769 cfs_rq->removed.load_avg += se->avg.load_avg;
4770 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4776 return cfs_rq->avg.runnable_avg;
4781 return cfs_rq->avg.load_avg;
4792 return READ_ONCE(p->se.avg.util_avg);
4797 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4848 enqueued = cfs_rq->avg.util_est.enqueued;
4850 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4864 enqueued = cfs_rq->avg.util_est.enqueued;
4866 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4907 ue = p->se.avg.util_est;
4967 WRITE_ONCE(p->se.avg.util_est, ue);
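The p->se.avg.util_est matches (lines 4792-4967) maintain an estimated utilization: the utilization seen at dequeue ("enqueued") plus an exponentially weighted moving average of it. The sketch below shows the shift-based EWMA update with weight 1/4; it is a simplification that drops the fast-ramp-up and skip conditions of the real util_est_update().

/* ewma' = ewma + (enqueued - ewma) / 4, computed with shifts. */
#include <stdio.h>

#define UTIL_EST_WEIGHT_SHIFT 2	/* EWMA weight = 1/4 */

static unsigned int ewma_update(unsigned int ewma, unsigned int enqueued)
{
	int diff = (int)enqueued - (int)ewma;

	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;
	return ewma;
}

int main(void)
{
	unsigned int ewma = 400;
	unsigned int samples[] = { 600, 600, 100, 100 };

	for (int i = 0; i < 4; i++) {
		ewma = ewma_update(ewma, samples[i]);
		printf("sample=%u ewma=%u\n", samples[i], ewma);
	}
	return 0;
}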
6986 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6990 load = READ_ONCE(cfs_rq->avg.load_avg);
7009 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7013 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
7016 lsub_positive(&runnable, p->se.avg.runnable_avg);
7759 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
7773 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
7791 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
7795 * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
7868 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
8362 se->avg.last_update_time = 0;
8840 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
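Formula (4) on line 8840 measures the imbalance between two CPUs i and j as the gap between the busier side's load per unit of capacity (or the system-wide average, whichever is larger) and the idler side's (or the average, whichever is smaller); the reading of W as runnable weight and C as compute capacity follows the surrounding comment. A small numeric example with invented W and C values:

/* imb = max{avg(W/C), W_i/C_i} - min{avg(W/C), W_j/C_j} */
#include <stdio.h>

int main(void)
{
	double W[2] = { 3000.0, 500.0 };	/* runnable weight per CPU */
	double C[2] = { 1024.0, 1024.0 };	/* compute capacity per CPU */
	double avg  = (W[0] + W[1]) / (C[0] + C[1]);	/* avg(W/C) */

	double busiest = W[0] / C[0] > avg ? W[0] / C[0] : avg;
	double dest    = W[1] / C[1] < avg ? W[1] / C[1] : avg;

	printf("avg=%.3f imbalance=%.3f\n", avg, busiest - dest);
	return 0;
}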
9481 if (cfs_rq->avg.load_avg)
9484 if (cfs_rq->avg.util_avg)
9627 load = div64_ul(load * se->avg.load_avg,
9640 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9658 return p->se.avg.load_avg;
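Lines 9627-9658 compute task_h_load(): a task's load as seen by the root runqueue is its own load_avg scaled down, level by level, by each ancestor group entity's share of its group runqueue's load (the +1 keeps the divisions safe). A worked example with two invented levels of nesting:

/* Scale a task's load_avg through two group levels down to the root. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Root sees the top-level group entity contributing this much load. */
	uint64_t h_load = 1024;

	/* One level down: scale by ge->avg.load_avg / (grq load_avg + 1). */
	uint64_t ge_load_avg  = 512;	/* mid-level group entity's load_avg */
	uint64_t grq_load_avg = 2048;	/* its parent runqueue's load_avg */
	h_load = h_load * ge_load_avg / (grq_load_avg + 1);

	/* Finally scale the task itself against its own cfs_rq. */
	uint64_t task_load_avg = 700;
	uint64_t leaf_load_avg = 1400;
	uint64_t task_h_load = task_load_avg * h_load / (leaf_load_avg + 1);

	printf("h_load=%llu task_h_load=%llu\n",
	       (unsigned long long)h_load, (unsigned long long)task_h_load);
	return 0;
}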
10430 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
11140 /* Misfit tasks should be dealt with regardless of the avg load */
13185 if (!se->avg.last_update_time)
13294 p->se.avg.last_update_time = 0;