Searched refs:util_sum (Results 1 - 10 of 10) sorted by relevance
/kernel/linux/linux-5.10/kernel/sched/
pelt.h:
  108   * because it was fully busy. A rq is fully used when the /Sum util_sum
  113   * bound of util_sum to decide.
  118   u32 util_sum = rq->cfs.avg.util_sum;     in update_idle_rq_clock_pelt() local
  119   util_sum += rq->avg_rt.util_sum;         in update_idle_rq_clock_pelt()
  120   util_sum += rq->avg_dl.util_sum;         in update_idle_rq_clock_pelt()
  131   if (util_sum >= divider)                 in update_idle_rq_clock_pelt()
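The pelt.h hits above are the body of update_idle_rq_clock_pelt(): the util_sum of the cfs, rt and dl classes is summed and compared against an upper bound to decide whether the rq was fully busy. Below is a minimal standalone sketch of that decision; the struct name and the exact divider expression are simplified stand-ins, not the kernel's definitions.

/* Hypothetical, simplified stand-in for the check in pelt.h: treat the
 * rq as "fully busy" when the summed util_sum of the cfs, rt and dl
 * classes reaches (roughly) the maximum a single fully-busy class can
 * accumulate. The real kernel divider differs slightly in shape.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10        /* real kernel constant */
#define LOAD_AVG_MAX         47742     /* documented PELT series maximum */

struct pelt_avg { uint32_t util_sum; };   /* hypothetical mini struct */

static int rq_was_fully_busy(const struct pelt_avg *cfs,
			     const struct pelt_avg *rt,
			     const struct pelt_avg *dl)
{
	/* Approximate upper bound of util_sum for one fully-busy class. */
	uint32_t divider = (LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT;
	uint32_t util_sum = cfs->util_sum;

	util_sum += rt->util_sum;
	util_sum += dl->util_sum;

	return util_sum >= divider;
}

int main(void)
{
	struct pelt_avg cfs = { .util_sum = 40000u << SCHED_CAPACITY_SHIFT };
	struct pelt_avg rt  = { .util_sum =  7000u << SCHED_CAPACITY_SHIFT };
	struct pelt_avg dl  = { .util_sum = 0 };

	printf("fully busy: %d\n", rq_was_fully_busy(&cfs, &rt, &dl));
	return 0;
}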
pelt.c:
  122   sa->util_sum = decay_load((u64)(sa->util_sum), periods);   in accumulate_sum()
  150   sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;           in accumulate_sum()
  270   WRITE_ONCE(sa->util_avg, sa->util_sum / divider);          in ___update_load_avg()
  342   * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
  343   * util_sum = cpu_scale * load_sum
  344   * runnable_sum = util_sum
  368   * util_sum
  [all...]
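The pelt.c hits trace util_sum through one PELT update: it is decayed geometrically for the elapsed periods (hit 122), the new running contribution is added scaled by SCHED_CAPACITY_SHIFT (hit 150), and util_avg is derived by dividing util_sum by the series maximum (hit 270). The toy model below illustrates that pipeline in floating point; the kernel uses fixed-point arithmetic and a precomputed decay table, so this is only a sketch of the math, not the kernel's implementation.

/* Toy model of the util_sum update pipeline in pelt.c (illustrative). */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define LOAD_AVG_MAX 47742                 /* documented PELT series maximum */
static const double PELT_Y = 0.97857206;   /* decay factor: halves every 32 periods */

int main(void)
{
	double util_sum = 0.0;

	/* 200 PELT periods (1024 us each) of a task running 50% of the time. */
	for (int p = 0; p < 200; p++) {
		unsigned int contrib = 512;   /* microseconds running this period */

		util_sum *= PELT_Y;                                    /* decay_load(..., 1) */
		util_sum += (double)(contrib << SCHED_CAPACITY_SHIFT); /* accumulate_sum()   */
	}

	/* ___update_load_avg(): util_avg = util_sum / divider, with the
	 * divider approximately LOAD_AVG_MAX. Converges near 512 here. */
	printf("util_avg ~= %.0f (out of 1024)\n", util_sum / LOAD_AVG_MAX);
	return 0;
}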
fair.c:
  3494  se->avg.util_sum = se->avg.util_avg * divider;            in update_tg_cfs_util()
  3498  cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;    in update_tg_cfs_util()
  3572  running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;   in update_tg_cfs_load()
  3704  sub_positive(&sa->util_sum, r * divider);                 in update_cfs_rq_load_avg()
  3706  * Because of rounding, se->util_sum might ends up being +1 more than    in update_cfs_rq_load_avg()
  3707  * cfs->util_sum. Although this is not a problem by itself, detaching    in update_cfs_rq_load_avg()
  3709  * util_avg (~1ms) can make cfs->util_sum becoming null whereas          in update_cfs_rq_load_avg()
  3711  * Check that util_sum is still above its lower bound for the new        in update_cfs_rq_load_avg()
  3713  * sync, we are only sure that util_sum must be above or equal to        in update_cfs_rq_load_avg()
  3716  sa->util_sum                                                            in update_cfs_rq_load_avg()
  [all...]
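The fair.c hits show the other half of the bookkeeping: util_sum is kept in sync with util_avg by multiplying by the divider (hits 3494/3498), and when an entity's contribution r is removed, util_sum is reduced by r * divider and then, per the rounding comment, prevented from dropping below a lower bound derived from util_avg. The sketch below mirrors that shape; sub_positive and the struct are local stand-ins, not the kernel's macros, and the minimum-divider value is an assumption based on the constants above.

/* Illustrative sketch of the detach path visible in the fair.c hits. */
#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX 47742
#define MIN_DIVIDER  (LOAD_AVG_MAX - 1024)   /* assumed lower-bound divider */

struct avg { unsigned long util_avg; uint32_t util_sum; };

/* stand-ins for the kernel's sub_positive(): subtract, clamp at 0 */
static void sub_positive_ul(unsigned long *v, unsigned long n)
{
	*v = (*v > n) ? *v - n : 0;
}

static void sub_positive_u32(uint32_t *v, uint32_t n)
{
	*v = (*v > n) ? *v - n : 0;
}

static void detach_util(struct avg *cfs, unsigned long r, uint32_t divider)
{
	sub_positive_ul(&cfs->util_avg, r);
	sub_positive_u32(&cfs->util_sum, (uint32_t)(r * divider));

	/* Rounding can leave util_sum at 0 while util_avg is still > 0;
	 * keep the pair consistent by enforcing the lower bound. */
	uint32_t floor = (uint32_t)(cfs->util_avg * MIN_DIVIDER);
	if (cfs->util_sum < floor)
		cfs->util_sum = floor;
}

int main(void)
{
	struct avg cfs = { .util_avg = 300, .util_sum = 300u * LOAD_AVG_MAX };

	detach_util(&cfs, 200, LOAD_AVG_MAX);
	printf("util_avg=%lu util_sum=%u\n", cfs.util_avg, cfs.util_sum);
	return 0;
}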
debug.c:
  1023  P(se.avg.util_sum);    in proc_sched_show_task()
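The debug.c hit exports util_sum through proc_sched_show_task(), so on kernels that provide per-task scheduler statistics (scheduler debugging enabled) the value can be read from userspace. A small reader sketch, assuming the field appears in /proc/<pid>/sched on such kernels:

/* Print the util_sum lines of the current task's scheduler stats.
 * Assumes the kernel exposes /proc/<pid>/sched (scheduler debug support). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("fopen /proc/self/sched");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "util_sum"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}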
/kernel/linux/linux-6.6/kernel/sched/
pelt.h:
  126   * because it was fully busy. A rq is fully used when the /Sum util_sum
  131   * bound of util_sum to decide.
  136   u32 util_sum = rq->cfs.avg.util_sum;     in update_idle_rq_clock_pelt() local
  137   util_sum += rq->avg_rt.util_sum;         in update_idle_rq_clock_pelt()
  138   util_sum += rq->avg_dl.util_sum;         in update_idle_rq_clock_pelt()
  149   if (util_sum >= divider)                 in update_idle_rq_clock_pelt()
pelt.c:
  118   sa->util_sum = decay_load((u64)(sa->util_sum), periods);   in accumulate_sum()
  146   sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;           in accumulate_sum()
  266   WRITE_ONCE(sa->util_avg, sa->util_sum / divider);          in ___update_load_avg()
  338   * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
  339   * util_sum = cpu_scale * load_sum
  340   * runnable_sum = util_sum
  364   * util_sum
  [all...]
fair.c:
  4039  if (sa->util_sum)                                          in load_avg_is_decayed()
  4251  delta_sum = (long)new_sum - (long)se->avg.util_sum;        in update_tg_cfs_util()
  4252  se->avg.util_sum = new_sum;                                in update_tg_cfs_util()
  4256  add_positive(&cfs_rq->avg.util_sum, delta_sum);            in update_tg_cfs_util()
  4259  cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,    in update_tg_cfs_util()
  4340  running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;    in update_tg_cfs_load()
  4557  /* See sa->util_sum below */                               in update_cfs_rq_load_avg()
  4562  sub_positive(&sa->util_sum, r * divider);                  in update_cfs_rq_load_avg()
  4564  * Because of rounding, se->util_sum migh                   in update_cfs_rq_load_avg()
  [all...]
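Compared with the 5.10 hits, the 6.6 update_tg_cfs_util() propagates a signed delta_sum from the child entity into the parent cfs_rq (hits 4251-4256) and then clamps the parent's util_sum to a lower bound tied to util_avg (hit 4259). The sketch below reproduces that shape under the same assumptions as earlier (stand-in struct and helpers, assumed minimum divider); it is not the kernel function.

/* Sketch of the delta-based propagation visible in the 6.6 fair.c hits. */
#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX 47742
#define MIN_DIVIDER  (LOAD_AVG_MAX - 1024)   /* assumed lower-bound divider */

struct avg { unsigned long util_avg; uint32_t util_sum; };

/* stand-in for the kernel's add_positive(): signed add, clamp at 0 */
static void add_positive_u32(uint32_t *v, long d)
{
	long r = (long)*v + d;
	*v = (r > 0) ? (uint32_t)r : 0;
}

static void propagate_util(struct avg *cfs, struct avg *se,
			   unsigned long new_avg, uint32_t divider)
{
	uint32_t new_sum = (uint32_t)(new_avg * divider);
	long delta_sum = (long)new_sum - (long)se->util_sum;
	long delta_avg = (long)new_avg - (long)se->util_avg;

	se->util_avg = new_avg;
	se->util_sum = new_sum;

	cfs->util_avg = (unsigned long)((long)cfs->util_avg + delta_avg);
	add_positive_u32(&cfs->util_sum, delta_sum);

	/* lower bound, as in the max_t() hit above */
	if (cfs->util_sum < cfs->util_avg * MIN_DIVIDER)
		cfs->util_sum = (uint32_t)(cfs->util_avg * MIN_DIVIDER);
}

int main(void)
{
	struct avg cfs = { .util_avg = 400, .util_sum = 400u * LOAD_AVG_MAX };
	struct avg se  = { .util_avg = 100, .util_sum = 100u * LOAD_AVG_MAX };

	propagate_util(&cfs, &se, 250, LOAD_AVG_MAX);   /* child grew to 250 */
	printf("cfs: util_avg=%lu util_sum=%u\n", cfs.util_avg, cfs.util_sum);
	return 0;
}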
debug.c:
  1094  P(se.avg.util_sum);    in proc_sched_show_task()
/kernel/linux/linux-5.10/include/linux/
sched.h:
  470   u32 util_sum;    member
/kernel/linux/linux-6.6/include/linux/
sched.h:
  547   u32 util_sum;    member
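Both sched.h hits declare util_sum as a u32 member of the per-entity average structure. That width is sufficient because, with the constants seen in the pelt.c hits, a permanently running entity's util_sum converges to roughly LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT, far below 2^32. A quick check of that bound (constant values as used in the sketches above):

/* Sanity check: the maximum util_sum reachable with the PELT constants
 * fits comfortably in the u32 declared in the sched.h hits. */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define LOAD_AVG_MAX         47742

int main(void)
{
	uint64_t max_util_sum = (uint64_t)LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT;

	printf("max util_sum ~= %llu (u32 limit %u)\n",
	       (unsigned long long)max_util_sum, UINT32_MAX);
	return 0;
}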
Completed in 32 milliseconds