/kernel/linux/linux-5.10/kernel/sched/
fair.c
    801:   based on the cfs_rq's current util_avg:
    803:     util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
    805:   However, in many cases, the above util_avg does not give a desired
    809:   To solve this problem, we also cap the util_avg of successive tasks to
    812:     util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
    819:    task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
    820:   cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
    822:   Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
    823:   if util_avg > util_avg_cap.
    [all …]
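These hits come from the comment above post_init_entity_util_avg(), which seeds
a new task's util_avg from its cfs_rq. A minimal sketch of the
extrapolate-then-clamp logic that comment describes, with the kernel plumbing
(rq lookup, arch_scale_cpu_capacity()) reduced to parameters; the function name
is illustrative and this is a simplified sketch, not the verbatim kernel code:

    /* Sketch: seed a new entity's util_avg, per the fair.c comment. */
    static void post_init_util_avg_sketch(struct sched_entity *se,
                                          struct cfs_rq *cfs_rq,
                                          long cpu_scale)
    {
            struct sched_avg *sa = &se->avg;
            /* Half of the remaining utilization budget; successive tasks
             * halve what is left, hence the 2^n in util_avg_cap. */
            long cap = (cpu_scale - (long)cfs_rq->avg.util_avg) / 2;

            if (cap <= 0)
                    return;

            if (cfs_rq->avg.util_avg != 0) {
                    /* Extrapolate from the cfs_rq's current average ... */
                    sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
                    sa->util_avg /= (cfs_rq->avg.load_avg + 1);
                    /* ... and clamp to the cap if it overshoots. */
                    if ((long)sa->util_avg > cap)
                            sa->util_avg = cap;
            } else {
                    sa->util_avg = cap;
            }
    }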
debug.c
    459:   P(se->avg.util_avg);                                  in print_cfs_group_stats()
    603:   SEQ_printf(m, " .%-30s: %lu\n", "util_avg",           in print_cfs_rq()
    604:              cfs_rq->avg.util_avg);                     in print_cfs_rq()
    609:   SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",   in print_cfs_rq()
    610:              cfs_rq->removed.util_avg);                 in print_cfs_rq()
    1026:  P(se.avg.util_avg);                                   in proc_sched_show_task()
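The P(...) hits go through a local stringifying macro in debug.c; its shape is
roughly the following (simplified; the kernel defines and undefines it around
each printer), which is why se->avg.util_avg surfaces as a ".util_avg" line in
/proc/sched_debug:

    /* Sketch of debug.c's helper: prints ".<expression>: <value>". */
    #define P(F) \
            SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)(F))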
sched_avg.c
    166:   util = rq->cfs.avg.util_avg;                          in sched_get_cpu_util()
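sched_get_cpu_util() samples the CFS runqueue's util_avg as a CPU-utilization
readout for callers outside the scheduler core. A hedged sketch of that shape;
the clamp to the CPU's original capacity is an assumption modeled on similar
vendor helpers, not taken from the hit itself:

    /* Sketch: report a CPU's CFS utilization, capped at its capacity.
     * The min() against capacity_orig_of() is assumed, not verbatim. */
    unsigned long sched_get_cpu_util_sketch(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);
            unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

            return min(util, capacity_orig_of(cpu));
    }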
pelt.c
    270:   WRITE_ONCE(sa->util_avg, sa->util_sum / divider);     in ___update_load_avg()
    397:   util_avg and runnable_load_avg are not supported and meaningless.
    400:   running a rt/dl task through util_avg, the average thermal pressure is
    402:   time weighted "delta" capacity unlike util_avg which is binary.
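The divider in the line-270 hit turns the accumulated util_sum back into a
0..1024-scaled util_avg. It is the converged maximum of the PELT geometric
series minus the not-yet-complete current period; roughly what the kernel's
get_pelt_divider() computes:

    /* Sketch of the divider used at line 270: LOAD_AVG_MAX is the
     * converged sum of 1024 * y^k, and the partially elapsed current
     * period has contributed only period_contrib of its possible 1024. */
    static u32 pelt_divider_sketch(const struct sched_avg *avg)
    {
            return LOAD_AVG_MAX - 1024 + avg->period_contrib;
    }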
sched.h
    652:   unsigned long util_avg;                               (member)
    2775:  return READ_ONCE(rq->avg_dl.util_avg);                in cpu_util_dl()
    2780:  unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); in cpu_util_cfs()
    2792:  return READ_ONCE(rq->avg_rt.util_avg);                in cpu_util_rt()
    2806:  return rq->avg_irq.util_avg;                          in cpu_util_irq()
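These per-class accessors are combined elsewhere (schedutil_cpu_util() in 5.10,
effective_cpu_util() in later kernels) into a single CPU-utilization figure. A
hedged sketch of that aggregation, simplified from those helpers (the uclamp
and frequency-vs-energy distinctions are omitted; the function name here is
illustrative):

    /* Sketch: fold CFS, RT, DL and IRQ utilization into one value. */
    static unsigned long total_cpu_util_sketch(struct rq *rq, unsigned long max)
    {
            unsigned long irq = cpu_util_irq(rq);
            unsigned long util;

            if (irq >= max)
                    return max;     /* IRQ time alone saturates the CPU */

            util = cpu_util_cfs(rq) + cpu_util_rt(rq) + cpu_util_dl(rq);
            if (util >= max)
                    return max;

            /* Time spent in IRQ context is invisible to the classes
             * above, so scale their sum by the non-IRQ fraction. */
            util = scale_irq_capacity(util, irq, max);
            return util + irq;
    }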
/kernel/linux/linux-6.6/kernel/sched/
fair.c
    1142:  based on the cfs_rq's current util_avg:
    1144:    util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
    1146:  However, in many cases, the above util_avg does not give a desired
    1150:  To solve this problem, we also cap the util_avg of successive tasks to
    1153:    util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
    1160:   task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
    1161:  cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
    1163:  Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
    1164:  if util_avg > util_avg_cap.
    [all …]
sched_avg.c
    166:   util = rq->cfs.avg.util_avg;                          in sched_get_cpu_util()
pelt.c
    266:   WRITE_ONCE(sa->util_avg, sa->util_sum / divider);     in ___update_load_avg()
    393:   util_avg and runnable_load_avg are not supported and meaningless.
    396:   running a rt/dl task through util_avg, the average thermal pressure is
    398:   time weighted "delta" capacity unlike util_avg which is binary.
debug.c
    529:   P(se->avg.util_avg);                                  in print_cfs_group_stats()
    681:   SEQ_printf(m, " .%-30s: %lu\n", "util_avg",           in print_cfs_rq()
    682:              cfs_rq->avg.util_avg);                     in print_cfs_rq()
    687:   SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",   in print_cfs_rq()
    688:              cfs_rq->removed.util_avg);                 in print_cfs_rq()
    1097:  P(se.avg.util_avg);                                   in proc_sched_show_task()
sched.h
    687:   unsigned long util_avg;                               (member)
    3210:  return READ_ONCE(rq->avg_dl.util_avg);                in cpu_util_dl()
    3219:  return READ_ONCE(rq->avg_rt.util_avg);                in cpu_util_rt()
    3380:  return rq->avg_irq.util_avg;                          in cpu_util_irq()
/kernel/linux/linux-5.10/include/linux/
sched.h
    368:   has a few: load, load_avg, util_avg, freq, and capacity.
    398:   - task: the task's util_avg at last task dequeue time
    407:   The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
    409:   util_avg has not been updated in the meantime.
    411:   time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
    422:   The load/runnable/util_avg accumulates an infinite geometric series
    433:   [util_avg definition]
    435:     util_avg = running% * SCHED_CAPACITY_SCALE
    443:   The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
    450:   util_avg'
    474:   unsigned long util_avg;                               (member)
    [all …]
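The line-422 and line-435 hits describe the PELT signal itself: every 1024us
period the accumulated sum decays by y (with y^32 == 0.5) and gains up to 1024
if the entity ran the whole period, so util_avg converges toward
running% * SCHED_CAPACITY_SCALE. A self-contained toy simulation of that series
(user-space C with floating point for readability; the kernel uses fixed-point
integer math and a roughly 345-period horizon):

    #include <stdio.h>

    int main(void)
    {
            const double y = 0.97857206;               /* 32nd root of 0.5 */
            const double max_sum = 1024.0 / (1.0 - y); /* converged series */
            double sum = 0.0;
            double running = 0.5;       /* entity runs 50% of each period */

            for (int period = 1; period <= 320; period++) {
                    /* Decay the old sum, add this period's contribution. */
                    sum = sum * y + 1024.0 * running;
                    if (period % 64 == 0)
                            printf("period %3d: util_avg ~= %4.0f\n",
                                   period, 1024.0 * sum / max_sum);
            }
            /* Output approaches 512, i.e. 50% of SCHED_CAPACITY_SCALE. */
            return 0;
    }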
/kernel/linux/linux-6.6/include/linux/
sched.h
    445:   has a few: load, load_avg, util_avg, freq, and capacity.
    475:   - task: the task's util_avg at last task dequeue time
    484:   The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
    486:   util_avg has not been updated in the meantime.
    488:   time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
    499:   The load/runnable/util_avg accumulates an infinite geometric series
    510:   [util_avg definition]
    512:     util_avg = running% * SCHED_CAPACITY_SCALE
    520:   The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
    527:   util_avg'
    551:   unsigned long util_avg;                               (member)
    [all …]