Searched refs:load_avg (Results 1 - 12 of 12) sorted by relevance
/kernel/linux/linux-5.10/kernel/sched/

debug.c
     458  P(se->avg.load_avg);                                  in print_cfs_group_stats()
     599  SEQ_printf(m, " .%-30s: %lu\n", "load_avg",           in print_cfs_rq()
     600          cfs_rq->avg.load_avg);                        in print_cfs_rq()
     607  SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",   in print_cfs_rq()
     608          cfs_rq->removed.load_avg);                    in print_cfs_rq()
     617  atomic_long_read(&cfs_rq->tg->load_avg));             in print_cfs_rq()
    1024  P(se.avg.load_avg);                                   in proc_sched_show_task()

fair.c
     792  sa->load_avg = scale_load_down(se->load.weight);      in init_entity_runnable_average()
     794  /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */  in init_entity_runnable_average()
     803  * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
     836  sa->util_avg /= (cfs_rq->avg.load_avg + 1);           in post_init_entity_util_avg()
    3105  cfs_rq->avg.load_avg += se->avg.load_avg;             in enqueue_load_avg()
    3112  sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);  in dequeue_load_avg()
    3139  se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);  in reweight_entity()
    3179  * grq->load.weight -> grq->avg.load_avg (
    3530  unsigned long load_avg;                               update_tg_cfs_load() local
    [all...]

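The fair.c comment at source line 803 gives the heuristic used to seed a fresh task's utilization from its parent cfs_rq, and line 836 is the divide step of that computation. A minimal userspace sketch of the arithmetic follows; the struct layout, the nice-0-style weight of 1024, and the capacity cap are simplifying assumptions, not the kernel's actual types:

#include <stdio.h>

struct sketch_avg {
	unsigned long load_avg;
	unsigned long util_avg;
};

/*
 * Seed a new entity's util_avg from its cfs_rq, following the comment
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 * Multiply before dividing to keep precision; "+ 1" avoids a zero divisor.
 */
static unsigned long seed_util_avg(const struct sketch_avg *rq_avg,
                                   unsigned long se_weight,
                                   unsigned long cpu_cap)
{
	unsigned long util = rq_avg->util_avg * se_weight;

	util /= rq_avg->load_avg + 1;

	/* post_init_entity_util_avg() also caps the estimate. */
	if (util > cpu_cap)
		util = cpu_cap;
	return util;
}

int main(void)
{
	struct sketch_avg rq = { .load_avg = 2048, .util_avg = 600 };

	/* weight 1024 (nice 0) and capacity 1024 are assumed values */
	printf("seeded util_avg = %lu\n", seed_util_avg(&rq, 1024, 1024));
	return 0;
}
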
pelt.h
      15  return READ_ONCE(rq->avg_thermal.load_avg);           in thermal_load_avg()

pelt.c
     180  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
     268  sa->load_avg = div_u64(load * sa->load_sum, divider);  in ___update_load_avg()
     281  * se_weight() = tg->weight * grq->load_avg / tg->load_avg
     288  * load_avg = se_weight(se) * load_sum
     296  * load_avg = \Sum se->avg.load_avg
     346  * load_avg and runnable_avg are not supported and meaningless.
     372  * load_avg and runnable_avg are not supported and meaningless.
     401  * tracked through load_avg
    [all...]

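pelt.c source line 180 is the per-period recurrence behind PELT: whenever a ~1024us period rolls over, the accumulated sum is decayed by y and the newest period's contribution is added, with y chosen so that load from ~32ms ago counts half. A toy floating-point model of that recurrence follows; the kernel uses fixed-point lookup tables, and the saturation value is only approximated here:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* PELT picks y so that y^32 == 0.5: ~32ms-old load counts half. */
	const double y = pow(0.5, 1.0 / 32.0);
	double sum = 0.0;

	/*
	 * An entity runnable for all of 345 consecutive 1024us periods
	 * (345 is the kernel's LOAD_AVG_MAX_N): each rollover decays the
	 * old sum by y and adds a full period's contribution, converging
	 * toward 1024 / (1 - y), roughly the kernel's LOAD_AVG_MAX (~47742).
	 */
	for (int period = 0; period < 345; period++)
		sum = sum * y + 1024.0;

	printf("saturated PELT sum ~= %.0f\n", sum);
	return 0;
}
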
sched.h
     480  * load_avg can be heavily contended at clock tick time, so put
     484  atomic_long_t load_avg ____cacheline_aligned;
     651  unsigned long load_avg;                               member

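The kernel/sched/sched.h fragment (source lines 480-484) is a false-sharing mitigation: tg->load_avg is written by every CPU at tick time, so ____cacheline_aligned gives it a cache line of its own instead of letting it share one with read-mostly task_group fields. A portable C11 sketch of the same pattern, with an assumed 64-byte line size and an invented stand-in struct:

#include <stdalign.h>
#include <stdatomic.h>

#define CACHE_LINE 64	/* assumed; the kernel derives this from L1_CACHE_BYTES */

struct tg_sketch {	/* invented stand-in for struct task_group */
	long shares;	/* read-mostly configuration field */
	/*
	 * Written by every CPU at tick time; alignas pushes it onto its
	 * own cache line so those writes stop invalidating the line that
	 * holds the read-mostly field above (what ____cacheline_aligned
	 * achieves in the kernel).
	 */
	alignas(CACHE_LINE) atomic_long load_avg;
};

int main(void)
{
	struct tg_sketch tg = { .shares = 1024 };

	atomic_fetch_add(&tg.load_avg, 512);	/* one CPU's contribution */
	return 0;
}
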
/kernel/linux/linux-6.6/kernel/sched/

fair.c
    1135  sa->load_avg = scale_load_down(se->load.weight);      in init_entity_runnable_average()
    1137  /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */  in init_entity_runnable_average()
    1144  * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
    1192  sa->util_avg /= (cfs_rq->avg.load_avg + 1);           in post_init_entity_util_avg()
    3678  cfs_rq->avg.load_avg += se->avg.load_avg;             in enqueue_load_avg()
    3685  sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);  in dequeue_load_avg()
    3689  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);             in dequeue_load_avg()
    3836  se->avg.load_avg                                      in reweight_entity()
    4297  unsigned long load_avg;                               update_tg_cfs_load() local
    [all...]

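New relative to 5.10 is the clamp at source line 3689: after sub_positive() removes a departing entity's contribution, 6.6 also keeps cfs_rq->avg.load_sum from falling below load_avg * PELT_MIN_DIVIDER, so the _sum and _avg fields stay mutually consistent. A sketch of the sub_positive() idea, subtracting from an unsigned counter while clamping at zero rather than wrapping:

#include <stdio.h>

/*
 * Model of the kernel's sub_positive(): subtract val from an unsigned
 * counter but clamp at zero; a plain "*ptr -= val" would wrap around
 * to a huge value whenever stale per-entity state makes val > *ptr.
 */
static void sub_positive_sketch(unsigned long *ptr, unsigned long val)
{
	unsigned long res = *ptr - val;

	if (res > *ptr)		/* unsigned subtraction wrapped past zero */
		res = 0;
	*ptr = res;
}

int main(void)
{
	unsigned long load_avg = 100;

	sub_positive_sketch(&load_avg, 130);	/* stale, too-large contribution */
	printf("load_avg after dequeue: %lu\n", load_avg);	/* 0, not huge */
	return 0;
}
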
debug.c
     528  P(se->avg.load_avg);                                  in print_cfs_group_stats()
     677  SEQ_printf(m, " .%-30s: %lu\n", "load_avg",           in print_cfs_rq()
     678          cfs_rq->avg.load_avg);                        in print_cfs_rq()
     685  SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",   in print_cfs_rq()
     686          cfs_rq->removed.load_avg);                    in print_cfs_rq()
     695  atomic_long_read(&cfs_rq->tg->load_avg));             in print_cfs_rq()
    1095  P(se.avg.load_avg);                                   in proc_sched_show_task()

pelt.h
      15  return READ_ONCE(rq->avg_thermal.load_avg);           in thermal_load_avg()

pelt.c
     176  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
     264  sa->load_avg = div_u64(load * sa->load_sum, divider);  in ___update_load_avg()
     277  * se_weight() = tg->weight * grq->load_avg / tg->load_avg
     284  * load_avg = se_weight(se) * load_sum
     292  * load_avg = \Sum se->avg.load_avg
     342  * load_avg and runnable_avg are not supported and meaningless.
     368  * load_avg and runnable_avg are not supported and meaningless.
     397  * tracked through load_avg
    [all...]

sched.h
     463  * load_avg can be heavily contended at clock tick time, so put
     467  atomic_long_t load_avg ____cacheline_aligned;
     686  unsigned long load_avg;                               member

/kernel/linux/linux-5.10/include/linux/ |
sched.h
     368  * has a few: load, load_avg, util_avg, freq, and capacity.
     425  * [load_avg definition]
     427  * load_avg = runnable% * scale_load_down(load)
     461  * Max(load_avg) <= Max(load.weight)
     472  unsigned long load_avg;                               member

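The include/linux/sched.h comment defines load_avg as the runnable fraction times the scaled-down weight, which directly yields the stated bound: runnable% is at most 100%, so Max(load_avg) <= Max(load.weight). A numeric illustration under the assumption that the fixed-point runnable fraction can be modeled with a double:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* runnable%: fraction of time runnable; a double is illustration only */
	double runnable_pct = 0.75;
	unsigned long weight = 1024;	/* scale_load_down() of a nice-0 weight */

	unsigned long load_avg = (unsigned long)(runnable_pct * weight);

	/* runnable% never exceeds 1, hence Max(load_avg) <= Max(load.weight) */
	assert(load_avg <= weight);
	printf("load_avg = %lu (bound: %lu)\n", load_avg, weight);
	return 0;
}
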
/kernel/linux/linux-6.6/include/linux/ |
sched.h
     445  * has a few: load, load_avg, util_avg, freq, and capacity.
     502  * [load_avg definition]
     504  * load_avg = runnable% * scale_load_down(load)
     538  * Max(load_avg) <= Max(load.weight)
     549  unsigned long load_avg;                               member

Completed in 43 milliseconds