
Searched refs:util_est (Results 1 - 3 of 3) sorted by relevance

/device/soc/rockchip/common/sdk_linux/include/linux/
sched.h 378 * struct util_est - Estimation utilization of FAIR tasks
391 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
392 * Thus, the util_est.enqueued of a task represents the contribution on the
399 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
400 * updates. When a task is dequeued, its util_est should not be updated if its
402 * This information is mapped into the MSB bit of util_est.enqueued at dequeue
403 * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
406 struct util_est { struct
467 struct util_est util_est; member
[all...]
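The comment block above (sched.h lines 378-406) describes the mechanism: every FAIR task carries an estimated utilization, a cfs_rq aggregates the util_est.enqueued of its RUNNABLE tasks, and because a task's util_est.enqueued never exceeds 1024 the MSB is free to double as the UTIL_AVG_UNCHANGED synchronization flag. The standalone C sketch below only illustrates that flag trick; the layout, field names and helpers are assumptions drawn from these comments, not a copy of the SDK sources.

#include <stdio.h>

/* Illustrative mirror of the struct documented around sched.h:378 (assumed layout). */
struct util_est_sketch {
    unsigned int enqueued; /* utilization snapshot taken at enqueue time, at most 1024 */
    unsigned int ewma;     /* assumed: moving average of past activations */
};

/* The MSB of 'enqueued' is never needed for the value itself, so it can hold a flag. */
#define UTIL_AVG_UNCHANGED_SKETCH 0x80000000U

static unsigned int mark_unchanged(unsigned int enqueued)
{
    return enqueued | UTIL_AVG_UNCHANGED_SKETCH; /* set at dequeue time */
}

static unsigned int util_value(unsigned int enqueued)
{
    return enqueued & ~UTIL_AVG_UNCHANGED_SKETCH; /* strip the flag before use */
}

int main(void)
{
    struct util_est_sketch ue = { .enqueued = 512, .ewma = 480 };

    ue.enqueued = mark_unchanged(ue.enqueued);
    printf("raw 0x%08x -> utilization %u, ewma %u\n",
           ue.enqueued, util_value(ue.enqueued), ue.ewma);
    return 0;
}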
/device/soc/rockchip/common/sdk_linux/kernel/sched/
fair.c 4044 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4088 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
4090 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
4104 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
4106 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_dequeue()
4129 struct util_est ue; in util_est_update()
4145 * skip the util_est update. in util_est_update()
4147 ue = p->se.avg.util_est; in util_est_update()
4210 WRITE_ONCE(p->se.avg.util_est, u in util_est_update()
6856 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); cpu_util_next() local
[all...]
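The fair.c hits show the aggregation itself: util_est_enqueue() adds a task's estimate to cfs_rq->avg.util_est.enqueued, util_est_dequeue() subtracts it again (clamped at zero), and READ_ONCE()/WRITE_ONCE() keep the field safe for lockless readers such as cpu_util_next(). A minimal user-space sketch of that add/subtract pattern, with assumed field names and without the kernel's annotations:

#include <stdio.h>

/* Stand-ins for the fields touched by util_est_enqueue()/util_est_dequeue() above;
 * names are illustrative, not the SDK's. */
struct task_sketch   { unsigned int util_est_enqueued; };
struct cfs_rq_sketch { unsigned int util_est_enqueued; };

/* The runqueue-level estimate is the sum over all RUNNABLE tasks,
 * so enqueueing a task adds its contribution... */
static void util_est_enqueue_sketch(struct cfs_rq_sketch *cfs_rq, const struct task_sketch *p)
{
    cfs_rq->util_est_enqueued += p->util_est_enqueued;
}

/* ...and dequeueing subtracts it, clamping at zero so a stale per-task value
 * can never underflow the runqueue sum (the kernel does this with lsub_positive()). */
static void util_est_dequeue_sketch(struct cfs_rq_sketch *cfs_rq, const struct task_sketch *p)
{
    unsigned int sub = p->util_est_enqueued;

    cfs_rq->util_est_enqueued -= (sub < cfs_rq->util_est_enqueued) ? sub
                                                                   : cfs_rq->util_est_enqueued;
}

int main(void)
{
    struct cfs_rq_sketch rq = { 0 };
    struct task_sketch a = { 300 }, b = { 150 };

    util_est_enqueue_sketch(&rq, &a);
    util_est_enqueue_sketch(&rq, &b);
    printf("after enqueueing both tasks: %u\n", rq.util_est_enqueued); /* 450 */

    util_est_dequeue_sketch(&rq, &a);
    printf("after dequeueing task a:     %u\n", rq.util_est_enqueued); /* 150 */
    return 0;
}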
sched.h 2666 util = max_t(unsigned long, util, READ_ONCE(rq->cfs.avg.util_est.enqueued)); in cpu_util_cfs()
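This last hit shows where the estimate is consumed: cpu_util_cfs() reports a CPU's utilization as the larger of the runqueue's decayed util_avg and its util_est.enqueued, so a CPU whose tasks have only briefly gone to sleep is not under-reported. A hedged stand-alone equivalent of that max (names assumed):

#include <stdio.h>

/* Illustration only: combine the two signals the way the max_t() line above does
 * when utilization estimation is enabled. */
static unsigned long cpu_util_cfs_sketch(unsigned long util_avg,
                                         unsigned long util_est_enqueued)
{
    return (util_avg > util_est_enqueued) ? util_avg : util_est_enqueued;
}

int main(void)
{
    /* A heavy task just blocked: util_avg has started to decay, the estimate has not. */
    printf("reported utilization: %lu\n", cpu_util_cfs_sketch(120, 600)); /* 600 */
    return 0;
}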
