Lines Matching defs:vruntime
615 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
617 s64 delta = (s64)(vruntime - max_vruntime);
619 max_vruntime = vruntime;
624 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
626 s64 delta = (s64)(vruntime - min_vruntime);
628 min_vruntime = vruntime;
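The max_vruntime()/min_vruntime() helpers at 615-628 compare 64-bit virtual runtimes through a signed delta, so the clamp stays correct even after the u64 counters wrap around. A minimal userspace sketch of the same wrap-safe clamp follows; it reuses the kernel's helper names for clarity but is a standalone illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch: clamp toward the larger/smaller value via a signed
 * delta, so the result is still right once the u64 counters wrap. */
static inline uint64_t max_vruntime(uint64_t max_vruntime, uint64_t vruntime)
{
        int64_t delta = (int64_t)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;
        return max_vruntime;
}

static inline uint64_t min_vruntime(uint64_t min_vruntime, uint64_t vruntime)
{
        int64_t delta = (int64_t)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;
        return min_vruntime;
}

int main(void)
{
        /* Near the wrap point: UINT64_MAX - 10 is "earlier than" 5 once the
         * counter has wrapped, and the signed delta sees exactly that. */
        printf("%llu\n", (unsigned long long)max_vruntime(UINT64_MAX - 10, 5));  /* 5 */
        printf("%llu\n", (unsigned long long)min_vruntime(5, UINT64_MAX - 10));  /* 18446744073709551605 */
        return 0;
}

A plain unsigned comparison would get both of these calls wrong after the wrap; the signed-delta form is why the helpers are written this way.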
636 return (s64)(a->vruntime - b->vruntime) < 0;
641 return (s64)(se->vruntime - cfs_rq->min_vruntime);
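entity_before() at 636 orders two entities with the same signed-difference idiom, and entity_key() at 641 expresses a vruntime relative to the queue's min_vruntime so the rbtree key stays small and wrap-safe. A hedged sketch of both, with the structs reduced to only the fields used here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel structures. */
struct sched_entity { uint64_t vruntime; };
struct cfs_rq { uint64_t min_vruntime; };

/* Order by vruntime through a signed difference, robust to u64 wrap. */
static inline bool entity_before(const struct sched_entity *a,
                                 const struct sched_entity *b)
{
        return (int64_t)(a->vruntime - b->vruntime) < 0;
}

/* Keying relative to min_vruntime keeps the values small and wrap-safe. */
static inline int64_t entity_key(const struct cfs_rq *cfs_rq,
                                 const struct sched_entity *se)
{
        return (int64_t)(se->vruntime - cfs_rq->min_vruntime);
}

int main(void)
{
        struct cfs_rq rq = { .min_vruntime = 1000 };
        struct sched_entity a = { .vruntime = 900 }, b = { .vruntime = 1100 };

        printf("a before b: %d\n", entity_before(&a, &b));  /* 1 */
        printf("key(a)=%lld key(b)=%lld\n",
               (long long)entity_key(&rq, &a),   /* -100 */
               (long long)entity_key(&rq, &b));  /*  100 */
        return 0;
}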
666 * se->vruntime):
782 lag = avg_vruntime(cfs_rq) - se->vruntime;
802 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
821 static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
827 s64 delta = (s64)(vruntime - min_vruntime);
830 min_vruntime = vruntime;
840 u64 vruntime = cfs_rq->min_vruntime;
844 vruntime = curr->vruntime;
851 vruntime = se->vruntime;
853 vruntime = min_vruntime(vruntime, se->vruntime);
858 __update_min_vruntime(cfs_rq, vruntime));
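update_min_vruntime() (840-858) pulls cfs_rq->min_vruntime forward toward the smallest vruntime among the current and queued entities, but never lets it move backwards. A rough standalone sketch of that monotonic update, with a plain array standing in for the rbtree and the avg_vruntime bookkeeping of __update_min_vruntime() left out:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sched_entity { uint64_t vruntime; };

struct cfs_rq {
        uint64_t min_vruntime;
        struct sched_entity *curr;     /* currently running, may be NULL */
        struct sched_entity *queued;   /* stand-in for the rbtree */
        size_t nr_queued;
};

/* Wrap-safe helpers mirroring the kernel's min_vruntime()/max_vruntime(). */
static uint64_t min_vr(uint64_t a, uint64_t b)
{
        return (int64_t)(b - a) < 0 ? b : a;
}

static uint64_t max_vr(uint64_t a, uint64_t b)
{
        return (int64_t)(b - a) > 0 ? b : a;
}

/* Move min_vruntime toward the smallest live vruntime, never backwards. */
static void update_min_vruntime(struct cfs_rq *rq)
{
        uint64_t vruntime = rq->min_vruntime;
        int seen = 0;

        if (rq->curr) {
                vruntime = rq->curr->vruntime;
                seen = 1;
        }
        for (size_t i = 0; i < rq->nr_queued; i++) {
                uint64_t v = rq->queued[i].vruntime;
                vruntime = seen ? min_vr(vruntime, v) : v;
                seen = 1;
        }

        /* Ensure we never gain time by letting the baseline slide back. */
        rq->min_vruntime = max_vr(rq->min_vruntime, vruntime);
}

int main(void)
{
        struct sched_entity curr = { .vruntime = 1500 };
        struct sched_entity queued[] = { { 1200 }, { 1800 } };
        struct cfs_rq rq = { .min_vruntime = 1000, .curr = &curr,
                             .queued = queued, .nr_queued = 2 };

        update_min_vruntime(&rq);
        printf("min_vruntime = %llu\n",
               (unsigned long long)rq.min_vruntime);  /* 1200 */
        return 0;
}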
1090 if ((s64)(se->vruntime - se->deadline) < 0)
1103 se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
1245 curr->vruntime += calc_delta_fair(delta_exec, curr);
1252 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
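In update_curr() (1245), the raw execution time delta_exec is scaled by the entity's weight before being added to vruntime, so heavier entities accumulate virtual time more slowly; the same scaling turns se->slice into the virtual deadline at 1090-1103. A simplified sketch of that accounting, assuming a nice-0 weight of 1024 and a plain division where the kernel uses a fixed-point reciprocal in __calc_delta(); account_exec() is an illustrative name, not a kernel function:

#include <stdint.h>
#include <stdio.h>

#define NICE_0_WEIGHT 1024ULL  /* assumed nice-0 weight, scaled-down units */

struct sched_entity {
        uint64_t vruntime;
        uint64_t deadline;
        uint64_t slice;    /* requested slice, ns */
        uint64_t weight;   /* load weight */
};

/* Simplified calc_delta_fair(): scale real time by NICE_0_WEIGHT/weight. */
static uint64_t calc_delta_fair(uint64_t delta, const struct sched_entity *se)
{
        if (se->weight == NICE_0_WEIGHT)
                return delta;
        return delta * NICE_0_WEIGHT / se->weight;
}

/* Charge delta_exec nanoseconds of CPU time to the entity. */
static void account_exec(struct sched_entity *se, uint64_t delta_exec)
{
        se->vruntime += calc_delta_fair(delta_exec, se);

        /* Once the old deadline has been reached, open a new one a full
         * virtual slice into the future (cf. 1090-1103 above). */
        if ((int64_t)(se->vruntime - se->deadline) >= 0)
                se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
}

int main(void)
{
        struct sched_entity heavy = { .slice = 3000000, .weight = 2048 };
        struct sched_entity light = { .slice = 3000000, .weight = 512 };

        account_exec(&heavy, 1000000);  /* 1ms of CPU each */
        account_exec(&light, 1000000);

        printf("heavy vruntime=%llu light vruntime=%llu\n",
               (unsigned long long)heavy.vruntime,   /* 500000 */
               (unsigned long long)light.vruntime);  /* 2000000 */
        return 0;
}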
3713 * re-weight without changing vruntime at !0-lag point.
3744 * So the cfs_rq contains only one entity, hence vruntime of
3746 * average vruntime @V, which means we will always re-weight
3751 * vruntime of all the entities.
3775 * on vruntime should be:
3782 if (avruntime != se->vruntime) {
3783 vlag = (s64)(avruntime - se->vruntime);
3785 se->vruntime = avruntime - vlag;
3847 * The entity's vruntime has been adjusted, so let's check
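The reweight path around 3713-3785 keeps the entity's real lag w*(V - v) invariant across a weight change: the virtual lag against the average vruntime V is rescaled by old_weight/new_weight, vruntime is recomputed from it, and the virtual deadline is rescaled the same way. A hedged sketch of just that rescaling step, with plain division in place of div_s64() and reweight_sketch() as an illustrative name:

#include <stdint.h>
#include <stdio.h>

struct sched_entity {
        uint64_t vruntime;
        uint64_t deadline;
        uint64_t weight;
};

/* Rescale virtual lag and deadline around the average vruntime V so the
 * weighted (real) lag w*(V - v) is unchanged by the weight change. */
static void reweight_sketch(struct sched_entity *se, uint64_t avruntime,
                            uint64_t new_weight)
{
        uint64_t old_weight = se->weight;
        int64_t vlag, vslice;

        if (avruntime != se->vruntime) {
                vlag = (int64_t)(avruntime - se->vruntime);
                vlag = vlag * (int64_t)old_weight / (int64_t)new_weight;
                se->vruntime = avruntime - vlag;
        }

        vslice = (int64_t)(se->deadline - avruntime);
        vslice = vslice * (int64_t)old_weight / (int64_t)new_weight;
        se->deadline = avruntime + vslice;

        se->weight = new_weight;
}

int main(void)
{
        /* Entity running 100 units of virtual time behind the average. */
        struct sched_entity se = { .vruntime = 1100, .deadline = 1300,
                                   .weight = 1024 };
        uint64_t V = 1000;

        reweight_sketch(&se, V, 2048);  /* double the weight */

        /* Virtual lag halves (100 -> 50); real lag w*(V - v) is preserved. */
        printf("vruntime=%llu deadline=%llu\n",
               (unsigned long long)se.vruntime,   /* 1050 */
               (unsigned long long)se.deadline);  /* 1150 */
        return 0;
}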
5198 u64 vslice, vruntime = avg_vruntime(cfs_rq);
5280 se->vruntime = vruntime - lag;
5293 se->deadline = se->vruntime + vslice;
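place_entity() (5198-5293) places a woken or newly created entity relative to the queue's average vruntime: the lag it carried at dequeue time is given back by subtracting it from avg_vruntime(), and the virtual deadline is opened one virtual slice later. A stripped-down sketch under those assumptions, omitting the lag dampening the kernel applies against the already-queued weight:

#include <stdint.h>
#include <stdio.h>

#define NICE_0_WEIGHT 1024ULL  /* assumed nice-0 weight */

struct sched_entity {
        uint64_t vruntime;
        uint64_t deadline;
        int64_t  vlag;     /* lag remembered at dequeue time */
        uint64_t slice;    /* requested slice, ns */
        uint64_t weight;
};

static uint64_t calc_delta_fair(uint64_t delta, const struct sched_entity *se)
{
        return se->weight == NICE_0_WEIGHT ? delta
                                           : delta * NICE_0_WEIGHT / se->weight;
}

/* Place the entity around the queue average V: give back the lag it had
 * when it was dequeued and open a fresh virtual deadline one slice out. */
static void place_entity_sketch(uint64_t avg_vruntime, struct sched_entity *se)
{
        uint64_t vslice = calc_delta_fair(se->slice, se);
        int64_t lag = se->vlag;

        se->vruntime = avg_vruntime - lag;
        se->deadline = se->vruntime + vslice;
}

int main(void)
{
        /* Entity that was owed 200 units of virtual time when it slept. */
        struct sched_entity se = { .vlag = 200, .slice = 3000000,
                                   .weight = 1024 };

        place_entity_sketch(5000, &se);
        printf("vruntime=%llu deadline=%llu\n",
               (unsigned long long)se.vruntime,   /* 4800 */
               (unsigned long long)se.deadline);  /* 3004800 */
        return 0;
}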
8452 s64 gran, vdiff = curr->vruntime - se->vruntime;
8626 * entity, update_curr() will update its vruntime, otherwise
13040 * Find delta after normalizing se's vruntime with its cfs_rq's
13044 delta = (s64)(sea->vruntime - seb->vruntime) +
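The final hits (13040-13044, the core-scheduling priority comparison) compare entities that live on different runqueues, where raw vruntimes share no common baseline; each is normalized against its own queue's min_vruntime snapshot (min_vruntime_fi in the kernel) before the wrap-safe signed difference is taken. A small sketch of that normalization:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cfs_rq_fi { uint64_t min_vruntime; };  /* stand-in for the _fi snapshot */
struct sched_entity { uint64_t vruntime; };

/* Compare entities on different runqueues: normalize each vruntime by its
 * own queue's baseline so the values are comparable, then take the
 * wrap-safe signed difference (a is "less preferred" if delta > 0). */
static bool prio_less_sketch(const struct sched_entity *sea,
                             const struct cfs_rq_fi *rqa,
                             const struct sched_entity *seb,
                             const struct cfs_rq_fi *rqb)
{
        int64_t delta = (int64_t)(sea->vruntime - seb->vruntime) +
                        (int64_t)(rqb->min_vruntime - rqa->min_vruntime);

        return delta > 0;
}

int main(void)
{
        struct cfs_rq_fi rqa = { .min_vruntime = 10000 };
        struct cfs_rq_fi rqb = { .min_vruntime = 500 };
        struct sched_entity a = { .vruntime = 10100 };  /* 100 past its baseline */
        struct sched_entity b = { .vruntime = 550 };    /*  50 past its baseline */

        printf("a less than b: %d\n", prio_less_sketch(&a, &rqa, &b, &rqb));  /* 1 */
        return 0;
}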