Lines Matching defs:se (definitions and uses of the sched_entity pointer 'se' in the CFS fair-class scheduler, kernel/sched/fair.c)

300 static inline struct task_struct *task_of(struct sched_entity *se)
302 SCHED_WARN_ON(!entity_is_task(se));
303 return container_of(se, struct task_struct, se);
307 #define for_each_sched_entity(se) for (; se; se = se->parent)
311 return p->se.cfs_rq;
315 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
317 return se->cfs_rq;
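
task_of() at line 300 recovers the task_struct that embeds a given sched_entity via container_of(), and for_each_sched_entity() at line 307 walks the group-scheduling hierarchy through se->parent. A minimal standalone sketch of the container_of() idiom, with stand-in struct layouts rather than the kernel's (only the fields needed here):

    #include <stddef.h>
    #include <stdio.h>

    /* same pointer arithmetic as the kernel's container_of() */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sched_entity { unsigned long long vruntime; };
    struct task_struct  { int pid; struct sched_entity se; };

    /* task_of(): step back from the embedded member to the enclosing task */
    static struct task_struct *task_of(struct sched_entity *se)
    {
            return container_of(se, struct task_struct, se);
    }

    int main(void)
    {
            struct task_struct t = { .pid = 42 };
            printf("pid = %d\n", task_of(&t.se)->pid);      /* prints: pid = 42 */
            return 0;
    }
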
438 static inline struct cfs_rq *is_same_group(struct sched_entity *se, struct sched_entity *pse)
440 if (se->cfs_rq == pse->cfs_rq) {
441 return se->cfs_rq;
447 static inline struct sched_entity *parent_entity(struct sched_entity *se)
449 return se->parent;
452 static void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
464 se_depth = (*se)->depth;
469 *se = parent_entity(*se);
477 while (!is_same_group(*se, *pse)) {
478 *se = parent_entity(*se);
485 static inline struct task_struct *task_of(struct sched_entity *se)
487 return container_of(se, struct task_struct, se);
490 #define for_each_sched_entity(se) for (; se; se = NULL)
497 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
499 struct task_struct *p = task_of(se);
533 static inline struct sched_entity *parent_entity(struct sched_entity *se)
538 static inline void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
591 struct sched_entity *se;
592 se = rb_entry(leftmost, struct sched_entity, run_node);
595 vruntime = se->vruntime;
597 vruntime = min_vruntime(vruntime, se->vruntime);
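
The lines above (591-597) are from update_min_vruntime(): the queue's min_vruntime tracks the smallest vruntime among the current and leftmost entities but only ever moves forward. A self-contained sketch of the wrap-safe helpers this relies on; the comparisons go through a signed difference so they stay correct after the u64 counters wrap (the numbers in main() are purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t max_vruntime(uint64_t max_vt, uint64_t vt)
    {
            if ((int64_t)(vt - max_vt) > 0)         /* wrap-safe "vt > max_vt" */
                    max_vt = vt;
            return max_vt;
    }

    static uint64_t min_vruntime(uint64_t min_vt, uint64_t vt)
    {
            if ((int64_t)(vt - min_vt) < 0)         /* wrap-safe "vt < min_vt" */
                    min_vt = vt;
            return min_vt;
    }

    int main(void)
    {
            uint64_t cfs_min = 1000, curr = 1500, leftmost = 1200;

            /* candidate: the smaller of curr and the leftmost queued entity */
            uint64_t cand = min_vruntime(curr, leftmost);

            /* min_vruntime is monotonic: it is never moved backwards */
            cfs_min = max_vruntime(cfs_min, cand);
            printf("min_vruntime = %llu\n", (unsigned long long)cfs_min); /* 1200 */
            return 0;
    }
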
612 static void fair_enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
629 if (entity_before(se, entry)) {
637 rb_link_node(&se->run_node, parent, link);
638 rb_insert_color_cached(&se->run_node, &cfs_rq->tasks_timeline, leftmost);
641 static void fair_dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
643 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
657 static struct sched_entity *fair_pick_next_entity(struct sched_entity *se)
659 struct rb_node *next = rb_next(&se->run_node);
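
fair_enqueue_entity(), fair_dequeue_entity() and fair_pick_next_entity() above keep runnable entities in a red-black tree sorted by vruntime; entity_before() at line 629 supplies the ordering, and rb_next() at line 659 yields the second-leftmost entity. A minimal sketch of the ordering predicate, again using the signed-difference trick so ordering survives counter wraparound (the struct fields are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sched_entity { uint64_t vruntime; };

    /* smaller vruntime sorts to the left of the tree and is picked first */
    static bool entity_before(const struct sched_entity *a,
                              const struct sched_entity *b)
    {
            return (int64_t)(a->vruntime - b->vruntime) < 0;
    }

    int main(void)
    {
            struct sched_entity starved = { .vruntime = 100 };
            struct sched_entity hog     = { .vruntime = 400 };

            printf("%d\n", entity_before(&starved, &hog)); /* 1: starved runs first */
            return 0;
    }
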
708 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
710 if (unlikely(se->load.weight != NICE_0_LOAD)) {
711 delta = fair_calc_delta(delta, NICE_0_LOAD, &se->load);
740 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
749 slice = fair_sched_period(nr_running + !se->on_rq);
751 for_each_sched_entity(se) {
755 cfs_rq = cfs_rq_of(se);
758 if (unlikely(!se->on_rq)) {
761 update_load_add(&lw, se->load.weight);
764 slice = fair_calc_delta(slice, se->load.weight, load);
779 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
781 return calc_delta_fair(sched_slice(cfs_rq, se), se);
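
calc_delta_fair() (line 708), sched_slice() (line 740) and sched_vslice() (line 779) scale times by entity weight: a nice-0 task (weight 1024 in the nice-to-weight table) is the reference, heavier entities get a larger share of the scheduling period and see their vruntime advance more slowly. A rough flat sketch of the arithmetic, using plain 64-bit division instead of the fixed-point fair_calc_delta() and ignoring the hierarchy walk at line 751; the period length and weights in main() are illustrative (3121 is the table weight for nice -5):

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_WEIGHT 1024ULL

    /* approximation of calc_delta_fair(): virtual time charged for
     * delta_exec ns of real runtime by an entity of the given weight */
    static uint64_t calc_delta_fair(uint64_t delta_exec, uint64_t weight)
    {
            return delta_exec * NICE_0_WEIGHT / weight;
    }

    /* approximation of sched_slice(): the entity's share of the period */
    static uint64_t sched_slice(uint64_t period, uint64_t weight,
                                uint64_t queue_weight)
    {
            return period * weight / queue_weight;
    }

    int main(void)
    {
            uint64_t period = 24000000;             /* 24 ms, example value    */
            uint64_t total  = 1024 + 3121;          /* nice 0 + nice -5 queued */

            printf("nice  0 slice: %llu ns\n",
                   (unsigned long long)sched_slice(period, 1024, total));
            printf("nice -5 slice: %llu ns\n",
                   (unsigned long long)sched_slice(period, 3121, total));

            /* 1 ms of real runtime costs the heavier task less vruntime */
            printf("vruntime for 1 ms at nice -5: %llu ns\n",
                   (unsigned long long)calc_delta_fair(1000000, 3121));
            return 0;
    }
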
791 void init_entity_runnable_average(struct sched_entity *se)
793 struct sched_avg *sa = &se->avg;
803 if (entity_is_task(se)) {
804 sa->load_avg = scale_load_down(se->load.weight);
810 static void attach_entity_cfs_rq(struct sched_entity *se);
816 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
840 struct sched_entity *se = &p->se;
841 struct cfs_rq *cfs_rq = cfs_rq_of(se);
842 struct sched_avg *sa = &se->avg;
848 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
866 attach_entity_load_avg(cfs_rq, se);
872 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
876 attach_entity_cfs_rq(se);
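
Lines 840-876 above are from post_init_entity_util_avg(), which seeds a new task's util_avg from its cfs_rq following the comment at line 816: util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight, capped so a burst of forks cannot claim more than the spare capacity. A worked example with invented numbers, assuming a full CPU capacity of 1024 and a cap of half the remaining capacity:

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    int main(void)
    {
            /* illustrative cfs_rq state and new-entity weight */
            unsigned long cfs_util_avg = 400, cfs_load_avg = 2048, se_weight = 1024;

            /* assumed cap: half of the capacity not already used on this CPU */
            unsigned long cap = (SCHED_CAPACITY_SCALE - cfs_util_avg) / 2;

            /* util_avg = cfs_rq->util_avg * se.load.weight / (cfs_rq->load_avg + 1) */
            unsigned long util = cfs_util_avg * se_weight / (cfs_load_avg + 1);
            if (util > cap)
                    util = cap;

            printf("initial util_avg = %lu (cap = %lu)\n", util, cap); /* 199, 312 */
            return 0;
    }
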
880 void init_entity_runnable_average(struct sched_entity *se)
932 update_curr(cfs_rq_of(&rq->curr->se));
935 static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
944 prev_wait_start = schedstat_val(se->statistics.wait_start);
945 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && likely(wait_start > prev_wait_start)) {
949 __schedstat_set(se->statistics.wait_start, wait_start);
952 static inline void update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
961 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
963 if (entity_is_task(se)) {
964 p = task_of(se);
971 __schedstat_set(se->statistics.wait_start, delta);
977 __schedstat_set(se->statistics.wait_max, max(schedstat_val(se->statistics.wait_max), delta));
978 __schedstat_inc(se->statistics.wait_count);
979 __schedstat_add(se->statistics.wait_sum, delta);
980 __schedstat_set(se->statistics.wait_start, 0);
983 static inline void update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
992 sleep_start = schedstat_val(se->statistics.sleep_start);
993 block_start = schedstat_val(se->statistics.block_start);
995 if (entity_is_task(se)) {
996 tsk = task_of(se);
1005 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) {
1006 __schedstat_set(se->statistics.sleep_max, delta);
1009 __schedstat_set(se->statistics.sleep_start, 0);
1010 __schedstat_add(se->statistics.sum_sleep_runtime, delta);
1023 if (unlikely(delta > schedstat_val(se->statistics.block_max))) {
1024 __schedstat_set(se->statistics.block_max, delta);
1027 __schedstat_set(se->statistics.block_start, 0);
1028 __schedstat_add(se->statistics.sum_sleep_runtime, delta);
1032 __schedstat_add(se->statistics.iowait_sum, delta);
1033 __schedstat_inc(se->statistics.iowait_count);
1055 static inline void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1065 if (se != cfs_rq->curr) {
1066 update_stats_wait_start(cfs_rq, se);
1070 update_stats_enqueue_sleeper(cfs_rq, se);
1074 static inline void update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1084 if (se != cfs_rq->curr) {
1085 update_stats_wait_end(cfs_rq, se);
1088 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1089 struct task_struct *tsk = task_of(se);
1092 __schedstat_set(se->statistics.sleep_start, rq_clock(rq_of(cfs_rq)));
1095 __schedstat_set(se->statistics.block_start, rq_clock(rq_of(cfs_rq)));
1103 static inline void update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1108 se->exec_start = rq_clock_task(rq_of(cfs_rq));
2331 now = p->se.exec_start;
2332 runtime = p->se.sum_exec_runtime;
2343 delta = p->se.avg.load_sum;
2830 u64 runtime = p->se.sum_exec_runtime;
2966 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2967 u64 diff = p->se.sum_exec_runtime - runtime;
3036 now = curr->se.sum_exec_runtime;
3107 static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3109 update_load_add(&cfs_rq->load, se->load.weight);
3111 if (entity_is_task(se)) {
3114 account_numa_enqueue(rq, task_of(se));
3115 list_add(&se->group_node, &rq->cfs_tasks);
3121 static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3123 update_load_sub(&cfs_rq->load, se->load.weight);
3125 if (entity_is_task(se)) {
3126 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3127 list_del_init(&se->group_node);
3185 static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3187 cfs_rq->avg.load_avg += se->avg.load_avg;
3188 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3191 static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3193 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3194 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3197 static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3200 static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3205 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight)
3207 if (se->on_rq) {
3209 if (cfs_rq->curr == se) {
3212 update_load_sub(&cfs_rq->load, se->load.weight);
3214 dequeue_load_avg(cfs_rq, se);
3216 update_load_set(&se->load, weight);
3220 u32 divider = get_pelt_divider(&se->avg);
3222 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3226 enqueue_load_avg(cfs_rq, se);
3227 if (se->on_rq) {
3228 update_load_add(&cfs_rq->load, se->load.weight);
3234 struct sched_entity *se = &p->se;
3235 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3236 struct load_weight *load = &se->load;
3239 reweight_entity(cfs_rq, se, weight);
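
reweight_entity() (lines 3205-3228) changes an entity's weight with a dequeue/update/re-enqueue bracket: while the entity is queued, its old weight and load averages are taken out of the cfs_rq totals, the new weight is installed (and load_avg rescaled against the PELT divider), then everything is added back. A schematic sketch of just the bracketing pattern, with simplified stand-in structs and no PELT handling:

    #include <stdio.h>

    struct load_weight { unsigned long weight; };
    struct cfs_rq_stub { struct load_weight load; };            /* stand-in */
    struct se_stub     { struct load_weight load; int on_rq; }; /* stand-in */

    static void reweight_entity(struct cfs_rq_stub *cfs_rq, struct se_stub *se,
                                unsigned long weight)
    {
            if (se->on_rq)
                    cfs_rq->load.weight -= se->load.weight; /* remove old weight   */

            se->load.weight = weight;                       /* install new weight  */

            if (se->on_rq)
                    cfs_rq->load.weight += se->load.weight; /* add new weight back */
    }

    int main(void)
    {
            struct cfs_rq_stub rq = { .load = { 4096 } };
            struct se_stub se = { .load = { 1024 }, .on_rq = 1 };

            reweight_entity(&rq, &se, 335);                  /* e.g. renice 0 -> +5 */
            printf("cfs_rq weight = %lu\n", rq.load.weight); /* 4096-1024+335=3407 */
            return 0;
    }
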
3360 static void update_cfs_group(struct sched_entity *se)
3362 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3375 if (likely(se->load.weight == shares)) {
3382 reweight_entity(cfs_rq_of(se), se, shares);
3386 static inline void update_cfs_group(struct sched_entity *se)
3452 void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next)
3468 if (!(se->avg.last_update_time && prev)) {
3491 __update_load_avg_blocked_se(p_last_update_time, se);
3492 se->avg.last_update_time = n_last_update_time;
3563 static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3565 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3574 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3580 se->avg.util_avg = gcfs_rq->avg.util_avg;
3581 se->avg.util_sum = se->avg.util_avg * divider;
3588 static inline void update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3590 long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
3599 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3605 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
3606 se->avg.runnable_sum = se->avg.runnable_avg * divider;
3613 static inline void update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3627 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3637 runnable_sum += se->avg.load_sum;
3648 /* But make sure to not inflate se's runnable */
3649 runnable_sum = min(se->avg.load_sum, load_sum);
3658 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
3661 load_sum = (s64)se_weight(se) * runnable_sum;
3664 delta = load_avg - se->avg.load_avg;
3666 se->avg.load_sum = runnable_sum;
3667 se->avg.load_avg = load_avg;
3680 static inline int propagate_entity_load_avg(struct sched_entity *se)
3684 if (entity_is_task(se)) {
3688 gcfs_rq = group_cfs_rq(se);
3695 cfs_rq = cfs_rq_of(se);
3699 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3700 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
3701 update_tg_cfs_load(cfs_rq, se, gcfs_rq);
3704 trace_pelt_se_tp(se);
3713 static inline bool skip_blocked_update(struct sched_entity *se)
3715 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3721 if (se->avg.load_avg || se->avg.util_avg) {
3747 static inline int propagate_entity_load_avg(struct sched_entity *se)
3799 * Because of rounding, se->util_sum might end up being +1 more than
3837 * @se: sched_entity to attach
3842 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3845 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3851 * When we attach the @se to the @cfs_rq, we must align the decay
3857 se->avg.last_update_time = cfs_rq->avg.last_update_time;
3858 se->avg.period_contrib = cfs_rq->avg.period_contrib;
3866 se->avg.util_sum = se->avg.util_avg * divider;
3868 se->avg.runnable_sum = se->avg.runnable_avg * divider;
3870 se->avg.load_sum = se->avg.load_avg * divider;
3871 if (se_weight(se) < se->avg.load_sum)
3872 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3874 se->avg.load_sum = 1;
3876 enqueue_load_avg(cfs_rq, se);
3877 cfs_rq->avg.util_avg += se->avg.util_avg;
3878 cfs_rq->avg.util_sum += se->avg.util_sum;
3879 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
3880 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
3882 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
3892 * @se: sched_entity to detach
3897 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3900 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3905 dequeue_load_avg(cfs_rq, se);
3906 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3908 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3911 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
3926 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3935 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
3936 __update_load_avg_se(now, cfs_rq, se);
3940 decayed |= propagate_entity_load_avg(se);
3942 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3950 attach_entity_load_avg(cfs_rq, se);
3986 static void sync_entity_load_avg(struct sched_entity *se)
3988 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3992 __update_load_avg_blocked_se(last_update_time, se);
3999 static void remove_entity_load_avg(struct sched_entity *se)
4001 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4010 sync_entity_load_avg(se);
4014 cfs_rq->removed.util_avg += se->avg.util_avg;
4015 cfs_rq->removed.load_avg += se->avg.load_avg;
4016 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4039 return READ_ONCE(p->se.avg.util_avg);
4044 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4147 ue = p->se.avg.util_est;
4210 WRITE_ONCE(p->se.avg.util_est, ue);
4212 trace_sched_util_est_se_tp(&p->se);
4278 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
4283 static inline void remove_entity_load_avg(struct sched_entity *se)
4287 static inline void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4290 static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4316 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
4319 s64 d = se->vruntime - cfs_rq->min_vruntime;
4331 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
4342 vruntime += sched_vslice(cfs_rq, se);
4361 se->vruntime = max_vruntime(se->vruntime, vruntime);
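
place_entity() (lines 4331-4361) picks the starting vruntime for an entity being (re)queued: freshly forked tasks start one virtual slice after min_vruntime so they cannot immediately starve the queue, waking sleepers are placed slightly before min_vruntime as a bounded bonus, and line 4361 ensures an already-earned vruntime is never moved backwards. A schematic sketch with the slice and sleeper threshold passed in as parameters (the real code derives them from sched_vslice() and sysctl_sched_latency):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t max_vruntime(uint64_t a, uint64_t b)
    {
            return (int64_t)(b - a) > 0 ? b : a;            /* wrap-safe max */
    }

    /* initial != 0: freshly forked task; initial == 0: entity waking from sleep */
    static uint64_t place_entity(uint64_t min_vruntime, uint64_t se_vruntime,
                                 uint64_t vslice, uint64_t thresh, int initial)
    {
            uint64_t vruntime = min_vruntime;

            if (initial)
                    vruntime += vslice;     /* debit new tasks one virtual slice */
            else
                    vruntime -= thresh;     /* small credit for waking sleepers  */

            return max_vruntime(se_vruntime, vruntime);     /* never go backwards */
    }

    int main(void)
    {
            /* illustrative ns-scale values */
            printf("fork:   %llu\n", (unsigned long long)
                   place_entity(10000000, 0, 3000000, 3000000, 1));       /* 13000000 */
            printf("wakeup: %llu\n", (unsigned long long)
                   place_entity(10000000, 6000000, 3000000, 3000000, 0)); /* 7000000 */
            return 0;
    }
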
4416 static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4419 bool curr = cfs_rq->curr == se;
4426 se->vruntime += cfs_rq->min_vruntime;
4438 se->vruntime += cfs_rq->min_vruntime;
4449 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
4450 se_update_runnable(se);
4451 update_cfs_group(se);
4452 account_entity_enqueue(cfs_rq, se);
4455 place_entity(cfs_rq, se, 0);
4459 update_stats_enqueue(cfs_rq, se, flags);
4460 check_spread(cfs_rq, se);
4462 fair_enqueue_entity(cfs_rq, se);
4464 se->on_rq = 1;
4480 static void fair_clear_buddies_last(struct sched_entity *se)
4482 for_each_sched_entity(se) {
4483 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4484 if (cfs_rq->last != se) {
4492 static void fair_clear_buddies_next(struct sched_entity *se)
4494 for_each_sched_entity(se) {
4495 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4496 if (cfs_rq->next != se) {
4504 static void fair_clear_buddies_skip(struct sched_entity *se)
4506 for_each_sched_entity(se) {
4507 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4508 if (cfs_rq->skip != se) {
4516 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
4518 if (cfs_rq->last == se) {
4519 fair_clear_buddies_last(se);
4522 if (cfs_rq->next == se) {
4523 fair_clear_buddies_next(se);
4526 if (cfs_rq->skip == se) {
4527 fair_clear_buddies_skip(se);
4533 static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4548 update_load_avg(cfs_rq, se, UPDATE_TG);
4549 se_update_runnable(se);
4551 update_stats_dequeue(cfs_rq, se, flags);
4553 clear_buddies(cfs_rq, se);
4555 if (se != cfs_rq->curr) {
4556 fair_dequeue_entity(cfs_rq, se);
4558 se->on_rq = 0;
4559 account_entity_dequeue(cfs_rq, se);
4563 * min_vruntime if @se is the one holding it back. But before doing
4564 * update_min_vruntime() again, which will discount @se's position and
4568 se->vruntime -= cfs_rq->min_vruntime;
4574 update_cfs_group(se);
4577 * Now advance min_vruntime if @se was the entity holding it back,
4593 struct sched_entity *se;
4617 se = __pick_first_entity(cfs_rq);
4618 delta = curr->vruntime - se->vruntime;
4629 static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
4632 if (se->on_rq) {
4638 update_stats_wait_end(cfs_rq, se);
4639 fair_dequeue_entity(cfs_rq, se);
4640 update_load_avg(cfs_rq, se, UPDATE_TG);
4643 update_stats_curr_start(cfs_rq, se);
4644 cfs_rq->curr = se;
4651 if (schedstat_enabled() && rq_of(cfs_rq)->cfs.load.weight >= 2 * se->load.weight) {
4652 schedstat_set(se->statistics.slice_max, max((u64)schedstat_val(se->statistics.slice_max),
4653 se->sum_exec_runtime - se->prev_sum_exec_runtime));
4656 se->prev_sum_exec_runtime = se->sum_exec_runtime;
4659 static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4671 struct sched_entity *se;
4681 se = left; /* ideally we run the leftmost entity */
4687 if (cfs_rq->skip == se) {
4690 if (se == curr) {
4693 second = fair_pick_next_entity(se);
4700 se = second;
4708 se = cfs_rq->next;
4713 se = cfs_rq->last;
4716 clear_buddies(cfs_rq, se);
4718 return se;
4986 struct sched_entity *se;
5010 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5019 for_each_sched_entity(se) {
5020 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5022 if (!se->on_rq) {
5027 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5029 update_load_avg(qcfs_rq, se, 0);
5030 se_update_runnable(se);
5042 if (!se) {
5060 struct sched_entity *se;
5064 se = cfs_rq->tg->se[cpu_of(rq)];
5084 for_each_sched_entity(se) {
5085 if (se->on_rq) {
5088 cfs_rq = cfs_rq_of(se);
5089 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
5101 for_each_sched_entity(se) {
5102 cfs_rq = cfs_rq_of(se);
5104 update_load_avg(cfs_rq, se, UPDATE_TG);
5105 se_update_runnable(se);
5125 /* At this point se is NULL and we are at root level */
5135 for_each_sched_entity(se) {
5136 cfs_rq = cfs_rq_of(se);
5691 struct sched_entity *se = &p->se;
5692 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5697 u64 slice = sched_slice(cfs_rq, se);
5698 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
5724 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) {
5770 static void set_next_buddy(struct sched_entity *se);
5772 static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)
5775 if (se->latency_weight <= 0)
5783 if (next && wakeup_preempt_entity(next, se) == 1)
5784 set_next_buddy(se);
5795 struct sched_entity *se = &p->se;
5816 for_each_sched_entity(se) {
5817 if (se->on_rq) {
5820 cfs_rq = cfs_rq_of(se);
5821 enqueue_entity(cfs_rq, se, flags);
5835 for_each_sched_entity(se) {
5836 cfs_rq = cfs_rq_of(se);
5838 update_load_avg(cfs_rq, se, UPDATE_TG);
5839 se_update_runnable(se);
5840 update_cfs_group(se);
5860 /* At this point se is NULL and we are at root level */
5882 check_preempt_from_idle(cfs_rq_of(&p->se), &p->se);
5893 for_each_sched_entity(se) {
5894 cfs_rq = cfs_rq_of(se);
5915 struct sched_entity *se = &p->se;
5922 for_each_sched_entity(se) {
5923 cfs_rq = cfs_rq_of(se);
5924 dequeue_entity(cfs_rq, se, flags);
5938 se = parent_entity(se);
5943 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) {
5944 set_next_buddy(se);
5951 for_each_sched_entity(se) {
5952 cfs_rq = cfs_rq_of(se);
5954 update_load_avg(cfs_rq, se, UPDATE_TG);
5955 se_update_runnable(se);
5956 update_cfs_group(se);
5968 /* At this point se is NULL and we are at root level */
6024 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6048 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6056 lsub_positive(&runnable, p->se.avg.runnable_avg);
6203 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
6209 schedstat_inc(p->se.statistics.nr_wakeups_affine);
6289 sync_entity_load_avg(&p->se);
6607 sync_entity_load_avg(&p->se);
6765 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
7017 sync_entity_load_avg(&p->se);
7185 static void detach_entity_cfs_rq(struct sched_entity *se);
7201 struct sched_entity *se = &p->se;
7202 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7217 se->vruntime -= min_vruntime;
7226 detach_entity_cfs_rq(&p->se);
7236 remove_entity_load_avg(&p->se);
7240 p->se.avg.last_update_time = 0;
7243 p->se.exec_start = 0;
7250 remove_entity_load_avg(&p->se);
7263 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
7265 int latency_weight = se->latency_weight;
7267 if ((se->latency_weight > 0) || (curr->latency_weight > 0))
7280 static unsigned long wakeup_gran(struct sched_entity *se)
7288 * By using 'se' instead of 'curr' we penalize light tasks, so
7289 * they get preempted easier. That is, if 'se' < 'curr' then
7291 * lighter, if otoh 'se' > 'curr' then the resulting gran will
7297 return calc_delta_fair(gran, se);
7301 * Should 'se' preempt 'curr'.
7314 static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
7316 s64 gran, vdiff = curr->vruntime - se->vruntime;
7319 vdiff += wakeup_latency_gran(curr, se);
7325 gran = wakeup_gran(se);
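
wakeup_preempt_entity() (lines 7314-7325) decides whether waking entity 'se' should preempt 'curr': it returns 1 only when curr's vruntime leads se's by more than a weight-scaled wakeup granularity, 0 when se is ahead but within the granularity, and -1 when se is not ahead at all. A minimal sketch of that three-way decision, with the granularity passed in directly instead of calling wakeup_gran(), and without the latency-weight adjustment applied at line 7319:

    #include <stdint.h>
    #include <stdio.h>

    /*
     *  1 -> se is ahead of curr by more than gran: preempt
     *  0 -> se is ahead, but not by enough to bother
     * -1 -> se is not ahead of curr at all
     */
    static int wakeup_preempt_entity(uint64_t curr_vruntime, uint64_t se_vruntime,
                                     int64_t gran)
    {
            int64_t vdiff = (int64_t)(curr_vruntime - se_vruntime);

            if (vdiff <= 0)
                    return -1;
            if (vdiff > gran)
                    return 1;
            return 0;
    }

    int main(void)
    {
            int64_t gran = 1000000;                         /* 1 ms, illustrative */

            printf("%d\n", wakeup_preempt_entity(5000000, 3000000, gran)); /*  1 */
            printf("%d\n", wakeup_preempt_entity(5000000, 4500000, gran)); /*  0 */
            printf("%d\n", wakeup_preempt_entity(5000000, 6000000, gran)); /* -1 */
            return 0;
    }
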
7333 static void set_last_buddy(struct sched_entity *se)
7335 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) {
7339 for_each_sched_entity(se) {
7340 if (SCHED_WARN_ON(!se->on_rq)) {
7343 cfs_rq_of(se)->last = se;
7347 static void set_next_buddy(struct sched_entity *se)
7349 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) {
7353 for_each_sched_entity(se) {
7354 if (SCHED_WARN_ON(!se->on_rq)) {
7357 cfs_rq_of(se)->next = se;
7361 static void set_skip_buddy(struct sched_entity *se)
7363 for_each_sched_entity(se) cfs_rq_of(se)->skip = se;
7372 struct sched_entity *se = &curr->se, *pse = &p->se;
7377 if (unlikely(se == pse)) {
7423 find_matching_se(&se, &pse);
7424 update_curr(cfs_rq_of(se));
7426 if (wakeup_preempt_entity(se, pse) == 1) {
7450 if (unlikely(!se->on_rq || curr == rq->idle)) {
7454 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) {
7455 set_last_buddy(se);
7462 struct sched_entity *se;
7517 se = pick_next_entity(cfs_rq, curr);
7518 cfs_rq = group_cfs_rq(se);
7521 p = task_of(se);
7528 struct sched_entity *pse = &prev->se;
7530 while (!(cfs_rq = is_same_group(se, pse))) {
7531 int se_depth = se->depth;
7539 set_next_entity(cfs_rq_of(se), se);
7540 se = parent_entity(se);
7545 set_next_entity(cfs_rq, se);
7556 se = pick_next_entity(cfs_rq, NULL);
7557 set_next_entity(cfs_rq, se);
7558 cfs_rq = group_cfs_rq(se);
7561 p = task_of(se);
7571 list_move(&p->se.group_node, &rq->cfs_tasks);
7622 struct sched_entity *se = &prev->se;
7625 for_each_sched_entity(se) {
7626 cfs_rq = cfs_rq_of(se);
7627 put_prev_entity(cfs_rq, se);
7640 struct sched_entity *se = &curr->se;
7649 clear_buddies(cfs_rq, se);
7665 set_skip_buddy(se);
7670 struct sched_entity *se = &p->se;
7673 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) {
7678 set_next_buddy(se);
7909 (&p->se == cfs_rq_of(&p->se)->next || &p->se == cfs_rq_of(&p->se)->last)) {
7920 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
8017 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
8056 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
8074 schedstat_inc(p->se.statistics.nr_forced_migrations);
8079 schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
8115 list_for_each_entry_reverse(p, &env->src_rq->cfs_tasks, se.group_node)
8175 p = list_last_entry(tasks, struct task_struct, se.group_node);
8246 list_add(&p->se.group_node, &env->tasks);
8271 list_move(&p->se.group_node, tasks);
8333 p = list_first_entry(tasks, struct task_struct, se.group_node);
8334 list_del_init(&p->se.group_node);
8461 struct sched_entity *se;
8472 se = cfs_rq->tg->se[cpu];
8473 if (se && !skip_blocked_update(se)) {
8474 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
8502 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8511 for_each_sched_entity(se) {
8512 cfs_rq = cfs_rq_of(se);
8513 WRITE_ONCE(cfs_rq->h_load_next, se);
8519 if (!se) {
8524 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
8526 load = div64_ul(load * se->avg.load_avg, cfs_rq_load_avg(cfs_rq) + 1);
8527 cfs_rq = group_cfs_rq(se);
8538 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, cfs_rq_load_avg(cfs_rq) + 1);
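
task_h_load() at line 8538 and the update loop above it (lines 8502-8527) compute a task's load as seen from the root: walking down the group hierarchy, each level's h_load is the parent's h_load scaled by the group entity's share, load = load * se->avg.load_avg / (cfs_rq_load_avg + 1), and the task's own load_avg is scaled the same way at the end. A worked example with two levels and invented numbers:

    #include <stdio.h>

    int main(void)
    {
            /* at the root, h_load is simply the root cfs_rq's load_avg */
            unsigned long root_load_avg = 2048, h_load = root_load_avg;

            /* group entity A: load_avg 512, queued on the root cfs_rq */
            unsigned long se_a_load_avg = 512;
            h_load = h_load * se_a_load_avg / (root_load_avg + 1);     /* 511 */

            /* task p: load_avg 300, queued on group A's cfs_rq (load_avg 600) */
            unsigned long p_load_avg = 300, group_load_avg = 600;
            unsigned long task_h_load =
                    h_load * p_load_avg / (group_load_avg + 1);        /* 255 */

            printf("task_h_load = %lu\n", task_h_load);
            return 0;
    }
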
8556 return p->se.avg.load_avg;
9194 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
11547 struct sched_entity *se = &curr->se;
11549 for_each_sched_entity(se) {
11550 cfs_rq = cfs_rq_of(se);
11551 entity_tick(cfs_rq, se, queued);
11570 struct sched_entity *se = &p->se, *curr;
11581 se->vruntime = curr->vruntime;
11583 place_entity(cfs_rq, se, 1);
11585 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
11590 swap(curr->vruntime, se->vruntime);
11594 se->vruntime -= cfs_rq->min_vruntime;
11628 struct sched_entity *se = &p->se;
11648 if (!se->sum_exec_runtime || (p->state == TASK_WAKING && p->sched_remote_wakeup)) {
11660 static void propagate_entity_cfs_rq(struct sched_entity *se)
11664 list_add_leaf_cfs_rq(cfs_rq_of(se));
11667 se = se->parent;
11669 for_each_sched_entity(se) {
11670 cfs_rq = cfs_rq_of(se);
11672 update_load_avg(cfs_rq, se, UPDATE_TG);
11683 static void propagate_entity_cfs_rq(struct sched_entity *se)
11688 static void detach_entity_cfs_rq(struct sched_entity *se)
11690 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11693 update_load_avg(cfs_rq, se, 0);
11694 detach_entity_load_avg(cfs_rq, se);
11696 propagate_entity_cfs_rq(se);
11699 static void attach_entity_cfs_rq(struct sched_entity *se)
11701 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11708 se->depth = se->parent ? se->parent->depth + 1 : 0;
11712 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
11713 attach_entity_load_avg(cfs_rq, se);
11715 propagate_entity_cfs_rq(se);
11720 struct sched_entity *se = &p->se;
11721 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11728 place_entity(cfs_rq, se, 0);
11729 se->vruntime -= cfs_rq->min_vruntime;
11732 detach_entity_cfs_rq(se);
11737 struct sched_entity *se = &p->se;
11738 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11740 attach_entity_cfs_rq(se);
11743 se->vruntime += cfs_rq->min_vruntime;
11777 struct sched_entity *se = &p->se;
11785 list_move(&se->group_node, &rq->cfs_tasks);
11789 for_each_sched_entity(se) {
11790 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11792 set_next_entity(cfs_rq, se);
11813 struct sched_entity *se = &p->se;
11816 se->depth = se->parent ? se->parent->depth + 1 : 0;
11825 /* Signal that se's cfs_rq has changed -- the task has migrated */
11826 p->se.avg.last_update_time = 0;
11855 if (tg->se) {
11856 kfree(tg->se[i]);
11861 kfree(tg->se);
11866 struct sched_entity *se;
11874 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
11875 if (!tg->se) {
11890 se = kzalloc_node(sizeof(struct sched_entity), GFP_KERNEL, cpu_to_node(i));
11891 if (!se) {
11896 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
11897 init_entity_runnable_average(se);
11910 struct sched_entity *se;
11918 se = tg->se[i];
11921 attach_entity_cfs_rq(se);
11935 if (tg->se[cpu]) {
11936 remove_entity_load_avg(tg->se[cpu]);
11955 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
11965 tg->se[cpu] = se;
11967 /* se could be NULL for root_task_group */
11968 if (!se) {
11973 se->cfs_rq = &rq->cfs;
11974 se->depth = 0;
11976 se->cfs_rq = parent->my_q;
11977 se->depth = parent->depth + 1;
11980 se->my_q = cfs_rq;
11982 update_load_set(&se->load, NICE_0_LOAD);
11983 se->parent = parent;
11995 if (!tg->se[0]) {
12009 struct sched_entity *se = tg->se[i];
12015 for_each_sched_entity(se) {
12016 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
12017 update_cfs_group(se);
12049 struct sched_entity *se = &task->se;
12057 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
12214 struct sched_entity *se = &p->se;
12217 for_each_sched_entity(se) {
12218 cfs_rq = cfs_rq_of(se);
12227 if (!se) {