Lines matching references to 'se' (struct sched_entity) in kernel/sched/fair.c

378 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
380 if (unlikely(se->load.weight != NICE_0_LOAD))
381 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
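calc_delta_fair() above scales a wall-clock delta into weighted (virtual) time: an entity at NICE_0_LOAD advances vruntime one-for-one, a heavier entity advances proportionally more slowly. A minimal sketch of that proportion, using unscaled weights where nice 0 is 1024 (the kernel scales both operands by SCHED_FIXEDPOINT_SHIFT, which cancels in the ratio, and __calc_delta() uses a precomputed inverse weight rather than a literal division like this):

	/* Sketch only: the vruntime delta shrinks as the weight grows. */
	static unsigned long long fair_delta_sketch(unsigned long long delta_ns,
						    unsigned long weight)
	{
		/* e.g. 2000000 ns of runtime at weight 2048 -> 1000000 ns of vruntime */
		return delta_ns * 1024 / weight;
	}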
395 #define for_each_sched_entity(se) \
396 for (; se; se = se->parent)
498 is_same_group(struct sched_entity *se, struct sched_entity *pse)
500 if (se->cfs_rq == pse->cfs_rq)
501 return se->cfs_rq;
506 static inline struct sched_entity *parent_entity(const struct sched_entity *se)
508 return se->parent;
512 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
524 se_depth = (*se)->depth;
529 *se = parent_entity(*se);
537 while (!is_same_group(*se, *pse)) {
538 *se = parent_entity(*se);
553 static int se_is_idle(struct sched_entity *se)
555 if (entity_is_task(se))
556 return task_has_idle_policy(task_of(se));
557 return cfs_rq_is_idle(group_cfs_rq(se));
562 #define for_each_sched_entity(se) \
563 for (; se; se = NULL)
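The two for_each_sched_entity() definitions above are the group-scheduling and flat variants: with CONFIG_FAIR_GROUP_SCHED the walk climbs se->parent from a task's entity through each ancestor group entity up to the root cfs_rq, while without it there is no se->parent and the loop body runs exactly once before se becomes NULL.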
581 static inline struct sched_entity *parent_entity(struct sched_entity *se)
587 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
601 static int se_is_idle(struct sched_entity *se)
639 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
641 return (s64)(se->vruntime - cfs_rq->min_vruntime);
666 * se->vruntime):
706 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
708 unsigned long weight = scale_load_down(se->load.weight);
709 s64 key = entity_key(cfs_rq, se);
716 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
718 unsigned long weight = scale_load_down(se->load.weight);
719 s64 key = entity_key(cfs_rq, se);
777 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
781 SCHED_WARN_ON(!se->on_rq);
782 lag = avg_vruntime(cfs_rq) - se->vruntime;
784 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
785 se->vlag = clamp(lag, -limit, limit);
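update_entity_lag() above records, at dequeue time, how far the entity is ahead of or behind the queue's weighted average: lag = avg_vruntime(cfs_rq) - se->vruntime, clamped so no entity can bank more than roughly two slices of credit or debt. Rough arithmetic under assumed values (nice-0 weight, se->slice of 3 ms, TICK_NSEC below 6 ms): limit = calc_delta_fair(6 ms, se) = 6 ms, so se->vlag ends up in the range [-6 ms, +6 ms].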
802 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
805 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
818 return avg >= entity_key(cfs_rq, se) * load;
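The comparison at line 818 is the EEVDF eligibility test V >= v_i written without a division, which is what the note at line 802 is about. With v0 = cfs_rq->min_vruntime, the avg_vruntime_add()/_sub() helpers above keep running sums load = \Sum w_j and avg = \Sum w_j*(v_j - v0) (plus an in-flight adjustment for cfs_rq->curr not shown here), so:

	V >= v_i
	<=>  avg/load + v0 >= v_i
	<=>  avg >= (v_i - v0) * load
	<=>  avg >= entity_key(cfs_rq, se) * load

Multiplying through by the total load keeps the test exact, where dividing first would lose precision.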
837 struct sched_entity *se = __pick_first_entity(cfs_rq);
849 if (se) {
851 vruntime = se->vruntime;
853 vruntime = min_vruntime(vruntime, se->vruntime);
868 static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node)
872 if (deadline_gt(min_deadline, se, rse))
873 se->min_deadline = rse->min_deadline;
878 * se->min_deadline = min(se->deadline, left->min_deadline, right->min_deadline)
880 static inline bool min_deadline_update(struct sched_entity *se, bool exit)
882 u64 old_min_deadline = se->min_deadline;
883 struct rb_node *node = &se->run_node;
885 se->min_deadline = se->deadline;
886 __update_min_deadline(se, node->rb_right);
887 __update_min_deadline(se, node->rb_left);
889 return se->min_deadline == old_min_deadline;
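min_deadline_update() above maintains the augmented-rbtree invariant quoted at line 878: each node caches the smallest deadline anywhere in its subtree, and the function returns true when that cached value is unchanged so propagation toward the root can stop as soon as nothing above it can change. A standalone sketch of the same invariant with plain pointers instead of rb_node (illustrative, not kernel code):

	struct toy_node {
		unsigned long long deadline;
		unsigned long long min_deadline;
		struct toy_node *left, *right;
	};

	static void toy_update_min_deadline(struct toy_node *n)
	{
		unsigned long long m = n->deadline;

		if (n->left && n->left->min_deadline < m)
			m = n->left->min_deadline;
		if (n->right && n->right->min_deadline < m)
			m = n->right->min_deadline;
		/* e.g. deadline 30 with subtree minima 10 and 25 -> min_deadline 10 */
		n->min_deadline = m;
	}

It is this cached minimum that lets __pick_eevdf() (lines 961-997 below) discard whole subtrees whose earliest deadline cannot win.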
898 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
900 avg_vruntime_add(cfs_rq, se);
901 se->min_deadline = se->deadline;
902 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
906 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
908 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
910 avg_vruntime_sub(cfs_rq, se);
938 * se->min_deadline = min(se->deadline, se->{left,right}->min_deadline)
961 struct sched_entity *se = __node_2_se(node);
966 if (!entity_eligible(cfs_rq, se)) {
974 if (!best || deadline_gt(deadline, best, se))
975 best = se;
978 * Every se in a left branch is eligible, keep track of the
992 if (left->min_deadline == se->min_deadline)
997 if (se->deadline == se->min_deadline)
1017 struct sched_entity *se = __node_2_se(node);
1020 if (se->deadline == se->min_deadline)
1021 return se;
1025 __node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
1038 struct sched_entity *se = __pick_eevdf(cfs_rq);
1040 if (!se) {
1048 return se;
1082 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1088 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
1090 if ((s64)(se->vruntime - se->deadline) < 0)
1098 se->slice = sysctl_sched_base_slice;
1103 se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
1110 clear_buddies(cfs_rq, se);
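update_deadline() above re-arms the virtual deadline once the current one has been passed (line 1090): the slice is refreshed from sysctl_sched_base_slice and the new deadline at line 1103 is the current vruntime plus that slice converted to virtual time. Because the conversion uses the same weight scaling as vruntime accounting, every entity's deadline sits one slice of owned CPU time ahead regardless of weight; e.g. with an assumed 3 ms base slice, a weight-2048 entity gets a deadline 1.5 ms of vruntime away, which it crosses after about 3 ms of actual runtime.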
1122 void init_entity_runnable_average(struct sched_entity *se)
1124 struct sched_avg *sa = &se->avg;
1134 if (entity_is_task(se))
1135 sa->load_avg = scale_load_down(se->load.weight);
1144 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1168 struct sched_entity *se = &p->se;
1169 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1170 struct sched_avg *sa = &se->avg;
1179 attach_entity_load_avg(cfs_rq, se);
1185 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
1191 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
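The formula quoted at line 1144 seeds a new task's util_avg with its weight's share of the parent cfs_rq's utilization, and post_init_entity_util_avg() (lines 1168-1191) applies it before attaching the entity. Worked example with assumed round numbers and unscaled weights: cfs_rq->util_avg = 600, cfs_rq->load_avg = 2047 and se.load.weight = 1024 give util_avg = 600 / 2048 * 1024 ~= 300; the surrounding code (not all of it captured by this listing) then caps the result so the initial estimate cannot start out implausibly large.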
1205 void init_entity_runnable_average(struct sched_entity *se)
1262 update_curr(cfs_rq_of(&rq->curr->se));
1266 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1274 stats = __schedstats_from_se(se);
1276 if (entity_is_task(se))
1277 p = task_of(se);
1283 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1291 stats = __schedstats_from_se(se);
1294 * When the sched_schedstat changes from 0 to 1, some sched se
1295 * may already be in the runqueue; the se->statistics.wait_start
1302 if (entity_is_task(se))
1303 p = task_of(se);
1309 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1317 stats = __schedstats_from_se(se);
1319 if (entity_is_task(se))
1320 tsk = task_of(se);
1329 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1338 if (se != cfs_rq->curr)
1339 update_stats_wait_start_fair(cfs_rq, se);
1342 update_stats_enqueue_sleeper_fair(cfs_rq, se);
1346 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1356 if (se != cfs_rq->curr)
1357 update_stats_wait_end_fair(cfs_rq, se);
1359 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1360 struct task_struct *tsk = task_of(se);
1378 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1383 se->exec_start = rq_clock_task(rq_of(cfs_rq));
2766 now = p->se.exec_start;
2767 runtime = p->se.sum_exec_runtime;
2777 delta = p->se.avg.load_sum;
3281 u64 runtime = p->se.sum_exec_runtime;
3455 if (unlikely(p->se.sum_exec_runtime != runtime)) {
3456 u64 diff = p->se.sum_exec_runtime - runtime;
3528 now = curr->se.sum_exec_runtime;
3595 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3597 update_load_add(&cfs_rq->load, se->load.weight);
3599 if (entity_is_task(se)) {
3602 account_numa_enqueue(rq, task_of(se));
3603 list_add(&se->group_node, &rq->cfs_tasks);
3607 if (se_is_idle(se))
3612 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3614 update_load_sub(&cfs_rq->load, se->load.weight);
3616 if (entity_is_task(se)) {
3617 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3618 list_del_init(&se->group_node);
3622 if (se_is_idle(se))
3676 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3678 cfs_rq->avg.load_avg += se->avg.load_avg;
3679 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3683 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3685 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3686 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3693 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3695 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3698 static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
3701 unsigned long old_weight = se->load.weight;
3782 if (avruntime != se->vruntime) {
3783 vlag = (s64)(avruntime - se->vruntime);
3785 se->vruntime = avruntime - vlag;
3800 vslice = (s64)(se->deadline - avruntime);
3802 se->deadline = avruntime + vslice;
3805 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3808 bool curr = cfs_rq->curr == se;
3810 if (se->on_rq) {
3815 __dequeue_entity(cfs_rq, se);
3816 update_load_sub(&cfs_rq->load, se->load.weight);
3818 dequeue_load_avg(cfs_rq, se);
3820 if (!se->on_rq) {
3822 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
3823 * we need to scale se->vlag when w_i changes.
3825 se->vlag = div_s64(se->vlag * se->load.weight, weight);
3827 reweight_eevdf(cfs_rq, se, weight);
3830 update_load_set(&se->load, weight);
3834 u32 divider = get_pelt_divider(&se->avg);
3836 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3840 enqueue_load_avg(cfs_rq, se);
3841 if (se->on_rq) {
3842 update_load_add(&cfs_rq->load, se->load.weight);
3844 __enqueue_entity(cfs_rq, se);
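The comment at lines 3822-3825 is the key identity for reweighting an entity that is not on the runqueue: what the scheduler wants to preserve is the weighted lag lag_i = w_i*(V - v_i), but what it stores is the virtual lag vlag = V - v_i, so when the weight changes from w to w' the stored value is rescaled as vlag' = vlag * w / w' (line 3825). Worked example: a vlag of +2 ms at weight 1024 becomes +1 ms at weight 2048, leaving the product weight * vlag unchanged. For an entity still on the runqueue, reweight_eevdf() (lines 3698-3802) instead adjusts vruntime and the deadline directly around the current average vruntime.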
3859 struct sched_entity *se = &p->se;
3860 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3861 struct load_weight *load = &se->load;
3864 reweight_entity(cfs_rq, se, weight);
3984 static void update_cfs_group(struct sched_entity *se)
3986 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4000 if (unlikely(se->load.weight != shares))
4001 reweight_entity(cfs_rq_of(se), se, shares);
4005 static inline void update_cfs_group(struct sched_entity *se)
4138 void set_task_rq_fair(struct sched_entity *se,
4154 if (!(se->avg.last_update_time && prev))
4160 __update_load_avg_blocked_se(p_last_update_time, se);
4161 se->avg.last_update_time = n_last_update_time;
4232 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4234 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
4242 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4249 se->avg.util_avg = gcfs_rq->avg.util_avg;
4250 new_sum = se->avg.util_avg * divider;
4251 delta_sum = (long)new_sum - (long)se->avg.util_sum;
4252 se->avg.util_sum = new_sum;
4264 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4266 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
4274 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4280 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
4281 new_sum = se->avg.runnable_avg * divider;
4282 delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
4283 se->avg.runnable_sum = new_sum;
4294 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4308 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4318 runnable_sum += se->avg.load_sum;
4330 /* But make sure to not inflate se's runnable */
4331 runnable_sum = min(se->avg.load_sum, load_sum);
4340 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
4343 load_sum = se_weight(se) * runnable_sum;
4346 delta_avg = load_avg - se->avg.load_avg;
4350 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
4352 se->avg.load_sum = runnable_sum;
4353 se->avg.load_avg = load_avg;
4368 static inline int propagate_entity_load_avg(struct sched_entity *se)
4372 if (entity_is_task(se))
4375 gcfs_rq = group_cfs_rq(se);
4381 cfs_rq = cfs_rq_of(se);
4385 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
4386 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
4387 update_tg_cfs_load(cfs_rq, se, gcfs_rq);
4390 trace_pelt_se_tp(se);
4399 static inline bool skip_blocked_update(struct sched_entity *se)
4401 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4407 if (se->avg.load_avg || se->avg.util_avg)
4429 static inline int propagate_entity_load_avg(struct sched_entity *se)
4439 static inline void migrate_se_pelt_lag(struct sched_entity *se)
4446 if (load_avg_is_decayed(&se->avg))
4449 cfs_rq = cfs_rq_of(se);
4515 __update_load_avg_blocked_se(now, se);
4518 static void migrate_se_pelt_lag(struct sched_entity *se) {}
4564 * Because of rounding, se->util_sum might end up being +1 more than
4603 * @se: sched_entity to attach
4608 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4611 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4617 * When we attach the @se to the @cfs_rq, we must align the decay
4623 se->avg.last_update_time = cfs_rq->avg.last_update_time;
4624 se->avg.period_contrib = cfs_rq->avg.period_contrib;
4632 se->avg.util_sum = se->avg.util_avg * divider;
4634 se->avg.runnable_sum = se->avg.runnable_avg * divider;
4636 se->avg.load_sum = se->avg.load_avg * divider;
4637 if (se_weight(se) < se->avg.load_sum)
4638 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
4640 se->avg.load_sum = 1;
4642 enqueue_load_avg(cfs_rq, se);
4643 cfs_rq->avg.util_avg += se->avg.util_avg;
4644 cfs_rq->avg.util_sum += se->avg.util_sum;
4645 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4646 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4648 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4658 * @se: sched_entity to detach
4663 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4665 dequeue_load_avg(cfs_rq, se);
4666 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4667 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4672 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4673 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4678 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
4694 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4703 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4704 __update_load_avg_se(now, cfs_rq, se);
4707 decayed |= propagate_entity_load_avg(se);
4709 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4718 attach_entity_load_avg(cfs_rq, se);
4726 detach_entity_load_avg(cfs_rq, se);
4740 static void sync_entity_load_avg(struct sched_entity *se)
4742 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4746 __update_load_avg_blocked_se(last_update_time, se);
4753 static void remove_entity_load_avg(struct sched_entity *se)
4755 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4764 sync_entity_load_avg(se);
4768 cfs_rq->removed.util_avg += se->avg.util_avg;
4769 cfs_rq->removed.load_avg += se->avg.load_avg;
4770 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4792 return READ_ONCE(p->se.avg.util_avg);
4797 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4907 ue = p->se.avg.util_est;
4967 WRITE_ONCE(p->se.avg.util_est, ue);
4969 trace_sched_util_est_se_tp(&p->se);
5165 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
5170 static inline void remove_entity_load_avg(struct sched_entity *se) {}
5173 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5175 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5196 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5201 se->slice = sysctl_sched_base_slice;
5202 vslice = calc_delta_fair(se->slice, se);
5216 lag = se->vlag;
5274 lag *= load + scale_load_down(se->load.weight);
5280 se->vruntime = vruntime - lag;
5293 se->deadline = se->vruntime + vslice;
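place_entity() above tries to re-insert a woken entity with the lag it left with. Line 5216 starts from the stored vlag; lines 5274-5280 then compensate for the fact that inserting the entity moves the average it is measured against: if the queue has total weight W at average V and the entity of weight w_i is placed at v_i = V - vl, the new average becomes V' = V - w_i*vl/(W + w_i), so the realised lag V' - v_i is only vl*W/(W + w_i). Requiring that realised lag to equal the stored vlag means inflating vl by (W + w_i)/W first, which is the multiply at line 5274 (the matching divide by W sits in code not captured by this listing). Line 5293 then sets the initial virtual deadline one vslice past the placement.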
5302 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5304 bool curr = cfs_rq->curr == se;
5311 place_entity(cfs_rq, se, flags);
5324 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
5325 se_update_runnable(se);
5331 update_cfs_group(se);
5338 place_entity(cfs_rq, se, flags);
5340 account_entity_enqueue(cfs_rq, se);
5344 se->exec_start = 0;
5347 update_stats_enqueue_fair(cfs_rq, se, flags);
5349 __enqueue_entity(cfs_rq, se);
5350 se->on_rq = 1;
5369 static void __clear_buddies_next(struct sched_entity *se)
5371 for_each_sched_entity(se) {
5372 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5373 if (cfs_rq->next != se)
5380 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
5382 if (cfs_rq->next == se)
5383 __clear_buddies_next(se);
5389 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5393 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
5410 update_load_avg(cfs_rq, se, action);
5411 se_update_runnable(se);
5413 update_stats_dequeue_fair(cfs_rq, se, flags);
5415 clear_buddies(cfs_rq, se);
5417 update_entity_lag(cfs_rq, se);
5418 if (se != cfs_rq->curr)
5419 __dequeue_entity(cfs_rq, se);
5420 se->on_rq = 0;
5421 account_entity_dequeue(cfs_rq, se);
5426 update_cfs_group(se);
5429 * Now advance min_vruntime if @se was the entity holding it back,
5442 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
5444 clear_buddies(cfs_rq, se);
5447 if (se->on_rq) {
5453 update_stats_wait_end_fair(cfs_rq, se);
5454 __dequeue_entity(cfs_rq, se);
5455 update_load_avg(cfs_rq, se, UPDATE_TG);
5460 se->vlag = se->deadline;
5463 update_stats_curr_start(cfs_rq, se);
5464 cfs_rq->curr = se;
5472 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
5475 stats = __schedstats_from_se(se);
5478 se->sum_exec_runtime - se->prev_sum_exec_runtime));
5481 se->prev_sum_exec_runtime = se->sum_exec_runtime;
5485 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
5789 struct sched_entity *se;
5813 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5822 for_each_sched_entity(se) {
5823 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5825 if (!se->on_rq)
5828 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5830 if (cfs_rq_is_idle(group_cfs_rq(se)))
5838 se = parent_entity(se);
5843 for_each_sched_entity(se) {
5844 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5846 if (!se->on_rq)
5849 update_load_avg(qcfs_rq, se, 0);
5850 se_update_runnable(se);
5852 if (cfs_rq_is_idle(group_cfs_rq(se)))
5860 /* At this point se is NULL and we are at root level */
5880 struct sched_entity *se;
5884 se = cfs_rq->tg->se[cpu_of(rq)];
5908 for_each_sched_entity(se) {
5909 if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
5917 for_each_sched_entity(se) {
5918 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5920 if (se->on_rq)
5922 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
5924 if (cfs_rq_is_idle(group_cfs_rq(se)))
5936 for_each_sched_entity(se) {
5937 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5939 update_load_avg(qcfs_rq, se, UPDATE_TG);
5940 se_update_runnable(se);
5942 if (cfs_rq_is_idle(group_cfs_rq(se)))
5954 /* At this point se is NULL and we are at root level */
6658 struct sched_entity *se = &p->se;
6663 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
6664 u64 slice = se->slice;
6736 static void set_next_buddy(struct sched_entity *se);
6739 static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)
6743 if (se->latency_weight <= 0)
6762 if (next && wakeup_preempt_entity(next, se) == 1)
6763 set_next_buddy(se);
6776 struct sched_entity *se = &p->se;
6796 for_each_sched_entity(se) {
6797 if (se->on_rq)
6799 cfs_rq = cfs_rq_of(se);
6800 enqueue_entity(cfs_rq, se, flags);
6815 for_each_sched_entity(se) {
6816 cfs_rq = cfs_rq_of(se);
6818 update_load_avg(cfs_rq, se, UPDATE_TG);
6819 se_update_runnable(se);
6820 update_cfs_group(se);
6833 /* At this point se is NULL and we are at root level */
6855 check_preempt_from_idle(cfs_rq_of(&p->se), &p->se);
6864 static void set_next_buddy(struct sched_entity *se);
6874 struct sched_entity *se = &p->se;
6881 for_each_sched_entity(se) {
6882 cfs_rq = cfs_rq_of(se);
6883 dequeue_entity(cfs_rq, se, flags);
6898 se = parent_entity(se);
6903 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
6904 set_next_buddy(se);
6910 for_each_sched_entity(se) {
6911 cfs_rq = cfs_rq_of(se);
6913 update_load_avg(cfs_rq, se, UPDATE_TG);
6914 se_update_runnable(se);
6915 update_cfs_group(se);
6929 /* At this point se is NULL and we are at root level */
6986 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7009 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7016 lsub_positive(&runnable, p->se.avg.runnable_avg);
7254 sync_entity_load_avg(&p->se);
7622 sync_entity_load_avg(&p->se);
7868 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
8094 sync_entity_load_avg(&p->se);
8343 struct sched_entity *se = &p->se;
8346 remove_entity_load_avg(se);
8358 migrate_se_pelt_lag(se);
8362 se->avg.last_update_time = 0;
8369 remove_entity_load_avg(&p->se);
8383 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
8385 int latency_weight = se->latency_weight;
8392 * delay is acceptable to se.
8394 if ((se->latency_weight > 0) || (curr->latency_weight > 0))
8415 static unsigned long wakeup_gran(struct sched_entity *se)
8423 * By using 'se' instead of 'curr' we penalize light tasks, so
8424 * they get preempted easier. That is, if 'se' < 'curr' then
8426 * lighter, if otoh 'se' > 'curr' then the resulting gran will
8432 return calc_delta_fair(gran, se);
8436 * Should 'se' preempt 'curr'.
8450 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
8452 s64 gran, vdiff = curr->vruntime - se->vruntime;
8456 vdiff += wakeup_latency_gran(curr, se);
8462 gran = wakeup_gran(se);
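wakeup_preempt_entity() above keeps this tree's pre-EEVDF wakeup-granularity logic, extended by the latency-weight adjustment at line 8456, and returns a three-way verdict that callers such as check_preempt_from_idle() (line 6762) compare against 1: -1 when the woken se has no less vruntime than curr, 0 when it is ahead but within wakeup_gran(se), and 1 when it is ahead by more than the granularity. A compressed sketch of that decision with the granularity precomputed by the caller (illustrative names, not kernel code):

	/* -1: don't preempt, 0: ahead but inside the granularity, 1: preempt. */
	static int preempt_verdict(long long curr_vruntime, long long se_vruntime,
				   long long gran)
	{
		long long vdiff = curr_vruntime - se_vruntime;

		if (vdiff <= 0)
			return -1;	/* woken entity is not ahead of curr */
		if (vdiff > gran)
			return 1;	/* ahead by more than one wakeup granularity */
		return 0;
	}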
8469 static void set_next_buddy(struct sched_entity *se)
8471 for_each_sched_entity(se) {
8472 if (SCHED_WARN_ON(!se->on_rq))
8474 if (se_is_idle(se))
8476 cfs_rq_of(se)->next = se;
8486 struct sched_entity *se = &curr->se, *pse = &p->se;
8491 if (unlikely(se == pse))
8533 find_matching_se(&se, &pse);
8536 cse_is_idle = se_is_idle(se);
8548 cfs_rq = cfs_rq_of(se);
8552 * XXX pick_eevdf(cfs_rq) != se ?
8566 struct sched_entity *se;
8588 se = pick_next_entity(cfs_rq, curr);
8589 cfs_rq = group_cfs_rq(se);
8592 return task_of(se);
8600 struct sched_entity *se;
8651 se = pick_next_entity(cfs_rq, curr);
8652 cfs_rq = group_cfs_rq(se);
8655 p = task_of(se);
8663 struct sched_entity *pse = &prev->se;
8665 while (!(cfs_rq = is_same_group(se, pse))) {
8666 int se_depth = se->depth;
8674 set_next_entity(cfs_rq_of(se), se);
8675 se = parent_entity(se);
8680 set_next_entity(cfs_rq, se);
8690 se = pick_next_entity(cfs_rq, NULL);
8691 set_next_entity(cfs_rq, se);
8692 cfs_rq = group_cfs_rq(se);
8695 p = task_of(se);
8704 list_move(&p->se.group_node, &rq->cfs_tasks);
8751 struct sched_entity *se = &prev->se;
8754 for_each_sched_entity(se) {
8755 cfs_rq = cfs_rq_of(se);
8756 put_prev_entity(cfs_rq, se);
8767 struct sched_entity *se = &curr->se;
8775 clear_buddies(cfs_rq, se);
8789 se->deadline += calc_delta_fair(se->slice, se);
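On yield (line 8789) the current entity's virtual deadline is simply pushed one converted slice further out, so it stops being the earliest-deadline pick without its vruntime or lag being touched.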
8794 struct sched_entity *se = &p->se;
8797 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
8801 set_next_buddy(se);
9039 (&p->se == cfs_rq_of(&p->se)->next))
9055 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
9251 &env->src_rq->cfs_tasks, se.group_node) {
9329 p = list_last_entry(tasks, struct task_struct, se.group_node);
9384 list_add(&p->se.group_node, &env->tasks);
9407 list_move(&p->se.group_node, tasks);
9469 p = list_first_entry(tasks, struct task_struct, se.group_node);
9470 list_del_init(&p->se.group_node);
9565 struct sched_entity *se;
9578 se = cfs_rq->tg->se[cpu];
9579 if (se && !skip_blocked_update(se))
9580 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
9605 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
9613 for_each_sched_entity(se) {
9614 cfs_rq = cfs_rq_of(se);
9615 WRITE_ONCE(cfs_rq->h_load_next, se);
9620 if (!se) {
9625 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
9627 load = div64_ul(load * se->avg.load_avg,
9629 cfs_rq = group_cfs_rq(se);
9640 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9658 return p->se.avg.load_avg;
10430 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
12940 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
12942 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
12943 u64 slice = se->slice;
12969 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
12976 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
12979 for_each_sched_entity(se) {
12980 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12994 struct sched_entity *se = &p->se;
12999 se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
13006 const struct sched_entity *sea = &a->se;
13007 const struct sched_entity *seb = &b->se;
13016 * Find an se in the hierarchy for tasks a and b, such that the se's
13040 * Find delta after normalizing se's vruntime with its cfs_rq's
13076 struct sched_entity *se = &curr->se;
13078 for_each_sched_entity(se) {
13079 cfs_rq = cfs_rq_of(se);
13080 entity_tick(cfs_rq, se, queued);
13099 struct sched_entity *se = &p->se, *curr;
13111 place_entity(cfs_rq, se, ENQUEUE_INITIAL);
13145 static void propagate_entity_cfs_rq(struct sched_entity *se)
13147 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13156 se = se->parent;
13158 for_each_sched_entity(se) {
13159 cfs_rq = cfs_rq_of(se);
13161 update_load_avg(cfs_rq, se, UPDATE_TG);
13171 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
13174 static void detach_entity_cfs_rq(struct sched_entity *se)
13176 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13185 if (!se->avg.last_update_time)
13190 update_load_avg(cfs_rq, se, 0);
13191 detach_entity_load_avg(cfs_rq, se);
13193 propagate_entity_cfs_rq(se);
13196 static void attach_entity_cfs_rq(struct sched_entity *se)
13198 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13201 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
13202 attach_entity_load_avg(cfs_rq, se);
13204 propagate_entity_cfs_rq(se);
13209 struct sched_entity *se = &p->se;
13211 detach_entity_cfs_rq(se);
13216 struct sched_entity *se = &p->se;
13218 attach_entity_cfs_rq(se);
13250 struct sched_entity *se = &p->se;
13258 list_move(&se->group_node, &rq->cfs_tasks);
13262 for_each_sched_entity(se) {
13263 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13265 set_next_entity(cfs_rq, se);
13293 /* Tell se's cfs_rq has been changed -- migrated */
13294 p->se.avg.last_update_time = 0;
13307 if (tg->se)
13308 kfree(tg->se[i]);
13312 kfree(tg->se);
13317 struct sched_entity *se;
13324 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
13325 if (!tg->se)
13338 se = kzalloc_node(sizeof(struct sched_entity_stats),
13340 if (!se)
13344 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
13345 init_entity_runnable_average(se);
13358 struct sched_entity *se;
13365 se = tg->se[i];
13368 attach_entity_cfs_rq(se);
13383 if (tg->se[cpu])
13384 remove_entity_load_avg(tg->se[cpu]);
13402 struct sched_entity *se, int cpu,
13412 tg->se[cpu] = se;
13414 /* se could be NULL for root_task_group */
13415 if (!se)
13419 se->cfs_rq = &rq->cfs;
13420 se->depth = 0;
13422 se->cfs_rq = parent->my_q;
13423 se->depth = parent->depth + 1;
13426 se->my_q = cfs_rq;
13428 update_load_set(&se->load, NICE_0_LOAD);
13429 se->parent = parent;
13443 if (!tg->se[0])
13454 struct sched_entity *se = tg->se[i];
13460 for_each_sched_entity(se) {
13461 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
13462 update_cfs_group(se);
13505 struct sched_entity *se = tg->se[i];
13517 if (se->on_rq) {
13518 parent_cfs_rq = cfs_rq_of(se);
13530 for_each_sched_entity(se) {
13531 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13533 if (!se->on_rq)
13575 struct sched_entity *se = &task->se;
13583 rr_interval = NS_TO_JIFFIES(se->slice);
13769 struct sched_entity *se = &p->se;
13773 for_each_sched_entity(se) {
13774 cfs_rq = cfs_rq_of(se);
13783 if (!se) {