Lines Matching defs:rq
28 static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled);
343 struct rq *rq = rq_of(cfs_rq);
344 int cpu = cpu_of(rq);
347 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
374 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
380 * cfs rq without parent should be put
383 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
388 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
398 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
403 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
410 struct rq *rq = rq_of(cfs_rq);
416 * to the prev element but it will point to rq->leaf_cfs_rq_list
419 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) {
420 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
428 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
430 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
434 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
435 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
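For orientation, a minimal usage sketch of the iterator defined above; this is illustrative only (not a line from the file), though fair_update_blocked_fair() around line 8460 uses the same pattern:

	struct cfs_rq *cfs_rq, *pos;

	/*
	 * Walk every leaf cfs_rq hanging off this rq; the _safe variant
	 * tolerates the current entry being unlinked from
	 * rq->leaf_cfs_rq_list while we are on it.
	 */
	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
		/* per-leaf work, e.g. decaying its blocked load */
	}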
500 struct rq *rq = task_rq(p);
502 return &rq->cfs;
527 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
531 #define for_each_leaf_cfs_rq_safe(rq, cfs, pos) for ((cfs) = &(rq)->cfs, (pos) = NULL; (cfs); (cfs) = (pos))
867 switched_from_fair(rq, p);
930 static void update_curr_fair(struct rq *rq)
932 update_curr(cfs_rq_of(&rq->curr->se));
1254 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1256 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
1257 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1260 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1262 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
1263 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1615 static unsigned long cpu_load(struct rq *rq);
1616 static unsigned long cpu_runnable(struct rq *rq);
1678 struct rq *rq = cpu_rq(cpu);
1680 ns->load += cpu_load(rq);
1681 ns->runnable += cpu_runnable(rq);
1683 ns->nr_running += rq->cfs.h_nr_running;
1686 if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
1687 if (READ_ONCE(rq->numa_migrate_on) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1711 struct rq *rq = cpu_rq(env->dst_cpu);
1714 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
1726 rq = cpu_rq(env->dst_cpu);
1727 if (!xchg(&rq->numa_migrate_on, 1)) {
1738 * Clear previous best_cpu/rq numa-migrate flag, since task now
1742 rq = cpu_rq(env->best_cpu);
1743 WRITE_ONCE(rq->numa_migrate_on, 0);
1801 struct rq *dst_rq = cpu_rq(env->dst_cpu);
2059 struct rq *best_rq;
3018 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3089 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3093 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
3097 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
3112 struct rq *rq = rq_of(cfs_rq);
3114 account_numa_enqueue(rq, task_of(se));
3115 list_add(&se->group_node, &rq->cfs_tasks);
3393 struct rq *rq = rq_of(cfs_rq);
3395 if (&rq->cfs == cfs_rq) {
3410 cpufreq_update_util(rq, flags);
3450 * including the state of rq->lock, should be made.
3507 * the group entity and group rq do not have their PELT windows aligned).
3540 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3541 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3543 * align the rq as a whole would be runnable 2/3 of the time. If however we
3544 * always have at least 1 runnable task, the rq as a whole is always runnable.
3552 * We can construct a rule that adds runnable to a rq by assuming minimal
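The comment excerpted around source lines 3540-3552 reasons about how a group rq's runnable time relates to that of its tasks. As a compact restatement of that argument (a sketch, with r_i the fraction of time task i is runnable):

	max(r_1, r_2, ...)  <=  r_rq  <=  min(1, r_1 + r_2 + ...)

For the two tasks with r_1 = r_2 = 2/3 quoted in the comment, the rq as a whole is therefore runnable anywhere between 2/3 (the tasks always overlap) and 1 (at least one task is always runnable), which is why the code constructs a rule that assumes minimal overlap rather than trying to compute the exact value.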
3984 * the previous rq.
4030 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
4233 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4237 int cpu = cpu_of(rq);
4246 rq->misfit_task_load = 0;
4255 task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq)));
4258 task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq)));
4261 rq->misfit_task_load = 0;
4269 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
4294 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
4310 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4836 * directly instead of rq->clock to avoid adding additional synchronization
4837 * around rq->lock.
4951 struct rq *rq = data;
4952 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4956 cfs_rq->throttled_clock_pelt_time += rq_clock_task(rq) - cfs_rq->throttled_clock_pelt;
4969 struct rq *rq = data;
4970 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4974 cfs_rq->throttled_clock_pelt = rq_clock_task(rq);
4984 struct rq *rq = rq_of(cfs_rq);
5014 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5043 sub_nr_running(rq, task_delta);
5044 walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
5049 * throttled-list. rq->lock protects completion.
5052 cfs_rq->throttled_clock = rq_clock(rq);
5058 struct rq *rq = rq_of(cfs_rq);
5064 se = cfs_rq->tg->se[cpu_of(rq)];
5068 update_rq_clock(rq);
5071 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5076 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5126 add_nr_running(rq, task_delta);
5127 walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
5142 assert_list_leaf_cfs_rq(rq);
5145 if (rq->curr == rq->idle && rq->cfs.nr_running) {
5146 resched_curr(rq);
5158 struct rq *rq = rq_of(cfs_rq);
5161 rq_lock_irqsave(rq, &rf);
5186 rq_unlock_irqrestore(rq, &rf);
5323 /* we are under rq->lock, defer unthrottling using a timer */
5349 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
5384 * runtime as update_curr() throttling can not trigger until it's on-rq.
5566 static void __maybe_unused update_runtime_enabled(struct rq *rq)
5570 lockdep_assert_held(&rq->lock);
5576 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5586 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
5590 lockdep_assert_held(&rq->lock);
5595 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5607 * Offline rq is schedulable till CPU is completely disabled
5675 static inline void update_runtime_enabled(struct rq *rq)
5678 static inline void unthrottle_offline_cfs_rqs(struct rq *rq)
5689 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5694 SCHED_WARN_ON(task_rq(p) != rq);
5696 if (rq->cfs.h_nr_running > 1) {
5702 if (rq->curr == p) {
5703 resched_curr(rq);
5707 hrtick_start(rq, delta);
5716 static void hrtick_update(struct rq *rq)
5718 struct task_struct *curr = rq->curr;
5720 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) {
5725 hrtick_start_fair(rq, curr);
5729 static inline void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5733 static inline void hrtick_update(struct rq *rq)
5744 static inline void update_overutilized_status(struct rq *rq)
5746 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
5747 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
5748 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
5752 static inline void update_overutilized_status(struct rq *rq)
5758 static int sched_idle_rq(struct rq *rq)
5760 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && rq->nr_running);
5792 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5805 util_est_enqueue(&rq->cfs, p);
5813 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
5861 add_nr_running(rq, 1);
5862 inc_rq_walt_stats(rq, p);
5878 update_overutilized_status(rq);
5881 if (rq->curr == rq->idle)
5901 assert_list_leaf_cfs_rq(rq);
5903 hrtick_update(rq);
5912 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5918 bool was_sched_idle = sched_idle_rq(rq);
5920 util_est_dequeue(&rq->cfs, p);
5969 sub_nr_running(rq, 1);
5970 dec_rq_walt_stats(rq, p);
5973 if (unlikely(!was_sched_idle && sched_idle_rq(rq))) {
5974 rq->next_balance = jiffies;
5978 util_est_update(&rq->cfs, p, task_sleep);
5979 hrtick_update(rq);
6000 static unsigned long cpu_load(struct rq *rq)
6002 return cfs_rq_load_avg(&rq->cfs);
6018 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
6024 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6025 return cpu_load(rq);
6028 cfs_rq = &rq->cfs;
6037 static unsigned long cpu_runnable(struct rq *rq)
6039 return cfs_rq_runnable_avg(&rq->cfs);
6042 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
6048 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6049 return cpu_runnable(rq);
6052 cfs_rq = &rq->cfs;
6244 struct rq *rq = cpu_rq(i);
6245 struct cpuidle_state *idle = idle_get_state(rq);
6253 latest_idle_timestamp = rq->idle_stamp;
6255 } else if ((!idle || idle->exit_latency == min_exit_latency) && rq->idle_stamp > latest_idle_timestamp) {
6261 latest_idle_timestamp = rq->idle_stamp;
6366 void fair_update_idle_core(struct rq *rq)
6368 int core = cpu_of(rq);
6485 * average idle time for this rq (as found in rq->avg_idle).
6875 * appear in the cfs_rq->avg.util_est.enqueued of any rq,
7223 * rq->lock and can modify state directly.
7253 static int balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7255 if (rq->nr_running) {
7258 return newidle_balance(rq, rf) != 0;
7369 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
7371 struct task_struct *curr = rq->curr;
7440 resched_curr(rq);
7443 * on the rq. This can happen when a wakeup gets interleaved
7445 * point, either of which can drop the rq lock.
7450 if (unlikely(!se->on_rq || curr == rq->idle)) {
7459 struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7461 struct cfs_rq *cfs_rq = &rq->cfs;
7467 if (!sched_fair_runnable(rq)) {
7507 cfs_rq = &rq->cfs;
7552 put_prev_task(rq, prev);
7571 list_move(&p->se.group_node, &rq->cfs_tasks);
7574 if (hrtick_enabled(rq)) {
7575 hrtick_start_fair(rq, p);
7578 update_misfit_status(p, rq);
7587 new_tasks = newidle_balance(rq, rf);
7589 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
7604 * rq is about to be idle, check if we need to update the
7607 update_idle_rq_clock_pelt(rq);
7612 static struct task_struct *fair_pick_next_task_fair(struct rq *rq)
7614 return pick_next_task_fair(rq, NULL, NULL);
7620 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
7636 static void yield_task_fair(struct rq *rq)
7638 struct task_struct *curr = rq->curr;
7645 if (unlikely(rq->nr_running == 1)) {
7652 update_rq_clock(rq);
7662 rq_clock_skip_update(rq);
7668 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
7680 yield_task_fair(rq);
7859 struct rq *src_rq;
7863 struct rq *dst_rq;
7990 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
8294 * attach_task() -- attach the task detached by detach_task() to its new rq.
8296 static void attach_task(struct rq *rq, struct task_struct *p)
8298 lockdep_assert_held(&rq->lock);
8300 BUG_ON(task_rq(p) != rq);
8301 activate_task(rq, p, ENQUEUE_NOCLOCK);
8302 check_preempt_curr(rq, p, 0);
8307 * its new rq.
8309 static void attach_one_task(struct rq *rq, struct task_struct *p)
8313 rq_lock(rq, &rf);
8314 update_rq_clock(rq);
8315 attach_task(rq, p);
8316 rq_unlock(rq, &rf);
8321 * new rq.
8356 static inline bool others_have_blocked(struct rq *rq)
8358 if (READ_ONCE(rq->avg_rt.util_avg)) {
8362 if (READ_ONCE(rq->avg_dl.util_avg)) {
8366 if (thermal_load_avg(rq)) {
8371 if (READ_ONCE(rq->avg_irq.util_avg)) {
8379 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8381 rq->last_blocked_load_update_tick = jiffies;
8384 rq->has_blocked_load = 0;
8392 static inline bool others_have_blocked(struct rq *rq)
8396 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8401 static bool fair_update_blocked_others(struct rq *rq, bool *done)
8404 u64 now = rq_clock_pelt(rq);
8412 curr_class = rq->curr->sched_class;
8414 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
8416 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
8417 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
8418 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | update_irq_load_avg(rq, 0);
8420 if (others_have_blocked(rq)) {
8450 static bool fair_update_blocked_fair(struct rq *rq, bool *done)
8454 int cpu = cpu_of(rq);
8460 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
8466 if (cfs_rq == &rq->cfs) {
8501 struct rq *rq = rq_of(cfs_rq);
8502 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8541 static bool fair_update_blocked_fair(struct rq *rq, bool *done)
8543 struct cfs_rq *cfs_rq = &rq->cfs;
8563 struct rq *rq = cpu_rq(cpu);
8566 rq_lock_irqsave(rq, &rf);
8567 update_rq_clock(rq);
8569 decayed |= fair_update_blocked_others(rq, &done);
8570 decayed |= fair_update_blocked_fair(rq, &done);
8572 update_blocked_load_status(rq, !done);
8574 cpufreq_update_util(rq, 0);
8576 rq_unlock_irqrestore(rq, &rf);
8643 struct rq *rq = cpu_rq(cpu);
8648 irq = cpu_util_irq(rq);
8659 used = READ_ONCE(rq->avg_rt.util_avg);
8660 used += READ_ONCE(rq->avg_dl.util_avg);
8661 used += thermal_load_avg(rq);
8754 * Check whether the capacity of the rq has been noticeably reduced by side
8758 static inline int check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
8760 return ((rq->cpu_capacity * sd->imbalance_pct) < (rq->cpu_capacity_orig * FAIR_ONEHUNDRED));
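A worked instance of the test on line 8760 (a sketch; FAIR_ONEHUNDRED is assumed to be 100, and the imbalance_pct / cpu_capacity_orig values below are illustrative defaults, not taken from this tree):

	cpu_capacity * imbalance_pct  <  cpu_capacity_orig * 100
	cpu_capacity * 125            <  1024 * 100
	cpu_capacity                  <  819.2   (i.e. cpu_capacity <= 819)

So with a typical imbalance_pct of 125 and an original capacity of 1024, the rq is reported as capacity-pressured once RT/DL/IRQ side activity has eaten roughly 20% of the CPU.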
8764 * Check whether a rq has a misfit task and if it looks like we can actually
8768 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
8770 return rq->misfit_task_load && (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || check_cpu_capacity(rq, sd));
8905 static bool update_nohz_stats(struct rq *rq, bool force)
8908 unsigned int cpu = rq->cpu;
8910 if (!rq->has_blocked_load) {
8918 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) {
8924 return rq->has_blocked_load;
8948 struct rq *rq = cpu_rq(i);
8954 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) {
8958 sgs->group_load += cpu_load(rq);
8960 sgs->group_runnable += cpu_runnable(rq);
8961 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
8963 nr_running = rq->nr_running;
8975 sgs->nr_numa_running += rq->nr_numa_running;
8976 sgs->nr_preferred_running += rq->nr_preferred_running;
8992 if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) {
8993 sgs->group_misfit_task_load = rq->misfit_task_load;
9163 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9165 if (rq->nr_running > rq->nr_numa_running) {
9168 if (rq->nr_running > rq->nr_preferred_running) {
9179 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9214 struct rq *rq = cpu_rq(cpu);
9216 if (rq->curr != rq->idle && rq->curr != p) {
9221 * rq->nr_running can't be used but an updated version without the
9227 if (rq->ttwu_pending) {
9251 struct rq *rq = cpu_rq(i);
9254 sgs->group_load += cpu_load_without(rq, p);
9256 sgs->group_runnable += cpu_runnable_without(rq, p);
9258 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9260 nr_running = rq->nr_running - local;
9914 static struct rq *find_busiest_queue(struct lb_env *env, struct sched_group *group)
9916 struct rq *busiest = NULL, *rq;
9927 rq = cpu_rq(i);
9928 rt = fbq_classify_rq(rq);
9957 nr_running = rq->cfs.h_nr_running;
9975 load = cpu_load(rq);
9976 if (nr_running == 1 && load > env->imbalance && !check_cpu_capacity(rq, env->sd)) {
9996 busiest = rq;
10001 util = cpu_util(cpu_of(rq));
10014 busiest = rq;
10021 busiest = rq;
10030 if (rq->misfit_task_load > busiest_load) {
10031 busiest_load = rq->misfit_task_load;
10032 busiest = rq;
10151 static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle,
10157 struct rq *busiest;
10473 struct rq *busiest_rq = data;
10476 struct rq *target_rq = cpu_rq(target_cpu);
10632 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
10635 int cpu = rq->cpu;
10681 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10704 * Ensure the rq-wide value also decays but keep it at a
10705 * reasonable floor to avoid funnies with rq->avg_idle.
10707 rq->max_idle_balance_cost = max((u64)sysctl_sched_migration_cost, max_cost);
10717 rq->next_balance = next_balance;
10728 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) {
10729 nohz.next_balance = rq->next_balance;
10735 static inline int on_null_domain(struct rq *rq)
10737 return unlikely(!rcu_dereference_sched(rq->sd));
10790 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
10810 static void nohz_balancer_kick(struct rq *rq)
10815 int nr_busy, i, cpu = rq->cpu;
10819 if (unlikely(rq->idle_balance)) {
10827 nohz_balance_exit_idle(rq);
10853 if (rq->nr_running >= 2) {
10860 sd = rcu_dereference(rq->sd);
10867 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
10895 if (check_misfit_status(rq, sd)) {
10951 void nohz_balance_exit_idle(struct rq *rq)
10953 SCHED_WARN_ON(rq != this_rq());
10955 if (likely(!rq->nohz_tick_stopped)) {
10959 rq->nohz_tick_stopped = 0;
10960 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10963 set_cpu_sd_state_busy(rq->cpu);
10988 struct rq *rq = cpu_rq(cpu);
11003 nohz_balance_exit_idle(rq);
11013 * Can be set safely without rq->lock held
11015 * rq->lock is held during the check and the clear
11017 rq->has_blocked_load = 1;
11025 if (rq->nohz_tick_stopped) {
11030 if (on_null_domain(rq)) {
11034 rq->nohz_tick_stopped = 1;
11063 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle)
11073 struct rq *rq;
11116 rq = cpu_rq(balance_cpu);
11118 has_blocked_load |= update_nohz_stats(rq, true);
11124 if (time_after_eq(jiffies, rq->next_balance)) {
11127 rq_lock_irqsave(rq, &rf);
11128 update_rq_clock(rq);
11129 rq_unlock_irqrestore(rq, &rf);
11132 rebalance_domains(rq, CPU_IDLE);
11136 if (time_after(next_balance, rq->next_balance)) {
11137 next_balance = rq->next_balance;
11179 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11198 static void nohz_newidle_balance(struct rq *this_rq)
11234 static inline void nohz_balancer_kick(struct rq *rq)
11238 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11243 static inline void nohz_newidle_balance(struct rq *this_rq)
11257 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
11335 * now runnable tasks on this rq.
11351 * While browsing the domains, we released the rq lock, a task could
11384 struct rq *this_rq = this_rq();
11416 void trigger_load_balance(struct rq *rq)
11421 if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq))) {
11425 if (time_after_eq(jiffies, rq->next_balance)) {
11429 nohz_balancer_kick(rq);
11432 static void rq_online_fair(struct rq *rq)
11436 update_runtime_enabled(rq);
11439 static void rq_offline_fair(struct rq *rq)
11444 unthrottle_offline_cfs_rqs(rq);
11448 static inline int kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
11453 if (cpu_of(rq) == new_cpu) {
11458 raw_spin_lock_irqsave(&rq->lock, flags);
11459 if (!rq->active_balance) {
11460 rq->active_balance = 1;
11461 rq->push_cpu = new_cpu;
11463 rq->push_task = p;
11466 raw_spin_unlock_irqrestore(&rq->lock, flags);
11471 static void check_for_migration_fair(struct rq *rq, struct task_struct *p)
11487 if (rq->misfit_task_load) {
11488 if (rq->curr->state != TASK_RUNNING || rq->curr->nr_cpus_allowed == 1) {
11517 active_balance = kick_active_balance(rq, p, new_cpu);
11521 ret = stop_one_cpu_nowait(prev_cpu, active_load_balance_cpu_stop, rq, &rq->active_balance_work);
11541 * and everything must be accessed through the @rq and @curr passed in
11544 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
11555 task_tick_numa(rq, curr);
11558 update_misfit_status(curr, rq);
11571 struct rq *rq = this_rq();
11574 rq_lock(rq, &rf);
11575 update_rq_clock(rq);
11591 resched_curr(rq);
11595 rq_unlock(rq, &rf);
11602 static void prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
11608 if (rq->cfs.nr_running == 1) {
11617 if (rq->curr == p) {
11619 resched_curr(rq);
11622 check_preempt_curr(rq, p, 0);
11747 static void switched_from_fair(struct rq *rq, struct task_struct *p)
11752 static void switched_to_fair(struct rq *rq, struct task_struct *p)
11762 if (rq->curr == p) {
11763 resched_curr(rq);
11765 check_preempt_curr(rq, p, 0);
11775 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
11785 list_move(&se->group_node, &rq->cfs_tasks);
11912 struct rq *rq;
11917 rq = cpu_rq(i);
11919 rq_lock_irq(rq, &rf);
11920 update_rq_clock(rq);
11923 rq_unlock_irq(rq, &rf);
11930 struct rq *rq;
11947 rq = cpu_rq(cpu);
11949 raw_spin_lock_irqsave(&rq->lock, flags);
11951 raw_spin_unlock_irqrestore(&rq->lock, flags);
11958 struct rq *rq = cpu_rq(cpu);
11961 cfs_rq->rq = rq;
11973 se->cfs_rq = &rq->cfs;
12008 struct rq *rq = cpu_rq(i);
12013 rq_lock_irqsave(rq, &rf);
12014 update_rq_clock(rq);
12019 rq_unlock_irqrestore(rq, &rf);
12047 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
12056 if (rq->cfs.load.weight) {
12186 struct rq *rq = rq_of(tcfs_rq);
12190 if (stats == &rq->walt_stats) {
12191 walt_fixup_cum_window_demand(rq, tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
12197 struct rq *rq = rq_of(tcfs_rq);
12206 if (stats == &rq->walt_stats) {
12207 walt_fixup_cum_window_demand(rq, -tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
12211 static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled)
12226 /* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
12228 fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta);
12229 walt_fixup_cum_window_demand(rq, task_load_delta);
12234 static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled)
12236 fixup_walt_sched_stats_common(rq, p, updated_demand_scaled);
12276 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
12279 return rq ? &rq->avg_rt : NULL;
12286 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
12289 return rq ? &rq->avg_dl : NULL;
12296 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
12299 return rq ? &rq->avg_irq : NULL;
12306 int sched_trace_rq_cpu(struct rq *rq)
12308 return rq ? cpu_of(rq) : -1;
12312 int sched_trace_rq_cpu_capacity(struct rq *rq)
12314 return rq ?
12316 rq->cpu_capacity
12334 int sched_trace_rq_nr_running(struct rq *rq)
12336 return rq ? rq->nr_running : -1;