Lines Matching refs: rq
90 struct rq;
148 extern void calc_global_load_tick(struct rq *this_rq);
149 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
155 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
324 /* nests inside the rq lock: */
651 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
721 /* Nests inside the rq lock: */
727 struct rq *rq;
747 * earliest ready task on this rq. Caching these facilitates
760 * Tasks on this rq that can be pushed away. They are kept in
919 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
943 * struct uclamp_rq - rq's utilization clamp
944 * @value: currently active clamp values for a rq
945 * @bucket: utilization clamp buckets affecting a rq
947 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
948 * A clamp value is affecting a rq when there is at least one task RUNNABLE
962 * the metrics required to compute all the per-rq utilization clamp values.
979 struct rq {
1150 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
1173 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1175 return cfs_rq->rq;
1180 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1182 return container_of(cfs_rq, struct rq, cfs);
1186 static inline int cpu_of(struct rq *rq)
1189 return rq->cpu;
1196 extern void __update_idle_core(struct rq *rq);
1198 static inline void update_idle_core(struct rq *rq)
1201 __update_idle_core(rq);
1206 static inline void update_idle_core(struct rq *rq)
1211 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
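There is one struct rq per CPU, declared just above as the runqueues per-CPU variable, and rq_of()/cpu_of() map a cfs_rq back to its owning rq and CPU id. A minimal sketch of how these accessors fit together, assuming it is built inside kernel/sched/ with "sched.h" included; example_rq_accessors() is illustrative, not an upstream helper:

    static void example_rq_accessors(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);            /* per-CPU runqueue */
            struct cfs_rq *cfs = &rq->cfs;          /* its root CFS runqueue */

            /* rq_of() recovers the owning rq; cpu_of() its CPU id. */
            WARN_ON_ONCE(rq_of(cfs) != rq);
            WARN_ON_ONCE(cpu_of(rq) != cpu);
    }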
1219 extern void update_rq_clock(struct rq *rq);
1221 static inline u64 __rq_clock_broken(struct rq *rq)
1223 return READ_ONCE(rq->clock);
1227 * rq::clock_update_flags bits
1231 * neighbouring rq clock updates.
1237 * made to update_rq_clock() since the last time rq::lock was pinned.
1243 * if (rq->clock_update_flags >= RQCF_UPDATED)
1253 static inline void assert_clock_updated(struct rq *rq)
1259 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1262 static inline u64 rq_clock(struct rq *rq)
1264 lockdep_assert_held(&rq->lock);
1265 assert_clock_updated(rq);
1267 return rq->clock;
1270 static inline u64 rq_clock_task(struct rq *rq)
1272 lockdep_assert_held(&rq->lock);
1273 assert_clock_updated(rq);
1275 return rq->clock_task;
1291 static inline u64 rq_clock_thermal(struct rq *rq)
1293 return rq_clock_task(rq) >> sched_thermal_decay_shift;
1296 static inline void rq_clock_skip_update(struct rq *rq)
1298 lockdep_assert_held(&rq->lock);
1299 rq->clock_update_flags |= RQCF_REQ_SKIP;
1306 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1308 lockdep_assert_held(&rq->lock);
1309 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
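rq_clock() and rq_clock_task() may only be read with rq->lock held and after update_rq_clock() has run (or a skip was explicitly requested), which is what the lockdep and clock_update_flags assertions above enforce. A hedged sketch of the expected calling pattern; example_read_rq_clock() is illustrative:

    static u64 example_read_rq_clock(struct rq *rq, struct rq_flags *rf)
    {
            u64 now;

            rq_lock_irqsave(rq, rf);        /* take and pin rq->lock, IRQs off */
            update_rq_clock(rq);            /* satisfies assert_clock_updated() */
            now = rq_clock_task(rq);        /* task clock, minus irq/steal time */
            rq_unlock_irqrestore(rq, rf);

            return now;
    }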
1317 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1329 * This avoids code that has access to 'struct rq *rq' (basically everything in
1330 * the scheduler) from accidentally unlocking the rq if they do not also have a
1335 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1337 rf->cookie = lockdep_pin_lock(&rq->lock);
1340 rq->clock_update_flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP);
1345 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1348 if (rq->clock_update_flags > RQCF_ACT_SKIP) {
1353 lockdep_unpin_lock(&rq->lock, rf->cookie);
1356 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1358 lockdep_repin_lock(&rq->lock, rf->cookie);
1364 rq->clock_update_flags |= rf->clock_update_flags;
1368 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(rq->lock);
1370 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(p->pi_lock) __acquires(rq->lock);
1372 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1374 rq_unpin_lock(rq, rf);
1375 raw_spin_unlock(&rq->lock);
1378 static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) __releases(rq->lock)
1381 rq_unpin_lock(rq, rf);
1382 raw_spin_unlock(&rq->lock);
1386 static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1388 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1389 rq_pin_lock(rq, rf);
1392 static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1394 raw_spin_lock_irq(&rq->lock);
1395 rq_pin_lock(rq, rf);
1398 static inline void rq_lock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1400 raw_spin_lock(&rq->lock);
1401 rq_pin_lock(rq, rf);
1404 static inline void rq_relock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1406 raw_spin_lock(&rq->lock);
1407 rq_repin_lock(rq, rf);
1410 static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1412 rq_unpin_lock(rq, rf);
1413 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1416 static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1418 rq_unpin_lock(rq, rf);
1419 raw_spin_unlock_irq(&rq->lock);
1422 static inline void rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1424 rq_unpin_lock(rq, rf);
1425 raw_spin_unlock(&rq->lock);
1428 static inline struct rq *this_rq_lock_irq(struct rq_flags *rf) __acquires(rq->lock)
1430 struct rq *rq;
1433 rq = this_rq();
1434 rq_lock(rq, rf);
1435 return rq;
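task_rq_lock() nests p->pi_lock outside the task's rq->lock and re-checks that the task has not migrated, so the caller can safely inspect or modify p's scheduling state. A hedged sketch of the usual pattern; example_inspect_task() is illustrative:

    static void example_inspect_task(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;

            rq = task_rq_lock(p, &rf);      /* p->pi_lock + rq->lock, rq pinned */
            update_rq_clock(rq);
            /* ... read or adjust p's scheduling state here ... */
            task_rq_unlock(rq, p, &rf);
    }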
1482 static inline void queue_balance_callback(struct rq *rq, struct callback_head *head, void (*func)(struct rq *rq))
1484 lockdep_assert_held(&rq->lock);
1491 head->next = rq->balance_callback;
1492 rq->balance_callback = head;
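queue_balance_callback() chains work onto rq->balance_callback while rq->lock is held; the queued callbacks run later, once it is safe to process them. A hedged sketch of how a scheduling class might queue one (the per-CPU head and both example_* names are illustrative):

    static DEFINE_PER_CPU(struct callback_head, example_balance_head);

    static void example_balance_fn(struct rq *rq)
    {
            /* runs later, from the balance-callback processing point */
    }

    static void example_queue_balance(struct rq *rq)
    {
            lockdep_assert_held(&rq->lock);
            queue_balance_callback(rq, &per_cpu(example_balance_head, cpu_of(rq)),
                                   example_balance_fn);
    }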
1498 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1653 * holding both task_struct::pi_lock and rq::lock.
1790 static inline int task_current(struct rq *rq, struct task_struct *p)
1792 return rq->curr == p;
1795 static inline int task_running(struct rq *rq, struct task_struct *p)
1800 return task_current(rq, p);
1886 void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
1887 void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
1888 void (*yield_task)(struct rq *rq);
1889 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
1891 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1893 struct task_struct *(*pick_next_task)(struct rq *rq);
1895 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1896 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
1899 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1903 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1907 void (*rq_online)(struct rq *rq);
1908 void (*rq_offline)(struct rq *rq);
1911 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1916 * The switched_from() call is allowed to drop rq->lock, therefore we
1918 * rq->lock. They are however serialized by p->pi_lock.
1920 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1921 void (*switched_to)(struct rq *this_rq, struct task_struct *task);
1922 void (*prio_changed)(struct rq *this_rq, struct task_struct *task, int oldprio);
1924 unsigned int (*get_rr_interval)(struct rq *rq, struct task_struct *task);
1926 void (*update_curr)(struct rq *rq);
1935 void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled);
1938 void (*check_for_migration)(struct rq *rq, struct task_struct *p);
1942 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1944 WARN_ON_ONCE(rq->curr != prev);
1945 prev->sched_class->put_prev_task(rq, prev);
1948 static inline void set_next_task(struct rq *rq, struct task_struct *next)
1950 WARN_ON_ONCE(rq->curr != next);
1951 next->sched_class->set_next_task(rq, next, false);
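put_prev_task()/set_next_task() always dispatch through p->sched_class; when a task changes class, core code pairs the old class's switched_from() with the new class's switched_to() under rq->lock. A hedged sketch of that shape, simplified from the check_class_changed() pattern in core.c; example_class_change() is illustrative:

    static void example_class_change(struct rq *rq, struct task_struct *p,
                                     const struct sched_class *prev_class,
                                     int oldprio)
    {
            lockdep_assert_held(&rq->lock);

            if (prev_class != p->sched_class) {
                    if (prev_class->switched_from)
                            prev_class->switched_from(rq, p);
                    p->sched_class->switched_to(rq, p);
            } else if (oldprio != p->prio) {
                    p->sched_class->prio_changed(rq, p, oldprio);
            }
    }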
1971 static inline bool sched_stop_runnable(struct rq *rq)
1973 return rq->stop && task_on_rq_queued(rq->stop);
1976 static inline bool sched_dl_runnable(struct rq *rq)
1978 return rq->dl.dl_nr_running > 0;
1981 static inline bool sched_rt_runnable(struct rq *rq)
1983 return rq->rt.rt_queued > 0;
1986 static inline bool sched_fair_runnable(struct rq *rq)
1988 return rq->cfs.nr_running > 0;
1991 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1992 extern struct task_struct *pick_next_task_idle(struct rq *rq);
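The sched_*_runnable() predicates report whether a given class (or a higher one) already has work queued on this rq; a class's balance() hook typically returns that information so pick_next_task() knows whether pulling work for lower classes is still worthwhile. A hedged sketch in the shape of the RT balance hook; example_balance_rt() is illustrative:

    static int example_balance_rt(struct rq *rq, struct task_struct *p,
                                  struct rq_flags *rf)
    {
            /* Something at RT priority or above will be picked on this rq. */
            return sched_stop_runnable(rq) || sched_dl_runnable(rq) ||
                   sched_rt_runnable(rq);
    }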
1998 extern void trigger_load_balance(struct rq *rq);
2005 static inline void idle_set_state(struct rq *rq, struct cpuidle_state *idle_state)
2007 rq->idle_state = idle_state;
2010 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2014 return rq->idle_state;
2017 static inline void idle_set_state(struct rq *rq, struct cpuidle_state *idle_state)
2021 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2039 extern void resched_curr(struct rq *rq);
2061 extern bool sched_can_stop_tick(struct rq *rq);
2069 static inline void sched_update_tick_dependency(struct rq *rq)
2071 int cpu = cpu_of(rq);
2076 if (sched_can_stop_tick(rq)) {
2087 static inline void sched_update_tick_dependency(struct rq *rq)
2092 static inline void add_nr_running(struct rq *rq, unsigned count)
2094 unsigned prev_nr = rq->nr_running;
2096 rq->nr_running = prev_nr + count;
2098 call_trace_sched_update_nr_running(rq, count);
2102 if (prev_nr < TASK_ON_RQ_MIGRATING && rq->nr_running >= TASK_ON_RQ_MIGRATING) {
2103 if (!READ_ONCE(rq->rd->overload)) {
2104 WRITE_ONCE(rq->rd->overload, 1);
2109 sched_update_tick_dependency(rq);
2112 static inline void sub_nr_running(struct rq *rq, unsigned count)
2114 rq->nr_running -= count;
2116 call_trace_sched_update_nr_running(rq, -count);
2120 sched_update_tick_dependency(rq);
2123 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2124 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2126 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
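activate_task() and check_preempt_curr() are the core of the wakeup path once the target rq->lock is held: the class's enqueue bumps rq->nr_running via add_nr_running(), and check_preempt_curr() may flag rq->curr for rescheduling. A hedged sketch of that shape; example_wakeup_on_rq() is illustrative:

    static void example_wakeup_on_rq(struct rq *rq, struct task_struct *p,
                                     int wake_flags)
    {
            lockdep_assert_held(&rq->lock);

            update_rq_clock(rq);
            activate_task(rq, p, ENQUEUE_WAKEUP);   /* class enqueue -> add_nr_running() */
            check_preempt_curr(rq, p, wake_flags);  /* may set need_resched on rq->curr */
    }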
2138 static inline int hrtick_enabled(struct rq *rq)
2143 if (!cpu_active(cpu_of(rq))) {
2146 return hrtimer_is_hres_active(&rq->hrtick_timer);
2149 void hrtick_start(struct rq *rq, u64 delay);
2153 static inline int hrtick_enabled(struct rq *rq)
2202 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
2205 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2212 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock)
2229 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock)
2252 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2255 /* printk() doesn't work well under rq->lock */
2263 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock)
2305 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock)
2328 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock)
2338 extern void set_rq_online(struct rq *rq);
2339 extern void set_rq_offline(struct rq *rq);
2350 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock)
2364 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock)
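double_rq_lock() acquires the two rq->locks in a fixed global order (and degenerates to a single lock when rq1 == rq2), which is what lets migration paths hold both runqueues without deadlocking. A hedged sketch of a migration-style user, simplified relative to the real migration helpers; example_move_between_rqs() is illustrative:

    static void example_move_between_rqs(struct rq *src, struct rq *dst,
                                         struct task_struct *p)
    {
            unsigned long flags;

            local_irq_save(flags);          /* double_rq_lock() requires IRQs off */
            double_rq_lock(src, dst);       /* ordered acquisition of both locks */
            update_rq_clock(src);
            update_rq_clock(dst);

            deactivate_task(src, p, 0);
            set_task_cpu(p, cpu_of(dst));
            activate_task(dst, p, 0);

            double_rq_unlock(src, dst);
            local_irq_restore(flags);
    }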
2410 extern void nohz_balance_exit_idle(struct rq *rq);
2412 static inline void nohz_balance_exit_idle(struct rq *rq)
2426 struct rq *rq = cpu_rq(i);
2428 rq->dl.extra_bw += bw;
2475 * @rq: Runqueue to carry out the update for.
2495 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2507 clock = rq_clock(rq);
2509 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq)));
2515 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
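cpufreq_update_util() is called from scheduler hot paths, with this rq's lock held, to let the cpufreq governor react to a utilization change on cpu_of(rq). A hedged sketch of a typical call site; example_util_changed() and its iowait parameter are illustrative:

    static void example_util_changed(struct rq *rq, bool iowait_boost)
    {
            lockdep_assert_held(&rq->lock);

            /* Flags are advisory hints to the governor. */
            cpufreq_update_util(rq, iowait_boost ? SCHED_CPUFREQ_IOWAIT : 0);
    }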
2524 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
2525 * @rq: The rq to clamp against. Must not be NULL.
2528 * against @rq only.
2530 * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
2533 * without any clamping since uclamp aggregation at the rq level in the fast
2536 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
2540 static __always_inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p)
2555 * reset it. Similarly, no need to read the rq's min clamp.
2557 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) {
2562 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
2563 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
2583 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
2587 * Returns true if userspace opted-in to use uclamp and aggregation at rq level
2595 static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p)
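A worked example of the aggregation above: the rq-level clamps (max-aggregated over RUNNABLE tasks) are combined with the task's effective clamps by taking the max of each pair, then applied with clamp(). The numbers below are hypothetical, with SCHED_CAPACITY_SCALE == 1024:

    /*
     * rq clamps:   UCLAMP_MIN = 128, UCLAMP_MAX = 640
     * task clamps: UCLAMP_MIN = 256, UCLAMP_MAX = 512
     *
     * min_util = max(256, 128) = 256
     * max_util = max(512, 640) = 640
     *
     * uclamp_rq_util_with(rq, 100, p) == clamp(100, 256, 640) == 256
     * uclamp_rq_util_with(rq, 400, p) == clamp(400, 256, 640) == 400
     * uclamp_rq_util_with(rq, 900, p) == clamp(900, 256, 640) == 640
     */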
2651 static inline unsigned long cpu_bw_dl(struct rq *rq)
2653 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2656 static inline unsigned long cpu_util_dl(struct rq *rq)
2658 return READ_ONCE(rq->avg_dl.util_avg);
2661 static inline unsigned long cpu_util_cfs(struct rq *rq)
2663 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2666 util = max_t(unsigned long, util, READ_ONCE(rq->cfs.avg.util_est.enqueued));
2672 static inline unsigned long cpu_util_rt(struct rq *rq)
2674 return READ_ONCE(rq->avg_rt.util_avg);
2685 static inline unsigned long cpu_util_irq(struct rq *rq)
2687 return rq->avg_irq.util_avg;
2698 static inline unsigned long cpu_util_irq(struct rq *rq)
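These accessors are what frequency selection aggregates: CFS and RT utilization are summed and clamped via uclamp, while deadline contributes its reserved bandwidth (cpu_bw_dl()) rather than its PELT average. A hedged sketch of that aggregation, simplified from the schedutil-style computation (IRQ scaling omitted); example_frequency_util() is illustrative:

    static unsigned long example_frequency_util(struct rq *rq)
    {
            unsigned long max = arch_scale_cpu_capacity(cpu_of(rq));
            unsigned long util;

            util = cpu_util_cfs(rq) + cpu_util_rt(rq);
            util = uclamp_rq_util_with(rq, util, NULL);

            if (util + cpu_util_dl(rq) >= max)
                    return max;             /* the CPU is saturated anyway */

            /* For frequency selection, DL contributes its reserved bandwidth. */
            util += cpu_bw_dl(rq);

            return min(util, max);
    }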
2733 * - prior user-space memory accesses and store to rq->membarrier_state,
2734 * - store to rq->membarrier_state and following user-space memory accesses.
2735 * In the same way it provides those guarantees around store to rq->curr.
2737 static inline void membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm)
2746 if (READ_ONCE(rq->membarrier_state) == membarrier_state) {
2750 WRITE_ONCE(rq->membarrier_state, membarrier_state);
2753 static inline void membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm)
2812 static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta)
2814 rq->cum_window_demand_scaled += scaled_delta;
2815 if (unlikely((s64)rq->cum_window_demand_scaled < 0)) {
2816 rq->cum_window_demand_scaled = 0;
2823 struct rq *rq = cpu_rq(src_cpu);
2829 return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
2837 struct rq *rq = cpu_rq(cpu);
2839 return test_bit(CPU_RESERVED, &rq->walt_flags);
2844 struct rq *rq = cpu_rq(cpu);
2846 return test_and_set_bit(CPU_RESERVED, &rq->walt_flags);
2851 struct rq *rq = cpu_rq(cpu);
2853 clear_bit(CPU_RESERVED, &rq->walt_flags);
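CPU_RESERVED is a WALT-specific per-rq flag bit; because it is set with test_and_set_bit(), a caller can atomically claim a CPU and later release it with clear_bit(). A hedged sketch of that usage; example_try_reserve_cpu() is illustrative:

    static int example_try_reserve_cpu(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);

            if (test_and_set_bit(CPU_RESERVED, &rq->walt_flags))
                    return -EBUSY;          /* already reserved */

            /* ... use the reserved CPU, e.g. as a migration target ... */

            clear_bit(CPU_RESERVED, &rq->walt_flags);
            return 0;
    }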
2988 struct rq *rq = cpu_rq(cpu);
2995 util = rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
3006 static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta)
3054 extern void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, bool migrate_pinned_tasks);