Lines matching defs:rq (uses of struct rq in the RT scheduling class, kernel/sched/rt.c)

132 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
134 return rt_rq->rq;
142 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
146 return rt_rq->rq;
174 struct rq *rq = cpu_rq(cpu);
178 rt_rq->rq = rq;
189 rt_se->rt_rq = &rq->rt;
250 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
252 return container_of(rt_rq, struct rq, rt);
255 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
264 struct rq *rq = rq_of_rt_se(rt_se);
266 return &rq->rt;
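
The two builds above differ only in how the owning runqueue is found: with RT group scheduling each rt_rq carries an explicit back-pointer (lines 134/146 return rt_rq->rq), while the flat build recovers it arithmetically, since every struct rq embeds its rt_rq (line 252). A minimal user-space sketch of that container_of() form, with stand-in struct layouts rather than the kernel's definitions:

/* User-space illustration only: stand-in struct layouts, not the kernel's. */
#include <stddef.h>
#include <stdio.h>

/* Same pointer arithmetic the kernel macro performs. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq { int rt_nr_running; };		/* stand-in fields only */
struct rq   { int cpu; struct rt_rq rt; };	/* rt is embedded, as in struct rq */

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
	struct rq rq = { .cpu = 3 };

	/* Recovers the enclosing rq from a pointer to its embedded rt_rq. */
	printf("cpu=%d\n", rq_of_rt_rq(&rq.rt)->cpu);	/* prints cpu=3 */
	return 0;
}

The recovery is pure pointer math, so the flat build needs no extra storage in rt_rq at all.
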
281 static void pull_rt_task(struct rq *this_rq);
283 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
286 * Try to pull RT tasks here if we lower this rq's prio and cpu is not
289 return rq->rt.highest_prio.curr > prev->prio && !cpu_isolated(cpu_of(rq));
292 static inline int rt_overloaded(struct rq *rq)
294 return atomic_read(&rq->rd->rto_count);
297 static inline void rt_set_overload(struct rq *rq)
299 if (!rq->online) {
303 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
314 atomic_inc(&rq->rd->rto_count);
317 static inline void rt_clear_overload(struct rq *rq)
319 if (!rq->online) {
324 atomic_dec(&rq->rd->rto_count);
325 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
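
rt_set_overload()/rt_clear_overload() follow a small bookkeeping pattern: publish this CPU in the root domain's rto_mask (line 303), then bump an atomic rto_count (line 314) so that rt_overloaded() (line 294) is a single counter read instead of a mask scan; clearing runs in the opposite order (lines 324/325). A simplified user-space sketch of that pattern, with a plain bitmask standing in for the cpumask and no modelling of the write barrier mainline places between the two updates:

/* User-space sketch: names and types are stand-ins, not the kernel's. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct root_domain {
	unsigned long rto_mask;		/* stand-in for the rto_mask cpumask */
	atomic_int rto_count;		/* how many CPUs are RT-overloaded */
};

static void rt_set_overload(struct root_domain *rd, int cpu)
{
	rd->rto_mask |= 1UL << cpu;		/* publish the overloaded CPU first */
	atomic_fetch_add(&rd->rto_count, 1);
}

static void rt_clear_overload(struct root_domain *rd, int cpu)
{
	atomic_fetch_sub(&rd->rto_count, 1);	/* counter first, then the mask */
	rd->rto_mask &= ~(1UL << cpu);
}

static bool rt_overloaded(struct root_domain *rd)
{
	return atomic_load(&rd->rto_count) != 0;	/* one read, no mask scan */
}

int main(void)
{
	struct root_domain rd = { 0 };

	rt_set_overload(&rd, 2);
	printf("overloaded=%d\n", rt_overloaded(&rd));	/* 1 */
	rt_clear_overload(&rd, 2);
	printf("overloaded=%d\n", rt_overloaded(&rd));	/* 0 */
	return 0;
}

Keeping the counter next to the mask lets pull_rt_task() bail out cheaply in the common, non-overloaded case before ever touching another runqueue.
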
379 static inline int has_pushable_tasks(struct rq *rq)
381 return !plist_head_empty(&rq->rt.pushable_tasks);
387 static void push_rt_tasks(struct rq *);
388 static void pull_rt_task(struct rq *);
390 static inline void rt_queue_push_tasks(struct rq *rq)
392 if (!has_pushable_tasks(rq)) {
396 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
399 static inline void rt_queue_pull_task(struct rq *rq)
401 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
404 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
406 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
408 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
411 if (p->prio < rq->rt.highest_prio.next) {
412 rq->rt.highest_prio.next = p->prio;
416 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
418 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
421 if (has_pushable_tasks(rq)) {
422 p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
423 rq->rt.highest_prio.next = p->prio;
425 rq->rt.highest_prio.next = MAX_RT_PRIO;
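
enqueue_pushable_task()/dequeue_pushable_task() keep two things in sync: a priority-ordered plist of pushable tasks and the cached rq->rt.highest_prio.next, which always mirrors the list head and falls back to MAX_RT_PRIO when the list empties (line 425). A condensed user-space sketch of that invariant, using a plain sorted singly-linked list in place of the kernel's plist and reducing a task to its priority; the kernel's removal of a stale entry before re-adding it with a new key (lines 406/408) is omitted:

/* User-space sketch of the invariant only; not the kernel's plist API. */
#include <stdio.h>

#define MAX_RT_PRIO 100		/* lower value == higher RT priority */

struct task { int prio; struct task *next; };

struct rt_rq_sketch {
	struct task *pushable;	/* sorted by prio, best (lowest value) first */
	int highest_prio_next;	/* mirrors the head, MAX_RT_PRIO when empty */
};

static void enqueue_pushable(struct rt_rq_sketch *rt, struct task *p)
{
	struct task **pos = &rt->pushable;

	while (*pos && (*pos)->prio <= p->prio)	/* keep the list sorted */
		pos = &(*pos)->next;
	p->next = *pos;
	*pos = p;

	if (p->prio < rt->highest_prio_next)	/* new best push candidate */
		rt->highest_prio_next = p->prio;
}

static void dequeue_pushable(struct rt_rq_sketch *rt, struct task *p)
{
	struct task **pos = &rt->pushable;

	while (*pos && *pos != p)
		pos = &(*pos)->next;
	if (*pos)
		*pos = p->next;

	/* Recompute the cache from the new head, as dequeue_pushable_task() does. */
	rt->highest_prio_next = rt->pushable ? rt->pushable->prio : MAX_RT_PRIO;
}

int main(void)
{
	struct rt_rq_sketch rt = { NULL, MAX_RT_PRIO };
	struct task a = { .prio = 40 }, b = { .prio = 20 };

	enqueue_pushable(&rt, &a);
	enqueue_pushable(&rt, &b);
	printf("next=%d\n", rt.highest_prio_next);	/* 20 */
	dequeue_pushable(&rt, &b);
	printf("next=%d\n", rt.highest_prio_next);	/* 40 */
	return 0;
}

The cached value lets callers such as should_honor_rt_sync() (line 1456) test the best pushable priority without walking the list.
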
431 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
435 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
447 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
452 static inline void pull_rt_task(struct rq *this_rq)
456 static inline void rt_queue_push_tasks(struct rq *rq)
540 #define cycle_each_rt_rq(rt_rq, iter, rq)
543 (iter = next_task_group(iter)) && (rt_rq = iter->rt_rq[cpu_of(rq)]);)
559 struct rq *rq = rq_of_rt_rq(rt_rq);
562 int cpu = cpu_of(rq);
574 resched_curr(rq);
649 #define cycle_each_rt_rq(rt_rq, iter, rq) for ((void)(iter), (rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)
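
Without RT group scheduling there is exactly one rt_rq per runqueue, so the line-649 form of cycle_each_rt_rq() degenerates into a loop whose body runs once: rt_rq starts at &rq->rt and is forced to NULL after the first pass, and (void)(iter) merely keeps the unused iterator from warning. A standalone sketch of the same idiom:

/* User-space sketch with stand-in types; same shape as the one-rt_rq macro. */
#include <stdio.h>

struct rt_rq { int rt_nr_running; };
struct rq { struct rt_rq rt; };		/* one embedded rt_rq per rq */

#define cycle_each_rt_rq(rt_rq, iter, rq) \
	for ((void)(iter), (rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)

int main(void)
{
	struct rq rq = { .rt = { .rt_nr_running = 1 } };
	struct rt_rq *rt_rq;
	int iter = 0;		/* unused; kept only for the common iteration API */

	cycle_each_rt_rq(rt_rq, iter, &rq)
		printf("visited rt_rq with %d runnable\n", rt_rq->rt_nr_running);
	return 0;
}

Both macro forms present the same iteration interface, so callers such as __disable_runtime() and __enable_runtime() (lines 774/862) are written once.
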
660 struct rq *rq = rq_of_rt_rq(rt_rq);
667 resched_curr(rq);
731 * or __disable_runtime() below sets a specific rq to inf to
764 static void __disable_runtime(struct rq *rq)
766 struct root_domain *rd = rq->rd;
774 cycle_each_rt_rq(rt_rq, iter, rq) {
850 static void __enable_runtime(struct rq *rq)
862 cycle_each_rt_rq(rt_rq, iter, rq) {
914 struct rq *rq = rq_of_rt_rq(rt_rq);
918 * When span == cpu_online_mask, taking each rq->lock
931 raw_spin_lock(&rq->lock);
932 update_rq_clock(rq);
954 if (rt_rq->rt_nr_running && rq->curr == rq->idle) {
955 rq_clock_cancel_skipupdate(rq);
975 raw_spin_unlock(&rq->lock);
1059 static void update_curr_rt(struct rq *rq)
1061 struct task_struct *curr = rq->curr;
1070 now = rq_clock_task(rq);
1097 resched_curr(rq);
1109 struct rq *rq = rq_of_rt_rq(rt_rq);
1111 BUG_ON(&rq->rt != rt_rq);
1117 BUG_ON(!rq->nr_running);
1119 sub_nr_running(rq, count);
1125 struct rq *rq = rq_of_rt_rq(rt_rq);
1127 BUG_ON(&rq->rt != rt_rq);
1138 add_nr_running(rq, rt_rq->rt_nr_running);
1143 cpufreq_update_util(rq, 0);
1150 struct rq *rq = rq_of_rt_rq(rt_rq);
1154 * Change rq's cpupri only if rt_rq is the top queue.
1156 if (&rq->rt != rt_rq) {
1160 if (rq->online && prio < prev_prio) {
1161 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1167 struct rq *rq = rq_of_rt_rq(rt_rq);
1171 * Change rq's cpupri only if rt_rq is the top queue.
1173 if (&rq->rt != rt_rq) {
1177 if (rq->online && rt_rq->highest_prio.curr != prev_prio) {
1178 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1425 struct rq *rq = rq_of_rt_se(rt_se);
1429 enqueue_top_rt_rq(&rq->rt);
1434 struct rq *rq = rq_of_rt_se(rt_se);
1445 enqueue_top_rt_rq(&rq->rt);
1449 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync)
1456 return sync && task_has_rt_policy(rq->curr) && p->prio <= rq->rt.highest_prio.next && rq->rt.rt_nr_running <= 2;
1459 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync)
1468 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1478 walt_inc_cumulative_runnable_avg(rq, p);
1480 if (!task_current(rq, p) && p->nr_cpus_allowed > 1 && !should_honor_rt_sync(rq, p, sync)) {
1481 enqueue_pushable_task(rq, p);
1485 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1489 update_curr_rt(rq);
1491 walt_dec_cumulative_runnable_avg(rq, p);
1493 dequeue_pushable_task(rq, p);
1514 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1525 static void yield_task_rt(struct rq *rq)
1527 requeue_task_rt(rq, rq->curr, 0);
1536 struct rq *rq;
1537 struct rq *this_cpu_rq;
1548 rq = cpu_rq(cpu);
1551 curr = READ_ONCE(rq->curr); /* unlocked access */
1617 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1623 if (rq->curr->nr_cpus_allowed == 1 || !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) {
1631 if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) {
1640 requeue_task_rt(rq, p, 1);
1641 resched_curr(rq);
1644 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1646 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1653 rq_unpin_lock(rq, rf);
1654 pull_rt_task(rq);
1655 rq_repin_lock(rq, rf);
1658 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1665 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1667 if (p->prio < rq->curr->prio) {
1668 resched_curr(rq);
1685 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) {
1686 check_preempt_equal_prio(rq, p);
1691 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1693 p->se.exec_start = rq_clock_task(rq);
1696 dequeue_pushable_task(rq, p);
1707 if (rq->curr->sched_class != &rt_sched_class) {
1708 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1711 rt_queue_push_tasks(rq);
1714 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq)
1730 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1733 struct rt_rq *rt_rq = &rq->rt;
1736 rt_se = pick_next_rt_entity(rq, rt_rq);
1744 static struct task_struct *pick_next_task_rt(struct rq *rq)
1748 if (!sched_rt_runnable(rq)) {
1752 p = _pick_next_task_rt(rq);
1753 set_next_task_rt(rq, p, true);
1757 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1759 update_curr_rt(rq);
1761 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1768 enqueue_pushable_task(rq, p);
1777 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1779 if (!task_running(rq, p) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
1787 * Return the highest pushable rq's task, which is suitable to be executed
1790 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1792 struct plist_head *head = &rq->rt.pushable_tasks;
1795 if (!has_pushable_tasks(rq)) {
1801 if (pick_rt_task(rq, p, cpu)) {
2078 static struct task_struct *pick_next_pushable_task(struct rq *rq)
2082 if (!has_pushable_tasks(rq)) {
2086 p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
2088 BUG_ON(rq->cpu != task_cpu(p));
2089 BUG_ON(task_current(rq, p));
2098 /* Will lock the rq it finds */
2099 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
2101 struct rq *lowest_rq = NULL;
2107 if ((cpu == -1) || (cpu == rq->cpu)) {
2114 * Target rq has tasks of equal or higher priority,
2123 if (double_lock_balance(rq, lowest_rq)) {
2129 struct task_struct *next_task = pick_next_pushable_task(rq);
2131 double_unlock_balance(rq, lowest_rq);
2137 /* If this rq is still suitable use it. */
2143 double_unlock_balance(rq, lowest_rq);
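
find_lock_lowest_rq() must hold both runqueue locks before a task can be moved, and double_lock_balance() (line 2123) may have to drop the source rq->lock to take them in a deadlock-safe order; if that happened, the candidate chosen earlier may no longer be pushable, so the function re-fetches it with pick_next_pushable_task() (line 2129) and backs out (line 2131) when the situation changed. A user-space sketch of just the locking shape, with pthread mutexes standing in for rq->lock and the CPU number as the lock-ordering rule (compile with -pthread):

/* User-space sketch of the "double lock, then re-verify" shape only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rq {
	int cpu;
	pthread_mutex_t lock;
};

/* Lock both rqs in a fixed (cpu-number) order, dropping this_rq->lock first
 * if needed; returns true if this_rq->lock was dropped, meaning the caller's
 * earlier decisions may be stale and must be re-checked. */
static bool double_lock(struct rq *this_rq, struct rq *target)
{
	if (pthread_mutex_trylock(&target->lock) == 0)
		return false;			/* got it without dropping anything */

	pthread_mutex_unlock(&this_rq->lock);
	if (target->cpu < this_rq->cpu) {
		pthread_mutex_lock(&target->lock);
		pthread_mutex_lock(&this_rq->lock);
	} else {
		pthread_mutex_lock(&this_rq->lock);
		pthread_mutex_lock(&target->lock);
	}
	return true;				/* state must be re-verified */
}

int main(void)
{
	struct rq a = { .cpu = 0 }, b = { .cpu = 1 };

	pthread_mutex_init(&a.lock, NULL);
	pthread_mutex_init(&b.lock, NULL);

	pthread_mutex_lock(&a.lock);
	if (double_lock(&a, &b))
		printf("a.lock was dropped: re-verify the task before pushing\n");
	else
		printf("locked b without ever dropping a\n");
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

The return value mirrors double_lock_balance(): nonzero means the lock was released in the middle, so nothing decided before the call can be trusted afterwards.
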
2155 static int push_rt_task(struct rq *rq)
2158 struct rq *lowest_rq;
2161 if (!rq->rt.overloaded) {
2165 next_task = pick_next_pushable_task(rq);
2171 if (WARN_ON(next_task == rq->curr)) {
2180 if (unlikely(next_task->prio < rq->curr->prio)) {
2181 resched_curr(rq);
2185 /* We might release rq lock */
2188 /* find_lock_lowest_rq locks the rq if found */
2189 lowest_rq = find_lock_lowest_rq(next_task, rq);
2193 * find_lock_lowest_rq releases rq->lock
2200 task = pick_next_pushable_task(rq);
2224 deactivate_task(rq, next_task, 0);
2231 double_unlock_balance(rq, lowest_rq);
2239 static void push_rt_tasks(struct rq *rq)
2242 while (push_rt_task(rq)) {
2347 static void tell_cpu_to_push(struct rq *rq)
2352 atomic_inc(&rq->rd->rto_loop_next);
2355 if (!rto_start_trylock(&rq->rd->rto_loop_start)) {
2359 raw_spin_lock(&rq->rd->rto_lock);
2367 if (rq->rd->rto_cpu < 0) {
2368 cpu = rto_next_cpu(rq->rd);
2371 raw_spin_unlock(&rq->rd->rto_lock);
2373 rto_start_unlock(&rq->rd->rto_loop_start);
2377 sched_get_rd(rq->rd);
2378 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2386 struct rq *rq;
2389 rq = this_rq();
2394 if (has_pushable_tasks(rq)) {
2395 raw_spin_lock(&rq->lock);
2396 push_rt_tasks(rq);
2397 raw_spin_unlock(&rq->lock);
2417 static void pull_rt_task(struct rq *this_rq)
2422 struct rq *src_rq;
2473 * on its rq, and no others.
2521 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2523 bool need_to_push = !task_running(rq, p) && !test_tsk_need_resched(rq->curr) && p->nr_cpus_allowed > 1 &&
2524 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2525 (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio);
2527 push_rt_tasks(rq);
2531 /* Assumes rq->lock is held */
2532 static void rq_online_rt(struct rq *rq)
2534 if (rq->rt.overloaded) {
2535 rt_set_overload(rq);
2538 __enable_runtime(rq);
2540 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2543 /* Assumes rq->lock is held */
2544 static void rq_offline_rt(struct rq *rq)
2546 if (rq->rt.overloaded) {
2547 rt_clear_overload(rq);
2550 __disable_runtime(rq);
2552 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2559 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2568 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running || cpu_isolated(cpu_of(rq))) {
2572 rt_queue_pull_task(rq);
2591 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2597 if (task_current(rq, p)) {
2598 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2609 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) {
2610 rt_queue_push_tasks(rq);
2613 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) {
2614 resched_curr(rq);
2623 static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2629 if (rq->curr == p) {
2636 rt_queue_pull_task(rq);
2643 if (p->prio > rq->rt.highest_prio.curr) {
2644 resched_curr(rq);
2649 resched_curr(rq);
2658 if (p->prio < rq->curr->prio) {
2659 resched_curr(rq);
2665 static void watchdog(struct rq *rq, struct task_struct *p)
2688 static inline void watchdog(struct rq *rq, struct task_struct *p)
2698 * and everything must be accessed through the @rq and @curr passed in
2701 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2705 update_curr_rt(rq);
2706 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2708 watchdog(rq, p);
2730 requeue_task_rt(rq, p, 0);
2731 resched_curr(rq);
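
The requeue at line 2730 is the SCHED_RR timeslice path of task_tick_rt(): FIFO tasks are left alone, while an RR task has its slice decremented each tick and, once it reaches zero, the slice is refilled and the task is moved to the tail of its priority list (and the CPU rescheduled) only if it shares that list with another entity. A condensed sketch of that per-tick decision, with the task reduced to the fields the logic needs and RR_TIMESLICE as an arbitrary stand-in for sched_rr_timeslice:

/* User-space sketch of the per-tick SCHED_RR decision only. */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_FIFO	1
#define SCHED_RR	2
#define RR_TIMESLICE	10	/* ticks; stand-in for sched_rr_timeslice */

struct task {
	int policy;
	int time_slice;
	bool only_one_on_queue;	/* stand-in for "no other entity on the run list" */
};

/* Returns true when the task should be requeued at the tail and rescheduled. */
static bool rr_tick(struct task *p)
{
	if (p->policy != SCHED_RR)
		return false;		/* FIFO: no timeslice handling */
	if (--p->time_slice)
		return false;		/* slice not yet exhausted */

	p->time_slice = RR_TIMESLICE;	/* refill for the next round */
	return !p->only_one_on_queue;	/* requeue only if someone else waits */
}

int main(void)
{
	struct task p = { SCHED_RR, 2, false };

	for (int tick = 1; tick <= 2; tick++) {
		bool requeue = rr_tick(&p);

		printf("tick %d: requeue=%d (slice now %d)\n",
		       tick, requeue, p.time_slice);
	}
	return 0;
}

The tail insertion corresponds to requeue_task_rt(rq, p, 0) at line 2730, in contrast to the head requeue (third argument 1) used by check_preempt_equal_prio() at line 1640.
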
2740 struct rq *busiest_rq = data;
2742 struct rq *lowest_rq = NULL;
2748 /* find_lock_lowest_rq locks the rq if found */
2772 static void check_for_migration_rt(struct rq *rq, struct task_struct *p)
2792 if (cpu_orig_cap == rq->rd->max_cpu_capacity) {
2807 raw_spin_lock(&rq->lock);
2808 if (!rq->active_balance && !rq->rt_active_balance) {
2809 rq->rt_active_balance = 1;
2810 rq->rt_push_task = p;
2814 raw_spin_unlock(&rq->lock);
2817 stop_one_cpu_nowait(task_cpu(p), rt_active_load_balance_cpu_stop, rq, &rq->rt_active_balance_work);
2823 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)