Lines Matching refs:rt

129 return container_of(rt_se, struct task_struct, rt);
189 rt_se->rt_rq = &rq->rt;
247 return container_of(rt_se, struct task_struct, rt);
252 return container_of(rt_rq, struct rq, rt);
266 return &rq->rt;
289 return rq->rt.highest_prio.curr > prev->prio && !cpu_isolated(cpu_of(rq));
350 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
369 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
381 return !plist_head_empty(&rq->rt.pushable_tasks);
406 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
408 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
411 if (p->prio < rq->rt.highest_prio.next) {
412 rq->rt.highest_prio.next = p->prio;
418 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
422 p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
423 rq->rt.highest_prio.next = p->prio;
425 rq->rt.highest_prio.next = MAX_RT_PRIO;
649 #define cycle_each_rt_rq(rt_rq, iter, rq) for ((void)(iter), (rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)
687 return &cpu_rq(cpu)->rt;
948 * When we're idle and a woken (rt) task is
1062 struct sched_rt_entity *rt_se = &curr->rt;
1111 BUG_ON(&rq->rt != rt_rq);
1127 BUG_ON(&rq->rt != rt_rq);
1156 if (&rq->rt != rt_rq) {
1173 if (&rq->rt != rt_rq) {
1429 enqueue_top_rt_rq(&rq->rt);
1445 enqueue_top_rt_rq(&rq->rt);
1456 return sync && task_has_rt_policy(rq->curr) && p->prio <= rq->rt.highest_prio.next && rq->rt.rt_nr_running <= 0x2;
1470 struct sched_rt_entity *rt_se = &p->rt;
1487 struct sched_rt_entity *rt_se = &p->rt;
1516 struct sched_rt_entity *rt_se = &p->rt;
1605 if (target != -1 && p->prio < cpu_rq(target)->rt.highest_prio.curr) {
1646 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1703 * If prev task was rt, put_prev_task() has already updated the
1705 * rt task
1733 struct rt_rq *rt_rq = &rq->rt;
1767 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
1792 struct plist_head *head = &rq->rt.pushable_tasks;
2086 p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
2112 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
2138 if (lowest_rq->rt.highest_prio.curr > task->prio) {
2161 if (!rq->rt.overloaded) {
2402 /* Pass the IPI to the next rt overloaded queue */
2460 if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr) {
2480 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2534 if (rq->rt.overloaded) {
2540 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2546 if (rq->rt.overloaded) {
2556 * When switch from the rt queue, we bring ourselves to a position
2568 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running || cpu_isolated(cpu_of(rq))) {
2609 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) {
2643 if (p->prio > rq->rt.highest_prio.curr) {
2676 if (p->rt.watchdog_stamp != jiffies) {
2677 p->rt.timeout++;
2678 p->rt.watchdog_stamp = jiffies;
2682 if (p->rt.timeout > next) {
2703 struct sched_rt_entity *rt_se = &p->rt;
2718 if (--p->rt.time_slice) {
2722 p->rt.time_slice = sched_rr_timeslice;
3117 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
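
The matches clustered around lines 381-425 all belong to the pushable-task bookkeeping: each runqueue keeps a priority-sorted plist of RT tasks that could be pushed elsewhere, and rq->rt.highest_prio.next caches the best priority on that list. The sketch below shows roughly how those matched lines fit together; the helper names and the unmatched lines in between (the plist_node_init() re-initialisation, the if/else shape of the dequeue path) follow the mainline kernel/sched/rt.c layout and are assumptions about this tree, not a verbatim copy of it.

/*
 * Sketch only: priority-ordered plist of pushable RT tasks per runqueue,
 * with highest_prio.next caching the best pushable priority (see the
 * matches at lines 381 and 406-425 above).
 */
static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	/* Re-insert the node at the slot matching the task's current prio. */
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Lower value means higher RT priority; refresh the cached best. */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Recompute the cache from the new list head, or reset when empty. */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO;
	}
}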
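
The matches at lines 2676-2722 come from the scheduler-tick path: p->rt.timeout and p->rt.watchdog_stamp implement the RLIMIT_RTTIME watchdog (the timeout counter is bumped at most once per jiffy), while the time_slice lines handle SCHED_RR round-robin. Below is a simplified sketch of just the round-robin part; task_tick_rt_rr() is a hypothetical name, requeue_task_rt() and resched_curr() are assumed to be available from the surrounding file, and the mainline version additionally skips the requeue when the task is alone at its priority level.

/*
 * Simplified sketch of the SCHED_RR slice handling matched at lines
 * 2718 and 2722. FIFO tasks keep running; RR tasks get a fresh slice
 * and move to the tail of their priority queue when the slice expires.
 */
static void task_tick_rt_rr(struct rq *rq, struct task_struct *p)
{
	if (p->policy != SCHED_RR)
		return;

	/* Slice not yet exhausted: nothing to do on this tick. */
	if (--p->rt.time_slice)
		return;

	/* Refill and round-robin within the same priority level. */
	p->rt.time_slice = sched_rr_timeslice;
	requeue_task_rt(rq, p, 0);	/* assumed helper, see lead-in */
	resched_curr(rq);
}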