Lines matching refs: prio

286      * Try to pull RT tasks here if we lower this rq's prio and cpu is not isolated.
289 return rq->rt.highest_prio.curr > prev->prio && !cpu_isolated(cpu_of(rq));
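Lines 286 and 289 are from need_pull_rt_task(); reassembled as a minimal sketch, grounded only in the two lines above (the !cpu_isolated() test is a vendor addition on top of mainline's plain prio comparison):

    static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
    {
    	/*
    	 * Try to pull RT tasks here if we lower this rq's prio and cpu is
    	 * not isolated.
    	 */
    	return rq->rt.highest_prio.curr > prev->prio &&
    	       !cpu_isolated(cpu_of(rq));
    }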
407 plist_node_init(&p->pushable_tasks, p->prio);
410 /* Update the highest prio pushable task */
411 if (p->prio < rq->rt.highest_prio.next) {
412 rq->rt.highest_prio.next = p->prio;
420 /* Update the new highest prio pushable task */
423 rq->rt.highest_prio.next = p->prio;
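Lines 407-423 are the pushable-task bookkeeping (enqueue_pushable_task()/dequeue_pushable_task() in mainline). A sketch of how highest_prio.next is kept in sync with the plist, assuming the mainline 4.x shape of these helpers:

    static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
    {
    	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
    	plist_node_init(&p->pushable_tasks, p->prio);
    	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

    	/* Update the highest prio pushable task */
    	if (p->prio < rq->rt.highest_prio.next)
    		rq->rt.highest_prio.next = p->prio;
    }

    static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
    {
    	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

    	/* Update the new highest prio pushable task */
    	if (has_pushable_tasks(rq)) {
    		p = plist_first_entry(&rq->rt.pushable_tasks,
    				      struct task_struct, pushable_tasks);
    		rq->rt.highest_prio.next = p->prio;
    	} else {
    		/* reset; MAX_RT_PRIO-1 on newer kernels */
    		rq->rt.highest_prio.next = MAX_RT_PRIO;
    	}
    }

Because the plist is ordered by prio, the first entry after a removal is always the next-highest pushable task, so no scan is needed.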
573 if (rt_rq->highest_prio.curr < curr->prio) {
610 return p->prio != p->normal_prio;
995 return rt_task_of(rt_se)->prio;
1148 static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1160 if (rq->online && prio < prev_prio) {
1161 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1165 static void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1184 static inline void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1187 static inline void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
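Lines 1148-1187 propagate prio changes into cpupri on SMP, with empty inline stubs for !SMP builds. A sketch of the increment side, following the mainline shape (the RT_GROUP_SCHED guard only updates cpupri for the top-level rt_rq):

    static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
    {
    	struct rq *rq = rq_of_rt_rq(rt_rq);

    #ifdef CONFIG_RT_GROUP_SCHED
    	/* Change rq's cpupri only if rt_rq is the top queue. */
    	if (&rq->rt != rt_rq)
    		return;
    #endif
    	if (rq->online && prio < prev_prio)
    		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
    }

dec_rt_prio_smp() is symmetric: when the old top prio goes away, it calls cpupri_set() with the recomputed highest prio.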
1194 static void inc_rt_prio(struct rt_rq *rt_rq, int prio)
1198 if (prio < prev_prio) {
1199 rt_rq->highest_prio.curr = prio;
1202 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1205 static void dec_rt_prio(struct rt_rq *rt_rq, int prio)
1210 WARN_ON(prio < prev_prio);
1216 if (prio == prev_prio) {
1225 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1230 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio)
1233 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio)
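Lines 1194-1233 maintain rt_rq->highest_prio.curr, again with no-op stubs when the tracking is compiled out. A sketch of the decrement side, which has the interesting case (mainline structure; the reset value is MAX_RT_PRIO-1 on newer kernels):

    static void dec_rt_prio(struct rt_rq *rt_rq, int prio)
    {
    	int prev_prio = rt_rq->highest_prio.curr;

    	if (rt_rq->rt_nr_running) {
    		WARN_ON(prio < prev_prio);

    		/*
    		 * This may have been our highest task, and therefore
    		 * we may have some recomputation to do.
    		 */
    		if (prio == prev_prio) {
    			struct rt_prio_array *array = &rt_rq->active;

    			rt_rq->highest_prio.curr =
    				sched_find_first_bit(array->bitmap);
    		}
    	} else {
    		rt_rq->highest_prio.curr = MAX_RT_PRIO;
    	}

    	dec_rt_prio_smp(rt_rq, prio, prev_prio);
    }

The recomputation is cheap because the active array keeps a bitmap of occupied prio levels, so the new highest prio is one find-first-bit away.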
1301 int prio = rt_se_prio(rt_se);
1303 WARN_ON(!rt_prio(prio));
1307 inc_rt_prio(rt_rq, prio);
1400 * Because the prio of an upper entry depends on the lower
1456 return sync && task_has_rt_policy(rq->curr) && p->prio <= rq->rt.highest_prio.next && rq->rt.rt_nr_running <= 2;
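Line 1456 does not exist in mainline; it reads like an Android-style "honor RT sync wakeup" predicate. Restated as a standalone helper so the condition is legible (the helper name is hypothetical; only the condition itself comes from the line above):

    /* Hypothetical name; the condition is taken from line 1456. */
    static inline bool rt_wake_sync_ok(struct rq *rq, struct task_struct *p, bool sync)
    {
    	return sync &&				/* sync wakeup requested */
    	       task_has_rt_policy(rq->curr) &&	/* waker is itself RT */
    	       p->prio <= rq->rt.highest_prio.next &&
    	       rq->rt.rt_nr_running <= 2;	/* rq not already RT-loaded */
    }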
1563 * and the lower prio task should be moved to another CPU.
1564 * Even though this will probably make the lower prio task
1569 * For equal prio tasks, we just let the scheduler sort it out.
1581 test = curr && unlikely(rt_task(curr)) && (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1605 if (target != -1 && p->prio < cpu_rq(target)->rt.highest_prio.curr) {
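Lines 1563-1605 are the wakeup CPU selection in select_task_rq_rt(). The comment block states the policy (priority beats cache locality); a sketch of the test it feeds, grounded in lines 1581 and 1605:

    	/*
    	 * Look for a lower-prio runqueue when the current task on @p's
    	 * runqueue is an RT task that either cannot migrate or outranks @p.
    	 */
    	test = curr && unlikely(rt_task(curr)) &&
    	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

    	if (test) {
    		int target = find_lowest_rq(p);

    		/* Only move if the target rq really runs lower prio. */
    		if (target != -1 &&
    		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
    			cpu = target;
    	}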
1667 if (p->prio < rq->curr->prio) {
1685 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) {
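Lines 1667-1685 are the RT wakeup-preemption decision; a sketch following mainline's check_preempt_curr_rt() (renamed wakeup_preempt_rt in recent kernels):

    static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
    {
    	/* A strictly higher-prio (numerically lower) wakee always preempts. */
    	if (p->prio < rq->curr->prio) {
    		resched_curr(rq);
    		return;
    	}

    #ifdef CONFIG_SMP
    	/*
    	 * On an equal-prio wakeup, reschedule only when current can
    	 * readily migrate, so the push logic can move it and make room
    	 * for a non-migratable wakee.
    	 */
    	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
    		check_preempt_equal_prio(rq, p);
    #endif
    }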
2112 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
2122 /* if the prio of this runqueue changed, try again */
2138 if (lowest_rq->rt.highest_prio.curr > task->prio) {
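Lines 2112-2138 are the retry loop of find_lock_lowest_rq(): the target's prio must be re-checked because double_lock_balance() can drop rq->lock. A condensed sketch of the loop body (mainline shape; the cpumask field name varies across kernel versions):

    for (tries = 0; tries < RT_MAX_TRIES; tries++) {
    	int cpu = find_lowest_rq(task);

    	if (cpu == -1 || cpu == rq->cpu)
    		break;

    	lowest_rq = cpu_rq(cpu);
    	if (lowest_rq->rt.highest_prio.curr <= task->prio) {
    		/* Target no longer lower prio; retrying won't help. */
    		lowest_rq = NULL;
    		break;
    	}

    	/* if the prio of this runqueue changed, try again */
    	if (double_lock_balance(rq, lowest_rq)) {
    		/* rq->lock was dropped: the task may have moved, re-validate. */
    		if (unlikely(task_rq(task) != rq ||
    			     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
    			     task_running(rq, task) ||
    			     !rt_task(task) ||
    			     !task_on_rq_queued(task))) {
    			double_unlock_balance(rq, lowest_rq);
    			lowest_rq = NULL;
    			break;
    		}
    	}

    	/* If this rq is still suitable use it. */
    	if (lowest_rq->rt.highest_prio.curr > task->prio)
    		break;

    	/* try again */
    	double_unlock_balance(rq, lowest_rq);
    	lowest_rq = NULL;
    }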
2180 if (unlikely(next_task->prio < rq->curr->prio)) {
2480 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2492 if (p->prio < src_rq->curr->prio) {
2503 * case there's an even higher prio task
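Lines 2480-2503 are the core of pull_rt_task(): a pushable task on a busy sibling is pulled only if it outranks this rq's highest prio, and only if nothing better is about to run on the source CPU. Sketch of that inner check (mainline shape):

    p = pick_highest_pushable_task(src_rq, this_cpu);

    /* Do we have an RT task that preempts the to-be-scheduled task? */
    if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
    	WARN_ON(p == src_rq->curr);
    	WARN_ON(!task_on_rq_queued(p));

    	/*
    	 * p may outrank src_rq's current task only because it just
    	 * woke up and has not scheduled yet; in that case leave it,
    	 * src_rq will run it momentarily.
    	 */
    	if (p->prio < src_rq->curr->prio)
    		goto skip;

    	resched = true;
    	deactivate_task(src_rq, p, 0);
    	set_task_cpu(p, this_cpu);
    	activate_task(this_rq, p, 0);
    	/*
    	 * Keep searching, in case there's an even higher prio task
    	 * on another runqueue.
    	 */
    }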
2525 (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio);
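Line 2525 is the tail of the push condition in task_woken_rt(): an rq whose current task is RT/DL and either pinned or lower prio should try to push the newly woken task elsewhere. Spelled out (mainline shape assumed):

    static void task_woken_rt(struct rq *rq, struct task_struct *p)
    {
    	bool need_to_push = !task_running(rq, p) &&
    			    !test_tsk_need_resched(rq->curr) &&
    			    p->nr_cpus_allowed > 1 &&
    			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
    			    (rq->curr->nr_cpus_allowed < 2 ||
    			     rq->curr->prio <= p->prio);

    	if (need_to_push)
    		push_rt_tasks(rq);
    }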
2613 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) {
2635 if (oldprio < p->prio) {
2643 if (p->prio > rq->rt.highest_prio.curr) {
2647 /* For UP simply resched on drop of prio */
2648 if (oldprio < p->prio) {
2658 if (p->prio < rq->curr->prio) {
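Line 2613 is the resched test in switched_to_rt() (with a vendor cpu_online() guard), and lines 2635-2658 are prio_changed_rt(). A sketch of the latter's branches, following the mainline layout (helper names such as rt_queue_pull_task() vary slightly across versions):

    static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
    {
    	if (!task_on_rq_queued(p))
    		return;

    	if (rq->curr == p) {
    #ifdef CONFIG_SMP
    		/* Our prio dropped while running: try to pull work here. */
    		if (oldprio < p->prio)
    			rt_queue_pull_task(rq);

    		/* A higher prio task is now waiting: reschedule. */
    		if (p->prio > rq->rt.highest_prio.curr)
    			resched_curr(rq);
    #else
    		/* For UP simply resched on drop of prio */
    		if (oldprio < p->prio)
    			resched_curr(rq);
    #endif
    	} else {
    		/* p now outranks the running task: reschedule. */
    		if (p->prio < rq->curr->prio)
    			resched_curr(rq);
    	}
    }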