Searched refs:busiest_rq (Results 1 - 4 of 4) sorted by relevance
/kernel/linux/linux-5.10/kernel/sched/
rt.c (all matches in rt_active_load_balance_cpu_stop()):
  2641  struct rq *busiest_rq = data;                              [local]
  2642  struct task_struct *next_task = busiest_rq->rt_push_task;
  2646  raw_spin_lock_irqsave(&busiest_rq->lock, flags);
  2647  busiest_rq->rt_active_balance = 0;
  2650  task_cpu(next_task) != cpu_of(busiest_rq))
  2654  lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
  2661  deactivate_task(busiest_rq, next_task, 0);
  2667  double_unlock_balance(busiest_rq, lowest_rq);
  2670  raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);
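Read top to bottom, these matches trace a complete CPU-stopper callback: lock the busiest runqueue, acknowledge the pending request, revalidate the task, then push it to a lower-priority runqueue. Below is a minimal sketch of the whole function reconstructed from the matched lines. Note that rt_push_task and rt_active_balance are fields this vendor tree adds to struct rq (they are not mainline), and everything between the matches, the out label, the task_on_rq_queued() check, and the set_task_cpu()/activate_task()/resched_curr() sequence, is inferred from the kernel's usual push-migration pattern rather than taken from this result.

static int rt_active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;                           /* match 2641 */
	struct task_struct *next_task = busiest_rq->rt_push_task;
	struct rq *lowest_rq = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&busiest_rq->lock, flags);        /* match 2646 */
	busiest_rq->rt_active_balance = 0;                      /* ack the request */

	/* Inferred: the task may have moved or been dequeued while the
	 * stopper work was queued, so revalidate it under the rq lock. */
	if (!task_on_rq_queued(next_task) ||
	    task_cpu(next_task) != cpu_of(busiest_rq))          /* match 2650 */
		goto out;

	/* Take a lower-priority rq's lock too, in deadlock-safe order. */
	lowest_rq = find_lock_lowest_rq(next_task, busiest_rq); /* match 2654 */
	if (!lowest_rq)
		goto out;

	/* Push migration: dequeue from the busy rq, queue on the quiet one.
	 * The three lines after deactivate_task() are assumptions. */
	deactivate_task(busiest_rq, next_task, 0);              /* match 2661 */
	set_task_cpu(next_task, cpu_of(lowest_rq));
	activate_task(lowest_rq, next_task, 0);
	resched_curr(lowest_rq);

	double_unlock_balance(busiest_rq, lowest_rq);           /* match 2667 */
out:
	raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);   /* match 2670 */
	return 0;
}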
fair.c
   8134  * busiest_rq, as part of a balancing operation within domain "sd".
  10243  * We've detached some tasks from busiest_rq. Every           [in load_balance()]
  All remaining matches are in active_load_balance_cpu_stop():
  10489  struct rq *busiest_rq = data;                                [local]
  10490  int busiest_cpu = cpu_of(busiest_rq);
  10491  int target_cpu = busiest_rq->push_cpu;
  10501  rq_lock_irq(busiest_rq, &rf);
  10512  !busiest_rq->active_balance))
  10516  if (busiest_rq->nr_running <= 1)
  10524  BUG_ON(busiest_rq == target_rq);
  10527  push_task = busiest_rq
  [all...]
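The first two fair.c hits are comments (8134 sits in what looks like detach_tasks()'s header comment; 10243 is load_balance() noting that detached tasks are marked TASK_ON_RQ_MIGRATING). The rest is the front half of active_load_balance_cpu_stop(), the stopper callback that load_balance() arms when the imbalance can only be fixed by moving the busiest CPU's currently running task. Here is a sketch of that guard sequence, filled out with what mainline 5.10 has around the matched lines: the cpu_active() test, the smp_processor_id() check, and the out_unlock epilogue are recalled from mainline, not taken from this result, and the vendor push_task assignment at 10527 is left truncated as the result shows it.

static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;                   /* match 10489 */
	int busiest_cpu = cpu_of(busiest_rq);           /* match 10490 */
	int target_cpu = busiest_rq->push_cpu;          /* match 10491 */
	struct rq *target_rq = cpu_rq(target_cpu);
	struct rq_flags rf;

	rq_lock_irq(busiest_rq, &rf);                   /* match 10501 */

	/* CPUs can go inactive between queueing the stopper work and
	 * running it; never migrate from or to an inactive CPU. */
	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
		goto out_unlock;

	/* The request may be stale: we must still be running on the
	 * busiest CPU with the balance still pending. */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))      /* match 10512 */
		goto out_unlock;

	/* Only the stopper thread itself is runnable: nothing to push. */
	if (busiest_rq->nr_running <= 1)                /* match 10516 */
		goto out_unlock;

	/* push_cpu is chosen from another group, so this can't happen. */
	BUG_ON(busiest_rq == target_rq);                /* match 10524 */

	/* The result truncates at match 10527 ("push_task = busiest_rq..."),
	 * a vendor addition; mainline instead detaches one task here and
	 * attaches it to target_rq after dropping the lock. */

out_unlock:
	busiest_rq->active_balance = 0;
	rq_unlock_irq(busiest_rq, &rf);
	return 0;
}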
/kernel/linux/linux-6.6/kernel/sched/
rt.c (all matches in rt_active_load_balance_cpu_stop()):
  2877  struct rq *busiest_rq = data;                              [local]
  2878  struct task_struct *next_task = busiest_rq->rt_push_task;
  2882  raw_spin_lock_irqsave(&busiest_rq->__lock, flags);
  2883  busiest_rq->rt_active_balance = 0;
  2886  task_cpu(next_task) != cpu_of(busiest_rq))
  2890  lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
  2897  deactivate_task(busiest_rq, next_task, 0);
  2903  double_unlock_balance(busiest_rq, lowest_rq);
  2906  raw_spin_unlock_irqrestore(&busiest_rq->__lock, flags);
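The 6.6 tree carries the same vendor callback, unchanged except for the lock accesses: mainline renamed rq->lock to rq->__lock when it introduced the raw_spin_rq_lock*() wrappers (around v5.14, as core-scheduling preparation), and this port follows the rename while still taking the raw field directly. A short illustration of the difference; the wrapper call on the last line is what idiomatic mainline code would use, it is an assumption and not in this result:

	/* linux-5.10 vendor callback */
	raw_spin_lock_irqsave(&busiest_rq->lock, flags);
	/* linux-6.6 vendor callback: same field, new name */
	raw_spin_lock_irqsave(&busiest_rq->__lock, flags);
	/* idiomatic mainline equivalent (assumption, not in the result) */
	raw_spin_rq_lock_irqsave(busiest_rq, flags);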
fair.c
   9271  * busiest_rq, as part of a balancing operation within domain "sd".
  11634  * We've detached some tasks from busiest_rq. Every           [in load_balance()]
  All remaining matches are in active_load_balance_cpu_stop():
  11874  struct rq *busiest_rq = data;                                [local]
  11875  int busiest_cpu = cpu_of(busiest_rq);
  11876  int target_cpu = busiest_rq->push_cpu;
  11886  rq_lock_irq(busiest_rq, &rf);
  11897  !busiest_rq->active_balance))
  11901  if (busiest_rq->nr_running <= 1)
  11909  WARN_ON_ONCE(busiest_rq == target_rq);
  11911  push_task = busiest_rq
  [all...]
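Diffing the two fair.c results, the only change the matches expose is the sanity check at the old BUG_ON() site: mainline converted the scheduler's BUG_ON() instances to WARN_ON_ONCE() in the v6.0 cycle, so a bogus same-rq stopper request now logs a one-time warning instead of panicking the machine:

	/* linux-5.10, match 10524 */
	BUG_ON(busiest_rq == target_rq);
	/* linux-6.6, match 11909 */
	WARN_ON_ONCE(busiest_rq == target_rq);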
Completed in 35 milliseconds