Searched refs:task_rq (Results 1 - 12 of 12) sorted by relevance
/kernel/linux/linux-5.10/kernel/sched/
core.c
    197    rq = task_rq(p);
    199    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
    221    rq = task_rq(p);
    227    * [S] ->on_rq = MIGRATING   [L] rq = task_rq()
    229    * [S] ->cpu = new_cpu       [L] task_rq()
    237    * dependency headed by '[L] rq = task_rq()' and the acquire
    240    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
    1880   * If task_rq(p) != rq, it cannot be migrated here, because we're  in migration_cpu_stop()
    1884   if (task_rq(p) == rq) {  in migration_cpu_stop()
    1909   struct rq *rq = task_rq(  in do_set_cpus_allowed()
    [all...]
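The core.c hits at lines 197-240 (and the matching 6.6 hits further down) all belong to the same lock-and-recheck idiom: read task_rq(p), take that runqueue's lock, and only proceed if the task has not migrated in the meantime. The sketch below is a minimal userspace model of that retry loop, not kernel code: the pthread mutexes, the toy task_rq() helper, and the task_rq_lock_sketch() name are stand-ins for illustration (the real task_rq() is the cpu_rq(task_cpu(p)) macro in the sched.h hit listed below).

    /*
     * Toy userspace model of the recheck loop visible in the core.c hits
     * above. Illustrative only: pthread mutexes and the fields below stand
     * in for the kernel's rq->lock, migration state and per-CPU runqueues.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct rq {
        pthread_mutex_t lock;
    };

    struct task_struct {
        _Atomic int cpu;               /* CPU (and thus runqueue) the task belongs to */
        atomic_bool on_rq_migrating;   /* models task_on_rq_migrating(p) */
    };

    static struct rq runqueues[NR_CPUS];

    /* Models task_rq(p), i.e. cpu_rq(task_cpu(p)): the runqueue of the task's CPU. */
    static struct rq *task_rq(struct task_struct *p)
    {
        return &runqueues[atomic_load(&p->cpu)];
    }

    /*
     * Lock the runqueue p currently belongs to. Between reading task_rq(p)
     * and acquiring the lock the task may migrate, so recheck under the lock
     * and retry until the (task, runqueue) pair is stable.
     */
    static struct rq *task_rq_lock_sketch(struct task_struct *p)
    {
        struct rq *rq;

        for (;;) {
            rq = task_rq(p);
            pthread_mutex_lock(&rq->lock);
            if (rq == task_rq(p) && !atomic_load(&p->on_rq_migrating))
                return rq;                   /* stable: caller holds rq->lock */
            pthread_mutex_unlock(&rq->lock); /* task moved: drop and retry */
        }
    }

    int main(void)
    {
        struct task_struct p = { .cpu = 1, .on_rq_migrating = false };

        for (int i = 0; i < NR_CPUS; i++)
            pthread_mutex_init(&runqueues[i].lock, NULL);

        struct rq *rq = task_rq_lock_sketch(&p);
        printf("locked runqueue %ld for task on cpu %d\n",
               (long)(rq - runqueues), atomic_load(&p.cpu));
        pthread_mutex_unlock(&rq->lock);
        return 0;
    }

The ordering comments in the hits (the "[S] ->on_rq = MIGRATING / [L] rq = task_rq()" lines) describe why the recheck is sufficient in the real kernel; the toy above only reproduces the control flow, not those memory-ordering guarantees.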
deadline.c
    38     struct rq *rq = task_rq(p);  in dl_rq_of_se()
    228    rq = task_rq(p);  in dl_change_utilization()
    986    struct rq *rq = task_rq(p);  in start_dl_timer()
    1015   * harmless because we're holding task_rq()->lock, therefore the timer  in start_dl_timer()
    1737   rq = task_rq(p);  in migrate_task_rq_dl()
    1979   if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))  in find_later_rq()
    2329   rq = task_rq(p);  in set_cpus_allowed_dl()
walt.c
    477    struct rq *src_rq = task_rq(p);  in fixup_busy_time()
    503    update_task_ravg(task_rq(p)->curr, task_rq(p),  in fixup_busy_time()
    509    update_task_ravg(p, task_rq(p), TASK_MIGRATE,  in fixup_busy_time()
    1303   struct rq *rq = task_rq(p);  in mark_task_starting()
rt.c
    252    return task_rq(p);  in rq_of_rt_se()
    1917   ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
    1922   ret = cpupri_find(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
fair.c
    487    return &task_rq(p)->cfs;  in task_cfs_rq()
    493    struct rq *rq = task_rq(p);  in cfs_rq_of()
    1142   (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));  in deref_task_numa_group()
    5702   SCHED_WARN_ON(task_rq(p) != rq);  in hrtick_start_fair()
    7199   * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
    7234   lockdep_assert_held(&task_rq(p)->lock);  in migrate_task_rq_fair()
    8286   BUG_ON(task_rq(p) != rq);  in attach_task()
    11522  update_overutilized_status(task_rq(curr));  in task_tick_fair()
sched.h
    1234   #define task_rq(p) cpu_rq(task_cpu(p))  macro
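The sched.h hit above is the definition behind every other result in this list: task_rq() is a pure indexing macro with no locking of its own, which is why the call sites above either hold p->pi_lock / rq->lock or re-validate the result. The rough expansion below recalls the neighbouring cpu_rq()/per_cpu() helpers from memory of the same headers; they are not shown in these results, so treat their exact forms as an assumption.

    /*
     * Rough expansion sketch (helper forms assumed, not taken from these results):
     *
     *   task_rq(p)
     *     => cpu_rq(task_cpu(p))              // kernel/sched/sched.h, line 1234 above
     *     => &per_cpu(runqueues, task_cpu(p)) // assumed: per-CPU runqueue of the task's CPU
     *
     * The value can go stale the moment the task migrates, hence the recheck
     * pattern shown after the core.c results.
     */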
/kernel/linux/linux-6.6/kernel/sched/
deadline.c
    71     struct rq *rq = task_rq(p);  in dl_rq_of_se()
    321    rq = task_rq(p);  in dl_change_utilization()
    1050   struct rq *rq = task_rq(p);  in start_dl_timer()
    1079   * harmless because we're holding task_rq()->lock, therefore the timer  in start_dl_timer()
    1877   rq = task_rq(p);  in migrate_task_rq_dl()
    2143   if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))  in find_later_rq()
    2239   if (unlikely(task_rq(task) != rq ||  in find_lock_later_rq()
    2492   rq = task_rq(p);  in set_cpus_allowed_dl()
core.c
    218    if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))  in __sched_core_less()
    641    rq = task_rq(p);
    643    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
    665    rq = task_rq(p);
    671    * [S] ->on_rq = MIGRATING   [L] rq = task_rq()
    673    * [S] ->cpu = new_cpu       [L] task_rq()
    681    * dependency headed by '[L] rq = task_rq()' and the acquire
    684    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
    2348   rq = task_rq(p);  in wait_task_inactive()
    2668   * If task_rq(  in migration_cpu_stop()
    [all...]
walt.c
    478    struct rq *src_rq = task_rq(p);  in fixup_busy_time()
    504    update_task_ravg(task_rq(p)->curr, task_rq(p),  in fixup_busy_time()
    510    update_task_ravg(p, task_rq(p), TASK_MIGRATE,  in fixup_busy_time()
    1304   struct rq *rq = task_rq(p);  in mark_task_starting()
rt.c
    313    return task_rq(p);  in rq_of_rt_se()
    2091   ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
    2096   ret = cpupri_find(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
    2203   if (unlikely(task_rq(task) != rq ||  in find_lock_lowest_rq()
sched.h
    1346   #define task_rq(p) cpu_rq(task_cpu(p))  macro
    1584   return &task_rq(p)->cfs;  in task_cfs_rq()
    1590   struct rq *rq = task_rq(p);  in cfs_rq_of()
fair.c
    1480   (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));  in deref_task_numa_group()
    6660   SCHED_WARN_ON(task_rq(p) != rq);  in hrtick_start_fair()
    8339   * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
    9436   WARN_ON_ONCE(task_rq(p) != rq);  in attach_task()
    13005  struct rq *rq = task_rq(a);  in cfs_prio_less()
    13012  SCHED_WARN_ON(task_rq(b)->core != rq->core);  in cfs_prio_less()
    13035  cfs_rqa = &task_rq(a)->cfs;  in cfs_prio_less()
    13036  cfs_rqb = &task_rq(b)->cfs;  in cfs_prio_less()
    13087  update_overutilized_status(task_rq(curr));  in task_tick_fair()
Completed in 71 milliseconds