Searched refs: task_rq (Results 1 - 4 of 4) sorted by relevance

/device/soc/rockchip/common/sdk_linux/kernel/sched/
core.c
198 rq = task_rq(p);
200 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
222 rq = task_rq(p);
228 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
230 * [S] ->cpu = new_cpu [L] task_rq()
238 * dependency headed by '[L] rq = task_rq()' and the acquire
241 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1983 * If task_rq(p) != rq, it cannot be migrated here, because we're in migration_cpu_stop()
1987 if (task_rq(p) == rq) { in migration_cpu_stop()
2014 struct rq *rq = task_rq( in do_set_cpus_allowed()
[all...]
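
The core.c hits at 198-241 trace the runqueue-locking pattern (what looks like the body of __task_rq_lock()/task_rq_lock()): read task_rq(p), take that runqueue's lock, then re-check task_rq(p) and task_on_rq_migrating(p) under the lock, because the task can be migrated to another CPU between the read and the acquire. A minimal userspace sketch of the same lock-then-revalidate loop, with pthreads and illustrative struct names (runq/task stand in for the kernel's rq/task_struct):

#include <pthread.h>
#include <stdatomic.h>

struct runq {
    pthread_mutex_t lock;            /* per-runqueue lock, like rq->lock */
};

struct task {
    _Atomic(struct runq *) rq;       /* runqueue the task currently sits on */
    atomic_bool migrating;           /* analogue of task_on_rq_migrating() */
};

/* Lock the runqueue a task belongs to. The rq pointer can change under us
 * (migration), so lock the rq we observed, then re-check that the task is
 * still on that rq and not mid-migration; otherwise drop the lock and retry. */
static struct runq *task_rq_lock_sketch(struct task *p)
{
    for (;;) {
        struct runq *rq = atomic_load(&p->rq);

        pthread_mutex_lock(&rq->lock);
        if (rq == atomic_load(&p->rq) && !atomic_load(&p->migrating))
            return rq;               /* caller releases rq->lock when done */
        pthread_mutex_unlock(&rq->lock);
    }
}

The [S]/[L] comment fragments in the hits at 228-238 document why, in the kernel, a plain load ordered against the lock acquire is enough; the sketch sidesteps that subtlety by using sequentially consistent atomics.
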
rt.c
259 return task_rq(p); in rq_of_rt_se()
2001 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, task, lowest_mask, rt_task_fits_capacity); in find_lowest_rq()
2003 ret = cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask); in find_lowest_rq()
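
The rt.c hits show a second role of task_rq(): reaching shared root-domain state. find_lowest_rq() passes task_rq(task)->rd->cpupri to cpupri_find()/cpupri_find_fitness() to locate a CPU running at lower RT priority. A rough sketch of that pointer chase, with an illustrative per-CPU priority table in place of the real cpupri machinery:

#define NCPU_SK 4

struct rootdom { int cpu_prio[NCPU_SK]; };   /* stand-in for rd->cpupri */
struct rq_rt   { struct rootdom *rd; };
struct task_rt { struct rq_rt *rq; };

/* Pick the CPU whose current priority is lowest, reached through the task's
 * runqueue as in find_lowest_rq(); a linear scan replaces cpupri_find(). */
static int find_lowest_cpu_sketch(struct task_rt *p)
{
    struct rootdom *rd = p->rq->rd;          /* task_rq(task)->rd */
    int best = 0;
    for (int cpu = 1; cpu < NCPU_SK; cpu++)
        if (rd->cpu_prio[cpu] < rd->cpu_prio[best])
            best = cpu;
    return best;
}
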
fair.c
494 return &task_rq(p)->cfs; in task_cfs_rq()
500 struct rq *rq = task_rq(p); in cfs_rq_of()
1157 p == current || (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
5694 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
7190 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
7225 lockdep_assert_held(&task_rq(p)->lock); in migrate_task_rq_fair()
8300 BUG_ON(task_rq(p) != rq); in attach_task()
11559 update_overutilized_status(task_rq(curr)); in task_tick_fair()
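
Several of the fair.c hits use task_rq() purely for invariant checks: lockdep_is_held() at 1157 and lockdep_assert_held() at 7225 verify the caller holds the task's own runqueue lock, while SCHED_WARN_ON at 5694 and BUG_ON at 8300 verify the rq passed in really is task_rq(p). A hedged userspace analogue, using a lock that records its owner so the invariant can be asserted:

#include <assert.h>
#include <pthread.h>

struct rq_fair {
    pthread_mutex_t lock;
    pthread_t owner;                 /* set after locking, cleared before unlock */
    int owned;
};

struct task_fair { struct rq_fair *rq; };

/* Analogue of lockdep_assert_held(&task_rq(p)->lock): callers in the style
 * of migrate_task_rq_fair() must already hold the task's runqueue lock. */
static void assert_task_rq_held(struct task_fair *p)
{
    assert(p->rq->owned && pthread_equal(p->rq->owner, pthread_self()));
}
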
sched.h
1215 #define task_rq(p) cpu_rq(task_cpu(p)) macro
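
The sched.h definition ties the other results together: task_rq(p) is just cpu_rq(task_cpu(p)), i.e. read the CPU the task is queued on and index the per-CPU runqueue for it. Because that CPU can change at any moment, every use above either holds the right lock already or re-validates the result after locking. A standalone model of the composition (all _sk names are illustrative, not kernel API):

#define NR_CPUS_SK 8

struct rq_sk { int nr_running; };

static struct rq_sk runqueues_sk[NR_CPUS_SK]; /* analogue of the per-CPU rq array */

struct task_sk { int cpu; };                  /* CPU the task is queued on */

#define cpu_rq_sk(cpu)  (&runqueues_sk[(cpu)])
#define task_cpu_sk(p)  ((p)->cpu)
#define task_rq_sk(p)   cpu_rq_sk(task_cpu_sk(p))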
