Home
last modified time | relevance | path

Searched refs:cpu_rq (Results 1 - 5 of 5) sorted by relevance

/device/soc/rockchip/common/sdk_linux/kernel/sched/
H A D sched.h 1213 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) macro
1215 #define task_rq(p) cpu_rq(task_cpu(p))
1216 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1505 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
1520 for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) { in highest_flag_domain()
1534 for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) { in lowest_flag_domain()
2408 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2426 struct rq *rq = cpu_rq(i); in __dl_update()
2622 return cpu_rq(cpu)->cpu_capacity; in capacity_of()
2627 return cpu_rq(cpu)->cpu_capacity_orig; in capacity_orig_of()
[all...]
H A D core.c 658 struct rq *rq = cpu_rq(cpu); in resched_cpu()
746 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu()
1667 for_each_possible_cpu(cpu) init_uclamp_rq(cpu_rq(cpu)); in init_uclamp()
1906 double_lock_balance(rq, cpu_rq(new_cpu)); in move_queued_task()
1913 double_rq_unlock(cpu_rq(new_cpu), rq); in move_queued_task()
1918 rq = cpu_rq(new_cpu); in move_queued_task()
2224 dst_rq = cpu_rq(cpu); in __migrate_swap_task()
2261 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
2262 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
2647 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
[all...]
H A D fair.c 1678 struct rq *rq = cpu_rq(cpu); in update_numa_stats()
1711 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1726 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1742 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1801 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2161 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2605 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
4224 unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity; in task_fits_max()
5425 cfs_rq->throttled_clock_pelt = rq_clock_task(cpu_rq(cpu)); in sync_throttle()
5766 return sched_idle_rq(cpu_rq(cpu)); in sched_idle_cpu()
[all...]
H A D rt.c 174 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry()
687 return &cpu_rq(cpu)->rt; in sched_rt_period_rt_rq()
1548 rq = cpu_rq(cpu); in select_task_rq_rt()
1553 this_cpu_rq = cpu_rq(this_cpu); in select_task_rq_rt()
1605 if (target != -1 && p->prio < cpu_rq(target)->rt.highest_prio.curr) { in select_task_rq_rt()
1812 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; in find_cas_cpu()
2111 lowest_rq = cpu_rq(cpu); in find_lock_lowest_rq()
2452 src_rq = cpu_rq(cpu); in pull_rt_task()
3117 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints()
3218 cycle_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
[all...]
H A D topology.c 364 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
713 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
2201 rq = cpu_rq(i); in build_sched_domains()
2410 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2443 if (cpumask_equal(doms_new[i], doms_cur[j]) && cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()

Completed in 31 milliseconds