/kernel/linux/linux-5.10/kernel/sched/

pelt.h
    100   delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));  in update_rq_clock_pelt()
    101   delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));  in update_rq_clock_pelt()

pelt.c
    443   running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));  in update_irq_load_avg()
    444   running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));  in update_irq_load_avg()
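Both files apply the same PELT time-invariance step: a wall-clock delta is scaled first by the CPU's microarchitectural capacity and then by its current frequency, so load sums stay comparable across big and little cores running at different clocks. Below is a minimal user-space sketch of that arithmetic, assuming the mainline definitions (SCHED_CAPACITY_SHIFT is 10 and cap_scale(v, s) is ((v) * (s)) >> SCHED_CAPACITY_SHIFT); the capacity numbers are invented inputs, not real arch_scale_*() results.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define cap_scale(v, s)		(((v) * (s)) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
	uint64_t delta    = 4000;	/* ns of wall-clock progress                      */
	uint64_t cpu_cap  = 512;	/* stand-in for arch_scale_cpu_capacity(): LITTLE */
	uint64_t freq_cap = 768;	/* stand-in for arch_scale_freq_capacity(): 75%   */

	/* same two-step scaling as update_rq_clock_pelt()/update_irq_load_avg() */
	delta = cap_scale(delta, cpu_cap);	/* 4000 * 512 / 1024 = 2000 */
	delta = cap_scale(delta, freq_cap);	/* 2000 * 768 / 1024 = 1500 */

	printf("invariant delta = %llu ns\n", (unsigned long long)delta);
	return 0;
}

On this scale, a 4000 ns tick on a half-capacity core running at 75% of its top frequency contributes only 1500 ns of invariant time.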
rt.c
    281   !cpu_isolated(cpu_of(rq));  in need_pull_rt_task()
    527   (rt_rq = iter->rt_rq[cpu_of(rq)]);)
    546   int cpu = cpu_of(rq);  in sched_rt_rq_enqueue()
    564   int cpu = cpu_of(rq_of_rt_rq(rt_rq));  in sched_rt_rq_dequeue()
    2479  cpu_isolated(cpu_of(rq)))  in switched_from_rt()
    2522  if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))  in switched_to_rt()
    2650  task_cpu(next_task) != cpu_of(busiest_rq))  in rt_active_load_balance_cpu_stop()
    2658  if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))  in rt_active_load_balance_cpu_stop()
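The two rt_active_load_balance_cpu_stop() hits come from the vendor RT active load-balance path rather than mainline: an RT task is only pulled to lowest_rq when that CPU's original capacity is strictly larger than the capacity of the CPU the task is already on. A toy stand-alone illustration of that comparison follows, with an invented capacity table standing in for capacity_orig_of().

#include <stdio.h>
#include <stdbool.h>

/* invented per-CPU capacities standing in for capacity_orig_of() */
static const unsigned long capacity_orig[4] = { 512, 512, 1024, 1024 };

/* mirrors the rt.c:2658 bail-out: only migrate to a strictly bigger CPU */
static bool worth_pulling(int dst_cpu, int task_cpu)
{
	return capacity_orig[dst_cpu] > capacity_orig[task_cpu];
}

int main(void)
{
	printf("CPU0 -> CPU2: %d\n", worth_pulling(2, 0));	/* 1: 512 -> 1024 */
	printf("CPU2 -> CPU3: %d\n", worth_pulling(3, 2));	/* 0: same size   */
	return 0;
}

If the destination is not strictly bigger, the migration is abandoned, which is what the <= comparison at line 2658 encodes.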
fair.c
    333   int cpu = cpu_of(rq);  in list_add_leaf_cfs_rq()
    830   long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));  in post_init_entity_util_avg()
    4096  if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))  in util_est_update()
    4278  int cpu = cpu_of(rq);  in update_misfit_status()
    4296  task_fits = task_fits_cpu(p, cpu_of(rq));  in update_misfit_status()
    4298  task_fits = task_fits_cpu(p, cpu_of(rq));  in update_misfit_status()
    5012  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_unthrottle_up()
    5030  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_throttle_down()
    5070  se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];  in throttle_cfs_rq()
    5122  se = cfs_rq->tg->se[cpu_of(r  in unthrottle_cfs_rq()  [all...]
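The util_est_update() and update_misfit_status() hits both boil down to asking whether a task's utilization fits the capacity of the CPU identified by cpu_of(rq). Mainline expresses the margin with the fits_capacity() macro in fair.c, which requires roughly 20% headroom; a small stand-alone sketch of that test with invented utilization values:

#include <stdio.h>

/* same margin as mainline fair.c: "fits" means using less than ~80% of max */
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

int main(void)
{
	unsigned long task_util = 450;	/* invented utilization on the 0..1024 scale */

	printf("fits a  512-capacity CPU: %d\n", fits_capacity(task_util, 512UL));	/* 0 */
	printf("fits a 1024-capacity CPU: %d\n", fits_capacity(task_util, 1024UL));	/* 1 */
	return 0;
}

When the task does not fit, update_misfit_status() marks it as misfit so the load balancer can try to move it to a higher-capacity CPU.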
stats.h
    153   psi_memstall_tick(rq->curr, cpu_of(rq));  in psi_task_tick()

walt.h
    55    unsigned long capcurr = capacity_curr_of(cpu_of(rq));  in scale_exec_time()

core.c
    265   irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;  in update_rq_clock_task()
    290   steal = paravirt_steal_clock(cpu_of(rq));  in update_rq_clock_task()
    325   delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;  in update_rq_clock()
    360   WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());  in hrtick()
    413   smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);  in hrtick_start()
    626   cpu = cpu_of(rq);  in resched_curr()
    751   int cpu = cpu_of(rq);  in nohz_csd_func()
    2030  stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);  in __set_cpus_allowed_ptr()
    2720  if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))  in sched_ttwu_pending()
    2721  set_task_cpu(p, cpu_of(r  in sched_ttwu_pending()  [all...]
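The update_rq_clock_task() hits show why the per-CPU index matters here: when IRQ-time and steal-time accounting are enabled, time spent in hard/soft IRQs (irq_time_read()) and time stolen by the hypervisor (paravirt_steal_clock()) are read for cpu_of(rq) and subtracted before the task clock advances. A toy model of that accounting, with invented deltas; the clamping mirrors what the kernel does when the IRQ or steal slice exceeds the raw delta:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta      = 1000000;	/* ns since the last rq clock update, invented */
	uint64_t irq_delta  = 150000;	/* irq_time_read(cpu) - rq->prev_irq_time      */
	uint64_t steal      = 50000;	/* growth of paravirt_steal_clock(cpu)         */
	uint64_t clock_task = 0;

	if (irq_delta > delta)		/* never let IRQ time exceed the whole delta */
		irq_delta = delta;
	delta -= irq_delta;

	if (steal > delta)		/* same clamp for hypervisor steal time */
		steal = delta;
	delta -= steal;

	clock_task += delta;		/* rq->clock_task only sees "real" task time */
	printf("clock_task advanced by %llu ns\n", (unsigned long long)clock_task);
	return 0;
}

The raw delta itself comes from update_rq_clock() (the hit at line 325), which reads sched_clock_cpu() for exactly this CPU.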
sched.h
    1207  static inline int cpu_of(struct rq *rq)  in cpu_of() function
    2119  int cpu = cpu_of(rq);  in sched_update_tick_dependency()
    2183  if (!cpu_active(cpu_of(rq)))  in hrtick_enabled()
    2564  cpu_of(rq)));  in cpufreq_update_util()
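The hit at line 1207 is the definition every other result resolves to: cpu_of() simply returns the runqueue's CPU number, collapsing to 0 on !CONFIG_SMP builds where there is only one runqueue. A trimmed-down, compilable sketch of the mainline form (the real struct rq is of course far larger):

#include <stdio.h>

#define CONFIG_SMP 1			/* pretend we are an SMP build */

struct rq {				/* tiny stand-in for the kernel's struct rq */
	int cpu;
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;			/* the CPU this runqueue belongs to  */
#else
	return 0;			/* UP kernels have a single runqueue */
#endif
}

int main(void)
{
	struct rq rq = { .cpu = 3 };

	printf("cpu_of(rq) = %d\n", cpu_of(&rq));	/* prints 3 */
	return 0;
}

Every other hit in this listing is ultimately just this accessor being used to index per-CPU state.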
deadline.c
    1246  int cpu = cpu_of(rq);  in update_curr_dl()
    2340  src_dl_b = dl_bw_of(cpu_of(rq));  in set_cpus_allowed_dl()

walt.c
    1794  cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask);  in walt_sched_init_rq()
/kernel/linux/linux-6.6/kernel/sched/

pelt.h
    118   delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));  in update_rq_clock_pelt()
    119   delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));  in update_rq_clock_pelt()

pelt.c
    439   running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));  in update_irq_load_avg()
    440   running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));  in update_irq_load_avg()
fair.c
    401   int cpu = cpu_of(rq);  in list_add_leaf_cfs_rq()
    1171  long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));  in post_init_entity_util_avg()
    4513  now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);  in migrate_se_pelt_lag()
    4942  if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))  in util_est_update()
    5120  int cpu = cpu_of(rq);  in update_misfit_status()
    5137  task_fits = task_fits_cpu(p, cpu_of(rq));  in update_misfit_status()
    5139  task_fits = task_fits_cpu(p, cpu_of(rq));  in update_misfit_status()
    5740  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_unthrottle_up()
    5769  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_throttle_down()
    5813  se = cfs_rq->tg->se[cpu_of(rq_o  in throttle_cfs_rq()  [all...]

rt.c
    339   !cpu_isolated(cpu_of(rq));  in need_pull_rt_task()
    577   (rt_rq = iter->rt_rq[cpu_of(rq)]);)
    596   int cpu = cpu_of(rq);  in sched_rt_rq_enqueue()
    614   int cpu = cpu_of(rq_of_rt_rq(rt_rq));  in sched_rt_rq_dequeue()
    2715  cpu_isolated(cpu_of(rq)))  in switched_from_rt()
    2758  if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))  in switched_to_rt()
    2886  task_cpu(next_task) != cpu_of(busiest_rq))  in rt_active_load_balance_cpu_stop()
    2894  if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))  in rt_active_load_balance_cpu_stop()
walt.h
    56    unsigned long capcurr = capacity_curr_of(cpu_of(rq));  in scale_exec_time()

core_sched.c
    242   const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));  in __sched_core_account_forceidle()
sched.h
    1322  static inline int cpu_of(struct rq *rq)  in cpu_of() function
    1414  for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {  in sched_core_cookie_match()
    2629  int cpu = cpu_of(rq);  in sched_update_tick_dependency()
    2712  if (!cpu_active(cpu_of(rq)))  in hrtick_enabled()
    3147  cpu_of(rq)));  in cpufreq_update_util()
    3310  rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);  in uclamp_rq_is_capped()
    3597  struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));  in mm_cid_snapshot_time()
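The uclamp_rq_is_capped() hit sums the CFS and RT utilization of cpu_of(rq) and treats the runqueue as frequency-capped when that sum exceeds the aggregated UCLAMP_MAX value. A toy version of that check, under the assumption (as in mainline 6.6) that a max clamp equal to SCHED_CAPACITY_SCALE means "no clamp"; the utilization inputs are invented:

#include <stdio.h>
#include <stdbool.h>

#define SCHED_CAPACITY_SCALE	1024

/* toy model of the test around sched.h:3310 */
static bool rq_is_capped(unsigned long util_cfs, unsigned long util_rt,
			 unsigned long uclamp_max)
{
	unsigned long rq_util = util_cfs + util_rt;	/* CFS + RT demand */

	return uclamp_max != SCHED_CAPACITY_SCALE && rq_util > uclamp_max;
}

int main(void)
{
	printf("capped: %d\n", rq_is_capped(600, 100, 512));	/* 1: 700 > 512 */
	printf("capped: %d\n", rq_is_capped(600, 100, 1024));	/* 0: no clamp  */
	return 0;
}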
core.c
    709   irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;  in update_rq_clock_task()
    736   steal = paravirt_steal_clock(cpu_of(rq));  in update_rq_clock_task()
    771   delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;  in update_rq_clock()
    798   WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());  in hrtick()
    851   smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);  in hrtick_start()
    1057  cpu = cpu_of(rq);  in resched_curr()
    1181  int cpu = cpu_of(rq);  in nohz_csd_func()
    3124  stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
    4011  if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))  in sched_ttwu_pending()
    4012  set_task_cpu(p, cpu_of(r  in sched_ttwu_pending()  [all...]

deadline.c
    1304  int cpu = cpu_of(rq);  in update_curr_dl()
    2503  src_dl_b = dl_bw_of(cpu_of(rq));  in set_cpus_allowed_dl()

walt.c
    1795  cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask);  in walt_sched_init_rq()
/kernel/linux/linux-5.10/kernel/sched/rtg/

rtg.c
    113   int cpu = cpu_of(rq);  in transfer_busy_time()
/kernel/linux/linux-6.6/kernel/sched/rtg/

rtg.c
    114   int cpu = cpu_of(rq);  in transfer_busy_time()