/kernel/linux/linux-5.10/kernel/sched/
walt.c
  439 struct rq *src_rq = cpu_rq(task_cpu); in inter_cluster_migration_fixup() local
  450 src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
  451 src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
  457 src_rq->nt_curr_runnable_sum -= in inter_cluster_migration_fixup()
  459 src_rq->nt_prev_runnable_sum -= in inter_cluster_migration_fixup()
  467 src_rq->window_start, new_task); in inter_cluster_migration_fixup()
  469 BUG_ON((s64)src_rq->prev_runnable_sum < 0); in inter_cluster_migration_fixup()
  470 BUG_ON((s64)src_rq->curr_runnable_sum < 0); in inter_cluster_migration_fixup()
  471 BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0); in inter_cluster_migration_fixup()
  472 BUG_ON((s64)src_rq in inter_cluster_migration_fixup()
  477 struct rq *src_rq = task_rq(p); in fixup_busy_time() local
  [all...]
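The walt.c hits all sit in the window-based load-tracking fixup that runs when a task moves between clusters: the task's per-window contribution is subtracted from the source runqueue's runnable sums (and from the nt_* sums for new tasks), credited to the destination, and the BUG_ON() lines check that the signed view of each sum never goes negative. The following is a minimal user-space sketch of that bookkeeping only; the struct and field names are made up to mirror the listing and are not the kernel types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for a runqueue's window sums. */
struct toy_rq {
	uint64_t curr_runnable_sum;
	uint64_t prev_runnable_sum;
};

/* Per-task demand already accounted on the source CPU's windows. */
struct toy_task {
	uint64_t curr_window_cpu;
	uint64_t prev_window_cpu;
};

/* Move a task's window contribution from src to dst, as the
 * inter_cluster_migration_fixup() hits suggest: subtract on the
 * source, add on the destination, and assert no underflow. */
static void migration_fixup(struct toy_rq *src, struct toy_rq *dst,
			    const struct toy_task *t)
{
	src->curr_runnable_sum -= t->curr_window_cpu;
	src->prev_runnable_sum -= t->prev_window_cpu;
	dst->curr_runnable_sum += t->curr_window_cpu;
	dst->prev_runnable_sum += t->prev_window_cpu;

	/* Mirrors the BUG_ON((s64)... < 0) underflow checks. */
	assert((int64_t)src->curr_runnable_sum >= 0);
	assert((int64_t)src->prev_runnable_sum >= 0);
}

int main(void)
{
	struct toy_rq src = { 1000, 800 }, dst = { 200, 100 };
	struct toy_task t = { 300, 250 };

	migration_fixup(&src, &dst, &t);
	printf("src curr=%llu prev=%llu, dst curr=%llu prev=%llu\n",
	       (unsigned long long)src.curr_runnable_sum,
	       (unsigned long long)src.prev_runnable_sum,
	       (unsigned long long)dst.curr_runnable_sum,
	       (unsigned long long)dst.prev_runnable_sum);
	return 0;
}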
deadline.c
  2229 struct rq *src_rq; in pull_dl_task() local
  2245 src_rq = cpu_rq(cpu); in pull_dl_task()
  2253 src_rq->dl.earliest_dl.next)) in pull_dl_task()
  2257 double_lock_balance(this_rq, src_rq); in pull_dl_task()
  2263 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
  2266 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
  2277 WARN_ON(p == src_rq->curr); in pull_dl_task()
  2285 src_rq->curr->dl.deadline)) in pull_dl_task()
  2290 deactivate_task(src_rq, p, 0); in pull_dl_task()
  2298 double_unlock_balance(this_rq, src_rq); in pull_dl_task()
  [all...]
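These deadline.c hits trace the pull side of deadline load balancing: pull_dl_task() walks candidate CPUs, takes both runqueue locks with double_lock_balance(), re-checks that the source still has more than one deadline task, picks the earliest pushable task, and moves it with deactivate/activate. The sketch below models only the shape of that loop with invented toy types; locking and the real deadline comparison are reduced to comments and a plain integer, so it illustrates the control flow rather than the real API.

#include <stdio.h>

#define NR_CPUS 4

/* Toy runqueue: a count of queued deadline tasks plus the earliest
 * deadline among tasks that could be pushed away (0 = none). */
struct toy_rq {
	int nr_dl_running;
	unsigned long earliest_pushable_deadline;
};

static struct toy_rq toy_rqs[NR_CPUS] = {
	{ 1, 0 }, { 3, 100 }, { 2, 40 }, { 1, 0 },
};

/* Shape of pull_dl_task() as suggested by the hits above: scan the
 * other CPUs, skip sources that cannot help, and "pull" their
 * earliest pushable task onto this_cpu. */
static void toy_pull_dl_task(int this_cpu)
{
	struct toy_rq *this_rq = &toy_rqs[this_cpu];
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct toy_rq *src_rq = &toy_rqs[cpu];

		if (cpu == this_cpu)
			continue;
		/* Real code: double_lock_balance(this_rq, src_rq),
		 * then re-check the source under both locks. */
		if (src_rq->nr_dl_running <= 1)
			continue;
		if (!src_rq->earliest_pushable_deadline)
			continue;

		/* Real code: deactivate_task(src_rq, p, 0);
		 * set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); */
		src_rq->nr_dl_running--;
		this_rq->nr_dl_running++;
		printf("pulled task with deadline %lu from cpu %d to cpu %d\n",
		       src_rq->earliest_pushable_deadline, cpu, this_cpu);
		/* Real code: double_unlock_balance(this_rq, src_rq). */
	}
}

int main(void)
{
	toy_pull_dl_task(0);
	return 0;
}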
rt.c
  2333 struct rq *src_rq; in pull_rt_task() local
  2361 src_rq = cpu_rq(cpu); in pull_rt_task()
  2364 * Don't bother taking the src_rq->lock if the next highest in pull_rt_task()
  2367 * logically higher, the src_rq will push this task away. in pull_rt_task()
  2370 if (src_rq->rt.highest_prio.next >= in pull_rt_task()
  2379 double_lock_balance(this_rq, src_rq); in pull_rt_task()
  2385 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
  2392 WARN_ON(p == src_rq->curr); in pull_rt_task()
  2403 if (p->prio < src_rq->curr->prio) in pull_rt_task()
  2408 deactivate_task(src_rq, in pull_rt_task()
  [all...]
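The pull_rt_task() hits follow the same pull pattern, but the comment lines point at a lock-avoidance check worth calling out: this_rq does not bother taking src_rq->lock unless the source's next pushable priority is strictly better than anything already queued locally; if it is only logically higher, the source CPU will push the task away itself. A small stand-alone model of just that comparison follows, using toy fields where a lower number means a higher RT priority, as in the kernel.

#include <stdbool.h>
#include <stdio.h>

/* Toy view of the fields the pull_rt_task() hits reference. */
struct toy_rt_rq {
	int highest_prio_curr;	/* best priority currently queued    */
	int highest_prio_next;	/* best priority among pushable ones */
};

/* The "don't bother taking src_rq->lock" check from the comment hits:
 * only grab both locks when the source's next pushable task is
 * strictly better than everything this runqueue already has. */
static bool worth_locking_src(const struct toy_rt_rq *src,
			      const struct toy_rt_rq *this_rq)
{
	return src->highest_prio_next < this_rq->highest_prio_curr;
}

int main(void)
{
	struct toy_rt_rq this_rq = { .highest_prio_curr = 50 };
	struct toy_rt_rq busy_src = { .highest_prio_curr = 10,
				      .highest_prio_next = 20 };
	struct toy_rt_rq idle_src = { .highest_prio_curr = 90,
				      .highest_prio_next = 95 };

	printf("busy_src worth locking: %d\n",
	       worth_locking_src(&busy_src, &this_rq));	/* 1: 20 < 50 */
	printf("idle_src worth locking: %d\n",
	       worth_locking_src(&idle_src, &this_rq));	/* 0: 95 >= 50 */
	return 0;
}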
fair.c
  7867 struct rq *src_rq; member
  7898 lockdep_assert_held(&env->src_rq->lock); in task_hot()
  7923 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
  7954 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
  7996 lockdep_assert_held(&env->src_rq->lock); in can_migrate_task()
  8052 if (task_running(env->src_rq, p)) { in can_migrate_task()
  8085 lockdep_assert_held(&env->src_rq->lock); in detach_task()
  8087 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
  8089 double_lock_balance(env->src_rq, en in detach_task()
  [all...]
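In fair.c the hits cluster around the load-balance environment, whose src_rq member points at the busiest runqueue: task_hot() compares rq_clock_task(src_rq) with the task's exec_start, can_migrate_task() rejects tasks still running on the source, and detach_task() dequeues from src_rq before the task is attached to the destination. The sketch below is a user-space model of that env/can-migrate/detach trio with invented fields (hot_ns, last_ran_ns) standing in for the real accounting; it is not the fair-class code itself.

#include <stdbool.h>
#include <stdio.h>

/* Toy task and runqueue, only with the fields the fair.c hits touch. */
struct toy_task {
	bool on_cpu;			/* currently running on its rq?   */
	unsigned long last_ran_ns;	/* stand-in for se.exec_start     */
};

struct toy_rq {
	int nr_running;
	unsigned long clock_ns;		/* stand-in for rq_clock_task(rq) */
};

/* Mirrors the "struct rq *src_rq; member" hit: the balance
 * environment carries the source and destination runqueues. */
struct toy_lb_env {
	struct toy_rq *src_rq;
	struct toy_rq *dst_rq;
	unsigned long hot_ns;		/* cache-hot threshold (made up)  */
};

static bool toy_task_hot(struct toy_lb_env *env, struct toy_task *p)
{
	/* delta = rq_clock_task(env->src_rq) - p->se.exec_start */
	return env->src_rq->clock_ns - p->last_ran_ns < env->hot_ns;
}

static bool toy_can_migrate_task(struct toy_lb_env *env, struct toy_task *p)
{
	if (p->on_cpu)			/* task_running(env->src_rq, p)   */
		return false;
	return !toy_task_hot(env, p);
}

static void toy_detach_task(struct toy_lb_env *env, struct toy_task *p)
{
	(void)p;
	/* deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK) + set_task_cpu */
	env->src_rq->nr_running--;
	env->dst_rq->nr_running++;
}

int main(void)
{
	struct toy_rq src = { .nr_running = 3, .clock_ns = 10000 };
	struct toy_rq dst = { .nr_running = 1 };
	struct toy_lb_env env = { .src_rq = &src, .dst_rq = &dst,
				  .hot_ns = 500 };
	struct toy_task cold = { .on_cpu = false, .last_ran_ns = 2000 };

	if (toy_can_migrate_task(&env, &cold))
		toy_detach_task(&env, &cold);
	printf("src=%d dst=%d\n", src.nr_running, dst.nr_running);
	return 0;
}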
core.c
  2108 struct rq *src_rq, *dst_rq; in __migrate_swap_task() local
  2111 src_rq = task_rq(p); in __migrate_swap_task()
  2114 rq_pin_lock(src_rq, &srf); in __migrate_swap_task()
  2117 deactivate_task(src_rq, p, 0); in __migrate_swap_task()
  2123 rq_unpin_lock(src_rq, &srf); in __migrate_swap_task()
  2143 struct rq *src_rq, *dst_rq; in migrate_swap_stop() local
  2149 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
  2154 double_rq_lock(src_rq, dst_rq); in migrate_swap_stop()
  2174 double_rq_unlock(src_rq, dst_rq); in migrate_swap_stop()
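The core.c hits belong to the task-swap path: migrate_swap_stop() takes both runqueue locks with double_rq_lock() (always in a fixed order so two concurrent swappers cannot deadlock), and __migrate_swap_task() then deactivates each task on its source, updates its CPU, and activates it on the destination. The following is a pthread-based model of only the ordered double lock and the symmetric move; the types and counters are invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* Toy runqueue guarded by its own lock, as each struct rq is. */
struct toy_rq {
	pthread_mutex_t lock;
	int cpu;
	int nr_running;
};

/* Model of double_rq_lock(): lock the lower-indexed rq first so two
 * concurrent swappers cannot deadlock on each other (ABBA). */
static void toy_double_rq_lock(struct toy_rq *a, struct toy_rq *b)
{
	if (a->cpu > b->cpu) {
		struct toy_rq *tmp = a; a = b; b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)
		pthread_mutex_lock(&b->lock);
}

static void toy_double_rq_unlock(struct toy_rq *a, struct toy_rq *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

/* Shape of migrate_swap_stop(): both queues locked, then each task is
 * "deactivated" on its source and "activated" on the other CPU. */
static void toy_migrate_swap(struct toy_rq *src_rq, struct toy_rq *dst_rq)
{
	toy_double_rq_lock(src_rq, dst_rq);
	src_rq->nr_running--;	/* deactivate_task(src_rq, p, 0) */
	dst_rq->nr_running++;	/* activate_task(dst_rq, p, 0)   */
	dst_rq->nr_running--;	/* and the symmetric move back   */
	src_rq->nr_running++;
	toy_double_rq_unlock(src_rq, dst_rq);
}

int main(void)
{
	struct toy_rq rq0 = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };
	struct toy_rq rq1 = { PTHREAD_MUTEX_INITIALIZER, 1, 3 };

	toy_migrate_swap(&rq1, &rq0);
	printf("rq0=%d rq1=%d\n", rq0.nr_running, rq1.nr_running);
	return 0;
}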
/kernel/linux/linux-6.6/kernel/sched/
walt.c
  440 struct rq *src_rq = cpu_rq(task_cpu); in inter_cluster_migration_fixup() local
  451 src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
  452 src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu]; in inter_cluster_migration_fixup()
  458 src_rq->nt_curr_runnable_sum -= in inter_cluster_migration_fixup()
  460 src_rq->nt_prev_runnable_sum -= in inter_cluster_migration_fixup()
  468 src_rq->window_start, new_task); in inter_cluster_migration_fixup()
  470 BUG_ON((s64)src_rq->prev_runnable_sum < 0); in inter_cluster_migration_fixup()
  471 BUG_ON((s64)src_rq->curr_runnable_sum < 0); in inter_cluster_migration_fixup()
  472 BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0); in inter_cluster_migration_fixup()
  473 BUG_ON((s64)src_rq in inter_cluster_migration_fixup()
  478 struct rq *src_rq = task_rq(p); in fixup_busy_time() local
  [all...]
deadline.c
  2381 struct rq *src_rq; in pull_dl_task() local
  2397 src_rq = cpu_rq(cpu); in pull_dl_task()
  2405 src_rq->dl.earliest_dl.next)) in pull_dl_task()
  2410 double_lock_balance(this_rq, src_rq); in pull_dl_task()
  2416 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
  2419 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
  2428 WARN_ON(p == src_rq->curr); in pull_dl_task()
  2436 src_rq->curr->dl.deadline)) in pull_dl_task()
  2440 push_task = get_push_task(src_rq); in pull_dl_task()
  2442 deactivate_task(src_rq, in pull_dl_task()
  [all...]
rt.c
  2556 struct rq *src_rq; in pull_rt_task() local
  2584 src_rq = cpu_rq(cpu); in pull_rt_task()
  2587 * Don't bother taking the src_rq->lock if the next highest in pull_rt_task()
  2590 * logically higher, the src_rq will push this task away. in pull_rt_task()
  2593 if (src_rq->rt.highest_prio.next >= in pull_rt_task()
  2603 double_lock_balance(this_rq, src_rq); in pull_rt_task()
  2609 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
  2616 WARN_ON(p == src_rq->curr); in pull_rt_task()
  2627 if (p->prio < src_rq->curr->prio) in pull_rt_task()
  2631 push_task = get_push_task(src_rq); in pull_rt_task()
  [all...]
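Compared with the 5.10 listings, both the 6.6 deadline.c and rt.c pull paths gain a get_push_task(src_rq) hit: when the best candidate cannot be pulled directly (in the kernel, because it currently has migration disabled), the puller instead takes the source CPU's running task and asks that CPU to run the push work itself, deferred to a stopper thread. The sketch below models only that decision with invented types and a boolean standing in for the migration-disabled state; it is not the kernel helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_task {
	const char *name;
	bool migration_disabled;
};

struct toy_rq {
	struct toy_task *curr;		/* task running on this CPU     */
	struct toy_task *pushable;	/* best candidate for migration */
	bool push_busy;			/* a push request is in flight  */
};

/* Toy version of the get_push_task(src_rq) idea visible in the 6.6
 * hits: if the candidate cannot be pulled, hand back the source's
 * current task so that CPU can push work away itself. */
static struct toy_task *toy_get_push_task(struct toy_rq *src_rq)
{
	if (src_rq->push_busy)
		return NULL;
	src_rq->push_busy = true;
	return src_rq->curr;
}

static void toy_pull_or_push(struct toy_rq *this_rq, struct toy_rq *src_rq)
{
	struct toy_task *p = src_rq->pushable;

	(void)this_rq;	/* would receive the pulled task */
	if (p && !p->migration_disabled) {
		printf("pull %s directly\n", p->name);	/* deactivate/activate */
		return;
	}
	p = toy_get_push_task(src_rq);
	if (p)		/* real code defers this to a stopper thread */
		printf("ask %s's CPU to push work away\n", p->name);
}

int main(void)
{
	struct toy_task runner = { "runner", false };
	struct toy_task pinned = { "pinned", true };
	struct toy_rq src = { .curr = &runner, .pushable = &pinned };
	struct toy_rq dst = { .curr = NULL };

	toy_pull_or_push(&dst, &src);
	return 0;
}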
fair.c
  8992 struct rq *src_rq; member
  9023 lockdep_assert_rq_held(env->src_rq); in task_hot()
  9055 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
  9086 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
  9128 lockdep_assert_rq_held(env->src_rq); in can_migrate_task()
  9187 if (task_on_cpu(env->src_rq, p)) { in can_migrate_task()
  9224 lockdep_assert_rq_held(env->src_rq); in detach_task()
  9226 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
  9228 double_lock_balance(env->src_rq, en in detach_task()
  [all...]
core.c
  3485 struct rq *src_rq, *dst_rq; in __migrate_swap_task() local
  3488 src_rq = task_rq(p); in __migrate_swap_task()
  3491 rq_pin_lock(src_rq, &srf); in __migrate_swap_task()
  3494 deactivate_task(src_rq, p, 0); in __migrate_swap_task()
  3500 rq_unpin_lock(src_rq, &srf); in __migrate_swap_task()
  3520 struct rq *src_rq, *dst_rq; in migrate_swap_stop() local
  3525 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
  3529 guard(double_rq_lock)(src_rq, dst_rq); in migrate_swap_stop()
  12427 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
  12459 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq, in __sched_mm_cid_migrate_from_fetch_cid() argument
  12500 __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq, struct task_struct *t, struct mm_cid *src_pcpu_cid, int src_cid) in __sched_mm_cid_migrate_from_try_steal_cid() argument
  12569 struct rq *src_rq; in sched_mm_cid_migrate_to() local
  [all...]
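The 6.6 core.c listing differs from 5.10 in two ways: migrate_swap_stop() now takes both locks with guard(double_rq_lock)(src_rq, dst_rq), a scope-based guard from the kernel's cleanup helpers that drops both runqueue locks automatically when the function is left, and a group of sched_mm_cid_migrate_* hits appears from the per-mm concurrency-ID migration code added in the 6.x series. The sketch below models only the guard idea in user space with the GCC/Clang cleanup attribute; the macro and type names are made up and are not the kernel's guard() implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq_lock_b = PTHREAD_MUTEX_INITIALIZER;

/* A pair of runqueue locks held together, like double_rq_lock(). */
struct toy_double_guard {
	pthread_mutex_t *first, *second;
};

static void toy_double_unlock(struct toy_double_guard *g)
{
	/* Runs automatically when the guarded scope is left. */
	pthread_mutex_unlock(g->second);
	pthread_mutex_unlock(g->first);
	printf("both rq locks released\n");
}

/* User-space stand-in for guard(double_rq_lock)(src_rq, dst_rq):
 * the cleanup attribute plays the role of the kernel's guard() macro. */
#define TOY_DOUBLE_GUARD(name, a, b)					\
	struct toy_double_guard name					\
		__attribute__((cleanup(toy_double_unlock))) = { a, b };	\
	pthread_mutex_lock((name).first);				\
	pthread_mutex_lock((name).second)

static void toy_migrate_swap_stop(void)
{
	TOY_DOUBLE_GUARD(g, &rq_lock_a, &rq_lock_b);

	printf("both rq locks held, swapping tasks\n");
	/* No explicit unlock: the guard drops both locks on return,
	 * which is what replacing double_rq_unlock() buys the kernel. */
}

int main(void)
{
	toy_migrate_swap_stop();
	return 0;
}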