/kernel/linux/linux-5.10/kernel/sched/
loadavg.c
     79  long calc_load_fold_active(struct rq *this_rq, long adjust)
     83          nr_active = this_rq->nr_running - adjust;
     84          nr_active += (long)this_rq->nr_uninterruptible;
     86          if (nr_active != this_rq->calc_load_active) {
     87                  delta = nr_active - this_rq->calc_load_active;
     88                  this_rq->calc_load_active = nr_active;
    252          calc_load_nohz_fold(this_rq());                 // in calc_load_nohz_start()
    266          struct rq *this_rq = this_rq();                 // in calc_load_nohz_stop()
    271          this_rq ...                                     // in calc_load_nohz_stop()
    386  calc_global_load_tick(struct rq *this_rq)
    ...
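The loadavg.c hits show the per-runqueue folding step of the load average: each CPU counts its running plus uninterruptible tasks, compares that against the value it last reported (calc_load_active), and contributes only the delta to the global sum. The following is a minimal user-space model of that pattern, assembled from the snippets above; the struct, the global counter, and the test harness are illustrative stand-ins, not kernel API.

/*
 * Model of calc_load_fold_active(): a CPU caches the active count it
 * last folded into the global sum and reports only the difference, so
 * the global total stays consistent however often CPUs report.
 */
#include <stdio.h>

struct model_rq {
	long nr_running;
	long nr_uninterruptible;
	long calc_load_active;	/* last value folded into the global sum */
};

static long calc_load_tasks;	/* stand-in for the global atomic counter */

static long fold_active(struct model_rq *rq, long adjust)
{
	long nr_active = rq->nr_running - adjust;
	long delta = 0;

	nr_active += rq->nr_uninterruptible;
	if (nr_active != rq->calc_load_active) {
		delta = nr_active - rq->calc_load_active;
		rq->calc_load_active = nr_active;
	}
	return delta;
}

int main(void)
{
	struct model_rq rq = { .nr_running = 3, .nr_uninterruptible = 1 };

	calc_load_tasks += fold_active(&rq, 0);	/* folds +4 */
	rq.nr_running = 1;
	calc_load_tasks += fold_active(&rq, 0);	/* folds -2 */
	printf("global active: %ld\n", calc_load_tasks);	/* prints 2 */
	return 0;
}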
sched.h
    146  extern void calc_global_load_tick(struct rq *this_rq);
    147  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
   1233  #define this_rq()  this_cpu_ptr(&runqueues)
   1473          rq = this_rq();
   1943          void (*task_woken)(struct rq *this_rq, struct task_struct *task);
   1961          void (*switched_from)(struct rq *this_rq, struct task_struct *task);
   1962          void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
   1963          void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
   2253  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
   2254          __releases(this_rq ...
   2298  double_lock_balance(struct rq *this_rq, struct rq *busiest)
    ...
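The sched.h hits define this_rq() as a per-CPU pointer lookup and declare double_lock_balance(), which takes two runqueue locks at once. The kernel avoids ABBA deadlock by imposing a fixed global order on the two locks (and may transiently drop this_rq's lock, which is what the "Might drop this_rq->lock" comment in deadline.c below refers to). A user-space sketch of the ordering rule only, using pthreads; all names here are illustrative.

/*
 * Sketch of the double_lock_balance() idea: when two runqueue locks
 * must be held together, acquire them in a fixed global order (lower
 * address first) so two CPUs locking the same pair from opposite ends
 * cannot deadlock. The kernel version may drop and retake this_rq's
 * lock, so callers must revalidate state; this model keeps only the
 * ordering rule.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct model_rq {
	pthread_mutex_t lock;
	int nr_running;
};

static void double_lock(struct model_rq *this_rq, struct model_rq *busiest)
{
	if ((uintptr_t)this_rq < (uintptr_t)busiest) {
		pthread_mutex_lock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
	} else {
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
	}
}

static void double_unlock(struct model_rq *a, struct model_rq *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct model_rq a = { PTHREAD_MUTEX_INITIALIZER, 2 };
	struct model_rq b = { PTHREAD_MUTEX_INITIALIZER, 0 };

	double_lock(&a, &b);	/* "pull" one task from a to b */
	a.nr_running--;
	b.nr_running++;
	double_unlock(&a, &b);
	printf("a=%d b=%d\n", a.nr_running, b.nr_running);
	return 0;
}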
rt.c
    272  static void pull_rt_task(struct rq *this_rq);
    438  static inline void pull_rt_task(struct rq *this_rq)
    597          return this_rq()->rd->span;                     // in sched_rt_period_mask()
   2299          rq = this_rq();                                 // in rto_push_irq_work_func()
   2328  static void pull_rt_task(struct rq *this_rq)
   2330          int this_cpu = this_rq->cpu, cpu;
   2334          int rt_overload_count = rt_overloaded(this_rq);
   2347              cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
   2352                  tell_cpu_to_push(this_rq);
    ...
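The pull_rt_task() hits show the cheap fast path: read an overload counter first (rt_overloaded()) and return if no peer CPU is overloaded, and only then walk rd->rto_mask taking remote locks. A small C11-atomics model of that check-then-scan pattern follows; the acquire/release pairing mirrors the barrier the kernel uses between rto_mask and the counter, but the mask layout and helper names are made up for illustration.

/*
 * Model of the pull_rt_task() fast path: a nonzero overload count is
 * published with release ordering after the mask bit, and readers load
 * it with acquire ordering, so a reader that sees the count also sees
 * the mask bit. Only then is the per-CPU scan done.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_MODEL_CPUS 4

static atomic_int rt_overload_count;
static atomic_bool rto_mask[NR_MODEL_CPUS];

static void mark_overloaded(int cpu)
{
	atomic_store_explicit(&rto_mask[cpu], true, memory_order_relaxed);
	/* publish the mask bit before the counter */
	atomic_fetch_add_explicit(&rt_overload_count, 1, memory_order_release);
}

static void pull_model(int this_cpu)
{
	if (!atomic_load_explicit(&rt_overload_count, memory_order_acquire))
		return;			/* common case: nothing to pull */

	for (int cpu = 0; cpu < NR_MODEL_CPUS; cpu++) {
		if (cpu == this_cpu ||
		    !atomic_load_explicit(&rto_mask[cpu], memory_order_relaxed))
			continue;
		printf("cpu %d would try to pull from cpu %d\n", this_cpu, cpu);
	}
}

int main(void)
{
	mark_overloaded(2);
	pull_model(0);
	return 0;
}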
fair.c
   3938  static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
   6490          avg_idle = this_rq()->avg_idle / 512;           // in select_idle_cpu()
   6616              this_rq()->nr_running <= 1 &&               // in select_idle_sibling()
  10169  static int load_balance(int this_cpu, struct rq *this_rq,
  10183                  .dst_rq         = this_rq,              // in load_balance()
  10951          SCHED_WARN_ON(rq != this_rq());                 // in nohz_balance_exit_idle()
  11046  static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
  11054          int this_cpu = this_rq->cpu;
  11134          has_blocked_load |= this_rq->has_blocked_load;
  11138          rebalance_domains(this_rq, CPU_IDL ...          // in _nohz_idle_balance()
  11158  nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  11175  nohz_newidle_balance(struct rq *this_rq)
  11210  nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  11215  nohz_newidle_balance(struct rq *this_rq)
  11227  newidle_balance(struct rq *this_rq, struct rq_flags *rf)
  11347          struct rq *this_rq = this_rq();                 // in run_rebalance_domains()
    ...
deadline.c
   2224  static void pull_dl_task(struct rq *this_rq)
   2226          int this_cpu = this_rq->cpu, cpu;
   2232          if (likely(!dl_overloaded(this_rq)))
   2241          for_each_cpu(cpu, this_rq->rd->dlo_mask) {
   2251          if (this_rq->dl.dl_nr_running &&
   2252              dl_time_before(this_rq->dl.earliest_dl.curr,
   2256          /* Might drop this_rq->lock */
   2257          double_lock_balance(this_rq, src_rq);
   2274              (!this_rq->dl.dl_nr_running ||
   2276          this_rq ...                                     // in pull_dl_task()
    ...
cputime.c
    236          struct rq *rq = this_rq();                      // in account_idle_time()
    256          steal -= this_rq()->prev_steal_time;            // in steal_account_process_time()
    259          this_rq()->prev_steal_time += steal;            // in steal_account_process_time()
    399          } else if (p == this_rq()->idle) {              // in irqtime_account_process_tick()
    508          else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))   // in account_process_tick()
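The steal_account_process_time() hits show cumulative steal-time bookkeeping: the platform reports a monotonically increasing total, and the runqueue caches prev_steal_time so only the growth since the last call is charged. A minimal model of that delta accounting; read_cumulative_steal() is a made-up stand-in for the paravirt steal clock.

/*
 * Model of the rq->prev_steal_time pattern: each call charges only the
 * increase in the cumulative counter since the previous call.
 */
#include <stdio.h>

static unsigned long long cumulative;		/* what the hypervisor reports */
static unsigned long long prev_steal_time;	/* rq->prev_steal_time stand-in */

static unsigned long long read_cumulative_steal(void)
{
	return cumulative;
}

static unsigned long long account_steal(void)
{
	unsigned long long steal = read_cumulative_steal();

	steal -= prev_steal_time;	/* only the new portion */
	prev_steal_time += steal;
	return steal;			/* amount to charge this tick */
}

int main(void)
{
	cumulative = 100;
	printf("charged %llu\n", account_steal());	/* 100 */
	cumulative = 130;
	printf("charged %llu\n", account_steal());	/* 30 */
	return 0;
}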
idle.c
     22          idle_set_state(this_rq(), idle_state);          // in sched_idle_set_state()
core.c
    410          if (rq == this_rq())                            // in hrtick_start()
   1862          struct rq *rq = this_rq();                      // in migration_cpu_stop()
   2561          rq = this_rq();                                 // in ttwu_stat()
   2699          struct rq *rq = this_rq();                      // in sched_ttwu_pending()
   3771   * past. prev == current is still correct but we need to recalculate this_rq
   3777          struct rq *rq = this_rq();
   4509          schedstat_inc(this_rq()->sched_count);          // in schedule_debug()
   6460          rq = this_rq();                                 // in yield_to()
   6923          BUG_ON(current != this_rq()->idle);             // in idle_task_exit()
walt.c
   1765          core_ctl_check(this_rq()->window_start);        // in walt_irq_work()
/kernel/linux/linux-6.6/kernel/sched/
loadavg.c
     78  long calc_load_fold_active(struct rq *this_rq, long adjust)
     82          nr_active = this_rq->nr_running - adjust;
     83          nr_active += (int)this_rq->nr_uninterruptible;
     85          if (nr_active != this_rq->calc_load_active) {
     86                  delta = nr_active - this_rq->calc_load_active;
     87                  this_rq->calc_load_active = nr_active;
    251          calc_load_nohz_fold(this_rq());                 // in calc_load_nohz_start()
    265          struct rq *this_rq = this_rq();                 // in calc_load_nohz_stop()
    270          this_rq ...                                     // in calc_load_nohz_stop()
    385  calc_global_load_tick(struct rq *this_rq)
    ...
sched.h
    164  extern void calc_global_load_tick(struct rq *this_rq);
    165  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
   1345  #define this_rq()  this_cpu_ptr(&runqueues)
   1874          rq = this_rq();
   2412          void (*task_woken)(struct rq *this_rq, struct task_struct *task);
   2431          void (*switched_from)(struct rq *this_rq, struct task_struct *task);
   2432          void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
   2433          void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
   2856  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
   2857          __releases(this_rq ...
   2861          raw_spin_rq_unlock(this_rq);
   2892          raw_spin_rq_unlock(this_rq);
   2903  double_lock_balance(struct rq *this_rq, struct rq *busiest)
    ...
fair.c
   4784  static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
   6011          if (rq == this_rq()) {                          // in __unthrottle_cfs_rq_async()
   7451          struct rq *this_rq = this_rq();                 // in select_idle_cpu()
   7471          if (unlikely(this_rq->wake_stamp < now)) {
   7472                  while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
   7473                          this_rq->wake_stamp++;
   7474                          this_rq->wake_avg_idle >>= 1;
   7478          avg_idle = this_rq ...                          // in select_idle_cpu()
  11560  load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing)
  12460  _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
  12570  nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  12605  nohz_newidle_balance(struct rq *this_rq)
  12635  nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
  12640  nohz_newidle_balance(struct rq *this_rq)
  12652  newidle_balance(struct rq *this_rq, struct rq_flags *rf)
  12783          struct rq *this_rq = this_rq();                 // in run_rebalance_domains()
    ...
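New relative to the 5.10 hits, the 6.6 select_idle_cpu() keeps a per-rq wake_avg_idle that is halved once for every jiffy elapsed since wake_stamp, so a stale idle estimate that is N jiffies old contributes only avg/2^N. A short model of the decay loop visible in hits 7471-7474; plain types stand in for the rq fields.

/*
 * Model of the wake_avg_idle decay in 6.6 select_idle_cpu(): halve the
 * cached average idle time once per elapsed jiffy, stopping early if
 * it reaches zero.
 */
#include <stdio.h>

struct model_rq {
	unsigned long wake_stamp;	/* jiffies of last update */
	unsigned long long wake_avg_idle;
};

static void decay_wake_avg_idle(struct model_rq *rq, unsigned long now)
{
	if (rq->wake_stamp < now) {
		while (rq->wake_stamp < now && rq->wake_avg_idle) {
			rq->wake_stamp++;
			rq->wake_avg_idle >>= 1;
		}
	}
}

int main(void)
{
	struct model_rq rq = { .wake_stamp = 100, .wake_avg_idle = 4096 };

	decay_wake_avg_idle(&rq, 103);	/* 3 jiffies stale: 4096 -> 512 */
	printf("avg idle now %llu\n", rq.wake_avg_idle);
	return 0;
}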
rt.c
    647          return this_rq()->rd->span;                     // in sched_rt_period_mask()
   2521          rq = this_rq();                                 // in rto_push_irq_work_func()
   2551  static void pull_rt_task(struct rq *this_rq)
   2553          int this_cpu = this_rq->cpu, cpu;
   2557          int rt_overload_count = rt_overloaded(this_rq);
   2570              cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
   2575                  tell_cpu_to_push(this_rq);
   2580          for_each_cpu(cpu, this_rq->rd->rto_mask) {
   2594          this_rq ...                                     // in pull_rt_task()
    ...
deadline.c
   2376  static void pull_dl_task(struct rq *this_rq)
   2378          int this_cpu = this_rq->cpu, cpu;
   2384          if (likely(!dl_overloaded(this_rq)))
   2393          for_each_cpu(cpu, this_rq->rd->dlo_mask) {
   2403          if (this_rq->dl.dl_nr_running &&
   2404              dl_time_before(this_rq->dl.earliest_dl.curr,
   2408          /* Might drop this_rq->lock */
   2410          double_lock_balance(this_rq, src_rq);
   2427              dl_task_is_earliest_deadline(p, this_rq)) {
   2444                  activate_task(this_rq, ...              // in pull_dl_task()
    ...
cputime.c
    240          struct rq *rq = this_rq();                      // in account_idle_time()
    275          steal -= this_rq()->prev_steal_time;            // in steal_account_process_time()
    278          this_rq()->prev_steal_time += steal;            // in steal_account_process_time()
    418          } else if (p == this_rq()->idle) {              // in irqtime_account_process_tick()
    524          else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))   // in account_process_tick()
idle.c
     19          idle_set_state(this_rq(), idle_state);          // in sched_idle_set_state()
membarrier.c
    238          struct rq *rq = this_rq();                      // in membarrier_update_current_mm()
core.c
    848          if (rq == this_rq())                            // in hrtick_start()
   2470          this_rq()->nr_pinned++;                         // in migrate_disable()
   2506          this_rq()->nr_pinned--;                         // in migrate_enable()
   2642          struct rq *rq = this_rq();                      // in migration_cpu_stop()
   2745          struct rq *lowest_rq = NULL, *rq = this_rq();   // in push_cpu_stop()
   3850          rq = this_rq();                                 // in ttwu_stat()
   3997          struct rq *rq = this_rq();                      // in sched_ttwu_pending()
   5381   * past. prev == current is still correct but we need to recalculate this_rq
   5387          struct rq *rq = this_rq();
   6143          schedstat_inc(this_rq() ...                     // in schedule_debug()
    ...
walt.c
   1766          core_ctl_check(this_rq()->window_start);        // in walt_irq_work()
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/
test_access_variable_array.c
     11  int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
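This last hit is a BPF fentry program whose typed BPF_PROG() arguments mirror the kernel function it attaches to; the argument list matches load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, ...) as listed in the fair.c hits above. A hedged sketch of what such a program looks like: the attach point and the sd->span access are assumptions inferred from the file name and argument list, not a verbatim copy of the selftest. It requires a vmlinux.h generated for the running kernel.

/*
 * Sketch of a typed fentry program shaped like the selftest hit:
 * reading sd->span[0] exercises access to a variable-length array
 * member through BTF. Assumed attach point: load_balance().
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

unsigned long span = 0;

SEC("fentry/load_balance")
int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
	     struct sched_domain *sd)
{
	/* first word of the sched domain's variable-length CPU span */
	span = sd->span[0];
	return 0;
}

char _license[] SEC("license") = "GPL";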