1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * Scheduler internal types and methods: 4 */ 5#ifndef COMMON_SDK_LINUX_KERNEL_SCHED_SCHED_H 6#define COMMON_SDK_LINUX_KERNEL_SCHED_SCHED_H 7 8#include <linux/sched.h> 9#include <linux/sched/autogroup.h> 10#include <linux/sched/clock.h> 11#include <linux/sched/coredump.h> 12#include <linux/sched/cpufreq.h> 13#include <linux/sched/cputime.h> 14#include <linux/sched/deadline.h> 15#include <linux/sched/debug.h> 16#include <linux/sched/hotplug.h> 17#include <linux/sched/idle.h> 18#include <linux/sched/init.h> 19#include <linux/sched/isolation.h> 20#include <linux/sched/jobctl.h> 21#include <linux/sched/loadavg.h> 22#include <linux/sched/mm.h> 23#include <linux/sched/nohz.h> 24#include <linux/sched/numa_balancing.h> 25#include <linux/sched/prio.h> 26#include <linux/sched/rt.h> 27#include <linux/sched/signal.h> 28#include <linux/sched/smt.h> 29#include <linux/sched/stat.h> 30#include <linux/sched/sysctl.h> 31#include <linux/sched/task.h> 32#include <linux/sched/task_stack.h> 33#include <linux/sched/topology.h> 34#include <linux/sched/user.h> 35#include <linux/sched/wake_q.h> 36#include <linux/sched/xacct.h> 37 38#include <uapi/linux/sched/types.h> 39 40#include <linux/binfmts.h> 41#include <linux/blkdev.h> 42#include <linux/compat.h> 43#include <linux/context_tracking.h> 44#include <linux/cpufreq.h> 45#include <linux/cpuidle.h> 46#include <linux/cpuset.h> 47#include <linux/ctype.h> 48#include <linux/debugfs.h> 49#include <linux/delayacct.h> 50#include <linux/energy_model.h> 51#include <linux/init_task.h> 52#include <linux/kprobes.h> 53#include <linux/kthread.h> 54#include <linux/membarrier.h> 55#include <linux/migrate.h> 56#include <linux/mmu_context.h> 57#include <linux/nmi.h> 58#include <linux/proc_fs.h> 59#include <linux/prefetch.h> 60#include <linux/profile.h> 61#include <linux/psi.h> 62#include <linux/rcupdate_wait.h> 63#include <linux/security.h> 64#include <linux/stop_machine.h> 65#include <linux/suspend.h> 66#include <linux/swait.h> 67#include <linux/syscalls.h> 68#include <linux/task_work.h> 69#include <linux/tsacct_kern.h> 70 71#include <asm/tlb.h> 72#include <asm-generic/vmlinux.lds.h> 73 74#ifdef CONFIG_PARAVIRT 75#include <asm/paravirt.h> 76#endif 77 78#include "cpupri.h" 79#include "cpudeadline.h" 80 81#include <trace/events/sched.h> 82 83#ifdef CONFIG_SCHED_DEBUG 84#define SCHED_WARN_ON(x) (WARN_ONCE(x, #x)) 85#else 86#define SCHED_WARN_ON(x) ( { \ 87 (void)(x), 0; }) 88#endif 89 90struct rq; 91struct cpuidle_state; 92 93#ifdef CONFIG_SCHED_RT_CAS 94extern unsigned long uclamp_task_util(struct task_struct *p); 95#endif 96 97#ifdef CONFIG_SCHED_WALT 98extern unsigned int sched_ravg_window; 99extern unsigned int walt_cpu_util_freq_divisor; 100 101struct walt_sched_stats { 102 u64 cumulative_runnable_avg_scaled; 103}; 104 105struct load_subtractions { 106 u64 window_start; 107 u64 subs; 108 u64 new_subs; 109}; 110 111#define NUM_TRACKED_WINDOWS 2 112 113struct sched_cluster { 114 raw_spinlock_t load_lock; 115 struct list_head list; 116 struct cpumask cpus; 117 int id; 118 int max_power_cost; 119 int min_power_cost; 120 int max_possible_capacity; 121 int capacity; 122 int efficiency; /* Differentiate cpus with different IPC capability */ 123 int load_scale_factor; 124 unsigned int exec_scale_factor; 125 /* 126 * max_freq = user maximum 127 * max_possible_freq = maximum supported by hardware 128 */ 129 unsigned int cur_freq, max_freq, min_freq; 130 unsigned int max_possible_freq; 131 bool freq_init_done; 132}; 133 134extern unsigned int sched_disable_window_stats; 135#endif /* CONFIG_SCHED_WALT */ 136 137/* task_struct::on_rq states: */ 138#define TASK_ON_RQ_QUEUED 1 139#define TASK_ON_RQ_MIGRATING 2 140 141extern __read_mostly int scheduler_running; 142 143extern unsigned long calc_load_update; 144extern atomic_long_t calc_load_tasks; 145 146extern const u64 max_cfs_quota_period; 147 148extern void calc_global_load_tick(struct rq *this_rq); 149extern long calc_load_fold_active(struct rq *this_rq, long adjust); 150 151#ifdef CONFIG_SMP 152extern void init_sched_groups_capacity(int cpu, struct sched_domain *sd); 153#endif 154 155extern void call_trace_sched_update_nr_running(struct rq *rq, int count); 156/* 157 * Helpers for converting nanosecond timing to jiffy resolution 158 */ 159#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 160#ifdef CONFIG_SCHED_LATENCY_NICE 161#define MAX_LATENCY_NICE 19 162#define MIN_LATENCY_NICE -20 163#define LATENCY_NICE_WIDTH \ 164 (MAX_LATENCY_NICE - MIN_LATENCY_NICE + 1) 165#define DEFAULT_LATENCY_NICE 0 166#define DEFAULT_LATENCY_PRIO (DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2) 167#define NICE_TO_LATENCY(nice) ((nice) + DEFAULT_LATENCY_PRIO) 168#define LATENCY_TO_NICE(prio) ((prio) - DEFAULT_LATENCY_PRIO) 169#define NICE_LATENCY_SHIFT (SCHED_FIXEDPOINT_SHIFT) 170#define NICE_LATENCY_WEIGHT_MAX (1L << NICE_LATENCY_SHIFT) 171#endif /* CONFIG_SCHED_LATENCY_NICE */ 172 173/* 174 * Increase resolution of nice-level calculations for 64-bit architectures. 175 * The extra resolution improves shares distribution and load balancing of 176 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 177 * hierarchies, especially on larger systems. This is not a user-visible change 178 * and does not change the user-interface for setting shares/weights. 179 * 180 * We increase resolution only if we have enough bits to allow this increased 181 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 182 * are pretty high and the returns do not justify the increased costs. 183 * 184 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 185 * increase coverage and consistency always enable it on 64-bit platforms. 186 */ 187#ifdef CONFIG_64BIT 188#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 189#define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 190#define scale_load_down(w) \ 191 ( { \ 192 unsigned long __w = (w); \ 193 if (__w) \ 194 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ 195 __w; \ 196 }) 197#else 198#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 199#define scale_load(w) (w) 200#define scale_load_down(w) (w) 201#endif 202 203/* 204 * Task weight (visible to users) and its load (invisible to users) have 205 * independent resolution, but they should be well calibrated. We use 206 * scale_load() and scale_load_down(w) to convert between them. The 207 * following must be true: 208 * 209 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD 210 * 211 */ 212#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 213#define CPU_FREQ_1K 1024 214#define CPU_SAMPLE_ARTE 8 215 216extern struct cpufreq_governor schedutil_gov; 217 218/* 219 * Single value that decides SCHED_DEADLINE internal math precision. 220 * 10 -> just above 1us 221 * 9 -> just above 0.5us 222 */ 223#define DL_SCALE 10 224 225/* 226 * Single value that denotes runtime == period, ie unlimited time. 227 */ 228#define RUNTIME_INF ((u64)~0ULL) 229 230static inline int idle_policy(int policy) 231{ 232 return policy == SCHED_IDLE; 233} 234static inline int fair_policy(int policy) 235{ 236 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 237} 238 239static inline int rt_policy(int policy) 240{ 241 return policy == SCHED_FIFO || policy == SCHED_RR; 242} 243 244static inline int dl_policy(int policy) 245{ 246 return policy == SCHED_DEADLINE; 247} 248static inline bool valid_policy(int policy) 249{ 250 return idle_policy(policy) || fair_policy(policy) || rt_policy(policy) || dl_policy(policy); 251} 252 253static inline int task_has_idle_policy(struct task_struct *p) 254{ 255 return idle_policy(p->policy); 256} 257 258static inline int task_has_rt_policy(struct task_struct *p) 259{ 260 return rt_policy(p->policy); 261} 262 263static inline int task_has_dl_policy(struct task_struct *p) 264{ 265 return dl_policy(p->policy); 266} 267 268#define cap_scale(v, s) (((v) * (s)) >> SCHED_CAPACITY_SHIFT) 269 270static inline void update_avg(u64 *avg, u64 sample) 271{ 272 s64 diff = sample - *avg; 273 *avg += diff / CPU_SAMPLE_ARTE; 274} 275 276/* 277 * Shifting a value by an exponent greater *or equal* to the size of said value 278 * is UB; cap at size-1. 279 */ 280#define shr_bound(val, shift) ((val) >> min_t(typeof(shift), (shift), BITS_PER_TYPE(typeof(val)) - 1)) 281 282/* 283 * !! For sched_setattr_nocheck() (kernel) only !! 284 * 285 * This is actually gross. :( 286 * 287 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 288 * tasks, but still be able to sleep. We need this on platforms that cannot 289 * atomically change clock frequency. Remove once fast switching will be 290 * available on such platforms. 291 * 292 * SUGOV stands for SchedUtil GOVernor. 293 */ 294#define SCHED_FLAG_SUGOV 0x10000000 295 296#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV) 297 298static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) 299{ 300#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 301 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 302#else 303 return false; 304#endif 305} 306 307/* 308 * Tells if entity @a should preempt entity @b. 309 */ 310static inline bool dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b) 311{ 312 return dl_entity_is_special(a) || dl_time_before(a->deadline, b->deadline); 313} 314 315/* 316 * This is the priority-queue data structure of the RT scheduling class: 317 */ 318struct rt_prio_array { 319 DECLARE_BITMAP(bitmap, MAX_RT_PRIO + 1); /* include 1 bit for delimiter */ 320 struct list_head queue[MAX_RT_PRIO]; 321}; 322 323struct rt_bandwidth { 324 /* nests inside the rq lock: */ 325 raw_spinlock_t rt_runtime_lock; 326 ktime_t rt_period; 327 u64 rt_runtime; 328 struct hrtimer rt_period_timer; 329 unsigned int rt_period_active; 330}; 331 332void __dl_clear_params(struct task_struct *p); 333 334struct dl_bandwidth { 335 raw_spinlock_t dl_runtime_lock; 336 u64 dl_runtime; 337 u64 dl_period; 338}; 339 340static inline int dl_bandwidth_enabled(void) 341{ 342 return sysctl_sched_rt_runtime >= 0; 343} 344 345/* 346 * To keep the bandwidth of -deadline tasks under control 347 * we need some place where: 348 * - store the maximum -deadline bandwidth of each cpu; 349 * - cache the fraction of bandwidth that is currently allocated in 350 * each root domain; 351 * 352 * This is all done in the data structure below. It is similar to the 353 * one used for RT-throttling (rt_bandwidth), with the main difference 354 * that, since here we are only interested in admission control, we 355 * do not decrease any runtime while the group "executes", neither we 356 * need a timer to replenish it. 357 * 358 * With respect to SMP, bandwidth is given on a per root domain basis, 359 * meaning that: 360 * - bw (< 100%) is the deadline bandwidth of each CPU; 361 * - total_bw is the currently allocated bandwidth in each root domain; 362 */ 363struct dl_bw { 364 raw_spinlock_t lock; 365 u64 bw; 366 u64 total_bw; 367}; 368 369static inline void __dl_update(struct dl_bw *dl_b, s64 bw); 370 371static inline void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 372{ 373 dl_b->total_bw -= tsk_bw; 374 __dl_update(dl_b, (s32)tsk_bw / cpus); 375} 376 377static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 378{ 379 dl_b->total_bw += tsk_bw; 380 __dl_update(dl_b, -((s32)tsk_bw / cpus)); 381} 382 383static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw) 384{ 385 return (dl_b->bw != -1) && (cap_scale(dl_b->bw, cap) < (dl_b->total_bw - old_bw + new_bw)); 386} 387 388/* 389 * Verify the fitness of task @p to run on @cpu taking into account the 390 * CPU original capacity and the runtime/deadline ratio of the task. 391 * 392 * The function will return true if the CPU original capacity of the 393 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the 394 * task and false otherwise. 395 */ 396static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 397{ 398 unsigned long cap = arch_scale_cpu_capacity(cpu); 399 400 return ((cap_scale(p->dl.dl_deadline, cap)) >= (p->dl.dl_runtime)); 401} 402 403extern void init_dl_bw(struct dl_bw *dl_b); 404extern int sched_dl_global_validate(void); 405extern void sched_dl_do_global(void); 406extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 407extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 408extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 409extern bool __checkparam_dl(const struct sched_attr *attr); 410extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 411extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 412extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 413extern int dl_cpu_busy(int cpu, struct task_struct *p); 414 415#ifdef CONFIG_CGROUP_SCHED 416 417#include <linux/cgroup.h> 418#include <linux/psi.h> 419 420struct cfs_rq; 421struct rt_rq; 422 423extern struct list_head task_groups; 424 425struct cfs_bandwidth { 426#ifdef CONFIG_CFS_BANDWIDTH 427 raw_spinlock_t lock; 428 ktime_t period; 429 u64 quota; 430 u64 runtime; 431 s64 hierarchical_quota; 432 433 u8 idle; 434 u8 period_active; 435 u8 slack_started; 436 struct hrtimer period_timer; 437 struct hrtimer slack_timer; 438 struct list_head throttled_cfs_rq; 439 440 /* Statistics: */ 441 int nr_periods; 442 int nr_throttled; 443 u64 throttled_time; 444#endif 445}; 446 447/* Task group related information */ 448struct task_group { 449 struct cgroup_subsys_state css; 450 451#ifdef CONFIG_FAIR_GROUP_SCHED 452 /* schedulable entities of this group on each CPU */ 453 struct sched_entity **se; 454 /* runqueue "owned" by this group on each CPU */ 455 struct cfs_rq **cfs_rq; 456 unsigned long shares; 457 458#ifdef CONFIG_SMP 459 /* 460 * load_avg can be heavily contended at clock tick time, so put 461 * it in its own cacheline separated from the fields above which 462 * will also be accessed at each tick. 463 */ 464 atomic_long_t load_avg ____cacheline_aligned; 465#endif 466#endif 467 468#ifdef CONFIG_RT_GROUP_SCHED 469 struct sched_rt_entity **rt_se; 470 struct rt_rq **rt_rq; 471 472 struct rt_bandwidth rt_bandwidth; 473#endif 474 475 struct rcu_head rcu; 476 struct list_head list; 477 478 struct task_group *parent; 479 struct list_head siblings; 480 struct list_head children; 481 482#ifdef CONFIG_SCHED_AUTOGROUP 483 struct autogroup *autogroup; 484#endif 485 486 struct cfs_bandwidth cfs_bandwidth; 487 488#ifdef CONFIG_UCLAMP_TASK_GROUP 489 /* The two decimal precision [%] value requested from user-space */ 490 unsigned int uclamp_pct[UCLAMP_CNT]; 491 /* Clamp values requested for a task group */ 492 struct uclamp_se uclamp_req[UCLAMP_CNT]; 493 /* Effective clamp values used for a task group */ 494 struct uclamp_se uclamp[UCLAMP_CNT]; 495#endif 496 497#ifdef CONFIG_SCHED_RTG_CGROUP 498 /* 499 * Controls whether tasks of this cgroup should be colocated with each 500 * other and tasks of other cgroups that have the same flag turned on. 501 */ 502 bool colocate; 503 504 /* Controls whether further updates are allowed to the colocate flag */ 505 bool colocate_update_disabled; 506#endif 507}; 508 509#ifdef CONFIG_FAIR_GROUP_SCHED 510#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 511 512/* 513 * A weight of 0 or 1 can cause arithmetics problems. 514 * A weight of a cfs_rq is the sum of weights of which entities 515 * are queued on this cfs_rq, so a weight of a entity should not be 516 * too large, so as the shares value of a task group. 517 * (The default weight is 1024 - so there's no practical 518 * limitation from this.) 519 */ 520#define MIN_SHARES (1UL << 1) 521#define MAX_SHARES (1UL << 18) 522#endif 523 524typedef int (*tg_visitor)(struct task_group *, void *); 525 526extern int walk_tg_tree_from(struct task_group *from, tg_visitor down, tg_visitor up, void *data); 527 528/* 529 * Iterate the full tree, calling @down when first entering a node and @up when 530 * leaving it for the final time. 531 * 532 * Caller must hold rcu_lock or sufficient equivalent. 533 */ 534static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 535{ 536 return walk_tg_tree_from(&root_task_group, down, up, data); 537} 538 539extern int tg_nop(struct task_group *tg, void *data); 540 541extern void free_fair_sched_group(struct task_group *tg); 542extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 543extern void online_fair_sched_group(struct task_group *tg); 544extern void unregister_fair_sched_group(struct task_group *tg); 545extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, 546 struct sched_entity *parent); 547extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 548 549extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 550extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 551extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 552 553extern void free_rt_sched_group(struct task_group *tg); 554extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 555extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu, 556 struct sched_rt_entity *parent); 557extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 558extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 559extern long sched_group_rt_runtime(struct task_group *tg); 560extern long sched_group_rt_period(struct task_group *tg); 561extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 562 563extern struct task_group *sched_create_group(struct task_group *parent); 564extern void sched_online_group(struct task_group *tg, struct task_group *parent); 565extern void sched_destroy_group(struct task_group *tg); 566extern void sched_offline_group(struct task_group *tg); 567 568extern void sched_move_task(struct task_struct *tsk); 569 570#ifdef CONFIG_FAIR_GROUP_SCHED 571extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 572 573#ifdef CONFIG_SMP 574extern void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next); 575#else /* !CONFIG_SMP */ 576static inline void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) 577{ 578} 579#endif /* CONFIG_SMP */ 580#endif /* CONFIG_FAIR_GROUP_SCHED */ 581 582#else /* CONFIG_CGROUP_SCHED */ 583 584struct cfs_bandwidth { 585}; 586 587#endif /* CONFIG_CGROUP_SCHED */ 588 589/* CFS-related fields in a runqueue */ 590struct cfs_rq { 591 struct load_weight load; 592 unsigned int nr_running; 593 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 594 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 595 596 u64 exec_clock; 597 u64 min_vruntime; 598#ifndef CONFIG_64BIT 599 u64 min_vruntime_copy; 600#endif 601 602 struct rb_root_cached tasks_timeline; 603 604 /* 605 * 'curr' points to currently running entity on this cfs_rq. 606 * It is set to NULL otherwise (i.e when none are currently running). 607 */ 608 struct sched_entity *curr; 609 struct sched_entity *next; 610 struct sched_entity *last; 611 struct sched_entity *skip; 612 613#ifdef CONFIG_SCHED_DEBUG 614 unsigned int nr_spread_over; 615#endif 616 617#ifdef CONFIG_SMP 618 /* 619 * CFS load tracking 620 */ 621 struct sched_avg avg; 622#ifndef CONFIG_64BIT 623 u64 load_last_update_time_copy; 624#endif 625 struct { 626 raw_spinlock_t lock ____cacheline_aligned; 627 int nr; 628 unsigned long load_avg; 629 unsigned long util_avg; 630 unsigned long runnable_avg; 631 } removed; 632 633#ifdef CONFIG_FAIR_GROUP_SCHED 634 unsigned long tg_load_avg_contrib; 635 long propagate; 636 long prop_runnable_sum; 637 638 /* 639 * h_load = weight * f(tg) 640 * 641 * Where f(tg) is the recursive weight fraction assigned to 642 * this group. 643 */ 644 unsigned long h_load; 645 u64 last_h_load_update; 646 struct sched_entity *h_load_next; 647#endif /* CONFIG_FAIR_GROUP_SCHED */ 648#endif /* CONFIG_SMP */ 649 650#ifdef CONFIG_FAIR_GROUP_SCHED 651 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 652 653 /* 654 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 655 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 656 * (like users, containers etc.) 657 * 658 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 659 * This list is used during load balance. 660 */ 661 int on_list; 662 struct list_head leaf_cfs_rq_list; 663 struct task_group *tg; /* group that "owns" this runqueue */ 664 665#ifdef CONFIG_SCHED_WALT 666 struct walt_sched_stats walt_stats; 667#endif 668 669#ifdef CONFIG_CFS_BANDWIDTH 670 int runtime_enabled; 671 s64 runtime_remaining; 672 673 u64 throttled_clock; 674 u64 throttled_clock_pelt; 675 u64 throttled_clock_pelt_time; 676 int throttled; 677 int throttle_count; 678 struct list_head throttled_list; 679#ifdef CONFIG_SCHED_WALT 680 u64 cumulative_runnable_avg; 681#endif 682#endif /* CONFIG_CFS_BANDWIDTH */ 683#endif /* CONFIG_FAIR_GROUP_SCHED */ 684}; 685 686static inline int rt_bandwidth_enabled(void) 687{ 688 return sysctl_sched_rt_runtime >= 0; 689} 690 691/* RT IPI pull logic requires IRQ_WORK */ 692#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 693#define HAVE_RT_PUSH_IPI 694#endif 695 696/* Real-Time classes' related field in a runqueue: */ 697struct rt_rq { 698 struct rt_prio_array active; 699 unsigned int rt_nr_running; 700 unsigned int rr_nr_running; 701#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 702 struct { 703 int curr; /* highest queued rt task prio */ 704#ifdef CONFIG_SMP 705 int next; /* next highest */ 706#endif 707 } highest_prio; 708#endif 709#ifdef CONFIG_SMP 710 unsigned long rt_nr_migratory; 711 unsigned long rt_nr_total; 712 int overloaded; 713 struct plist_head pushable_tasks; 714 715#endif /* CONFIG_SMP */ 716 int rt_queued; 717 718 int rt_throttled; 719 u64 rt_time; 720 u64 rt_runtime; 721 /* Nests inside the rq lock: */ 722 raw_spinlock_t rt_runtime_lock; 723 724#ifdef CONFIG_RT_GROUP_SCHED 725 unsigned long rt_nr_boosted; 726 727 struct rq *rq; 728 struct task_group *tg; 729#endif 730}; 731 732static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 733{ 734 return rt_rq->rt_queued && rt_rq->rt_nr_running; 735} 736 737/* Deadline class' related fields in a runqueue */ 738struct dl_rq { 739 /* runqueue is an rbtree, ordered by deadline */ 740 struct rb_root_cached root; 741 742 unsigned long dl_nr_running; 743 744#ifdef CONFIG_SMP 745 /* 746 * Deadline values of the currently executing and the 747 * earliest ready task on this rq. Caching these facilitates 748 * the decision whether or not a ready but not running task 749 * should migrate somewhere else. 750 */ 751 struct { 752 u64 curr; 753 u64 next; 754 } earliest_dl; 755 756 unsigned long dl_nr_migratory; 757 int overloaded; 758 759 /* 760 * Tasks on this rq that can be pushed away. They are kept in 761 * an rb-tree, ordered by tasks' deadlines, with caching 762 * of the leftmost (earliest deadline) element. 763 */ 764 struct rb_root_cached pushable_dl_tasks_root; 765#else 766 struct dl_bw dl_bw; 767#endif 768 /* 769 * "Active utilization" for this runqueue: increased when a 770 * task wakes up (becomes TASK_RUNNING) and decreased when a 771 * task blocks 772 */ 773 u64 running_bw; 774 775 /* 776 * Utilization of the tasks "assigned" to this runqueue (including 777 * the tasks that are in runqueue and the tasks that executed on this 778 * CPU and blocked). Increased when a task moves to this runqueue, and 779 * decreased when the task moves away (migrates, changes scheduling 780 * policy, or terminates). 781 * This is needed to compute the "inactive utilization" for the 782 * runqueue (inactive utilization = this_bw - running_bw). 783 */ 784 u64 this_bw; 785 u64 extra_bw; 786 787 /* 788 * Inverse of the fraction of CPU utilization that can be reclaimed 789 * by the GRUB algorithm. 790 */ 791 u64 bw_ratio; 792}; 793 794#ifdef CONFIG_FAIR_GROUP_SCHED 795/* An entity is a task if it doesn't "own" a runqueue */ 796#define entity_is_task(se) (!se->my_q) 797 798static inline void se_update_runnable(struct sched_entity *se) 799{ 800 if (!entity_is_task(se)) { 801 se->runnable_weight = se->my_q->h_nr_running; 802 } 803} 804 805static inline long se_runnable(struct sched_entity *se) 806{ 807 if (entity_is_task(se)) { 808 return !!se->on_rq; 809 } else { 810 return se->runnable_weight; 811 } 812} 813 814#else 815#define entity_is_task(se) 1 816 817static inline void se_update_runnable(struct sched_entity *se) 818{ 819} 820 821static inline long se_runnable(struct sched_entity *se) 822{ 823 return !!se->on_rq; 824} 825#endif 826 827#ifdef CONFIG_SMP 828/* 829 * XXX we want to get rid of these helpers and use the full load resolution. 830 */ 831static inline long se_weight(struct sched_entity *se) 832{ 833 return scale_load_down(se->load.weight); 834} 835 836static inline bool sched_asym_prefer(int a, int b) 837{ 838 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 839} 840 841struct perf_domain { 842 struct em_perf_domain *em_pd; 843 struct perf_domain *next; 844 struct rcu_head rcu; 845}; 846 847/* Scheduling group status flags */ 848#define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 849#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 850 851/* 852 * We add the notion of a root-domain which will be used to define per-domain 853 * variables. Each exclusive cpuset essentially defines an island domain by 854 * fully partitioning the member CPUs from any other cpuset. Whenever a new 855 * exclusive cpuset is created, we also create and attach a new root-domain 856 * object. 857 * 858 */ 859struct root_domain { 860 atomic_t refcount; 861 atomic_t rto_count; 862 struct rcu_head rcu; 863 cpumask_var_t span; 864 cpumask_var_t online; 865 866 /* 867 * Indicate pullable load on at least one CPU, e.g: 868 * - More than one runnable task 869 * - Running task is misfit 870 */ 871 int overload; 872 873 /* Indicate one or more cpus over-utilized (tipping point) */ 874 int overutilized; 875 876 /* 877 * The bit corresponding to a CPU gets set here if such CPU has more 878 * than one runnable -deadline task (as it is below for RT tasks). 879 */ 880 cpumask_var_t dlo_mask; 881 atomic_t dlo_count; 882 struct dl_bw dl_bw; 883 struct cpudl cpudl; 884 885#ifdef HAVE_RT_PUSH_IPI 886 /* 887 * For IPI pull requests, loop across the rto_mask. 888 */ 889 struct irq_work rto_push_work; 890 raw_spinlock_t rto_lock; 891 /* These are only updated and read within rto_lock */ 892 int rto_loop; 893 int rto_cpu; 894 /* These atomics are updated outside of a lock */ 895 atomic_t rto_loop_next; 896 atomic_t rto_loop_start; 897#endif 898 /* 899 * The "RT overload" flag: it gets set if a CPU has more than 900 * one runnable RT task. 901 */ 902 cpumask_var_t rto_mask; 903 struct cpupri cpupri; 904 905 unsigned long max_cpu_capacity; 906 907 /* 908 * NULL-terminated list of performance domains intersecting with the 909 * CPUs of the rd. Protected by RCU. 910 */ 911 struct perf_domain __rcu *pd; 912#ifdef CONFIG_SCHED_RT_CAS 913 int max_cap_orig_cpu; 914#endif 915}; 916 917extern void init_defrootdomain(void); 918extern int sched_init_domains(const struct cpumask *cpu_map); 919extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 920extern void sched_get_rd(struct root_domain *rd); 921extern void sched_put_rd(struct root_domain *rd); 922 923#ifdef HAVE_RT_PUSH_IPI 924extern void rto_push_irq_work_func(struct irq_work *work); 925#endif 926#endif /* CONFIG_SMP */ 927 928#ifdef CONFIG_UCLAMP_TASK 929/* 930 * struct uclamp_bucket - Utilization clamp bucket 931 * @value: utilization clamp value for tasks on this clamp bucket 932 * @tasks: number of RUNNABLE tasks on this clamp bucket 933 * 934 * Keep track of how many tasks are RUNNABLE for a given utilization 935 * clamp value. 936 */ 937struct uclamp_bucket { 938 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 939 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 940}; 941 942/* 943 * struct uclamp_rq - rq's utilization clamp 944 * @value: currently active clamp values for a rq 945 * @bucket: utilization clamp buckets affecting a rq 946 * 947 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 948 * A clamp value is affecting a rq when there is at least one task RUNNABLE 949 * (or actually running) with that value. 950 * 951 * There are up to UCLAMP_CNT possible different clamp values, currently there 952 * are only two: minimum utilization and maximum utilization. 953 * 954 * All utilization clamping values are MAX aggregated, since: 955 * - for util_min: we want to run the CPU at least at the max of the minimum 956 * utilization required by its currently RUNNABLE tasks. 957 * - for util_max: we want to allow the CPU to run up to the max of the 958 * maximum utilization allowed by its currently RUNNABLE tasks. 959 * 960 * Since on each system we expect only a limited number of different 961 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 962 * the metrics required to compute all the per-rq utilization clamp values. 963 */ 964struct uclamp_rq { 965 unsigned int value; 966 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 967}; 968 969DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); 970#endif /* CONFIG_UCLAMP_TASK */ 971 972/* 973 * This is the main, per-CPU runqueue data structure. 974 * 975 * Locking rule: those places that want to lock multiple runqueues 976 * (such as the load balancing or the thread migration code), lock 977 * acquire operations must be ordered by ascending &runqueue. 978 */ 979struct rq { 980 /* runqueue lock: */ 981 raw_spinlock_t lock; 982 983 /* 984 * nr_running and cpu_load should be in the same cacheline because 985 * remote CPUs use both these fields when doing load calculation. 986 */ 987 unsigned int nr_running; 988#ifdef CONFIG_NUMA_BALANCING 989 unsigned int nr_numa_running; 990 unsigned int nr_preferred_running; 991 unsigned int numa_migrate_on; 992#endif 993#ifdef CONFIG_NO_HZ_COMMON 994#ifdef CONFIG_SMP 995 unsigned long last_blocked_load_update_tick; 996 unsigned int has_blocked_load; 997 call_single_data_t nohz_csd; 998#endif /* CONFIG_SMP */ 999 unsigned int nohz_tick_stopped; 1000 atomic_t nohz_flags; 1001#endif /* CONFIG_NO_HZ_COMMON */ 1002 1003#ifdef CONFIG_SMP 1004 unsigned int ttwu_pending; 1005#endif 1006 u64 nr_switches; 1007 1008#ifdef CONFIG_UCLAMP_TASK 1009 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 1010 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 1011 unsigned int uclamp_flags; 1012#define UCLAMP_FLAG_IDLE 0x01 1013#endif 1014 1015 struct cfs_rq cfs; 1016 struct rt_rq rt; 1017 struct dl_rq dl; 1018 1019#ifdef CONFIG_FAIR_GROUP_SCHED 1020 /* list of leaf cfs_rq on this CPU: */ 1021 struct list_head leaf_cfs_rq_list; 1022 struct list_head *tmp_alone_branch; 1023#endif /* CONFIG_FAIR_GROUP_SCHED */ 1024 1025 /* 1026 * This is part of a global counter where only the total sum 1027 * over all CPUs matters. A task can increase this counter on 1028 * one CPU and if it got migrated afterwards it may decrease 1029 * it on another CPU. Always updated under the runqueue lock: 1030 */ 1031 unsigned long nr_uninterruptible; 1032 1033 struct task_struct __rcu *curr; 1034 struct task_struct *idle; 1035 struct task_struct *stop; 1036 unsigned long next_balance; 1037 struct mm_struct *prev_mm; 1038 1039 unsigned int clock_update_flags; 1040 u64 clock; 1041 /* Ensure that all clocks are in the same cache line */ 1042 u64 clock_task ____cacheline_aligned; 1043 u64 clock_pelt; 1044 unsigned long lost_idle_time; 1045 1046 atomic_t nr_iowait; 1047 1048#ifdef CONFIG_MEMBARRIER 1049 int membarrier_state; 1050#endif 1051 1052#ifdef CONFIG_SMP 1053 struct root_domain *rd; 1054 struct sched_domain __rcu *sd; 1055 1056 unsigned long cpu_capacity; 1057 unsigned long cpu_capacity_orig; 1058 1059 struct callback_head *balance_callback; 1060 1061 unsigned char nohz_idle_balance; 1062 unsigned char idle_balance; 1063 1064 unsigned long misfit_task_load; 1065 1066 /* For active balancing */ 1067 int active_balance; 1068 int push_cpu; 1069#ifdef CONFIG_SCHED_EAS 1070 struct task_struct *push_task; 1071#endif 1072 struct cpu_stop_work active_balance_work; 1073 1074 /* For rt active balancing */ 1075#ifdef CONFIG_SCHED_RT_ACTIVE_LB 1076 int rt_active_balance; 1077 struct task_struct *rt_push_task; 1078 struct cpu_stop_work rt_active_balance_work; 1079#endif 1080 1081 /* CPU of this runqueue: */ 1082 int cpu; 1083 int online; 1084 1085 struct list_head cfs_tasks; 1086 1087 struct sched_avg avg_rt; 1088 struct sched_avg avg_dl; 1089#ifdef CONFIG_HAVE_SCHED_AVG_IRQ 1090 struct sched_avg avg_irq; 1091#endif 1092#ifdef CONFIG_SCHED_THERMAL_PRESSURE 1093 struct sched_avg avg_thermal; 1094#endif 1095 u64 idle_stamp; 1096 u64 avg_idle; 1097 1098 /* This is used to determine avg_idle's max value */ 1099 u64 max_idle_balance_cost; 1100#endif /* CONFIG_SMP */ 1101 1102#ifdef CONFIG_SCHED_WALT 1103 struct sched_cluster *cluster; 1104 struct cpumask freq_domain_cpumask; 1105 struct walt_sched_stats walt_stats; 1106 1107 u64 window_start; 1108 unsigned long walt_flags; 1109 1110 u64 cur_irqload; 1111 u64 avg_irqload; 1112 u64 irqload_ts; 1113 u64 curr_runnable_sum; 1114 u64 prev_runnable_sum; 1115 u64 nt_curr_runnable_sum; 1116 u64 nt_prev_runnable_sum; 1117 u64 cum_window_demand_scaled; 1118 struct load_subtractions load_subs[NUM_TRACKED_WINDOWS]; 1119#ifdef CONFIG_SCHED_RTG 1120 struct group_cpu_time grp_time; 1121#endif 1122#endif /* CONFIG_SCHED_WALT */ 1123 1124#ifdef CONFIG_IRQ_TIME_ACCOUNTING 1125 u64 prev_irq_time; 1126#endif 1127#ifdef CONFIG_PARAVIRT 1128 u64 prev_steal_time; 1129#endif 1130#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 1131 u64 prev_steal_time_rq; 1132#endif 1133 1134 /* calc_load related fields */ 1135 unsigned long calc_load_update; 1136 long calc_load_active; 1137 1138#ifdef CONFIG_SCHED_HRTICK 1139#ifdef CONFIG_SMP 1140 call_single_data_t hrtick_csd; 1141#endif 1142 struct hrtimer hrtick_timer; 1143 ktime_t hrtick_time; 1144#endif 1145 1146#ifdef CONFIG_SCHEDSTATS 1147 /* latency stats */ 1148 struct sched_info rq_sched_info; 1149 unsigned long long rq_cpu_time; 1150 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 1151 1152 /* sys_sched_yield() stats */ 1153 unsigned int yld_count; 1154 1155 /* schedule() stats */ 1156 unsigned int sched_count; 1157 unsigned int sched_goidle; 1158 1159 /* try_to_wake_up() stats */ 1160 unsigned int ttwu_count; 1161 unsigned int ttwu_local; 1162#endif 1163 1164#ifdef CONFIG_CPU_IDLE 1165 /* Must be inspected within a rcu lock section */ 1166 struct cpuidle_state *idle_state; 1167#endif 1168}; 1169 1170#ifdef CONFIG_FAIR_GROUP_SCHED 1171 1172/* CPU runqueue to which this cfs_rq is attached */ 1173static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1174{ 1175 return cfs_rq->rq; 1176} 1177 1178#else 1179 1180static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1181{ 1182 return container_of(cfs_rq, struct rq, cfs); 1183} 1184#endif 1185 1186static inline int cpu_of(struct rq *rq) 1187{ 1188#ifdef CONFIG_SMP 1189 return rq->cpu; 1190#else 1191 return 0; 1192#endif 1193} 1194 1195#ifdef CONFIG_SCHED_SMT 1196extern void __update_idle_core(struct rq *rq); 1197 1198static inline void update_idle_core(struct rq *rq) 1199{ 1200 if (static_branch_unlikely(&sched_smt_present)) { 1201 __update_idle_core(rq); 1202 } 1203} 1204 1205#else 1206static inline void update_idle_core(struct rq *rq) 1207{ 1208} 1209#endif 1210 1211DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 1212 1213#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 1214#define this_rq() this_cpu_ptr(&runqueues) 1215#define task_rq(p) cpu_rq(task_cpu(p)) 1216#define cpu_curr(cpu) (cpu_rq(cpu)->curr) 1217#define raw_rq() raw_cpu_ptr(&runqueues) 1218 1219extern void update_rq_clock(struct rq *rq); 1220 1221static inline u64 __rq_clock_broken(struct rq *rq) 1222{ 1223 return READ_ONCE(rq->clock); 1224} 1225 1226/* 1227 * rq::clock_update_flags bits 1228 * 1229 * %RQCF_REQ_SKIP - will request skipping of clock update on the next 1230 * call to __schedule(). This is an optimisation to avoid 1231 * neighbouring rq clock updates. 1232 * 1233 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1234 * in effect and calls to update_rq_clock() are being ignored. 1235 * 1236 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1237 * made to update_rq_clock() since the last time rq::lock was pinned. 1238 * 1239 * If inside of __schedule(), clock_update_flags will have been 1240 * shifted left (a left shift is a cheap operation for the fast path 1241 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1242 * 1243 * if (rq-clock_update_flags >= RQCF_UPDATED) 1244 * 1245 * to check if %RQCF_UPADTED is set. It'll never be shifted more than 1246 * one position though, because the next rq_unpin_lock() will shift it 1247 * back. 1248 */ 1249#define RQCF_REQ_SKIP 0x01 1250#define RQCF_ACT_SKIP 0x02 1251#define RQCF_UPDATED 0x04 1252 1253static inline void assert_clock_updated(struct rq *rq) 1254{ 1255 /* 1256 * The only reason for not seeing a clock update since the 1257 * last rq_pin_lock() is if we're currently skipping updates. 1258 */ 1259 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1260} 1261 1262static inline u64 rq_clock(struct rq *rq) 1263{ 1264 lockdep_assert_held(&rq->lock); 1265 assert_clock_updated(rq); 1266 1267 return rq->clock; 1268} 1269 1270static inline u64 rq_clock_task(struct rq *rq) 1271{ 1272 lockdep_assert_held(&rq->lock); 1273 assert_clock_updated(rq); 1274 1275 return rq->clock_task; 1276} 1277 1278/** 1279 * By default the decay is the default pelt decay period. 1280 * The decay shift can change the decay period in 1281 * multiples of 32. 1282 * Decay shift Decay period(ms) 1283 * 0 32 1284 * 1 64 1285 * 2 128 1286 * 3 256 1287 * 4 512 1288 */ 1289extern int sched_thermal_decay_shift; 1290 1291static inline u64 rq_clock_thermal(struct rq *rq) 1292{ 1293 return rq_clock_task(rq) >> sched_thermal_decay_shift; 1294} 1295 1296static inline void rq_clock_skip_update(struct rq *rq) 1297{ 1298 lockdep_assert_held(&rq->lock); 1299 rq->clock_update_flags |= RQCF_REQ_SKIP; 1300} 1301 1302/* 1303 * See rt task throttling, which is the only time a skip 1304 * request is cancelled. 1305 */ 1306static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1307{ 1308 lockdep_assert_held(&rq->lock); 1309 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1310} 1311 1312struct rq_flags { 1313 unsigned long flags; 1314 struct pin_cookie cookie; 1315#ifdef CONFIG_SCHED_DEBUG 1316 /* 1317 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1318 * current pin context is stashed here in case it needs to be 1319 * restored in rq_repin_lock(). 1320 */ 1321 unsigned int clock_update_flags; 1322#endif 1323}; 1324 1325/* 1326 * Lockdep annotation that avoids accidental unlocks; it's like a 1327 * sticky/continuous lockdep_assert_held(). 1328 * 1329 * This avoids code that has access to 'struct rq *rq' (basically everything in 1330 * the scheduler) from accidentally unlocking the rq if they do not also have a 1331 * copy of the (on-stack) 'struct rq_flags rf'. 1332 * 1333 * Also see Documentation/locking/lockdep-design.rst. 1334 */ 1335static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1336{ 1337 rf->cookie = lockdep_pin_lock(&rq->lock); 1338 1339#ifdef CONFIG_SCHED_DEBUG 1340 rq->clock_update_flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP); 1341 rf->clock_update_flags = 0; 1342#endif 1343} 1344 1345static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1346{ 1347#ifdef CONFIG_SCHED_DEBUG 1348 if (rq->clock_update_flags > RQCF_ACT_SKIP) { 1349 rf->clock_update_flags = RQCF_UPDATED; 1350 } 1351#endif 1352 1353 lockdep_unpin_lock(&rq->lock, rf->cookie); 1354} 1355 1356static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1357{ 1358 lockdep_repin_lock(&rq->lock, rf->cookie); 1359 1360#ifdef CONFIG_SCHED_DEBUG 1361 /* 1362 * Restore the value we stashed in @rf for this pin context. 1363 */ 1364 rq->clock_update_flags |= rf->clock_update_flags; 1365#endif 1366} 1367 1368struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(rq->lock); 1369 1370struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(p->pi_lock) __acquires(rq->lock); 1371 1372static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) 1373{ 1374 rq_unpin_lock(rq, rf); 1375 raw_spin_unlock(&rq->lock); 1376} 1377 1378static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) __releases(rq->lock) 1379 __releases(p->pi_lock) 1380{ 1381 rq_unpin_lock(rq, rf); 1382 raw_spin_unlock(&rq->lock); 1383 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1384} 1385 1386static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) 1387{ 1388 raw_spin_lock_irqsave(&rq->lock, rf->flags); 1389 rq_pin_lock(rq, rf); 1390} 1391 1392static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) 1393{ 1394 raw_spin_lock_irq(&rq->lock); 1395 rq_pin_lock(rq, rf); 1396} 1397 1398static inline void rq_lock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) 1399{ 1400 raw_spin_lock(&rq->lock); 1401 rq_pin_lock(rq, rf); 1402} 1403 1404static inline void rq_relock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) 1405{ 1406 raw_spin_lock(&rq->lock); 1407 rq_repin_lock(rq, rf); 1408} 1409 1410static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) 1411{ 1412 rq_unpin_lock(rq, rf); 1413 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 1414} 1415 1416static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) 1417{ 1418 rq_unpin_lock(rq, rf); 1419 raw_spin_unlock_irq(&rq->lock); 1420} 1421 1422static inline void rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) 1423{ 1424 rq_unpin_lock(rq, rf); 1425 raw_spin_unlock(&rq->lock); 1426} 1427 1428static inline struct rq *this_rq_lock_irq(struct rq_flags *rf) __acquires(rq->lock) 1429{ 1430 struct rq *rq; 1431 1432 local_irq_disable(); 1433 rq = this_rq(); 1434 rq_lock(rq, rf); 1435 return rq; 1436} 1437 1438#ifdef CONFIG_NUMA 1439enum numa_topology_type { 1440 NUMA_DIRECT, 1441 NUMA_GLUELESS_MESH, 1442 NUMA_BACKPLANE, 1443}; 1444extern enum numa_topology_type sched_numa_topology_type; 1445extern int sched_max_numa_distance; 1446extern bool find_numa_distance(int distance); 1447extern void sched_init_numa(void); 1448extern void sched_domains_numa_masks_set(unsigned int cpu); 1449extern void sched_domains_numa_masks_clear(unsigned int cpu); 1450extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1451#else 1452static inline void sched_init_numa(void) 1453{ 1454} 1455static inline void sched_domains_numa_masks_set(unsigned int cpu) 1456{ 1457} 1458static inline void sched_domains_numa_masks_clear(unsigned int cpu) 1459{ 1460} 1461static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1462{ 1463 return nr_cpu_ids; 1464} 1465#endif 1466 1467#ifdef CONFIG_NUMA_BALANCING 1468/* The regions in numa_faults array from task_struct */ 1469enum numa_faults_stats { NUMA_MEM = 0, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF }; 1470extern void sched_setnuma(struct task_struct *p, int node); 1471extern int migrate_task_to(struct task_struct *p, int cpu); 1472extern int migrate_swap(struct task_struct *p, struct task_struct *t, int cpu, int scpu); 1473extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1474#else 1475static inline void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1476{ 1477} 1478#endif /* CONFIG_NUMA_BALANCING */ 1479 1480#ifdef CONFIG_SMP 1481 1482static inline void queue_balance_callback(struct rq *rq, struct callback_head *head, void (*func)(struct rq *rq)) 1483{ 1484 lockdep_assert_held(&rq->lock); 1485 1486 if (unlikely(head->next)) { 1487 return; 1488 } 1489 1490 head->func = (void (*)(struct callback_head *))func; 1491 head->next = rq->balance_callback; 1492 rq->balance_callback = head; 1493} 1494 1495#define rcu_dereference_check_sched_domain(p) rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex)) 1496 1497/* 1498 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1499 * See destroy_sched_domains: call_rcu for details. 1500 * 1501 * The domain tree of any CPU may only be accessed from within 1502 * preempt-disabled sections. 1503 */ 1504#define for_each_domain(cpu, __sd) \ 1505 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) 1506 1507/** 1508 * highest_flag_domain - Return highest sched_domain containing flag. 1509 * @cpu: The CPU whose highest level of sched domain is to 1510 * be returned. 1511 * @flag: The flag to check for the highest sched_domain 1512 * for the given CPU. 1513 * 1514 * Returns the highest sched_domain of a CPU which contains the given flag. 1515 */ 1516static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1517{ 1518 struct sched_domain *sd, *hsd = NULL; 1519 1520 for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) { 1521 if (!(sd->flags & flag)) { 1522 break; 1523 } 1524 hsd = sd; 1525 } 1526 1527 return hsd; 1528} 1529 1530static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1531{ 1532 struct sched_domain *sd; 1533 1534 for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) { 1535 if (sd->flags & flag) { 1536 break; 1537 } 1538 } 1539 1540 return sd; 1541} 1542 1543DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1544DECLARE_PER_CPU(int, sd_llc_size); 1545DECLARE_PER_CPU(int, sd_llc_id); 1546DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1547DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1548DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1549DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1550extern struct static_key_false sched_asym_cpucapacity; 1551 1552struct sched_group_capacity { 1553 atomic_t ref; 1554 /* 1555 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1556 * for a single CPU. 1557 */ 1558 unsigned long capacity; 1559 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1560 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1561 unsigned long next_update; 1562 int imbalance; /* XXX unrelated to capacity but shared group state */ 1563 1564#ifdef CONFIG_SCHED_DEBUG 1565 int id; 1566#endif 1567 1568 unsigned long cpumask[]; /* Balance mask */ 1569}; 1570 1571struct sched_group { 1572 struct sched_group *next; /* Must be a circular list */ 1573 atomic_t ref; 1574 1575 unsigned int group_weight; 1576 struct sched_group_capacity *sgc; 1577 int asym_prefer_cpu; /* CPU of highest priority in group */ 1578 1579 /* 1580 * The CPUs this group covers. 1581 * 1582 * NOTE: this field is variable length. (Allocated dynamically 1583 * by attaching extra space to the end of the structure, 1584 * depending on how many CPUs the kernel has booted up with) 1585 */ 1586 unsigned long cpumask[]; 1587}; 1588 1589static inline struct cpumask *sched_group_span(struct sched_group *sg) 1590{ 1591 return to_cpumask(sg->cpumask); 1592} 1593 1594/* 1595 * See build_balance_mask(). 1596 */ 1597static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1598{ 1599 return to_cpumask(sg->sgc->cpumask); 1600} 1601 1602/** 1603 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 1604 * @group: The group whose first CPU is to be returned. 1605 */ 1606static inline unsigned int group_first_cpu(struct sched_group *group) 1607{ 1608 return cpumask_first(sched_group_span(group)); 1609} 1610 1611extern int group_balance_cpu(struct sched_group *sg); 1612 1613#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 1614void register_sched_domain_sysctl(void); 1615void dirty_sched_domain_sysctl(int cpu); 1616void unregister_sched_domain_sysctl(void); 1617#else 1618static inline void register_sched_domain_sysctl(void) 1619{ 1620} 1621static inline void dirty_sched_domain_sysctl(int cpu) 1622{ 1623} 1624static inline void unregister_sched_domain_sysctl(void) 1625{ 1626} 1627#endif 1628 1629extern void flush_smp_call_function_from_idle(void); 1630 1631#else /* !CONFIG_SMP: */ 1632static inline void flush_smp_call_function_from_idle(void) 1633{ 1634} 1635#endif 1636 1637#include "stats.h" 1638#include "autogroup.h" 1639 1640#ifdef CONFIG_CGROUP_SCHED 1641 1642/* 1643 * Return the group to which this tasks belongs. 1644 * 1645 * We cannot use task_css() and friends because the cgroup subsystem 1646 * changes that value before the cgroup_subsys::attach() method is called, 1647 * therefore we cannot pin it and might observe the wrong value. 1648 * 1649 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 1650 * core changes this before calling sched_move_task(). 1651 * 1652 * Instead we use a 'copy' which is updated from sched_move_task() while 1653 * holding both task_struct::pi_lock and rq::lock. 1654 */ 1655static inline struct task_group *task_group(struct task_struct *p) 1656{ 1657 return p->sched_task_group; 1658} 1659 1660/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1661static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1662{ 1663#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1664 struct task_group *tg = task_group(p); 1665#endif 1666 1667#ifdef CONFIG_FAIR_GROUP_SCHED 1668 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1669 p->se.cfs_rq = tg->cfs_rq[cpu]; 1670 p->se.parent = tg->se[cpu]; 1671#endif 1672 1673#ifdef CONFIG_RT_GROUP_SCHED 1674 p->rt.rt_rq = tg->rt_rq[cpu]; 1675 p->rt.parent = tg->rt_se[cpu]; 1676#endif 1677} 1678 1679#else /* CONFIG_CGROUP_SCHED */ 1680 1681static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1682{ 1683} 1684static inline struct task_group *task_group(struct task_struct *p) 1685{ 1686 return NULL; 1687} 1688 1689#endif /* CONFIG_CGROUP_SCHED */ 1690 1691static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1692{ 1693 set_task_rq(p, cpu); 1694#ifdef CONFIG_SMP 1695 /* 1696 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1697 * successfully executed on another CPU. We must ensure that updates of 1698 * per-task data have been completed by this moment. 1699 */ 1700 smp_wmb(); 1701#ifdef CONFIG_THREAD_INFO_IN_TASK 1702 WRITE_ONCE(p->cpu, cpu); 1703#else 1704 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1705#endif 1706 p->wake_cpu = cpu; 1707#endif 1708} 1709 1710/* 1711 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1712 */ 1713#ifdef CONFIG_SCHED_DEBUG 1714#include <linux/static_key.h> 1715#define const_debug __read_mostly 1716#else 1717#define const_debug const 1718#endif 1719 1720#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name, 1721 1722enum { 1723#include "features.h" 1724 __SCHED_FEAT_NR, 1725}; 1726 1727#undef SCHED_FEAT 1728 1729#ifdef CONFIG_SCHED_DEBUG 1730 1731/* 1732 * To support run-time toggling of sched features, all the translation units 1733 * (but core.c) reference the sysctl_sched_features defined in core.c. 1734 */ 1735extern const_debug unsigned int sysctl_sched_features; 1736 1737#ifdef CONFIG_JUMP_LABEL 1738#define SCHED_FEAT(name, enabled) \ 1739 static __always_inline bool static_branch_##name(struct static_key *key) \ 1740 { \ 1741 return static_key_##enabled(key); \ 1742 } 1743 1744#include "features.h" 1745#undef SCHED_FEAT 1746 1747extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1748#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1749 1750#else /* !CONFIG_JUMP_LABEL */ 1751 1752#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1753 1754#endif /* CONFIG_JUMP_LABEL */ 1755 1756#else /* !SCHED_DEBUG */ 1757 1758/* 1759 * Each translation unit has its own copy of sysctl_sched_features to allow 1760 * constants propagation at compile time and compiler optimization based on 1761 * features default. 1762 */ 1763#define SCHED_FEAT(name, enabled) (1UL << __SCHED_FEAT_##name) * (enabled) | 1764static const_debug __maybe_unused unsigned int sysctl_sched_features = 1765#include "features.h" 1766 0; 1767#undef SCHED_FEAT 1768 1769#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1770 1771#endif /* SCHED_DEBUG */ 1772 1773extern struct static_key_false sched_numa_balancing; 1774extern struct static_key_false sched_schedstats; 1775 1776static inline u64 global_rt_period(void) 1777{ 1778 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1779} 1780 1781static inline u64 global_rt_runtime(void) 1782{ 1783 if (sysctl_sched_rt_runtime < 0) { 1784 return RUNTIME_INF; 1785 } 1786 1787 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1788} 1789 1790static inline int task_current(struct rq *rq, struct task_struct *p) 1791{ 1792 return rq->curr == p; 1793} 1794 1795static inline int task_running(struct rq *rq, struct task_struct *p) 1796{ 1797#ifdef CONFIG_SMP 1798 return p->on_cpu; 1799#else 1800 return task_current(rq, p); 1801#endif 1802} 1803 1804static inline int task_on_rq_queued(struct task_struct *p) 1805{ 1806 return p->on_rq == TASK_ON_RQ_QUEUED; 1807} 1808 1809static inline int task_on_rq_migrating(struct task_struct *p) 1810{ 1811 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 1812} 1813 1814/* 1815 * wake flags 1816 */ 1817#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 1818#define WF_FORK 0x02 /* Child wakeup after fork */ 1819#define WF_MIGRATED 0x04 /* Internal use, task got migrated */ 1820#define WF_ON_CPU 0x08 /* Wakee is on_cpu */ 1821 1822/* 1823 * To aid in avoiding the subversion of "niceness" due to uneven distribution 1824 * of tasks with abnormal "nice" values across CPUs the contribution that 1825 * each task makes to its run queue's load is weighted according to its 1826 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 1827 * scaled version of the new time slice allocation that they receive on time 1828 * slice expiry etc. 1829 */ 1830 1831#define WEIGHT_IDLEPRIO 3 1832#define WMULT_IDLEPRIO 1431655765 1833 1834extern const int sched_prio_to_weight[40]; 1835extern const u32 sched_prio_to_wmult[40]; 1836#ifdef CONFIG_SCHED_LATENCY_NICE 1837extern const int sched_latency_to_weight[40]; 1838#endif 1839 1840/* 1841 * {de,en}queue flags: 1842 * 1843 * DEQUEUE_SLEEP - task is no longer runnable 1844 * ENQUEUE_WAKEUP - task just became runnable 1845 * 1846 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1847 * are in a known state which allows modification. Such pairs 1848 * should preserve as much state as possible. 1849 * 1850 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1851 * in the runqueue. 1852 * 1853 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1854 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 1855 * ENQUEUE_MIGRATED - the task was migrated during wakeup 1856 * 1857 */ 1858 1859#define DEQUEUE_SLEEP 0x01 1860#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 1861#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 1862#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1863 1864#define ENQUEUE_WAKEUP 0x01 1865#define ENQUEUE_RESTORE 0x02 1866#define ENQUEUE_MOVE 0x04 1867#define ENQUEUE_NOCLOCK 0x08 1868 1869#define ENQUEUE_HEAD 0x10 1870#define ENQUEUE_REPLENISH 0x20 1871#ifdef CONFIG_SMP 1872#define ENQUEUE_MIGRATED 0x40 1873#else 1874#define ENQUEUE_MIGRATED 0x00 1875#endif 1876 1877#define ENQUEUE_WAKEUP_SYNC 0x80 1878 1879#define RETRY_TASK ((void *)-1UL) 1880 1881struct sched_class { 1882#ifdef CONFIG_UCLAMP_TASK 1883 int uclamp_enabled; 1884#endif 1885 1886 void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags); 1887 void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags); 1888 void (*yield_task)(struct rq *rq); 1889 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 1890 1891 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1892 1893 struct task_struct *(*pick_next_task)(struct rq *rq); 1894 1895 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 1896 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 1897 1898#ifdef CONFIG_SMP 1899 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 1900 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 1901 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1902 1903 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1904 1905 void (*set_cpus_allowed)(struct task_struct *p, const struct cpumask *newmask); 1906 1907 void (*rq_online)(struct rq *rq); 1908 void (*rq_offline)(struct rq *rq); 1909#endif 1910 1911 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1912 void (*task_fork)(struct task_struct *p); 1913 void (*task_dead)(struct task_struct *p); 1914 1915 /* 1916 * The switched_from() call is allowed to drop rq->lock, therefore we 1917 * cannot assume the switched_from/switched_to pair is serliazed by 1918 * rq->lock. They are however serialized by p->pi_lock. 1919 */ 1920 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1921 void (*switched_to)(struct rq *this_rq, struct task_struct *task); 1922 void (*prio_changed)(struct rq *this_rq, struct task_struct *task, int oldprio); 1923 1924 unsigned int (*get_rr_interval)(struct rq *rq, struct task_struct *task); 1925 1926 void (*update_curr)(struct rq *rq); 1927 1928#define TASK_SET_GROUP 0 1929#define TASK_MOVE_GROUP 1 1930 1931#ifdef CONFIG_FAIR_GROUP_SCHED 1932 void (*task_change_group)(struct task_struct *p, int type); 1933#endif 1934#ifdef CONFIG_SCHED_WALT 1935 void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled); 1936#endif 1937#ifdef CONFIG_SCHED_EAS 1938 void (*check_for_migration)(struct rq *rq, struct task_struct *p); 1939#endif 1940} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */ 1941 1942static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 1943{ 1944 WARN_ON_ONCE(rq->curr != prev); 1945 prev->sched_class->put_prev_task(rq, prev); 1946} 1947 1948static inline void set_next_task(struct rq *rq, struct task_struct *next) 1949{ 1950 WARN_ON_ONCE(rq->curr != next); 1951 next->sched_class->set_next_task(rq, next, false); 1952} 1953 1954/* Defined in include/asm-generic/vmlinux.lds.h */ 1955extern struct sched_class __begin_sched_classes[]; 1956extern struct sched_class __end_sched_classes[]; 1957 1958#define sched_class_highest (__end_sched_classes - 1) 1959#define sched_class_lowest (__begin_sched_classes - 1) 1960 1961#define for_class_range(class, _from, _to) for (class = (_from); class != (_to); (class)--) 1962 1963#define for_each_class(class) for_class_range(class, sched_class_highest, sched_class_lowest) 1964 1965extern const struct sched_class stop_sched_class; 1966extern const struct sched_class dl_sched_class; 1967extern const struct sched_class rt_sched_class; 1968extern const struct sched_class fair_sched_class; 1969extern const struct sched_class idle_sched_class; 1970 1971static inline bool sched_stop_runnable(struct rq *rq) 1972{ 1973 return rq->stop && task_on_rq_queued(rq->stop); 1974} 1975 1976static inline bool sched_dl_runnable(struct rq *rq) 1977{ 1978 return rq->dl.dl_nr_running > 0; 1979} 1980 1981static inline bool sched_rt_runnable(struct rq *rq) 1982{ 1983 return rq->rt.rt_queued > 0; 1984} 1985 1986static inline bool sched_fair_runnable(struct rq *rq) 1987{ 1988 return rq->cfs.nr_running > 0; 1989} 1990 1991extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 1992extern struct task_struct *pick_next_task_idle(struct rq *rq); 1993 1994#ifdef CONFIG_SMP 1995 1996extern void update_group_capacity(struct sched_domain *sd, int cpu); 1997 1998extern void trigger_load_balance(struct rq *rq); 1999 2000extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 2001 2002#endif 2003 2004#ifdef CONFIG_CPU_IDLE 2005static inline void idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) 2006{ 2007 rq->idle_state = idle_state; 2008} 2009 2010static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2011{ 2012 SCHED_WARN_ON(!rcu_read_lock_held()); 2013 2014 return rq->idle_state; 2015} 2016#else 2017static inline void idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) 2018{ 2019} 2020 2021static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2022{ 2023 return NULL; 2024} 2025#endif 2026 2027extern void schedule_idle(void); 2028 2029extern void sysrq_sched_debug_show(void); 2030extern void sched_init_granularity(void); 2031extern void update_max_interval(void); 2032 2033extern void init_sched_dl_class(void); 2034extern void init_sched_rt_class(void); 2035extern void init_sched_fair_class(void); 2036 2037extern void reweight_task(struct task_struct *p, int prio); 2038 2039extern void resched_curr(struct rq *rq); 2040extern void resched_cpu(int cpu); 2041 2042extern struct rt_bandwidth def_rt_bandwidth; 2043extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 2044 2045extern struct dl_bandwidth def_dl_bandwidth; 2046extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 2047extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 2048extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 2049 2050#define BW_SHIFT 20 2051#define BW_UNIT (1 << BW_SHIFT) 2052#define RATIO_SHIFT 8 2053#define MAX_BW_BITS (64 - BW_SHIFT) 2054#define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 2055unsigned long to_ratio(u64 period, u64 runtime); 2056 2057extern void init_entity_runnable_average(struct sched_entity *se); 2058extern void post_init_entity_util_avg(struct task_struct *p); 2059 2060#ifdef CONFIG_NO_HZ_FULL 2061extern bool sched_can_stop_tick(struct rq *rq); 2062extern int __init sched_tick_offload_init(void); 2063 2064/* 2065 * Tick may be needed by tasks in the runqueue depending on their policy and 2066 * requirements. If tick is needed, lets send the target an IPI to kick it out of 2067 * nohz mode if necessary. 2068 */ 2069static inline void sched_update_tick_dependency(struct rq *rq) 2070{ 2071 int cpu = cpu_of(rq); 2072 if (!tick_nohz_full_cpu(cpu)) { 2073 return; 2074 } 2075 2076 if (sched_can_stop_tick(rq)) { 2077 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2078 } else { 2079 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2080 } 2081} 2082#else 2083static inline int sched_tick_offload_init(void) 2084{ 2085 return 0; 2086} 2087static inline void sched_update_tick_dependency(struct rq *rq) 2088{ 2089} 2090#endif 2091 2092static inline void add_nr_running(struct rq *rq, unsigned count) 2093{ 2094 unsigned prev_nr = rq->nr_running; 2095 2096 rq->nr_running = prev_nr + count; 2097 if (trace_sched_update_nr_running_tp_enabled()) { 2098 call_trace_sched_update_nr_running(rq, count); 2099 } 2100 2101#ifdef CONFIG_SMP 2102 if (prev_nr < TASK_ON_RQ_MIGRATING && rq->nr_running >= TASK_ON_RQ_MIGRATING) { 2103 if (!READ_ONCE(rq->rd->overload)) { 2104 WRITE_ONCE(rq->rd->overload, 1); 2105 } 2106 } 2107#endif 2108 2109 sched_update_tick_dependency(rq); 2110} 2111 2112static inline void sub_nr_running(struct rq *rq, unsigned count) 2113{ 2114 rq->nr_running -= count; 2115 if (trace_sched_update_nr_running_tp_enabled()) { 2116 call_trace_sched_update_nr_running(rq, -count); 2117 } 2118 2119 /* Check if we still need preemption */ 2120 sched_update_tick_dependency(rq); 2121} 2122 2123extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2124extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2125 2126extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 2127 2128extern const_debug unsigned int sysctl_sched_nr_migrate; 2129extern const_debug unsigned int sysctl_sched_migration_cost; 2130 2131#ifdef CONFIG_SCHED_HRTICK 2132 2133/* 2134 * Use hrtick when: 2135 * - enabled by features 2136 * - hrtimer is actually high res 2137 */ 2138static inline int hrtick_enabled(struct rq *rq) 2139{ 2140 if (!sched_feat(HRTICK)) { 2141 return 0; 2142 } 2143 if (!cpu_active(cpu_of(rq))) { 2144 return 0; 2145 } 2146 return hrtimer_is_hres_active(&rq->hrtick_timer); 2147} 2148 2149void hrtick_start(struct rq *rq, u64 delay); 2150 2151#else 2152 2153static inline int hrtick_enabled(struct rq *rq) 2154{ 2155 return 0; 2156} 2157 2158#endif /* CONFIG_SCHED_HRTICK */ 2159 2160#ifdef CONFIG_SCHED_WALT 2161u64 sched_ktime_clock(void); 2162#else 2163static inline u64 sched_ktime_clock(void) 2164{ 2165 return sched_clock(); 2166} 2167#endif 2168 2169#ifndef arch_scale_freq_tick 2170static __always_inline void arch_scale_freq_tick(void) 2171{ 2172} 2173#endif 2174 2175#ifndef arch_scale_freq_capacity 2176/** 2177 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. 2178 * @cpu: the CPU in question. 2179 * 2180 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 2181 * 2182 * f_curr 2183 * ------ * SCHED_CAPACITY_SCALE 2184 * f_max 2185 */ 2186static __always_inline unsigned long arch_scale_freq_capacity(int cpu) 2187{ 2188 return SCHED_CAPACITY_SCALE; 2189} 2190#endif 2191 2192unsigned long capacity_curr_of(int cpu); 2193unsigned long cpu_util(int cpu); 2194 2195#ifdef CONFIG_SMP 2196#ifdef CONFIG_SCHED_WALT 2197extern unsigned int sysctl_sched_use_walt_cpu_util; 2198extern unsigned int walt_disabled; 2199#endif 2200#ifdef CONFIG_PREEMPTION 2201 2202static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 2203 2204/* 2205 * fair double_lock_balance: Safely acquires both rq->locks in a fair 2206 * way at the expense of forcing extra atomic operations in all 2207 * invocations. This assures that the double_lock is acquired using the 2208 * same underlying policy as the spinlock_t on this architecture, which 2209 * reduces latency compared to the unfair variant below. However, it 2210 * also adds more overhead and therefore may reduce throughput. 2211 */ 2212static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) 2213 __acquires(busiest->lock) __acquires(this_rq->lock) 2214{ 2215 raw_spin_unlock(&this_rq->lock); 2216 double_rq_lock(this_rq, busiest); 2217 2218 return 1; 2219} 2220 2221#else 2222/* 2223 * Unfair double_lock_balance: Optimizes throughput at the expense of 2224 * latency by eliminating extra atomic operations when the locks are 2225 * already in proper order on entry. This favors lower CPU-ids and will 2226 * grant the double lock to lower CPUs over higher ids under contention, 2227 * regardless of entry order into the function. 2228 */ 2229static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) 2230 __acquires(busiest->lock) __acquires(this_rq->lock) 2231{ 2232 int ret = 0; 2233 2234 if (unlikely(!raw_spin_trylock(&busiest->lock))) { 2235 if (busiest < this_rq) { 2236 raw_spin_unlock(&this_rq->lock); 2237 raw_spin_lock(&busiest->lock); 2238 raw_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 2239 ret = 1; 2240 } else { 2241 raw_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 2242 } 2243 } 2244 return ret; 2245} 2246 2247#endif /* CONFIG_PREEMPTION */ 2248 2249/* 2250 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2251 */ 2252static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2253{ 2254 if (unlikely(!irqs_disabled())) { 2255 /* printk() doesn't work well under rq->lock */ 2256 raw_spin_unlock(&this_rq->lock); 2257 BUG_ON(1); 2258 } 2259 2260 return _double_lock_balance(this_rq, busiest); 2261} 2262 2263static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock) 2264{ 2265 raw_spin_unlock(&busiest->lock); 2266 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 2267} 2268 2269static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2270{ 2271 if (l1 > l2) { 2272 swap(l1, l2); 2273 } 2274 2275 spin_lock(l1); 2276 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2277} 2278 2279static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2280{ 2281 if (l1 > l2) { 2282 swap(l1, l2); 2283 } 2284 2285 spin_lock_irq(l1); 2286 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2287} 2288 2289static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2290{ 2291 if (l1 > l2) { 2292 swap(l1, l2); 2293 } 2294 2295 raw_spin_lock(l1); 2296 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2297} 2298 2299/* 2300 * double_rq_lock - safely lock two runqueues 2301 * 2302 * Note this does not disable interrupts like task_rq_lock, 2303 * you need to do so manually before calling. 2304 */ 2305static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) 2306{ 2307 BUG_ON(!irqs_disabled()); 2308 if (rq1 == rq2) { 2309 raw_spin_lock(&rq1->lock); 2310 __acquire(rq2->lock); /* Fake it out ;) */ 2311 } else { 2312 if (rq1 < rq2) { 2313 raw_spin_lock(&rq1->lock); 2314 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2315 } else { 2316 raw_spin_lock(&rq2->lock); 2317 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2318 } 2319 } 2320} 2321 2322/* 2323 * double_rq_unlock - safely unlock two runqueues 2324 * 2325 * Note this does not restore interrupts like task_rq_unlock, 2326 * you need to do so manually after calling. 2327 */ 2328static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) 2329{ 2330 raw_spin_unlock(&rq1->lock); 2331 if (rq1 != rq2) { 2332 raw_spin_unlock(&rq2->lock); 2333 } else { 2334 __release(rq2->lock); 2335 } 2336} 2337 2338extern void set_rq_online(struct rq *rq); 2339extern void set_rq_offline(struct rq *rq); 2340extern bool sched_smp_initialized; 2341 2342#else /* CONFIG_SMP */ 2343 2344/* 2345 * double_rq_lock - safely lock two runqueues 2346 * 2347 * Note this does not disable interrupts like task_rq_lock, 2348 * you need to do so manually before calling. 2349 */ 2350static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) 2351{ 2352 BUG_ON(!irqs_disabled()); 2353 BUG_ON(rq1 != rq2); 2354 raw_spin_lock(&rq1->lock); 2355 __acquire(rq2->lock); /* Fake it out ;) */ 2356} 2357 2358/* 2359 * double_rq_unlock - safely unlock two runqueues 2360 * 2361 * Note this does not restore interrupts like task_rq_unlock, 2362 * you need to do so manually after calling. 2363 */ 2364static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) 2365{ 2366 BUG_ON(rq1 != rq2); 2367 raw_spin_unlock(&rq1->lock); 2368 __release(rq2->lock); 2369} 2370 2371#endif 2372 2373extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2374extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2375 2376#ifdef CONFIG_SCHED_DEBUG 2377extern bool sched_debug_enabled; 2378 2379extern void print_cfs_stats(struct seq_file *m, int cpu); 2380extern void print_rt_stats(struct seq_file *m, int cpu); 2381extern void print_dl_stats(struct seq_file *m, int cpu); 2382extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2383extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2384extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2385#ifdef CONFIG_NUMA_BALANCING 2386extern void show_numa_stats(struct task_struct *p, struct seq_file *m); 2387extern void print_numa_stats(struct seq_file *m, int node, unsigned long tsf, unsigned long tpf, unsigned long gsf, 2388 unsigned long gpf); 2389#endif /* CONFIG_NUMA_BALANCING */ 2390#endif /* CONFIG_SCHED_DEBUG */ 2391 2392extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2393extern void init_rt_rq(struct rt_rq *rt_rq); 2394extern void init_dl_rq(struct dl_rq *dl_rq); 2395 2396extern void cfs_bandwidth_usage_inc(void); 2397extern void cfs_bandwidth_usage_dec(void); 2398 2399#ifdef CONFIG_NO_HZ_COMMON 2400#define NOHZ_BALANCE_KICK_BIT 0 2401#define NOHZ_STATS_KICK_BIT 1 2402 2403#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2404#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2405 2406#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 2407 2408#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2409 2410extern void nohz_balance_exit_idle(struct rq *rq); 2411#else 2412static inline void nohz_balance_exit_idle(struct rq *rq) 2413{ 2414} 2415#endif 2416 2417#ifdef CONFIG_SMP 2418static inline void __dl_update(struct dl_bw *dl_b, s64 bw) 2419{ 2420 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2421 int i; 2422 2423 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), "sched RCU must be held"); 2424 for_each_cpu_and(i, rd->span, cpu_active_mask) 2425 { 2426 struct rq *rq = cpu_rq(i); 2427 2428 rq->dl.extra_bw += bw; 2429 } 2430} 2431#else 2432static inline void __dl_update(struct dl_bw *dl_b, s64 bw) 2433{ 2434 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2435 2436 dl->extra_bw += bw; 2437} 2438#endif 2439 2440#ifdef CONFIG_IRQ_TIME_ACCOUNTING 2441struct irqtime { 2442 u64 total; 2443 u64 tick_delta; 2444 u64 irq_start_time; 2445 struct u64_stats_sync sync; 2446}; 2447 2448DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 2449 2450/* 2451 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2452 * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime 2453 * and never move forward. 2454 */ 2455static inline u64 irq_time_read(int cpu) 2456{ 2457 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2458 unsigned int seq; 2459 u64 total; 2460 2461 do { 2462 seq = __u64_stats_fetch_begin(&irqtime->sync); 2463 total = irqtime->total; 2464 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2465 2466 return total; 2467} 2468#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2469 2470#ifdef CONFIG_CPU_FREQ 2471DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2472 2473/** 2474 * cpufreq_update_util - Take a note about CPU utilization changes. 2475 * @rq: Runqueue to carry out the update for. 2476 * @flags: Update reason flags. 2477 * 2478 * This function is called by the scheduler on the CPU whose utilization is 2479 * being updated. 2480 * 2481 * It can only be called from RCU-sched read-side critical sections. 2482 * 2483 * The way cpufreq is currently arranged requires it to evaluate the CPU 2484 * performance state (frequency/voltage) on a regular basis to prevent it from 2485 * being stuck in a completely inadequate performance level for too long. 2486 * That is not guaranteed to happen if the updates are only triggered from CFS 2487 * and DL, though, because they may not be coming in if only RT tasks are 2488 * active all the time (or there are RT tasks only). 2489 * 2490 * As a workaround for that issue, this function is called periodically by the 2491 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2492 * but that really is a band-aid. Going forward it should be replaced with 2493 * solutions targeted more specifically at RT tasks. 2494 */ 2495static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2496{ 2497 struct update_util_data *data; 2498 u64 clock; 2499 2500#ifdef CONFIG_SCHED_WALT 2501 if (!(flags & SCHED_CPUFREQ_WALT)) { 2502 return; 2503 } 2504 2505 clock = sched_ktime_clock(); 2506#else 2507 clock = rq_clock(rq); 2508#endif 2509 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq))); 2510 if (data) { 2511 data->func(data, clock, flags); 2512 } 2513} 2514#else 2515static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2516{ 2517} 2518#endif /* CONFIG_CPU_FREQ */ 2519 2520#ifdef CONFIG_UCLAMP_TASK 2521unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 2522 2523/** 2524 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 2525 * @rq: The rq to clamp against. Must not be NULL. 2526 * @util: The util value to clamp. 2527 * @p: The task to clamp against. Can be NULL if you want to clamp 2528 * against @rq only. 2529 * 2530 * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 2531 * 2532 * If sched_uclamp_used static key is disabled, then just return the util 2533 * without any clamping since uclamp aggregation at the rq level in the fast 2534 * path is disabled, rendering this operation a NOP. 2535 * 2536 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It 2537 * will return the correct effective uclamp value of the task even if the 2538 * static key is disabled. 2539 */ 2540static __always_inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) 2541{ 2542 unsigned long min_util = 0; 2543 unsigned long max_util = 0; 2544 2545 if (!static_branch_likely(&sched_uclamp_used)) { 2546 return util; 2547 } 2548 2549 if (p) { 2550 min_util = uclamp_eff_value(p, UCLAMP_MIN); 2551 max_util = uclamp_eff_value(p, UCLAMP_MAX); 2552 2553 /* 2554 * Ignore last runnable task's max clamp, as this task will 2555 * reset it. Similarly, no need to read the rq's min clamp. 2556 */ 2557 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) { 2558 goto out; 2559 } 2560 } 2561 2562 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); 2563 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); 2564out: 2565 /* 2566 * Since CPU's {min,max}_util clamps are MAX aggregated considering 2567 * RUNNABLE tasks with _different_ clamps, we can end up with an 2568 * inversion. Fix it now when the clamps are applied. 2569 */ 2570 if (unlikely(min_util >= max_util)) { 2571 return min_util; 2572 } 2573 2574 return clamp(util, min_util, max_util); 2575} 2576 2577static inline bool uclamp_boosted(struct task_struct *p) 2578{ 2579 return uclamp_eff_value(p, UCLAMP_MIN) > 0; 2580} 2581 2582/* 2583 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 2584 * by default in the fast path and only gets turned on once userspace performs 2585 * an operation that requires it. 2586 * 2587 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 2588 * hence is active. 2589 */ 2590static inline bool uclamp_is_used(void) 2591{ 2592 return static_branch_likely(&sched_uclamp_used); 2593} 2594#else /* CONFIG_UCLAMP_TASK */ 2595static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) 2596{ 2597 return util; 2598} 2599 2600static inline bool uclamp_boosted(struct task_struct *p) 2601{ 2602 return false; 2603} 2604 2605static inline bool uclamp_is_used(void) 2606{ 2607 return false; 2608} 2609#endif /* CONFIG_UCLAMP_TASK */ 2610 2611#ifdef arch_scale_freq_capacity 2612#ifndef arch_scale_freq_invariant 2613#define arch_scale_freq_invariant() true 2614#endif 2615#else 2616#define arch_scale_freq_invariant() false 2617#endif 2618 2619#ifdef CONFIG_SMP 2620static inline unsigned long capacity_of(int cpu) 2621{ 2622 return cpu_rq(cpu)->cpu_capacity; 2623} 2624 2625static inline unsigned long capacity_orig_of(int cpu) 2626{ 2627 return cpu_rq(cpu)->cpu_capacity_orig; 2628} 2629#endif 2630 2631/** 2632 * enum schedutil_type - CPU utilization type 2633 * @FREQUENCY_UTIL: Utilization used to select frequency 2634 * @ENERGY_UTIL: Utilization used during energy calculation 2635 * 2636 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2637 * need to be aggregated differently depending on the usage made of them. This 2638 * enum is used within schedutil_freq_util() to differentiate the types of 2639 * utilization expected by the callers, and adjust the aggregation accordingly. 2640 */ 2641enum schedutil_type { 2642 FREQUENCY_UTIL, 2643 ENERGY_UTIL, 2644}; 2645 2646#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2647 2648unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max, enum schedutil_type type, 2649 struct task_struct *p); 2650 2651static inline unsigned long cpu_bw_dl(struct rq *rq) 2652{ 2653 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2654} 2655 2656static inline unsigned long cpu_util_dl(struct rq *rq) 2657{ 2658 return READ_ONCE(rq->avg_dl.util_avg); 2659} 2660 2661static inline unsigned long cpu_util_cfs(struct rq *rq) 2662{ 2663 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2664 2665 if (sched_feat(UTIL_EST)) { 2666 util = max_t(unsigned long, util, READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2667 } 2668 2669 return util; 2670} 2671 2672static inline unsigned long cpu_util_rt(struct rq *rq) 2673{ 2674 return READ_ONCE(rq->avg_rt.util_avg); 2675} 2676#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2677static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max, 2678 enum schedutil_type type, struct task_struct *p) 2679{ 2680 return 0; 2681} 2682#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2683 2684#ifdef CONFIG_HAVE_SCHED_AVG_IRQ 2685static inline unsigned long cpu_util_irq(struct rq *rq) 2686{ 2687 return rq->avg_irq.util_avg; 2688} 2689 2690static inline unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2691{ 2692 util *= (max - irq); 2693 util /= max; 2694 2695 return util; 2696} 2697#else 2698static inline unsigned long cpu_util_irq(struct rq *rq) 2699{ 2700 return 0; 2701} 2702 2703static inline unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2704{ 2705 return util; 2706} 2707#endif 2708 2709#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2710 2711#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2712 2713DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2714 2715static inline bool sched_energy_enabled(void) 2716{ 2717 return static_branch_unlikely(&sched_energy_present); 2718} 2719 2720#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2721 2722#define perf_domain_span(pd) NULL 2723static inline bool sched_energy_enabled(void) 2724{ 2725 return false; 2726} 2727 2728#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2729 2730#ifdef CONFIG_MEMBARRIER 2731/* 2732 * The scheduler provides memory barriers required by membarrier between: 2733 * - prior user-space memory accesses and store to rq->membarrier_state, 2734 * - store to rq->membarrier_state and following user-space memory accesses. 2735 * In the same way it provides those guarantees around store to rq->curr. 2736 */ 2737static inline void membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) 2738{ 2739 int membarrier_state; 2740 2741 if (prev_mm == next_mm) { 2742 return; 2743 } 2744 2745 membarrier_state = atomic_read(&next_mm->membarrier_state); 2746 if (READ_ONCE(rq->membarrier_state) == membarrier_state) { 2747 return; 2748 } 2749 2750 WRITE_ONCE(rq->membarrier_state, membarrier_state); 2751} 2752#else 2753static inline void membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) 2754{ 2755} 2756#endif 2757 2758#ifdef CONFIG_SMP 2759static inline bool is_per_cpu_kthread(struct task_struct *p) 2760{ 2761 if (!(p->flags & PF_KTHREAD)) { 2762 return false; 2763 } 2764 2765 if (p->nr_cpus_allowed != 1) { 2766 return false; 2767 } 2768 2769 return true; 2770} 2771#endif 2772 2773void swake_up_all_locked(struct swait_queue_head *q); 2774void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 2775 2776#ifdef CONFIG_SCHED_RTG 2777extern bool task_fits_max(struct task_struct *p, int cpu); 2778extern unsigned long capacity_spare_without(int cpu, struct task_struct *p); 2779extern int update_preferred_cluster(struct related_thread_group *grp, struct task_struct *p, u32 old_load, 2780 bool from_tick); 2781extern struct cpumask *find_rtg_target(struct task_struct *p); 2782#endif 2783 2784#ifdef CONFIG_SCHED_WALT 2785static inline int cluster_first_cpu(struct sched_cluster *cluster) 2786{ 2787 return cpumask_first(&cluster->cpus); 2788} 2789 2790extern struct list_head cluster_head; 2791extern struct sched_cluster *sched_cluster[NR_CPUS]; 2792 2793#define for_each_sched_cluster(cluster) list_for_each_entry_rcu(cluster, &cluster_head, list) 2794 2795extern struct mutex policy_mutex; 2796extern unsigned int sched_disable_window_stats; 2797extern unsigned int max_possible_freq; 2798extern unsigned int min_max_freq; 2799extern unsigned int max_possible_efficiency; 2800extern unsigned int min_possible_efficiency; 2801extern unsigned int max_capacity; 2802extern unsigned int min_capacity; 2803extern unsigned int max_load_scale_factor; 2804extern unsigned int max_possible_capacity; 2805extern unsigned int min_max_possible_capacity; 2806extern unsigned int max_power_cost; 2807extern unsigned int __read_mostly sched_init_task_load_windows; 2808extern unsigned int sysctl_sched_restrict_cluster_spill; 2809extern unsigned int sched_pred_alert_load; 2810extern struct sched_cluster init_cluster; 2811 2812static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) 2813{ 2814 rq->cum_window_demand_scaled += scaled_delta; 2815 if (unlikely((s64)rq->cum_window_demand_scaled < 0)) { 2816 rq->cum_window_demand_scaled = 0; 2817 } 2818} 2819 2820/* Is frequency of two cpus synchronized with each other? */ 2821static inline int same_freq_domain(int src_cpu, int dst_cpu) 2822{ 2823 struct rq *rq = cpu_rq(src_cpu); 2824 2825 if (src_cpu == dst_cpu) { 2826 return 1; 2827 } 2828 2829 return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask); 2830} 2831 2832extern void reset_task_stats(struct task_struct *p); 2833 2834#define CPU_RESERVED 1 2835static inline int is_reserved(int cpu) 2836{ 2837 struct rq *rq = cpu_rq(cpu); 2838 2839 return test_bit(CPU_RESERVED, &rq->walt_flags); 2840} 2841 2842static inline int mark_reserved(int cpu) 2843{ 2844 struct rq *rq = cpu_rq(cpu); 2845 2846 return test_and_set_bit(CPU_RESERVED, &rq->walt_flags); 2847} 2848 2849static inline void clear_reserved(int cpu) 2850{ 2851 struct rq *rq = cpu_rq(cpu); 2852 2853 clear_bit(CPU_RESERVED, &rq->walt_flags); 2854} 2855 2856static inline int cpu_capacity(int cpu) 2857{ 2858 return cpu_rq(cpu)->cluster->capacity; 2859} 2860 2861static inline int cpu_max_possible_capacity(int cpu) 2862{ 2863 return cpu_rq(cpu)->cluster->max_possible_capacity; 2864} 2865 2866static inline int cpu_load_scale_factor(int cpu) 2867{ 2868 return cpu_rq(cpu)->cluster->load_scale_factor; 2869} 2870 2871static inline unsigned int cluster_max_freq(struct sched_cluster *cluster) 2872{ 2873 /* 2874 * Governor and thermal driver don't know the other party's mitigation 2875 * voting. So struct cluster saves both and return min() for current 2876 * cluster fmax. 2877 */ 2878 return cluster->max_freq; 2879} 2880 2881/* Keep track of max/min capacity possible across CPUs "currently" */ 2882static inline void __update_min_max_capacity(void) 2883{ 2884 int i; 2885 int max_cap = 0, min_cap = INT_MAX; 2886 2887 for_each_possible_cpu(i) 2888 { 2889 if (!cpu_active(i)) { 2890 continue; 2891 } 2892 2893 max_cap = max(max_cap, cpu_capacity(i)); 2894 min_cap = min(min_cap, cpu_capacity(i)); 2895 } 2896 2897 max_capacity = max_cap; 2898 min_capacity = min_cap; 2899} 2900 2901/* 2902 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so 2903 * that "most" efficient cpu gets a load_scale_factor of 1 2904 */ 2905static inline unsigned long load_scale_cpu_efficiency(struct sched_cluster *cluster) 2906{ 2907 return DIV_ROUND_UP(CPU_FREQ_1K * max_possible_efficiency, cluster->efficiency); 2908} 2909 2910/* 2911 * Return load_scale_factor of a cpu in reference to cpu with best max_freq 2912 * (max_possible_freq), so that one with best max_freq gets a load_scale_factor 2913 * of 1. 2914 */ 2915static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster) 2916{ 2917 return DIV_ROUND_UP(CPU_FREQ_1K * max_possible_freq, cluster_max_freq(cluster)); 2918} 2919 2920static inline int compute_load_scale_factor(struct sched_cluster *cluster) 2921{ 2922 int load_scale = CPU_FREQ_1K; 2923 2924 /* 2925 * load_scale_factor accounts for the fact that task load 2926 * is in reference to "best" performing cpu. Task's load will need to be 2927 * scaled (up) by a factor to determine suitability to be placed on a 2928 * (little) cpu. 2929 */ 2930 load_scale *= load_scale_cpu_efficiency(cluster); 2931 load_scale >>= 0xa; 2932 2933 load_scale *= load_scale_cpu_freq(cluster); 2934 load_scale >>= 0xa; 2935 2936 return load_scale; 2937} 2938 2939static inline bool is_max_capacity_cpu(int cpu) 2940{ 2941 return cpu_max_possible_capacity(cpu) == max_possible_capacity; 2942} 2943 2944static inline bool is_min_capacity_cpu(int cpu) 2945{ 2946 return cpu_max_possible_capacity(cpu) == min_max_possible_capacity; 2947} 2948 2949/* 2950 * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that 2951 * least efficient cpu gets capacity of 1024 2952 */ 2953static unsigned long capacity_scale_cpu_efficiency(struct sched_cluster *cluster) 2954{ 2955 return (0x400 * cluster->efficiency) / min_possible_efficiency; 2956} 2957 2958/* 2959 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq 2960 * (min_max_freq), such that one with lowest max_freq gets capacity of 1024. 2961 */ 2962static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster) 2963{ 2964 return (0x400 * cluster_max_freq(cluster)) / min_max_freq; 2965} 2966 2967static inline int compute_capacity(struct sched_cluster *cluster) 2968{ 2969 int capacity = 0x400; 2970 2971 capacity *= capacity_scale_cpu_efficiency(cluster); 2972 capacity >>= 0xa; 2973 2974 capacity *= capacity_scale_cpu_freq(cluster); 2975 capacity >>= 0xa; 2976 2977 return capacity; 2978} 2979 2980static inline unsigned int power_cost(int cpu, u64 demand) 2981{ 2982 return cpu_max_possible_capacity(cpu); 2983} 2984 2985static inline unsigned long cpu_util_freq_walt(int cpu) 2986{ 2987 u64 util; 2988 struct rq *rq = cpu_rq(cpu); 2989 unsigned long capacity = capacity_orig_of(cpu); 2990 2991 if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util)) { 2992 return cpu_util(cpu); 2993 } 2994 2995 util = rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT; 2996 util = div_u64(util, sched_ravg_window); 2997 2998 return (util >= capacity) ? capacity : util; 2999} 3000 3001static inline bool hmp_capable(void) 3002{ 3003 return max_possible_capacity != min_max_possible_capacity; 3004} 3005#else /* CONFIG_SCHED_WALT */ 3006static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) 3007{ 3008} 3009 3010static inline int same_freq_domain(int src_cpu, int dst_cpu) 3011{ 3012 return 1; 3013} 3014 3015static inline int is_reserved(int cpu) 3016{ 3017 return 0; 3018} 3019 3020static inline void clear_reserved(int cpu) 3021{ 3022} 3023 3024static inline bool hmp_capable(void) 3025{ 3026 return false; 3027} 3028#endif /* CONFIG_SCHED_WALT */ 3029 3030struct sched_avg_stats { 3031 int nr; 3032 int nr_misfit; 3033 int nr_max; 3034 int nr_scaled; 3035}; 3036#ifdef CONFIG_SCHED_RUNNING_AVG 3037extern void sched_get_nr_running_avg(struct sched_avg_stats *stats); 3038#else 3039static inline void sched_get_nr_running_avg(struct sched_avg_stats *stats) 3040{ 3041} 3042#endif 3043 3044#ifdef CONFIG_CPU_ISOLATION_OPT 3045extern int group_balance_cpu_not_isolated(struct sched_group *sg); 3046#else 3047static inline int group_balance_cpu_not_isolated(struct sched_group *sg) 3048{ 3049 return group_balance_cpu(sg); 3050} 3051#endif /* CONFIG_CPU_ISOLATION_OPT */ 3052 3053#ifdef CONFIG_HOTPLUG_CPU 3054extern void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, bool migrate_pinned_tasks); 3055#endif 3056#endif