/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#ifndef COMMON_SDK_LINUX_KERNEL_SCHED_SCHED_H
#define COMMON_SDK_LINUX_KERNEL_SCHED_SCHED_H

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>
#include <asm-generic/vmlinux.lds.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x) (WARN_ONCE(x, #x))
#else
#define SCHED_WARN_ON(x) ({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

#ifdef CONFIG_SCHED_RT_CAS
extern unsigned long uclamp_task_util(struct task_struct *p);
#endif

#ifdef CONFIG_SCHED_WALT
extern unsigned int sched_ravg_window;
extern unsigned int walt_cpu_util_freq_divisor;

struct walt_sched_stats {
    u64 cumulative_runnable_avg_scaled;
};

struct load_subtractions {
    u64 window_start;
    u64 subs;
    u64 new_subs;
};

#define NUM_TRACKED_WINDOWS 2

struct sched_cluster {
    raw_spinlock_t load_lock;
    struct list_head list;
    struct cpumask cpus;
    int id;
    int max_power_cost;
    int min_power_cost;
    int max_possible_capacity;
    int capacity;
    int efficiency; /* Differentiate cpus with different IPC capability */
    int load_scale_factor;
    unsigned int exec_scale_factor;
    /*
     * max_freq = user maximum
     * max_possible_freq = maximum supported by hardware
     */
    unsigned int cur_freq, max_freq, min_freq;
    unsigned int max_possible_freq;
    bool freq_init_done;
};

extern unsigned int sched_disable_window_stats;
#endif /* CONFIG_SCHED_WALT */

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED 1
#define TASK_ON_RQ_MIGRATING 2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern const u64 max_cfs_quota_period;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void init_sched_groups_capacity(int cpu, struct sched_domain *sd);
#endif

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
#ifdef CONFIG_SCHED_LATENCY_NICE
#define MAX_LATENCY_NICE 19
#define MIN_LATENCY_NICE -20
#define LATENCY_NICE_WIDTH \
    (MAX_LATENCY_NICE - MIN_LATENCY_NICE + 1)
#define DEFAULT_LATENCY_NICE 0
#define DEFAULT_LATENCY_PRIO (DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2)
#define NICE_TO_LATENCY(nice) ((nice) + DEFAULT_LATENCY_PRIO)
#define LATENCY_TO_NICE(prio) ((prio) - DEFAULT_LATENCY_PRIO)
#define NICE_LATENCY_SHIFT (SCHED_FIXEDPOINT_SHIFT)
#define NICE_LATENCY_WEIGHT_MAX (1L << NICE_LATENCY_SHIFT)
#endif /* CONFIG_SCHED_LATENCY_NICE */
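
/*
 * Illustrative arithmetic (not part of the original header): with HZ == 250,
 * NS_TO_JIFFIES(2 * NSEC_PER_SEC) == 500. For the latency-nice mapping above,
 * DEFAULT_LATENCY_PRIO == 20, so NICE_TO_LATENCY(-20) == 0,
 * NICE_TO_LATENCY(0) == 20 and NICE_TO_LATENCY(19) == 39, with
 * LATENCY_TO_NICE() undoing the shift.
 */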

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
#define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w) \
    ({ \
        unsigned long __w = (w); \
        if (__w) \
            __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
        __w; \
    })
#else
#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
#define scale_load(w) (w)
#define scale_load_down(w) (w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
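
/*
 * Worked example (illustrative, not from the original source): with
 * SCHED_FIXEDPOINT_SHIFT == 10, a 64-bit kernel gets NICE_0_LOAD == 1 << 20,
 * scale_load(1024) == 1 << 20 and scale_load_down(1 << 20) == 1024, while on
 * 32-bit both helpers are the identity and NICE_0_LOAD == 1 << 10.
 */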
#define CPU_FREQ_1K 1024
#define CPU_SAMPLE_ARTE 8

extern struct cpufreq_governor schedutil_gov;

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE 10

/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF ((u64)~0ULL)

static inline int idle_policy(int policy)
{
    return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
    return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
    return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
    return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
    return idle_policy(policy) || fair_policy(policy) || rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
    return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
    return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
    return dl_policy(p->policy);
}

#define cap_scale(v, s) (((v) * (s)) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
    s64 diff = sample - *avg;

    *avg += diff / CPU_SAMPLE_ARTE;
}

/*
 * Shifting a value by an exponent greater *or equal* to the size of said value
 * is UB; cap at size-1.
 */
#define shr_bound(val, shift) ((val) >> min_t(typeof(shift), (shift), BITS_PER_TYPE(typeof(val)) - 1))
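
/*
 * Worked examples (illustrative, not from the original source):
 * - update_avg() keeps a 1/CPU_SAMPLE_ARTE (1/8) exponential moving average:
 *   with *avg == 800 and sample == 1600 the new *avg is 800 + 800 / 8 == 900.
 * - cap_scale(512, 512) == (512 * 512) >> SCHED_CAPACITY_SHIFT == 256, i.e.
 *   scaling by a half-capacity CPU halves the value.
 * - shr_bound() clamps the shift: for a u32 value a shift of 40 is reduced
 *   to 31, avoiding undefined behaviour.
 */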

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV 0x10000000

#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
    return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
    return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
    return dl_entity_is_special(a) || dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
    DECLARE_BITMAP(bitmap, MAX_RT_PRIO + 1); /* include 1 bit for delimiter */
    struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
    /* nests inside the rq lock: */
    raw_spinlock_t rt_runtime_lock;
    ktime_t rt_period;
    u64 rt_runtime;
    struct hrtimer rt_period_timer;
    unsigned int rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

struct dl_bandwidth {
    raw_spinlock_t dl_runtime_lock;
    u64 dl_runtime;
    u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
    return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of each cpu;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
    raw_spinlock_t lock;
    u64 bw;
    u64 total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
    dl_b->total_bw -= tsk_bw;
    __dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
    dl_b->total_bw += tsk_bw;
    __dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
{
    return (dl_b->bw != -1) && (cap_scale(dl_b->bw, cap) < (dl_b->total_bw - old_bw + new_bw));
}

/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU original capacity and the runtime/deadline ratio of the task.
 *
 * The function returns true if the original capacity of @cpu, scaled by
 * SCHED_CAPACITY_SCALE, is at least the runtime/deadline ratio of the task,
 * and false otherwise.
 */
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
    unsigned long cap = arch_scale_cpu_capacity(cpu);

    return ((cap_scale(p->dl.dl_deadline, cap)) >= (p->dl.dl_runtime));
}
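
/*
 * Worked example (illustrative, not from the original source): a task with
 * dl_runtime == 10ms and dl_deadline == 40ms needs roughly a quarter of a
 * CPU. On a CPU with arch_scale_cpu_capacity() == 512 (half of
 * SCHED_CAPACITY_SCALE), dl_task_fits_capacity() evaluates
 * cap_scale(40ms, 512) == 20ms >= 10ms, so the task still fits; with
 * dl_runtime == 25ms it would not.
 */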

extern void init_dl_bw(struct dl_bw *dl_b);
extern int sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int dl_cpu_busy(int cpu, struct task_struct *p);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>
#include <linux/psi.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
    raw_spinlock_t lock;
    ktime_t period;
    u64 quota;
    u64 runtime;
    s64 hierarchical_quota;

    u8 idle;
    u8 period_active;
    u8 slack_started;
    struct hrtimer period_timer;
    struct hrtimer slack_timer;
    struct list_head throttled_cfs_rq;

    /* Statistics: */
    int nr_periods;
    int nr_throttled;
    u64 throttled_time;
#endif
};

/* Task group related information */
struct task_group {
    struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
    /* schedulable entities of this group on each CPU */
    struct sched_entity **se;
    /* runqueue "owned" by this group on each CPU */
    struct cfs_rq **cfs_rq;
    unsigned long shares;

#ifdef CONFIG_SMP
    /*
     * load_avg can be heavily contended at clock tick time, so put
     * it in its own cacheline separated from the fields above which
     * will also be accessed at each tick.
     */
    atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
    struct sched_rt_entity **rt_se;
    struct rt_rq **rt_rq;

    struct rt_bandwidth rt_bandwidth;
#endif

    struct rcu_head rcu;
    struct list_head list;

    struct task_group *parent;
    struct list_head siblings;
    struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
    struct autogroup *autogroup;
#endif

    struct cfs_bandwidth cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
    /* The two decimal precision [%] value requested from user-space */
    unsigned int uclamp_pct[UCLAMP_CNT];
    /* Clamp values requested for a task group */
    struct uclamp_se uclamp_req[UCLAMP_CNT];
    /* Effective clamp values used for a task group */
    struct uclamp_se uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_SCHED_RTG_CGROUP
    /*
     * Controls whether tasks of this cgroup should be colocated with each
     * other and tasks of other cgroups that have the same flag turned on.
     */
    bool colocate;

    /* Controls whether further updates are allowed to the colocate flag */
    bool colocate_update_disabled;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities queued
 * on it, so the weight of an entity should not be too large; the same
 * applies to the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES (1UL << 1)
#define MAX_SHARES (1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from, tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
    return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
                              struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu,
                             struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg, struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);
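
/*
 * Minimal usage sketch for the task-group API above (illustrative only, not
 * part of the original header). A visitor that counts groups could be walked
 * over the whole hierarchy like this, under rcu_read_lock() as required by
 * walk_tg_tree(); tg_count_one and nr_groups are hypothetical names:
 *
 *    static int tg_count_one(struct task_group *tg, void *data)
 *    {
 *        (*(int *)data)++;
 *        return 0;
 *    }
 *
 *    int nr_groups = 0;
 *
 *    rcu_read_lock();
 *    walk_tg_tree(tg_count_one, tg_nop, &nr_groups);
 *    rcu_read_unlock();
 */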

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next)
{
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth {
};

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
    struct load_weight load;
    unsigned int nr_running;
    unsigned int h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
    unsigned int idle_h_nr_running; /* SCHED_IDLE */

    u64 exec_clock;
    u64 min_vruntime;
#ifndef CONFIG_64BIT
    u64 min_vruntime_copy;
#endif

    struct rb_root_cached tasks_timeline;

    /*
     * 'curr' points to the currently running entity on this cfs_rq.
     * It is set to NULL otherwise (i.e. when no entity is currently running).
     */
    struct sched_entity *curr;
    struct sched_entity *next;
    struct sched_entity *last;
    struct sched_entity *skip;

#ifdef CONFIG_SCHED_DEBUG
    unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
    /*
     * CFS load tracking
     */
    struct sched_avg avg;
#ifndef CONFIG_64BIT
    u64 load_last_update_time_copy;
#endif
    struct {
        raw_spinlock_t lock ____cacheline_aligned;
        int nr;
        unsigned long load_avg;
        unsigned long util_avg;
        unsigned long runnable_avg;
    } removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
    unsigned long tg_load_avg_contrib;
    long propagate;
    long prop_runnable_sum;

    /*
     * h_load = weight * f(tg)
     *
     * Where f(tg) is the recursive weight fraction assigned to
     * this group.
     */
    unsigned long h_load;
    u64 last_h_load_update;
    struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
    struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */

    /*
     * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
     * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
     * (like users, containers etc.)
     *
     * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
     * This list is used during load balance.
     */
    int on_list;
    struct list_head leaf_cfs_rq_list;
    struct task_group *tg; /* group that "owns" this runqueue */

#ifdef CONFIG_SCHED_WALT
    struct walt_sched_stats walt_stats;
#endif

#ifdef CONFIG_CFS_BANDWIDTH
    int runtime_enabled;
    s64 runtime_remaining;

    u64 throttled_clock;
    u64 throttled_clock_pelt;
    u64 throttled_clock_pelt_time;
    int throttled;
    int throttle_count;
    struct list_head throttled_list;
#ifdef CONFIG_SCHED_WALT
    u64 cumulative_runnable_avg;
#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
    return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
#define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
    struct rt_prio_array active;
    unsigned int rt_nr_running;
    unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
    struct {
        int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
        int next; /* next highest */
#endif
    } highest_prio;
#endif
#ifdef CONFIG_SMP
    unsigned long rt_nr_migratory;
    unsigned long rt_nr_total;
    int overloaded;
    struct plist_head pushable_tasks;

#endif /* CONFIG_SMP */
    int rt_queued;

    int rt_throttled;
    u64 rt_time;
    u64 rt_runtime;
    /* Nests inside the rq lock: */
    raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
    unsigned long rt_nr_boosted;

    struct rq *rq;
    struct task_group *tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
    return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
    /* runqueue is an rbtree, ordered by deadline */
    struct rb_root_cached root;

    unsigned long dl_nr_running;

#ifdef CONFIG_SMP
    /*
     * Deadline values of the currently executing and the
     * earliest ready task on this rq. Caching these facilitates
     * the decision whether or not a ready but not running task
     * should migrate somewhere else.
     */
    struct {
        u64 curr;
        u64 next;
    } earliest_dl;

    unsigned long dl_nr_migratory;
    int overloaded;

    /*
     * Tasks on this rq that can be pushed away. They are kept in
     * an rb-tree, ordered by tasks' deadlines, with caching
     * of the leftmost (earliest deadline) element.
     */
    struct rb_root_cached pushable_dl_tasks_root;
#else
    struct dl_bw dl_bw;
#endif
    /*
     * "Active utilization" for this runqueue: increased when a
     * task wakes up (becomes TASK_RUNNING) and decreased when a
     * task blocks
     */
    u64 running_bw;

    /*
     * Utilization of the tasks "assigned" to this runqueue (including
     * the tasks that are in runqueue and the tasks that executed on this
     * CPU and blocked). Increased when a task moves to this runqueue, and
     * decreased when the task moves away (migrates, changes scheduling
     * policy, or terminates).
     * This is needed to compute the "inactive utilization" for the
     * runqueue (inactive utilization = this_bw - running_bw).
     */
    u64 this_bw;
    u64 extra_bw;

    /*
     * Inverse of the fraction of CPU utilization that can be reclaimed
     * by the GRUB algorithm.
     */
    u64 bw_ratio;
};
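
/*
 * Illustrative reading of the fields above (not from the original source):
 * a task with dl_runtime == 10ms per dl_period == 100ms contributes about
 * 10% of a CPU to this_bw for as long as it is assigned to this runqueue,
 * but to running_bw only while it is runnable; the difference
 * (this_bw - running_bw) is the inactive utilization that GRUB may reclaim.
 */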

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
    if (!entity_is_task(se)) {
        se->runnable_weight = se->my_q->h_nr_running;
    }
}

static inline long se_runnable(struct sched_entity *se)
{
    if (entity_is_task(se)) {
        return !!se->on_rq;
    } else {
        return se->runnable_weight;
    }
}

#else
#define entity_is_task(se) 1

static inline void se_update_runnable(struct sched_entity *se)
{
}

static inline long se_runnable(struct sched_entity *se)
{
    return !!se->on_rq;
}
#endif

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
    return scale_load_down(se->load.weight);
}

static inline bool sched_asym_prefer(int a, int b)
{
    return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
    struct em_perf_domain *em_pd;
    struct perf_domain *next;
    struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD 0x1     /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
    atomic_t refcount;
    atomic_t rto_count;
    struct rcu_head rcu;
    cpumask_var_t span;
    cpumask_var_t online;

    /*
     * Indicate pullable load on at least one CPU, e.g:
     * - More than one runnable task
     * - Running task is misfit
     */
    int overload;

    /* Indicate one or more cpus over-utilized (tipping point) */
    int overutilized;

    /*
     * The bit corresponding to a CPU gets set here if such CPU has more
     * than one runnable -deadline task (as it is below for RT tasks).
     */
    cpumask_var_t dlo_mask;
    atomic_t dlo_count;
    struct dl_bw dl_bw;
    struct cpudl cpudl;

#ifdef HAVE_RT_PUSH_IPI
    /*
     * For IPI pull requests, loop across the rto_mask.
     */
    struct irq_work rto_push_work;
    raw_spinlock_t rto_lock;
    /* These are only updated and read within rto_lock */
    int rto_loop;
    int rto_cpu;
    /* These atomics are updated outside of a lock */
    atomic_t rto_loop_next;
    atomic_t rto_loop_start;
#endif
    /*
     * The "RT overload" flag: it gets set if a CPU has more than
     * one runnable RT task.
     */
    cpumask_var_t rto_mask;
    struct cpupri cpupri;

    unsigned long max_cpu_capacity;

    /*
     * NULL-terminated list of performance domains intersecting with the
     * CPUs of the rd. Protected by RCU.
     */
    struct perf_domain __rcu *pd;
#ifdef CONFIG_SCHED_RT_CAS
    int max_cap_orig_cpu;
#endif
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
    unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
    unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};
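
/*
 * Illustrative layout (not from the original source): with
 * SCHED_CAPACITY_SCALE == 1024, bits_per(SCHED_CAPACITY_SCALE) == 11, so
 * "value" occupies 11 bits and "tasks" the remaining 53 bits on a 64-bit
 * kernel (21 bits on 32-bit), i.e. each bucket fits in one word.
 */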

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
    unsigned int value;
    struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
    /* runqueue lock: */
    raw_spinlock_t lock;

    /*
     * nr_running and cpu_load should be in the same cacheline because
     * remote CPUs use both these fields when doing load calculation.
     */
    unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
    unsigned int nr_numa_running;
    unsigned int nr_preferred_running;
    unsigned int numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
    unsigned long last_blocked_load_update_tick;
    unsigned int has_blocked_load;
    call_single_data_t nohz_csd;
#endif /* CONFIG_SMP */
    unsigned int nohz_tick_stopped;
    atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
    unsigned int ttwu_pending;
#endif
    u64 nr_switches;

#ifdef CONFIG_UCLAMP_TASK
    /* Utilization clamp values based on CPU's RUNNABLE tasks */
    struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
    unsigned int uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

    struct cfs_rq cfs;
    struct rt_rq rt;
    struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
    /* list of leaf cfs_rq on this CPU: */
    struct list_head leaf_cfs_rq_list;
    struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

    /*
     * This is part of a global counter where only the total sum
     * over all CPUs matters. A task can increase this counter on
     * one CPU and if it got migrated afterwards it may decrease
     * it on another CPU. Always updated under the runqueue lock:
     */
    unsigned long nr_uninterruptible;

    struct task_struct __rcu *curr;
    struct task_struct *idle;
    struct task_struct *stop;
    unsigned long next_balance;
    struct mm_struct *prev_mm;

    unsigned int clock_update_flags;
    u64 clock;
    /* Ensure that all clocks are in the same cache line */
    u64 clock_task ____cacheline_aligned;
    u64 clock_pelt;
    unsigned long lost_idle_time;

    atomic_t nr_iowait;

#ifdef CONFIG_MEMBARRIER
    int membarrier_state;
#endif

#ifdef CONFIG_SMP
    struct root_domain *rd;
    struct sched_domain __rcu *sd;

    unsigned long cpu_capacity;
    unsigned long cpu_capacity_orig;

    struct callback_head *balance_callback;

    unsigned char nohz_idle_balance;
    unsigned char idle_balance;

    unsigned long misfit_task_load;

    /* For active balancing */
    int active_balance;
    int push_cpu;
#ifdef CONFIG_SCHED_EAS
    struct task_struct *push_task;
#endif
    struct cpu_stop_work active_balance_work;

    /* For rt active balancing */
#ifdef CONFIG_SCHED_RT_ACTIVE_LB
    int rt_active_balance;
    struct task_struct *rt_push_task;
    struct cpu_stop_work rt_active_balance_work;
#endif

    /* CPU of this runqueue: */
    int cpu;
    int online;

    struct list_head cfs_tasks;

    struct sched_avg avg_rt;
    struct sched_avg avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
    struct sched_avg avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
    struct sched_avg avg_thermal;
#endif
    u64 idle_stamp;
    u64 avg_idle;

    /* This is used to determine avg_idle's max value */
    u64 max_idle_balance_cost;
#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_WALT
    struct sched_cluster *cluster;
    struct cpumask freq_domain_cpumask;
    struct walt_sched_stats walt_stats;

    u64 window_start;
    unsigned long walt_flags;

    u64 cur_irqload;
    u64 avg_irqload;
    u64 irqload_ts;
    u64 curr_runnable_sum;
    u64 prev_runnable_sum;
    u64 nt_curr_runnable_sum;
    u64 nt_prev_runnable_sum;
    u64 cum_window_demand_scaled;
    struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
#ifdef CONFIG_SCHED_RTG
    struct group_cpu_time grp_time;
#endif
#endif /* CONFIG_SCHED_WALT */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
    u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
    u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
    u64 prev_steal_time_rq;
#endif

    /* calc_load related fields */
    unsigned long calc_load_update;
    long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
    call_single_data_t hrtick_csd;
#endif
    struct hrtimer hrtick_timer;
    ktime_t hrtick_time;
#endif

#ifdef CONFIG_SCHEDSTATS
    /* latency stats */
    struct sched_info rq_sched_info;
    unsigned long long rq_cpu_time;
    /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

    /* sys_sched_yield() stats */
    unsigned int yld_count;

    /* schedule() stats */
    unsigned int sched_count;
    unsigned int sched_goidle;

    /* try_to_wake_up() stats */
    unsigned int ttwu_count;
    unsigned int ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
    /* Must be inspected within a rcu lock section */
    struct cpuidle_state *idle_state;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
    return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
    return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
    return rq->cpu;
#else
    return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
    if (static_branch_unlikely(&sched_smt_present)) {
        __update_idle_core(rq);
    }
}

#else
static inline void update_idle_core(struct rq *rq)
{
}
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() this_cpu_ptr(&runqueues)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)

extern void update_rq_clock(struct rq *rq);

static inline u64 __rq_clock_broken(struct rq *rq)
{
    return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *    if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP 0x01
#define RQCF_ACT_SKIP 0x02
#define RQCF_UPDATED 0x04

static inline void assert_clock_updated(struct rq *rq)
{
    /*
     * The only reason for not seeing a clock update since the
     * last rq_pin_lock() is if we're currently skipping updates.
     */
    SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
    lockdep_assert_held(&rq->lock);
    assert_clock_updated(rq);

    return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
    lockdep_assert_held(&rq->lock);
    assert_clock_updated(rq);

    return rq->clock_task;
}

/**
 * By default the decay is the default pelt decay period.
 * The decay shift can change the decay period in
 * multiples of 32.
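 * (In other words, the decay period in ms is 32 << sched_thermal_decay_shift:)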
 *  Decay shift   Decay period(ms)
 *      0               32
 *      1               64
 *      2               128
 *      3               256
 *      4               512
 */
extern int sched_thermal_decay_shift;

static inline u64 rq_clock_thermal(struct rq *rq)
{
    return rq_clock_task(rq) >> sched_thermal_decay_shift;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
    lockdep_assert_held(&rq->lock);
    rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is cancelled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
    lockdep_assert_held(&rq->lock);
    rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
    unsigned long flags;
    struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
    /*
     * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
     * current pin context is stashed here in case it needs to be
     * restored in rq_repin_lock().
     */
    unsigned int clock_update_flags;
#endif
};

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
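 *
 * A minimal usage sketch (illustrative only):
 *
 *    struct rq_flags rf;
 *
 *    rq_lock(rq, &rf);
 *    update_rq_clock(rq);
 *    ...
 *    rq_unlock(rq, &rf);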
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
    rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
    rq->clock_update_flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP);
    rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
    if (rq->clock_update_flags > RQCF_ACT_SKIP) {
        rf->clock_update_flags = RQCF_UPDATED;
    }
#endif

    lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
    lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
    /*
     * Restore the value we stashed in @rf for this pin context.
     */
    rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(p->pi_lock) __acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
{
    rq_unpin_lock(rq, rf);
    raw_spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) __releases(rq->lock)
    __releases(p->pi_lock)
{
    rq_unpin_lock(rq, rf);
    raw_spin_unlock(&rq->lock);
    raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
{
    raw_spin_lock_irqsave(&rq->lock, rf->flags);
    rq_pin_lock(rq, rf);
}

static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
{
    raw_spin_lock_irq(&rq->lock);
    rq_pin_lock(rq, rf);
}

static inline void rq_lock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
{
    raw_spin_lock(&rq->lock);
    rq_pin_lock(rq, rf);
}

static inline void rq_relock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
{
    raw_spin_lock(&rq->lock);
    rq_repin_lock(rq, rf);
}

static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
{
    rq_unpin_lock(rq, rf);
    raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}

static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
{
    rq_unpin_lock(rq, rf);
    raw_spin_unlock_irq(&rq->lock);
}

static inline void rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
{
    rq_unpin_lock(rq, rf);
    raw_spin_unlock(&rq->lock);
}

static inline struct rq *this_rq_lock_irq(struct rq_flags *rf) __acquires(rq->lock)
{
    struct rq *rq;

    local_irq_disable();
    rq = this_rq();
    rq_lock(rq, rf);
    return rq;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
    NUMA_DIRECT,
    NUMA_GLUELESS_MESH,
    NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
#else
static inline void sched_init_numa(void)
{
}
static inline void sched_domains_numa_masks_set(unsigned int cpu)
{
}
static inline void sched_domains_numa_masks_clear(unsigned int cpu)
{
}
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
    return nr_cpu_ids;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats { NUMA_MEM = 0, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF };
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t, int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void queue_balance_callback(struct rq *rq, struct callback_head *head, void (*func)(struct rq *rq))
{
    lockdep_assert_held(&rq->lock);

    if (unlikely(head->next)) {
        return;
    }

    head->func = (void (*)(struct callback_head *))func;
    head->next = rq->balance_callback;
    rq->balance_callback = head;
}

#define rcu_dereference_check_sched_domain(p) rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
    for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu: The CPU whose highest level of sched domain is to
 *       be returned.
 * @flag: The flag to check for the highest sched_domain
 *        for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains the given flag.
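 * Returns NULL if @flag is not set in the CPU's lowest sched_domain (or if
 * the CPU has no sched_domain attached at all).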
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
    struct sched_domain *sd, *hsd = NULL;

    for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) {
        if (!(sd->flags & flag)) {
            break;
        }
        hsd = sd;
    }

    return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
    struct sched_domain *sd;

    for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) {
        if (sd->flags & flag) {
            break;
        }
    }

    return sd;
}

DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;

struct sched_group_capacity {
    atomic_t ref;
    /*
     * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
     * for a single CPU.
     */
    unsigned long capacity;
    unsigned long min_capacity;  /* Min per-CPU capacity in group */
    unsigned long max_capacity;  /* Max per-CPU capacity in group */
    unsigned long next_update;
    int imbalance;               /* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
    int id;
#endif

    unsigned long cpumask[];     /* Balance mask */
};

struct sched_group {
    struct sched_group *next;    /* Must be a circular list */
    atomic_t ref;

    unsigned int group_weight;
    struct sched_group_capacity *sgc;
    int asym_prefer_cpu;         /* CPU of highest priority in group */

    /*
     * The CPUs this group covers.
     *
     * NOTE: this field is variable length. (Allocated dynamically
     * by attaching extra space to the end of the structure,
     * depending on how many CPUs the kernel has booted up with)
     */
    unsigned long cpumask[];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
    return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
    return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
    return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

extern void flush_smp_call_function_from_idle(void);

#else /* !CONFIG_SMP: */
static inline void flush_smp_call_function_from_idle(void)
{
}
#endif

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
    return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
    struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
    set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
    p->se.cfs_rq = tg->cfs_rq[cpu];
    p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
    p->rt.rt_rq = tg->rt_rq[cpu];
    p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
}
static inline struct task_group *task_group(struct task_struct *p)
{
    return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
    set_task_rq(p, cpu);
#ifdef CONFIG_SMP
    /*
     * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
     * successfully executed on another CPU. We must ensure that updates of
     * per-task data have been completed by this moment.
     */
    smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
    WRITE_ONCE(p->cpu, cpu);
#else
    WRITE_ONCE(task_thread_info(p)->cpu, cpu);
#endif
    p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
#include <linux/static_key.h>
#define const_debug __read_mostly
#else
#define const_debug const
#endif

#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,

enum {
#include "features.h"
    __SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG

/*
 * To support run-time toggling of sched features, all the translation units
 * (but core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#ifdef CONFIG_JUMP_LABEL
#define SCHED_FEAT(name, enabled)                                             \
    static __always_inline bool static_branch_##name(struct static_key *key) \
    {                                                                         \
        return static_key_##enabled(key);                                    \
    }

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !CONFIG_JUMP_LABEL */

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* CONFIG_JUMP_LABEL */

#else /* !SCHED_DEBUG */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constants propagation at compile time and compiler optimization based on
 * features default.
 */
#define SCHED_FEAT(name, enabled) (1UL << __SCHED_FEAT_##name) * (enabled) |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
    0;
#undef SCHED_FEAT

#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG */

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
    return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
    if (sysctl_sched_rt_runtime < 0) {
        return RUNTIME_INF;
    }

    return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
    return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
    return p->on_cpu;
#else
    return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
    return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
    return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}

/*
 * wake flags
 */
#define WF_SYNC 0x01     /* Waker goes to sleep after wakeup */
#define WF_FORK 0x02     /* Child wakeup after fork */
#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
#define WF_ON_CPU 0x08   /* Wakee is on_cpu */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value.
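 * (For example, a nice-0 SCHED_NORMAL task has weight 1024, and adjacent nice
 *  levels differ in weight by roughly a factor of 1.25; see the
 *  sched_prio_to_weight[] table declared below.)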
 * For SCHED_NORMAL tasks this is just a scaled version of the new time slice
 * allocation that they receive on time slice expiry etc.
 */

#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];
#ifdef CONFIG_SCHED_LATENCY_NICE
extern const int sched_latency_to_weight[40];
#endif

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE 0x02    /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04    /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
#define ENQUEUE_MOVE 0x04
#define ENQUEUE_NOCLOCK 0x08

#define ENQUEUE_HEAD 0x10
#define ENQUEUE_REPLENISH 0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED 0x40
#else
#define ENQUEUE_MIGRATED 0x00
#endif

#define ENQUEUE_WAKEUP_SYNC 0x80

#define RETRY_TASK ((void *)-1UL)

struct sched_class {
#ifdef CONFIG_UCLAMP_TASK
    int uclamp_enabled;
#endif

    void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
    void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
    void (*yield_task)(struct rq *rq);
    bool (*yield_to_task)(struct rq *rq, struct task_struct *p);

    void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

    struct task_struct *(*pick_next_task)(struct rq *rq);

    void (*put_prev_task)(struct rq *rq, struct task_struct *p);
    void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
    int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
    int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
    void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

    void (*task_woken)(struct rq *this_rq, struct task_struct *task);

    void (*set_cpus_allowed)(struct task_struct *p, const struct cpumask *newmask);

    void (*rq_online)(struct rq *rq);
    void (*rq_offline)(struct rq *rq);
#endif

    void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
    void (*task_fork)(struct task_struct *p);
    void (*task_dead)(struct task_struct *p);

    /*
     * The switched_from() call is allowed to drop rq->lock, therefore we
     * cannot assume the switched_from/switched_to pair is serialized by
     * rq->lock. They are however serialized by p->pi_lock.
     */
    void (*switched_from)(struct rq *this_rq, struct task_struct *task);
    void (*switched_to)(struct rq *this_rq, struct task_struct *task);
    void (*prio_changed)(struct rq *this_rq, struct task_struct *task, int oldprio);

    unsigned int (*get_rr_interval)(struct rq *rq, struct task_struct *task);

    void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP 0
#define TASK_MOVE_GROUP 1

#ifdef CONFIG_FAIR_GROUP_SCHED
    void (*task_change_group)(struct task_struct *p, int type);
#endif
#ifdef CONFIG_SCHED_WALT
    void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled);
#endif
#ifdef CONFIG_SCHED_EAS
    void (*check_for_migration)(struct rq *rq, struct task_struct *p);
#endif
} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
    WARN_ON_ONCE(rq->curr != prev);
    prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
    WARN_ON_ONCE(rq->curr != next);
    next->sched_class->set_next_task(rq, next, false);
}

/* Defined in include/asm-generic/vmlinux.lds.h */
extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];

#define sched_class_highest (__end_sched_classes - 1)
#define sched_class_lowest (__begin_sched_classes - 1)

#define for_class_range(class, _from, _to) for (class = (_from); class != (_to); (class)--)

#define for_each_class(class) for_class_range(class, sched_class_highest, sched_class_lowest)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

static inline bool sched_stop_runnable(struct rq *rq)
{
    return rq->stop && task_on_rq_queued(rq->stop);
}

static inline bool sched_dl_runnable(struct rq *rq)
{
    return rq->dl.dl_nr_running > 0;
}

static inline bool sched_rt_runnable(struct rq *rq)
{
    return rq->rt.rt_queued > 0;
}

static inline bool sched_fair_runnable(struct rq *rq)
{
    return rq->cfs.nr_running > 0;
}

extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
extern struct task_struct *pick_next_task_idle(struct rq *rq);

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq, struct cpuidle_state *idle_state)
{
    rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
    SCHED_WARN_ON(!rcu_read_lock_held());

    return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq, struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
    return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);

#define BW_SHIFT 20
#define BW_UNIT (1 << BW_SHIFT)
#define RATIO_SHIFT 8
#define MAX_BW_BITS (64 - BW_SHIFT)
#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, let's send the target an IPI to
 * kick it out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
    int cpu = cpu_of(rq);

    if (!tick_nohz_full_cpu(cpu)) {
        return;
    }

    if (sched_can_stop_tick(rq)) {
        tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
    } else {
        tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
    }
}
#else
static inline int sched_tick_offload_init(void)
{
    return 0;
}
static inline void sched_update_tick_dependency(struct rq *rq)
{
}
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
    unsigned prev_nr = rq->nr_running;

    rq->nr_running = prev_nr + count;
    if (trace_sched_update_nr_running_tp_enabled()) {
        call_trace_sched_update_nr_running(rq, count);
    }

#ifdef CONFIG_SMP
    if (prev_nr < TASK_ON_RQ_MIGRATING && rq->nr_running >= TASK_ON_RQ_MIGRATING) {
        if (!READ_ONCE(rq->rd->overload)) {
            WRITE_ONCE(rq->rd->overload, 1);
        }
    }
#endif

    sched_update_tick_dependency(rq);
}

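/*
 * Like add_nr_running() above, this is expected to be called with rq->lock
 * held from the enqueue/dequeue paths of the individual scheduling classes.
 */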
static inline void sub_nr_running(struct rq *rq, unsigned count)
{
    rq->nr_running -= count;
    if (trace_sched_update_nr_running_tp_enabled()) {
        call_trace_sched_update_nr_running(rq, -count);
    }

    /* Check if we still need preemption */
    sched_update_tick_dependency(rq);
}

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
    if (!sched_feat(HRTICK)) {
        return 0;
    }
    if (!cpu_active(cpu_of(rq))) {
        return 0;
    }
    return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
    return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SCHED_WALT
u64 sched_ktime_clock(void);
#else
static inline u64 sched_ktime_clock(void)
{
    return sched_clock();
}
#endif

#ifndef arch_scale_freq_tick
static __always_inline void arch_scale_freq_tick(void)
{
}
#endif

#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
21813d0407baSopenharmony_ci * 21823d0407baSopenharmony_ci * f_curr 21833d0407baSopenharmony_ci * ------ * SCHED_CAPACITY_SCALE 21843d0407baSopenharmony_ci * f_max 21853d0407baSopenharmony_ci */ 21863d0407baSopenharmony_cistatic __always_inline unsigned long arch_scale_freq_capacity(int cpu) 21873d0407baSopenharmony_ci{ 21883d0407baSopenharmony_ci return SCHED_CAPACITY_SCALE; 21893d0407baSopenharmony_ci} 21903d0407baSopenharmony_ci#endif 21913d0407baSopenharmony_ci 21923d0407baSopenharmony_ciunsigned long capacity_curr_of(int cpu); 21933d0407baSopenharmony_ciunsigned long cpu_util(int cpu); 21943d0407baSopenharmony_ci 21953d0407baSopenharmony_ci#ifdef CONFIG_SMP 21963d0407baSopenharmony_ci#ifdef CONFIG_SCHED_WALT 21973d0407baSopenharmony_ciextern unsigned int sysctl_sched_use_walt_cpu_util; 21983d0407baSopenharmony_ciextern unsigned int walt_disabled; 21993d0407baSopenharmony_ci#endif 22003d0407baSopenharmony_ci#ifdef CONFIG_PREEMPTION 22013d0407baSopenharmony_ci 22023d0407baSopenharmony_cistatic inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 22033d0407baSopenharmony_ci 22043d0407baSopenharmony_ci/* 22053d0407baSopenharmony_ci * fair double_lock_balance: Safely acquires both rq->locks in a fair 22063d0407baSopenharmony_ci * way at the expense of forcing extra atomic operations in all 22073d0407baSopenharmony_ci * invocations. This assures that the double_lock is acquired using the 22083d0407baSopenharmony_ci * same underlying policy as the spinlock_t on this architecture, which 22093d0407baSopenharmony_ci * reduces latency compared to the unfair variant below. However, it 22103d0407baSopenharmony_ci * also adds more overhead and therefore may reduce throughput. 22113d0407baSopenharmony_ci */ 22123d0407baSopenharmony_cistatic inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) 22133d0407baSopenharmony_ci __acquires(busiest->lock) __acquires(this_rq->lock) 22143d0407baSopenharmony_ci{ 22153d0407baSopenharmony_ci raw_spin_unlock(&this_rq->lock); 22163d0407baSopenharmony_ci double_rq_lock(this_rq, busiest); 22173d0407baSopenharmony_ci 22183d0407baSopenharmony_ci return 1; 22193d0407baSopenharmony_ci} 22203d0407baSopenharmony_ci 22213d0407baSopenharmony_ci#else 22223d0407baSopenharmony_ci/* 22233d0407baSopenharmony_ci * Unfair double_lock_balance: Optimizes throughput at the expense of 22243d0407baSopenharmony_ci * latency by eliminating extra atomic operations when the locks are 22253d0407baSopenharmony_ci * already in proper order on entry. This favors lower CPU-ids and will 22263d0407baSopenharmony_ci * grant the double lock to lower CPUs over higher ids under contention, 22273d0407baSopenharmony_ci * regardless of entry order into the function. 
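 *
 * Either variant is only used through double_lock_balance() below. A usage
 * sketch (illustrative only; callers must already hold this_rq->lock with
 * interrupts disabled):
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		revalidate_state();	// hypothetical: this_rq->lock was dropped and re-acquired
 *	... pull tasks from busiest ...
 *	double_unlock_balance(this_rq, busiest);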
22283d0407baSopenharmony_ci */ 22293d0407baSopenharmony_cistatic inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) 22303d0407baSopenharmony_ci __acquires(busiest->lock) __acquires(this_rq->lock) 22313d0407baSopenharmony_ci{ 22323d0407baSopenharmony_ci int ret = 0; 22333d0407baSopenharmony_ci 22343d0407baSopenharmony_ci if (unlikely(!raw_spin_trylock(&busiest->lock))) { 22353d0407baSopenharmony_ci if (busiest < this_rq) { 22363d0407baSopenharmony_ci raw_spin_unlock(&this_rq->lock); 22373d0407baSopenharmony_ci raw_spin_lock(&busiest->lock); 22383d0407baSopenharmony_ci raw_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 22393d0407baSopenharmony_ci ret = 1; 22403d0407baSopenharmony_ci } else { 22413d0407baSopenharmony_ci raw_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 22423d0407baSopenharmony_ci } 22433d0407baSopenharmony_ci } 22443d0407baSopenharmony_ci return ret; 22453d0407baSopenharmony_ci} 22463d0407baSopenharmony_ci 22473d0407baSopenharmony_ci#endif /* CONFIG_PREEMPTION */ 22483d0407baSopenharmony_ci 22493d0407baSopenharmony_ci/* 22503d0407baSopenharmony_ci * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 22513d0407baSopenharmony_ci */ 22523d0407baSopenharmony_cistatic inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 22533d0407baSopenharmony_ci{ 22543d0407baSopenharmony_ci if (unlikely(!irqs_disabled())) { 22553d0407baSopenharmony_ci /* printk() doesn't work well under rq->lock */ 22563d0407baSopenharmony_ci raw_spin_unlock(&this_rq->lock); 22573d0407baSopenharmony_ci BUG_ON(1); 22583d0407baSopenharmony_ci } 22593d0407baSopenharmony_ci 22603d0407baSopenharmony_ci return _double_lock_balance(this_rq, busiest); 22613d0407baSopenharmony_ci} 22623d0407baSopenharmony_ci 22633d0407baSopenharmony_cistatic inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock) 22643d0407baSopenharmony_ci{ 22653d0407baSopenharmony_ci raw_spin_unlock(&busiest->lock); 22663d0407baSopenharmony_ci lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 22673d0407baSopenharmony_ci} 22683d0407baSopenharmony_ci 22693d0407baSopenharmony_cistatic inline void double_lock(spinlock_t *l1, spinlock_t *l2) 22703d0407baSopenharmony_ci{ 22713d0407baSopenharmony_ci if (l1 > l2) { 22723d0407baSopenharmony_ci swap(l1, l2); 22733d0407baSopenharmony_ci } 22743d0407baSopenharmony_ci 22753d0407baSopenharmony_ci spin_lock(l1); 22763d0407baSopenharmony_ci spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 22773d0407baSopenharmony_ci} 22783d0407baSopenharmony_ci 22793d0407baSopenharmony_cistatic inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 22803d0407baSopenharmony_ci{ 22813d0407baSopenharmony_ci if (l1 > l2) { 22823d0407baSopenharmony_ci swap(l1, l2); 22833d0407baSopenharmony_ci } 22843d0407baSopenharmony_ci 22853d0407baSopenharmony_ci spin_lock_irq(l1); 22863d0407baSopenharmony_ci spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 22873d0407baSopenharmony_ci} 22883d0407baSopenharmony_ci 22893d0407baSopenharmony_cistatic inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 22903d0407baSopenharmony_ci{ 22913d0407baSopenharmony_ci if (l1 > l2) { 22923d0407baSopenharmony_ci swap(l1, l2); 22933d0407baSopenharmony_ci } 22943d0407baSopenharmony_ci 22953d0407baSopenharmony_ci raw_spin_lock(l1); 22963d0407baSopenharmony_ci raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 22973d0407baSopenharmony_ci} 22983d0407baSopenharmony_ci 22993d0407baSopenharmony_ci/* 
23003d0407baSopenharmony_ci * double_rq_lock - safely lock two runqueues 23013d0407baSopenharmony_ci * 23023d0407baSopenharmony_ci * Note this does not disable interrupts like task_rq_lock, 23033d0407baSopenharmony_ci * you need to do so manually before calling. 23043d0407baSopenharmony_ci */ 23053d0407baSopenharmony_cistatic inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) 23063d0407baSopenharmony_ci{ 23073d0407baSopenharmony_ci BUG_ON(!irqs_disabled()); 23083d0407baSopenharmony_ci if (rq1 == rq2) { 23093d0407baSopenharmony_ci raw_spin_lock(&rq1->lock); 23103d0407baSopenharmony_ci __acquire(rq2->lock); /* Fake it out ;) */ 23113d0407baSopenharmony_ci } else { 23123d0407baSopenharmony_ci if (rq1 < rq2) { 23133d0407baSopenharmony_ci raw_spin_lock(&rq1->lock); 23143d0407baSopenharmony_ci raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 23153d0407baSopenharmony_ci } else { 23163d0407baSopenharmony_ci raw_spin_lock(&rq2->lock); 23173d0407baSopenharmony_ci raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 23183d0407baSopenharmony_ci } 23193d0407baSopenharmony_ci } 23203d0407baSopenharmony_ci} 23213d0407baSopenharmony_ci 23223d0407baSopenharmony_ci/* 23233d0407baSopenharmony_ci * double_rq_unlock - safely unlock two runqueues 23243d0407baSopenharmony_ci * 23253d0407baSopenharmony_ci * Note this does not restore interrupts like task_rq_unlock, 23263d0407baSopenharmony_ci * you need to do so manually after calling. 23273d0407baSopenharmony_ci */ 23283d0407baSopenharmony_cistatic inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) 23293d0407baSopenharmony_ci{ 23303d0407baSopenharmony_ci raw_spin_unlock(&rq1->lock); 23313d0407baSopenharmony_ci if (rq1 != rq2) { 23323d0407baSopenharmony_ci raw_spin_unlock(&rq2->lock); 23333d0407baSopenharmony_ci } else { 23343d0407baSopenharmony_ci __release(rq2->lock); 23353d0407baSopenharmony_ci } 23363d0407baSopenharmony_ci} 23373d0407baSopenharmony_ci 23383d0407baSopenharmony_ciextern void set_rq_online(struct rq *rq); 23393d0407baSopenharmony_ciextern void set_rq_offline(struct rq *rq); 23403d0407baSopenharmony_ciextern bool sched_smp_initialized; 23413d0407baSopenharmony_ci 23423d0407baSopenharmony_ci#else /* CONFIG_SMP */ 23433d0407baSopenharmony_ci 23443d0407baSopenharmony_ci/* 23453d0407baSopenharmony_ci * double_rq_lock - safely lock two runqueues 23463d0407baSopenharmony_ci * 23473d0407baSopenharmony_ci * Note this does not disable interrupts like task_rq_lock, 23483d0407baSopenharmony_ci * you need to do so manually before calling. 23493d0407baSopenharmony_ci */ 23503d0407baSopenharmony_cistatic inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) 23513d0407baSopenharmony_ci{ 23523d0407baSopenharmony_ci BUG_ON(!irqs_disabled()); 23533d0407baSopenharmony_ci BUG_ON(rq1 != rq2); 23543d0407baSopenharmony_ci raw_spin_lock(&rq1->lock); 23553d0407baSopenharmony_ci __acquire(rq2->lock); /* Fake it out ;) */ 23563d0407baSopenharmony_ci} 23573d0407baSopenharmony_ci 23583d0407baSopenharmony_ci/* 23593d0407baSopenharmony_ci * double_rq_unlock - safely unlock two runqueues 23603d0407baSopenharmony_ci * 23613d0407baSopenharmony_ci * Note this does not restore interrupts like task_rq_unlock, 23623d0407baSopenharmony_ci * you need to do so manually after calling. 
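 *
 * Taken together with double_rq_lock(), a sketch of the expected calling
 * pattern (illustrative only) is:
 *
 *	local_irq_save(flags);
 *	double_rq_lock(rq1, rq2);
 *	... operate on both runqueues ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_restore(flags);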
23633d0407baSopenharmony_ci */ 23643d0407baSopenharmony_cistatic inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) 23653d0407baSopenharmony_ci{ 23663d0407baSopenharmony_ci BUG_ON(rq1 != rq2); 23673d0407baSopenharmony_ci raw_spin_unlock(&rq1->lock); 23683d0407baSopenharmony_ci __release(rq2->lock); 23693d0407baSopenharmony_ci} 23703d0407baSopenharmony_ci 23713d0407baSopenharmony_ci#endif 23723d0407baSopenharmony_ci 23733d0407baSopenharmony_ciextern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 23743d0407baSopenharmony_ciextern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 23753d0407baSopenharmony_ci 23763d0407baSopenharmony_ci#ifdef CONFIG_SCHED_DEBUG 23773d0407baSopenharmony_ciextern bool sched_debug_enabled; 23783d0407baSopenharmony_ci 23793d0407baSopenharmony_ciextern void print_cfs_stats(struct seq_file *m, int cpu); 23803d0407baSopenharmony_ciextern void print_rt_stats(struct seq_file *m, int cpu); 23813d0407baSopenharmony_ciextern void print_dl_stats(struct seq_file *m, int cpu); 23823d0407baSopenharmony_ciextern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 23833d0407baSopenharmony_ciextern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 23843d0407baSopenharmony_ciextern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 23853d0407baSopenharmony_ci#ifdef CONFIG_NUMA_BALANCING 23863d0407baSopenharmony_ciextern void show_numa_stats(struct task_struct *p, struct seq_file *m); 23873d0407baSopenharmony_ciextern void print_numa_stats(struct seq_file *m, int node, unsigned long tsf, unsigned long tpf, unsigned long gsf, 23883d0407baSopenharmony_ci unsigned long gpf); 23893d0407baSopenharmony_ci#endif /* CONFIG_NUMA_BALANCING */ 23903d0407baSopenharmony_ci#endif /* CONFIG_SCHED_DEBUG */ 23913d0407baSopenharmony_ci 23923d0407baSopenharmony_ciextern void init_cfs_rq(struct cfs_rq *cfs_rq); 23933d0407baSopenharmony_ciextern void init_rt_rq(struct rt_rq *rt_rq); 23943d0407baSopenharmony_ciextern void init_dl_rq(struct dl_rq *dl_rq); 23953d0407baSopenharmony_ci 23963d0407baSopenharmony_ciextern void cfs_bandwidth_usage_inc(void); 23973d0407baSopenharmony_ciextern void cfs_bandwidth_usage_dec(void); 23983d0407baSopenharmony_ci 23993d0407baSopenharmony_ci#ifdef CONFIG_NO_HZ_COMMON 24003d0407baSopenharmony_ci#define NOHZ_BALANCE_KICK_BIT 0 24013d0407baSopenharmony_ci#define NOHZ_STATS_KICK_BIT 1 24023d0407baSopenharmony_ci 24033d0407baSopenharmony_ci#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 24043d0407baSopenharmony_ci#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 24053d0407baSopenharmony_ci 24063d0407baSopenharmony_ci#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 24073d0407baSopenharmony_ci 24083d0407baSopenharmony_ci#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 24093d0407baSopenharmony_ci 24103d0407baSopenharmony_ciextern void nohz_balance_exit_idle(struct rq *rq); 24113d0407baSopenharmony_ci#else 24123d0407baSopenharmony_cistatic inline void nohz_balance_exit_idle(struct rq *rq) 24133d0407baSopenharmony_ci{ 24143d0407baSopenharmony_ci} 24153d0407baSopenharmony_ci#endif 24163d0407baSopenharmony_ci 24173d0407baSopenharmony_ci#ifdef CONFIG_SMP 24183d0407baSopenharmony_cistatic inline void __dl_update(struct dl_bw *dl_b, s64 bw) 24193d0407baSopenharmony_ci{ 24203d0407baSopenharmony_ci struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 24213d0407baSopenharmony_ci int i; 24223d0407baSopenharmony_ci 
24233d0407baSopenharmony_ci RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), "sched RCU must be held");
24243d0407baSopenharmony_ci for_each_cpu_and(i, rd->span, cpu_active_mask)
24253d0407baSopenharmony_ci {
24263d0407baSopenharmony_ci struct rq *rq = cpu_rq(i);
24273d0407baSopenharmony_ci
24283d0407baSopenharmony_ci rq->dl.extra_bw += bw;
24293d0407baSopenharmony_ci }
24303d0407baSopenharmony_ci}
24313d0407baSopenharmony_ci#else
24323d0407baSopenharmony_cistatic inline void __dl_update(struct dl_bw *dl_b, s64 bw)
24333d0407baSopenharmony_ci{
24343d0407baSopenharmony_ci struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
24353d0407baSopenharmony_ci
24363d0407baSopenharmony_ci dl->extra_bw += bw;
24373d0407baSopenharmony_ci}
24383d0407baSopenharmony_ci#endif
24393d0407baSopenharmony_ci
24403d0407baSopenharmony_ci#ifdef CONFIG_IRQ_TIME_ACCOUNTING
24413d0407baSopenharmony_cistruct irqtime {
24423d0407baSopenharmony_ci u64 total;
24433d0407baSopenharmony_ci u64 tick_delta;
24443d0407baSopenharmony_ci u64 irq_start_time;
24453d0407baSopenharmony_ci struct u64_stats_sync sync;
24463d0407baSopenharmony_ci};
24473d0407baSopenharmony_ci
24483d0407baSopenharmony_ciDECLARE_PER_CPU(struct irqtime, cpu_irqtime);
24493d0407baSopenharmony_ci
24503d0407baSopenharmony_ci/*
24513d0407baSopenharmony_ci * Returns the irqtime minus the softirq time computed by ksoftirqd.
24523d0407baSopenharmony_ci * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
24533d0407baSopenharmony_ci * subtracted from it and would never move forward.
24543d0407baSopenharmony_ci */
24553d0407baSopenharmony_cistatic inline u64 irq_time_read(int cpu)
24563d0407baSopenharmony_ci{
24573d0407baSopenharmony_ci struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
24583d0407baSopenharmony_ci unsigned int seq;
24593d0407baSopenharmony_ci u64 total;
24603d0407baSopenharmony_ci
24613d0407baSopenharmony_ci do {
24623d0407baSopenharmony_ci seq = __u64_stats_fetch_begin(&irqtime->sync);
24633d0407baSopenharmony_ci total = irqtime->total;
24643d0407baSopenharmony_ci } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
24653d0407baSopenharmony_ci
24663d0407baSopenharmony_ci return total;
24673d0407baSopenharmony_ci}
24683d0407baSopenharmony_ci#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
24693d0407baSopenharmony_ci
24703d0407baSopenharmony_ci#ifdef CONFIG_CPU_FREQ
24713d0407baSopenharmony_ciDECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
24723d0407baSopenharmony_ci
24733d0407baSopenharmony_ci/**
24743d0407baSopenharmony_ci * cpufreq_update_util - Take a note about CPU utilization changes.
24753d0407baSopenharmony_ci * @rq: Runqueue to carry out the update for.
24763d0407baSopenharmony_ci * @flags: Update reason flags.
24773d0407baSopenharmony_ci *
24783d0407baSopenharmony_ci * This function is called by the scheduler on the CPU whose utilization is
24793d0407baSopenharmony_ci * being updated.
24803d0407baSopenharmony_ci *
24813d0407baSopenharmony_ci * It can only be called from RCU-sched read-side critical sections.
24823d0407baSopenharmony_ci *
24833d0407baSopenharmony_ci * The way cpufreq is currently arranged requires it to evaluate the CPU
24843d0407baSopenharmony_ci * performance state (frequency/voltage) on a regular basis to prevent it from
24853d0407baSopenharmony_ci * being stuck in a completely inadequate performance level for too long.
24863d0407baSopenharmony_ci * That is not guaranteed to happen if the updates are only triggered from CFS 24873d0407baSopenharmony_ci * and DL, though, because they may not be coming in if only RT tasks are 24883d0407baSopenharmony_ci * active all the time (or there are RT tasks only). 24893d0407baSopenharmony_ci * 24903d0407baSopenharmony_ci * As a workaround for that issue, this function is called periodically by the 24913d0407baSopenharmony_ci * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 24923d0407baSopenharmony_ci * but that really is a band-aid. Going forward it should be replaced with 24933d0407baSopenharmony_ci * solutions targeted more specifically at RT tasks. 24943d0407baSopenharmony_ci */ 24953d0407baSopenharmony_cistatic inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 24963d0407baSopenharmony_ci{ 24973d0407baSopenharmony_ci struct update_util_data *data; 24983d0407baSopenharmony_ci u64 clock; 24993d0407baSopenharmony_ci 25003d0407baSopenharmony_ci#ifdef CONFIG_SCHED_WALT 25013d0407baSopenharmony_ci if (!(flags & SCHED_CPUFREQ_WALT)) { 25023d0407baSopenharmony_ci return; 25033d0407baSopenharmony_ci } 25043d0407baSopenharmony_ci 25053d0407baSopenharmony_ci clock = sched_ktime_clock(); 25063d0407baSopenharmony_ci#else 25073d0407baSopenharmony_ci clock = rq_clock(rq); 25083d0407baSopenharmony_ci#endif 25093d0407baSopenharmony_ci data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq))); 25103d0407baSopenharmony_ci if (data) { 25113d0407baSopenharmony_ci data->func(data, clock, flags); 25123d0407baSopenharmony_ci } 25133d0407baSopenharmony_ci} 25143d0407baSopenharmony_ci#else 25153d0407baSopenharmony_cistatic inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 25163d0407baSopenharmony_ci{ 25173d0407baSopenharmony_ci} 25183d0407baSopenharmony_ci#endif /* CONFIG_CPU_FREQ */ 25193d0407baSopenharmony_ci 25203d0407baSopenharmony_ci#ifdef CONFIG_UCLAMP_TASK 25213d0407baSopenharmony_ciunsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 25223d0407baSopenharmony_ci 25233d0407baSopenharmony_ci/** 25243d0407baSopenharmony_ci * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 25253d0407baSopenharmony_ci * @rq: The rq to clamp against. Must not be NULL. 25263d0407baSopenharmony_ci * @util: The util value to clamp. 25273d0407baSopenharmony_ci * @p: The task to clamp against. Can be NULL if you want to clamp 25283d0407baSopenharmony_ci * against @rq only. 25293d0407baSopenharmony_ci * 25303d0407baSopenharmony_ci * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 25313d0407baSopenharmony_ci * 25323d0407baSopenharmony_ci * If sched_uclamp_used static key is disabled, then just return the util 25333d0407baSopenharmony_ci * without any clamping since uclamp aggregation at the rq level in the fast 25343d0407baSopenharmony_ci * path is disabled, rendering this operation a NOP. 25353d0407baSopenharmony_ci * 25363d0407baSopenharmony_ci * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It 25373d0407baSopenharmony_ci * will return the correct effective uclamp value of the task even if the 25383d0407baSopenharmony_ci * static key is disabled. 
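 *
 * Illustrative call (a sketch, not a definitive call site): a frequency
 * selection path could clamp the CFS utilization of @rq with
 *
 *	util = uclamp_rq_util_with(rq, cpu_util_cfs(rq), NULL);
 *
 * and pass a non-NULL @p when evaluating a specific candidate task.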
25393d0407baSopenharmony_ci */ 25403d0407baSopenharmony_cistatic __always_inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) 25413d0407baSopenharmony_ci{ 25423d0407baSopenharmony_ci unsigned long min_util = 0; 25433d0407baSopenharmony_ci unsigned long max_util = 0; 25443d0407baSopenharmony_ci 25453d0407baSopenharmony_ci if (!static_branch_likely(&sched_uclamp_used)) { 25463d0407baSopenharmony_ci return util; 25473d0407baSopenharmony_ci } 25483d0407baSopenharmony_ci 25493d0407baSopenharmony_ci if (p) { 25503d0407baSopenharmony_ci min_util = uclamp_eff_value(p, UCLAMP_MIN); 25513d0407baSopenharmony_ci max_util = uclamp_eff_value(p, UCLAMP_MAX); 25523d0407baSopenharmony_ci 25533d0407baSopenharmony_ci /* 25543d0407baSopenharmony_ci * Ignore last runnable task's max clamp, as this task will 25553d0407baSopenharmony_ci * reset it. Similarly, no need to read the rq's min clamp. 25563d0407baSopenharmony_ci */ 25573d0407baSopenharmony_ci if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) { 25583d0407baSopenharmony_ci goto out; 25593d0407baSopenharmony_ci } 25603d0407baSopenharmony_ci } 25613d0407baSopenharmony_ci 25623d0407baSopenharmony_ci min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); 25633d0407baSopenharmony_ci max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); 25643d0407baSopenharmony_ciout: 25653d0407baSopenharmony_ci /* 25663d0407baSopenharmony_ci * Since CPU's {min,max}_util clamps are MAX aggregated considering 25673d0407baSopenharmony_ci * RUNNABLE tasks with _different_ clamps, we can end up with an 25683d0407baSopenharmony_ci * inversion. Fix it now when the clamps are applied. 25693d0407baSopenharmony_ci */ 25703d0407baSopenharmony_ci if (unlikely(min_util >= max_util)) { 25713d0407baSopenharmony_ci return min_util; 25723d0407baSopenharmony_ci } 25733d0407baSopenharmony_ci 25743d0407baSopenharmony_ci return clamp(util, min_util, max_util); 25753d0407baSopenharmony_ci} 25763d0407baSopenharmony_ci 25773d0407baSopenharmony_cistatic inline bool uclamp_boosted(struct task_struct *p) 25783d0407baSopenharmony_ci{ 25793d0407baSopenharmony_ci return uclamp_eff_value(p, UCLAMP_MIN) > 0; 25803d0407baSopenharmony_ci} 25813d0407baSopenharmony_ci 25823d0407baSopenharmony_ci/* 25833d0407baSopenharmony_ci * When uclamp is compiled in, the aggregation at rq level is 'turned off' 25843d0407baSopenharmony_ci * by default in the fast path and only gets turned on once userspace performs 25853d0407baSopenharmony_ci * an operation that requires it. 25863d0407baSopenharmony_ci * 25873d0407baSopenharmony_ci * Returns true if userspace opted-in to use uclamp and aggregation at rq level 25883d0407baSopenharmony_ci * hence is active. 
25893d0407baSopenharmony_ci */ 25903d0407baSopenharmony_cistatic inline bool uclamp_is_used(void) 25913d0407baSopenharmony_ci{ 25923d0407baSopenharmony_ci return static_branch_likely(&sched_uclamp_used); 25933d0407baSopenharmony_ci} 25943d0407baSopenharmony_ci#else /* CONFIG_UCLAMP_TASK */ 25953d0407baSopenharmony_cistatic inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) 25963d0407baSopenharmony_ci{ 25973d0407baSopenharmony_ci return util; 25983d0407baSopenharmony_ci} 25993d0407baSopenharmony_ci 26003d0407baSopenharmony_cistatic inline bool uclamp_boosted(struct task_struct *p) 26013d0407baSopenharmony_ci{ 26023d0407baSopenharmony_ci return false; 26033d0407baSopenharmony_ci} 26043d0407baSopenharmony_ci 26053d0407baSopenharmony_cistatic inline bool uclamp_is_used(void) 26063d0407baSopenharmony_ci{ 26073d0407baSopenharmony_ci return false; 26083d0407baSopenharmony_ci} 26093d0407baSopenharmony_ci#endif /* CONFIG_UCLAMP_TASK */ 26103d0407baSopenharmony_ci 26113d0407baSopenharmony_ci#ifdef arch_scale_freq_capacity 26123d0407baSopenharmony_ci#ifndef arch_scale_freq_invariant 26133d0407baSopenharmony_ci#define arch_scale_freq_invariant() true 26143d0407baSopenharmony_ci#endif 26153d0407baSopenharmony_ci#else 26163d0407baSopenharmony_ci#define arch_scale_freq_invariant() false 26173d0407baSopenharmony_ci#endif 26183d0407baSopenharmony_ci 26193d0407baSopenharmony_ci#ifdef CONFIG_SMP 26203d0407baSopenharmony_cistatic inline unsigned long capacity_of(int cpu) 26213d0407baSopenharmony_ci{ 26223d0407baSopenharmony_ci return cpu_rq(cpu)->cpu_capacity; 26233d0407baSopenharmony_ci} 26243d0407baSopenharmony_ci 26253d0407baSopenharmony_cistatic inline unsigned long capacity_orig_of(int cpu) 26263d0407baSopenharmony_ci{ 26273d0407baSopenharmony_ci return cpu_rq(cpu)->cpu_capacity_orig; 26283d0407baSopenharmony_ci} 26293d0407baSopenharmony_ci#endif 26303d0407baSopenharmony_ci 26313d0407baSopenharmony_ci/** 26323d0407baSopenharmony_ci * enum schedutil_type - CPU utilization type 26333d0407baSopenharmony_ci * @FREQUENCY_UTIL: Utilization used to select frequency 26343d0407baSopenharmony_ci * @ENERGY_UTIL: Utilization used during energy calculation 26353d0407baSopenharmony_ci * 26363d0407baSopenharmony_ci * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 26373d0407baSopenharmony_ci * need to be aggregated differently depending on the usage made of them. This 26383d0407baSopenharmony_ci * enum is used within schedutil_freq_util() to differentiate the types of 26393d0407baSopenharmony_ci * utilization expected by the callers, and adjust the aggregation accordingly. 
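 *
 * For example (illustrative), a caller selecting a frequency would pass
 * FREQUENCY_UTIL:
 *
 *	util = schedutil_cpu_util(cpu, cpu_util_cfs(rq), max, FREQUENCY_UTIL, NULL);
 *
 * whereas an energy estimate for the same CPU would pass ENERGY_UTIL.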
26403d0407baSopenharmony_ci */ 26413d0407baSopenharmony_cienum schedutil_type { 26423d0407baSopenharmony_ci FREQUENCY_UTIL, 26433d0407baSopenharmony_ci ENERGY_UTIL, 26443d0407baSopenharmony_ci}; 26453d0407baSopenharmony_ci 26463d0407baSopenharmony_ci#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 26473d0407baSopenharmony_ci 26483d0407baSopenharmony_ciunsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max, enum schedutil_type type, 26493d0407baSopenharmony_ci struct task_struct *p); 26503d0407baSopenharmony_ci 26513d0407baSopenharmony_cistatic inline unsigned long cpu_bw_dl(struct rq *rq) 26523d0407baSopenharmony_ci{ 26533d0407baSopenharmony_ci return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 26543d0407baSopenharmony_ci} 26553d0407baSopenharmony_ci 26563d0407baSopenharmony_cistatic inline unsigned long cpu_util_dl(struct rq *rq) 26573d0407baSopenharmony_ci{ 26583d0407baSopenharmony_ci return READ_ONCE(rq->avg_dl.util_avg); 26593d0407baSopenharmony_ci} 26603d0407baSopenharmony_ci 26613d0407baSopenharmony_cistatic inline unsigned long cpu_util_cfs(struct rq *rq) 26623d0407baSopenharmony_ci{ 26633d0407baSopenharmony_ci unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 26643d0407baSopenharmony_ci 26653d0407baSopenharmony_ci if (sched_feat(UTIL_EST)) { 26663d0407baSopenharmony_ci util = max_t(unsigned long, util, READ_ONCE(rq->cfs.avg.util_est.enqueued)); 26673d0407baSopenharmony_ci } 26683d0407baSopenharmony_ci 26693d0407baSopenharmony_ci return util; 26703d0407baSopenharmony_ci} 26713d0407baSopenharmony_ci 26723d0407baSopenharmony_cistatic inline unsigned long cpu_util_rt(struct rq *rq) 26733d0407baSopenharmony_ci{ 26743d0407baSopenharmony_ci return READ_ONCE(rq->avg_rt.util_avg); 26753d0407baSopenharmony_ci} 26763d0407baSopenharmony_ci#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 26773d0407baSopenharmony_cistatic inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max, 26783d0407baSopenharmony_ci enum schedutil_type type, struct task_struct *p) 26793d0407baSopenharmony_ci{ 26803d0407baSopenharmony_ci return 0; 26813d0407baSopenharmony_ci} 26823d0407baSopenharmony_ci#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 26833d0407baSopenharmony_ci 26843d0407baSopenharmony_ci#ifdef CONFIG_HAVE_SCHED_AVG_IRQ 26853d0407baSopenharmony_cistatic inline unsigned long cpu_util_irq(struct rq *rq) 26863d0407baSopenharmony_ci{ 26873d0407baSopenharmony_ci return rq->avg_irq.util_avg; 26883d0407baSopenharmony_ci} 26893d0407baSopenharmony_ci 26903d0407baSopenharmony_cistatic inline unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 26913d0407baSopenharmony_ci{ 26923d0407baSopenharmony_ci util *= (max - irq); 26933d0407baSopenharmony_ci util /= max; 26943d0407baSopenharmony_ci 26953d0407baSopenharmony_ci return util; 26963d0407baSopenharmony_ci} 26973d0407baSopenharmony_ci#else 26983d0407baSopenharmony_cistatic inline unsigned long cpu_util_irq(struct rq *rq) 26993d0407baSopenharmony_ci{ 27003d0407baSopenharmony_ci return 0; 27013d0407baSopenharmony_ci} 27023d0407baSopenharmony_ci 27033d0407baSopenharmony_cistatic inline unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 27043d0407baSopenharmony_ci{ 27053d0407baSopenharmony_ci return util; 27063d0407baSopenharmony_ci} 27073d0407baSopenharmony_ci#endif 27083d0407baSopenharmony_ci 27093d0407baSopenharmony_ci#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 27103d0407baSopenharmony_ci 
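/*
 * Sketch of the usual guard (illustrative only): energy-aware placement paths
 * are expected to test the static key first and fall back to the regular
 * logic when EAS is not in use, e.g.:
 *
 *	if (!sched_energy_enabled())
 *		goto fallback;
 *	for_each_cpu(cpu, perf_domain_span(pd))
 *		...
 */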
27113d0407baSopenharmony_ci#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 27123d0407baSopenharmony_ci 27133d0407baSopenharmony_ciDECLARE_STATIC_KEY_FALSE(sched_energy_present); 27143d0407baSopenharmony_ci 27153d0407baSopenharmony_cistatic inline bool sched_energy_enabled(void) 27163d0407baSopenharmony_ci{ 27173d0407baSopenharmony_ci return static_branch_unlikely(&sched_energy_present); 27183d0407baSopenharmony_ci} 27193d0407baSopenharmony_ci 27203d0407baSopenharmony_ci#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 27213d0407baSopenharmony_ci 27223d0407baSopenharmony_ci#define perf_domain_span(pd) NULL 27233d0407baSopenharmony_cistatic inline bool sched_energy_enabled(void) 27243d0407baSopenharmony_ci{ 27253d0407baSopenharmony_ci return false; 27263d0407baSopenharmony_ci} 27273d0407baSopenharmony_ci 27283d0407baSopenharmony_ci#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 27293d0407baSopenharmony_ci 27303d0407baSopenharmony_ci#ifdef CONFIG_MEMBARRIER 27313d0407baSopenharmony_ci/* 27323d0407baSopenharmony_ci * The scheduler provides memory barriers required by membarrier between: 27333d0407baSopenharmony_ci * - prior user-space memory accesses and store to rq->membarrier_state, 27343d0407baSopenharmony_ci * - store to rq->membarrier_state and following user-space memory accesses. 27353d0407baSopenharmony_ci * In the same way it provides those guarantees around store to rq->curr. 27363d0407baSopenharmony_ci */ 27373d0407baSopenharmony_cistatic inline void membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) 27383d0407baSopenharmony_ci{ 27393d0407baSopenharmony_ci int membarrier_state; 27403d0407baSopenharmony_ci 27413d0407baSopenharmony_ci if (prev_mm == next_mm) { 27423d0407baSopenharmony_ci return; 27433d0407baSopenharmony_ci } 27443d0407baSopenharmony_ci 27453d0407baSopenharmony_ci membarrier_state = atomic_read(&next_mm->membarrier_state); 27463d0407baSopenharmony_ci if (READ_ONCE(rq->membarrier_state) == membarrier_state) { 27473d0407baSopenharmony_ci return; 27483d0407baSopenharmony_ci } 27493d0407baSopenharmony_ci 27503d0407baSopenharmony_ci WRITE_ONCE(rq->membarrier_state, membarrier_state); 27513d0407baSopenharmony_ci} 27523d0407baSopenharmony_ci#else 27533d0407baSopenharmony_cistatic inline void membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) 27543d0407baSopenharmony_ci{ 27553d0407baSopenharmony_ci} 27563d0407baSopenharmony_ci#endif 27573d0407baSopenharmony_ci 27583d0407baSopenharmony_ci#ifdef CONFIG_SMP 27593d0407baSopenharmony_cistatic inline bool is_per_cpu_kthread(struct task_struct *p) 27603d0407baSopenharmony_ci{ 27613d0407baSopenharmony_ci if (!(p->flags & PF_KTHREAD)) { 27623d0407baSopenharmony_ci return false; 27633d0407baSopenharmony_ci } 27643d0407baSopenharmony_ci 27653d0407baSopenharmony_ci if (p->nr_cpus_allowed != 1) { 27663d0407baSopenharmony_ci return false; 27673d0407baSopenharmony_ci } 27683d0407baSopenharmony_ci 27693d0407baSopenharmony_ci return true; 27703d0407baSopenharmony_ci} 27713d0407baSopenharmony_ci#endif 27723d0407baSopenharmony_ci 27733d0407baSopenharmony_civoid swake_up_all_locked(struct swait_queue_head *q); 27743d0407baSopenharmony_civoid __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 27753d0407baSopenharmony_ci 27763d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RTG 27773d0407baSopenharmony_ciextern bool task_fits_max(struct task_struct *p, int cpu); 27783d0407baSopenharmony_ciextern unsigned 
long capacity_spare_without(int cpu, struct task_struct *p); 27793d0407baSopenharmony_ciextern int update_preferred_cluster(struct related_thread_group *grp, struct task_struct *p, u32 old_load, 27803d0407baSopenharmony_ci bool from_tick); 27813d0407baSopenharmony_ciextern struct cpumask *find_rtg_target(struct task_struct *p); 27823d0407baSopenharmony_ci#endif 27833d0407baSopenharmony_ci 27843d0407baSopenharmony_ci#ifdef CONFIG_SCHED_WALT 27853d0407baSopenharmony_cistatic inline int cluster_first_cpu(struct sched_cluster *cluster) 27863d0407baSopenharmony_ci{ 27873d0407baSopenharmony_ci return cpumask_first(&cluster->cpus); 27883d0407baSopenharmony_ci} 27893d0407baSopenharmony_ci 27903d0407baSopenharmony_ciextern struct list_head cluster_head; 27913d0407baSopenharmony_ciextern struct sched_cluster *sched_cluster[NR_CPUS]; 27923d0407baSopenharmony_ci 27933d0407baSopenharmony_ci#define for_each_sched_cluster(cluster) list_for_each_entry_rcu(cluster, &cluster_head, list) 27943d0407baSopenharmony_ci 27953d0407baSopenharmony_ciextern struct mutex policy_mutex; 27963d0407baSopenharmony_ciextern unsigned int sched_disable_window_stats; 27973d0407baSopenharmony_ciextern unsigned int max_possible_freq; 27983d0407baSopenharmony_ciextern unsigned int min_max_freq; 27993d0407baSopenharmony_ciextern unsigned int max_possible_efficiency; 28003d0407baSopenharmony_ciextern unsigned int min_possible_efficiency; 28013d0407baSopenharmony_ciextern unsigned int max_capacity; 28023d0407baSopenharmony_ciextern unsigned int min_capacity; 28033d0407baSopenharmony_ciextern unsigned int max_load_scale_factor; 28043d0407baSopenharmony_ciextern unsigned int max_possible_capacity; 28053d0407baSopenharmony_ciextern unsigned int min_max_possible_capacity; 28063d0407baSopenharmony_ciextern unsigned int max_power_cost; 28073d0407baSopenharmony_ciextern unsigned int __read_mostly sched_init_task_load_windows; 28083d0407baSopenharmony_ciextern unsigned int sysctl_sched_restrict_cluster_spill; 28093d0407baSopenharmony_ciextern unsigned int sched_pred_alert_load; 28103d0407baSopenharmony_ciextern struct sched_cluster init_cluster; 28113d0407baSopenharmony_ci 28123d0407baSopenharmony_cistatic inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) 28133d0407baSopenharmony_ci{ 28143d0407baSopenharmony_ci rq->cum_window_demand_scaled += scaled_delta; 28153d0407baSopenharmony_ci if (unlikely((s64)rq->cum_window_demand_scaled < 0)) { 28163d0407baSopenharmony_ci rq->cum_window_demand_scaled = 0; 28173d0407baSopenharmony_ci } 28183d0407baSopenharmony_ci} 28193d0407baSopenharmony_ci 28203d0407baSopenharmony_ci/* Is frequency of two cpus synchronized with each other? 
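 *
 * (Descriptive note: per the check below, two CPUs are considered to share a
 * frequency domain when dst_cpu is set in src_cpu's rq->freq_domain_cpumask,
 * or when src_cpu == dst_cpu.)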
*/ 28213d0407baSopenharmony_cistatic inline int same_freq_domain(int src_cpu, int dst_cpu) 28223d0407baSopenharmony_ci{ 28233d0407baSopenharmony_ci struct rq *rq = cpu_rq(src_cpu); 28243d0407baSopenharmony_ci 28253d0407baSopenharmony_ci if (src_cpu == dst_cpu) { 28263d0407baSopenharmony_ci return 1; 28273d0407baSopenharmony_ci } 28283d0407baSopenharmony_ci 28293d0407baSopenharmony_ci return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask); 28303d0407baSopenharmony_ci} 28313d0407baSopenharmony_ci 28323d0407baSopenharmony_ciextern void reset_task_stats(struct task_struct *p); 28333d0407baSopenharmony_ci 28343d0407baSopenharmony_ci#define CPU_RESERVED 1 28353d0407baSopenharmony_cistatic inline int is_reserved(int cpu) 28363d0407baSopenharmony_ci{ 28373d0407baSopenharmony_ci struct rq *rq = cpu_rq(cpu); 28383d0407baSopenharmony_ci 28393d0407baSopenharmony_ci return test_bit(CPU_RESERVED, &rq->walt_flags); 28403d0407baSopenharmony_ci} 28413d0407baSopenharmony_ci 28423d0407baSopenharmony_cistatic inline int mark_reserved(int cpu) 28433d0407baSopenharmony_ci{ 28443d0407baSopenharmony_ci struct rq *rq = cpu_rq(cpu); 28453d0407baSopenharmony_ci 28463d0407baSopenharmony_ci return test_and_set_bit(CPU_RESERVED, &rq->walt_flags); 28473d0407baSopenharmony_ci} 28483d0407baSopenharmony_ci 28493d0407baSopenharmony_cistatic inline void clear_reserved(int cpu) 28503d0407baSopenharmony_ci{ 28513d0407baSopenharmony_ci struct rq *rq = cpu_rq(cpu); 28523d0407baSopenharmony_ci 28533d0407baSopenharmony_ci clear_bit(CPU_RESERVED, &rq->walt_flags); 28543d0407baSopenharmony_ci} 28553d0407baSopenharmony_ci 28563d0407baSopenharmony_cistatic inline int cpu_capacity(int cpu) 28573d0407baSopenharmony_ci{ 28583d0407baSopenharmony_ci return cpu_rq(cpu)->cluster->capacity; 28593d0407baSopenharmony_ci} 28603d0407baSopenharmony_ci 28613d0407baSopenharmony_cistatic inline int cpu_max_possible_capacity(int cpu) 28623d0407baSopenharmony_ci{ 28633d0407baSopenharmony_ci return cpu_rq(cpu)->cluster->max_possible_capacity; 28643d0407baSopenharmony_ci} 28653d0407baSopenharmony_ci 28663d0407baSopenharmony_cistatic inline int cpu_load_scale_factor(int cpu) 28673d0407baSopenharmony_ci{ 28683d0407baSopenharmony_ci return cpu_rq(cpu)->cluster->load_scale_factor; 28693d0407baSopenharmony_ci} 28703d0407baSopenharmony_ci 28713d0407baSopenharmony_cistatic inline unsigned int cluster_max_freq(struct sched_cluster *cluster) 28723d0407baSopenharmony_ci{ 28733d0407baSopenharmony_ci /* 28743d0407baSopenharmony_ci * Governor and thermal driver don't know the other party's mitigation 28753d0407baSopenharmony_ci * voting. So struct cluster saves both and return min() for current 28763d0407baSopenharmony_ci * cluster fmax. 
28773d0407baSopenharmony_ci */ 28783d0407baSopenharmony_ci return cluster->max_freq; 28793d0407baSopenharmony_ci} 28803d0407baSopenharmony_ci 28813d0407baSopenharmony_ci/* Keep track of max/min capacity possible across CPUs "currently" */ 28823d0407baSopenharmony_cistatic inline void __update_min_max_capacity(void) 28833d0407baSopenharmony_ci{ 28843d0407baSopenharmony_ci int i; 28853d0407baSopenharmony_ci int max_cap = 0, min_cap = INT_MAX; 28863d0407baSopenharmony_ci 28873d0407baSopenharmony_ci for_each_possible_cpu(i) 28883d0407baSopenharmony_ci { 28893d0407baSopenharmony_ci if (!cpu_active(i)) { 28903d0407baSopenharmony_ci continue; 28913d0407baSopenharmony_ci } 28923d0407baSopenharmony_ci 28933d0407baSopenharmony_ci max_cap = max(max_cap, cpu_capacity(i)); 28943d0407baSopenharmony_ci min_cap = min(min_cap, cpu_capacity(i)); 28953d0407baSopenharmony_ci } 28963d0407baSopenharmony_ci 28973d0407baSopenharmony_ci max_capacity = max_cap; 28983d0407baSopenharmony_ci min_capacity = min_cap; 28993d0407baSopenharmony_ci} 29003d0407baSopenharmony_ci 29013d0407baSopenharmony_ci/* 29023d0407baSopenharmony_ci * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so 29033d0407baSopenharmony_ci * that "most" efficient cpu gets a load_scale_factor of 1 29043d0407baSopenharmony_ci */ 29053d0407baSopenharmony_cistatic inline unsigned long load_scale_cpu_efficiency(struct sched_cluster *cluster) 29063d0407baSopenharmony_ci{ 29073d0407baSopenharmony_ci return DIV_ROUND_UP(CPU_FREQ_1K * max_possible_efficiency, cluster->efficiency); 29083d0407baSopenharmony_ci} 29093d0407baSopenharmony_ci 29103d0407baSopenharmony_ci/* 29113d0407baSopenharmony_ci * Return load_scale_factor of a cpu in reference to cpu with best max_freq 29123d0407baSopenharmony_ci * (max_possible_freq), so that one with best max_freq gets a load_scale_factor 29133d0407baSopenharmony_ci * of 1. 29143d0407baSopenharmony_ci */ 29153d0407baSopenharmony_cistatic inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster) 29163d0407baSopenharmony_ci{ 29173d0407baSopenharmony_ci return DIV_ROUND_UP(CPU_FREQ_1K * max_possible_freq, cluster_max_freq(cluster)); 29183d0407baSopenharmony_ci} 29193d0407baSopenharmony_ci 29203d0407baSopenharmony_cistatic inline int compute_load_scale_factor(struct sched_cluster *cluster) 29213d0407baSopenharmony_ci{ 29223d0407baSopenharmony_ci int load_scale = CPU_FREQ_1K; 29233d0407baSopenharmony_ci 29243d0407baSopenharmony_ci /* 29253d0407baSopenharmony_ci * load_scale_factor accounts for the fact that task load 29263d0407baSopenharmony_ci * is in reference to "best" performing cpu. Task's load will need to be 29273d0407baSopenharmony_ci * scaled (up) by a factor to determine suitability to be placed on a 29283d0407baSopenharmony_ci * (little) cpu. 
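 *
 * A worked example (illustrative numbers, assuming CPU_FREQ_1K is 1024): for
 * a cluster with half the best efficiency and half the best max_freq,
 * load_scale_cpu_efficiency() and load_scale_cpu_freq() both return 2048, so
 * load_scale becomes (((1024 * 2048) >> 10) * 2048) >> 10 == 4096, i.e. task
 * load is scaled up by 4x when judged against such a cpu.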
29293d0407baSopenharmony_ci */ 29303d0407baSopenharmony_ci load_scale *= load_scale_cpu_efficiency(cluster); 29313d0407baSopenharmony_ci load_scale >>= 0xa; 29323d0407baSopenharmony_ci 29333d0407baSopenharmony_ci load_scale *= load_scale_cpu_freq(cluster); 29343d0407baSopenharmony_ci load_scale >>= 0xa; 29353d0407baSopenharmony_ci 29363d0407baSopenharmony_ci return load_scale; 29373d0407baSopenharmony_ci} 29383d0407baSopenharmony_ci 29393d0407baSopenharmony_cistatic inline bool is_max_capacity_cpu(int cpu) 29403d0407baSopenharmony_ci{ 29413d0407baSopenharmony_ci return cpu_max_possible_capacity(cpu) == max_possible_capacity; 29423d0407baSopenharmony_ci} 29433d0407baSopenharmony_ci 29443d0407baSopenharmony_cistatic inline bool is_min_capacity_cpu(int cpu) 29453d0407baSopenharmony_ci{ 29463d0407baSopenharmony_ci return cpu_max_possible_capacity(cpu) == min_max_possible_capacity; 29473d0407baSopenharmony_ci} 29483d0407baSopenharmony_ci 29493d0407baSopenharmony_ci/* 29503d0407baSopenharmony_ci * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that 29513d0407baSopenharmony_ci * least efficient cpu gets capacity of 1024 29523d0407baSopenharmony_ci */ 29533d0407baSopenharmony_cistatic unsigned long capacity_scale_cpu_efficiency(struct sched_cluster *cluster) 29543d0407baSopenharmony_ci{ 29553d0407baSopenharmony_ci return (0x400 * cluster->efficiency) / min_possible_efficiency; 29563d0407baSopenharmony_ci} 29573d0407baSopenharmony_ci 29583d0407baSopenharmony_ci/* 29593d0407baSopenharmony_ci * Return 'capacity' of a cpu in reference to cpu with lowest max_freq 29603d0407baSopenharmony_ci * (min_max_freq), such that one with lowest max_freq gets capacity of 1024. 29613d0407baSopenharmony_ci */ 29623d0407baSopenharmony_cistatic unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster) 29633d0407baSopenharmony_ci{ 29643d0407baSopenharmony_ci return (0x400 * cluster_max_freq(cluster)) / min_max_freq; 29653d0407baSopenharmony_ci} 29663d0407baSopenharmony_ci 29673d0407baSopenharmony_cistatic inline int compute_capacity(struct sched_cluster *cluster) 29683d0407baSopenharmony_ci{ 29693d0407baSopenharmony_ci int capacity = 0x400; 29703d0407baSopenharmony_ci 29713d0407baSopenharmony_ci capacity *= capacity_scale_cpu_efficiency(cluster); 29723d0407baSopenharmony_ci capacity >>= 0xa; 29733d0407baSopenharmony_ci 29743d0407baSopenharmony_ci capacity *= capacity_scale_cpu_freq(cluster); 29753d0407baSopenharmony_ci capacity >>= 0xa; 29763d0407baSopenharmony_ci 29773d0407baSopenharmony_ci return capacity; 29783d0407baSopenharmony_ci} 29793d0407baSopenharmony_ci 29803d0407baSopenharmony_cistatic inline unsigned int power_cost(int cpu, u64 demand) 29813d0407baSopenharmony_ci{ 29823d0407baSopenharmony_ci return cpu_max_possible_capacity(cpu); 29833d0407baSopenharmony_ci} 29843d0407baSopenharmony_ci 29853d0407baSopenharmony_cistatic inline unsigned long cpu_util_freq_walt(int cpu) 29863d0407baSopenharmony_ci{ 29873d0407baSopenharmony_ci u64 util; 29883d0407baSopenharmony_ci struct rq *rq = cpu_rq(cpu); 29893d0407baSopenharmony_ci unsigned long capacity = capacity_orig_of(cpu); 29903d0407baSopenharmony_ci 29913d0407baSopenharmony_ci if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util)) { 29923d0407baSopenharmony_ci return cpu_util(cpu); 29933d0407baSopenharmony_ci } 29943d0407baSopenharmony_ci 29953d0407baSopenharmony_ci util = rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT; 29963d0407baSopenharmony_ci util = div_u64(util, sched_ravg_window); 29973d0407baSopenharmony_ci 
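 /*
  * prev_runnable_sum is the busy time WALT accumulated over the previous
  * window; the shift/divide above converts it to SCHED_CAPACITY_SCALE
  * units, and the return below caps the result at capacity_orig_of(cpu).
  */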
29983d0407baSopenharmony_ci return (util >= capacity) ? capacity : util; 29993d0407baSopenharmony_ci} 30003d0407baSopenharmony_ci 30013d0407baSopenharmony_cistatic inline bool hmp_capable(void) 30023d0407baSopenharmony_ci{ 30033d0407baSopenharmony_ci return max_possible_capacity != min_max_possible_capacity; 30043d0407baSopenharmony_ci} 30053d0407baSopenharmony_ci#else /* CONFIG_SCHED_WALT */ 30063d0407baSopenharmony_cistatic inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) 30073d0407baSopenharmony_ci{ 30083d0407baSopenharmony_ci} 30093d0407baSopenharmony_ci 30103d0407baSopenharmony_cistatic inline int same_freq_domain(int src_cpu, int dst_cpu) 30113d0407baSopenharmony_ci{ 30123d0407baSopenharmony_ci return 1; 30133d0407baSopenharmony_ci} 30143d0407baSopenharmony_ci 30153d0407baSopenharmony_cistatic inline int is_reserved(int cpu) 30163d0407baSopenharmony_ci{ 30173d0407baSopenharmony_ci return 0; 30183d0407baSopenharmony_ci} 30193d0407baSopenharmony_ci 30203d0407baSopenharmony_cistatic inline void clear_reserved(int cpu) 30213d0407baSopenharmony_ci{ 30223d0407baSopenharmony_ci} 30233d0407baSopenharmony_ci 30243d0407baSopenharmony_cistatic inline bool hmp_capable(void) 30253d0407baSopenharmony_ci{ 30263d0407baSopenharmony_ci return false; 30273d0407baSopenharmony_ci} 30283d0407baSopenharmony_ci#endif /* CONFIG_SCHED_WALT */ 30293d0407baSopenharmony_ci 30303d0407baSopenharmony_cistruct sched_avg_stats { 30313d0407baSopenharmony_ci int nr; 30323d0407baSopenharmony_ci int nr_misfit; 30333d0407baSopenharmony_ci int nr_max; 30343d0407baSopenharmony_ci int nr_scaled; 30353d0407baSopenharmony_ci}; 30363d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RUNNING_AVG 30373d0407baSopenharmony_ciextern void sched_get_nr_running_avg(struct sched_avg_stats *stats); 30383d0407baSopenharmony_ci#else 30393d0407baSopenharmony_cistatic inline void sched_get_nr_running_avg(struct sched_avg_stats *stats) 30403d0407baSopenharmony_ci{ 30413d0407baSopenharmony_ci} 30423d0407baSopenharmony_ci#endif 30433d0407baSopenharmony_ci 30443d0407baSopenharmony_ci#ifdef CONFIG_CPU_ISOLATION_OPT 30453d0407baSopenharmony_ciextern int group_balance_cpu_not_isolated(struct sched_group *sg); 30463d0407baSopenharmony_ci#else 30473d0407baSopenharmony_cistatic inline int group_balance_cpu_not_isolated(struct sched_group *sg) 30483d0407baSopenharmony_ci{ 30493d0407baSopenharmony_ci return group_balance_cpu(sg); 30503d0407baSopenharmony_ci} 30513d0407baSopenharmony_ci#endif /* CONFIG_CPU_ISOLATION_OPT */ 30523d0407baSopenharmony_ci 30533d0407baSopenharmony_ci#ifdef CONFIG_HOTPLUG_CPU 30543d0407baSopenharmony_ciextern void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, bool migrate_pinned_tasks); 30553d0407baSopenharmony_ci#endif 30563d0407baSopenharmony_ci#endif