18c2ecf20Sopenharmony_ci/* SPDX-License-Identifier: GPL-2.0 */
28c2ecf20Sopenharmony_ci/*
38c2ecf20Sopenharmony_ci * Scheduler internal types and methods:
48c2ecf20Sopenharmony_ci */
58c2ecf20Sopenharmony_ci#include <linux/sched.h>
68c2ecf20Sopenharmony_ci
78c2ecf20Sopenharmony_ci#include <linux/sched/autogroup.h>
88c2ecf20Sopenharmony_ci#include <linux/sched/clock.h>
98c2ecf20Sopenharmony_ci#include <linux/sched/coredump.h>
108c2ecf20Sopenharmony_ci#include <linux/sched/cpufreq.h>
118c2ecf20Sopenharmony_ci#include <linux/sched/cputime.h>
128c2ecf20Sopenharmony_ci#include <linux/sched/deadline.h>
138c2ecf20Sopenharmony_ci#include <linux/sched/debug.h>
148c2ecf20Sopenharmony_ci#include <linux/sched/hotplug.h>
158c2ecf20Sopenharmony_ci#include <linux/sched/idle.h>
168c2ecf20Sopenharmony_ci#include <linux/sched/init.h>
178c2ecf20Sopenharmony_ci#include <linux/sched/isolation.h>
188c2ecf20Sopenharmony_ci#include <linux/sched/jobctl.h>
198c2ecf20Sopenharmony_ci#include <linux/sched/loadavg.h>
208c2ecf20Sopenharmony_ci#include <linux/sched/mm.h>
218c2ecf20Sopenharmony_ci#include <linux/sched/nohz.h>
228c2ecf20Sopenharmony_ci#include <linux/sched/numa_balancing.h>
238c2ecf20Sopenharmony_ci#include <linux/sched/prio.h>
248c2ecf20Sopenharmony_ci#include <linux/sched/rt.h>
258c2ecf20Sopenharmony_ci#include <linux/sched/signal.h>
268c2ecf20Sopenharmony_ci#include <linux/sched/smt.h>
278c2ecf20Sopenharmony_ci#include <linux/sched/stat.h>
288c2ecf20Sopenharmony_ci#include <linux/sched/sysctl.h>
298c2ecf20Sopenharmony_ci#include <linux/sched/task.h>
308c2ecf20Sopenharmony_ci#include <linux/sched/task_stack.h>
318c2ecf20Sopenharmony_ci#include <linux/sched/topology.h>
328c2ecf20Sopenharmony_ci#include <linux/sched/user.h>
338c2ecf20Sopenharmony_ci#include <linux/sched/wake_q.h>
348c2ecf20Sopenharmony_ci#include <linux/sched/xacct.h>
358c2ecf20Sopenharmony_ci
368c2ecf20Sopenharmony_ci#include <uapi/linux/sched/types.h>
378c2ecf20Sopenharmony_ci
388c2ecf20Sopenharmony_ci#include <linux/binfmts.h>
398c2ecf20Sopenharmony_ci#include <linux/blkdev.h>
408c2ecf20Sopenharmony_ci#include <linux/compat.h>
418c2ecf20Sopenharmony_ci#include <linux/context_tracking.h>
428c2ecf20Sopenharmony_ci#include <linux/cpufreq.h>
438c2ecf20Sopenharmony_ci#include <linux/cpuidle.h>
448c2ecf20Sopenharmony_ci#include <linux/cpuset.h>
458c2ecf20Sopenharmony_ci#include <linux/ctype.h>
468c2ecf20Sopenharmony_ci#include <linux/debugfs.h>
478c2ecf20Sopenharmony_ci#include <linux/delayacct.h>
488c2ecf20Sopenharmony_ci#include <linux/energy_model.h>
498c2ecf20Sopenharmony_ci#include <linux/init_task.h>
508c2ecf20Sopenharmony_ci#include <linux/kprobes.h>
518c2ecf20Sopenharmony_ci#include <linux/kthread.h>
528c2ecf20Sopenharmony_ci#include <linux/membarrier.h>
538c2ecf20Sopenharmony_ci#include <linux/migrate.h>
548c2ecf20Sopenharmony_ci#include <linux/mmu_context.h>
558c2ecf20Sopenharmony_ci#include <linux/nmi.h>
568c2ecf20Sopenharmony_ci#include <linux/proc_fs.h>
578c2ecf20Sopenharmony_ci#include <linux/prefetch.h>
588c2ecf20Sopenharmony_ci#include <linux/profile.h>
598c2ecf20Sopenharmony_ci#include <linux/psi.h>
608c2ecf20Sopenharmony_ci#include <linux/rcupdate_wait.h>
618c2ecf20Sopenharmony_ci#include <linux/security.h>
628c2ecf20Sopenharmony_ci#include <linux/stop_machine.h>
638c2ecf20Sopenharmony_ci#include <linux/suspend.h>
648c2ecf20Sopenharmony_ci#include <linux/swait.h>
658c2ecf20Sopenharmony_ci#include <linux/syscalls.h>
668c2ecf20Sopenharmony_ci#include <linux/task_work.h>
678c2ecf20Sopenharmony_ci#include <linux/tsacct_kern.h>
688c2ecf20Sopenharmony_ci
698c2ecf20Sopenharmony_ci#include <asm/tlb.h>
708c2ecf20Sopenharmony_ci#include <asm-generic/vmlinux.lds.h>
718c2ecf20Sopenharmony_ci
728c2ecf20Sopenharmony_ci#ifdef CONFIG_PARAVIRT
738c2ecf20Sopenharmony_ci# include <asm/paravirt.h>
748c2ecf20Sopenharmony_ci#endif
758c2ecf20Sopenharmony_ci
768c2ecf20Sopenharmony_ci#include "cpupri.h"
778c2ecf20Sopenharmony_ci#include "cpudeadline.h"
788c2ecf20Sopenharmony_ci
798c2ecf20Sopenharmony_ci#include <trace/events/sched.h>
808c2ecf20Sopenharmony_ci
/*
 * SCHED_WARN_ON(x): warn (once) when @x is true on SCHED_DEBUG kernels.
 * On !SCHED_DEBUG kernels @x is still evaluated (for side effects) but
 * discarded, and the expression yields 0 so callers may test the result.
 */
#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif
868c2ecf20Sopenharmony_ci
878c2ecf20Sopenharmony_cistruct rq;
888c2ecf20Sopenharmony_cistruct cpuidle_state;
898c2ecf20Sopenharmony_ci
908c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
918c2ecf20Sopenharmony_ciextern unsigned long uclamp_task_util(struct task_struct *p,
928c2ecf20Sopenharmony_ci					     unsigned long uclamp_min,
938c2ecf20Sopenharmony_ci					     unsigned long uclamp_max);
948c2ecf20Sopenharmony_ci#endif
958c2ecf20Sopenharmony_ci
968c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
978c2ecf20Sopenharmony_ciextern unsigned int sched_ravg_window;
988c2ecf20Sopenharmony_ciextern unsigned int walt_cpu_util_freq_divisor;
998c2ecf20Sopenharmony_ci
/*
 * WALT (Window Assisted Load Tracking) accounting.
 * NOTE(review): presumably the aggregate window-based demand of the
 * runnable tasks being tracked — confirm against the WALT update paths.
 */
struct walt_sched_stats {
	/* Sum of scaled runnable-task demand (WALT units) */
	u64 cumulative_runnable_avg_scaled;
};
1038c2ecf20Sopenharmony_ci
/*
 * Pending WALT load subtractions recorded against a tracking window.
 * NOTE(review): field semantics inferred from names — verify against the
 * WALT window-rollover code that consumes this structure.
 */
struct load_subtractions {
	/* Start timestamp of the window these subtractions belong to */
	u64 window_start;
	/* Amount to subtract from the (previous) window's load */
	u64 subs;
	/* Amount to subtract from the new window's load */
	u64 new_subs;
};
1098c2ecf20Sopenharmony_ci
1108c2ecf20Sopenharmony_ci#define NUM_TRACKED_WINDOWS 2
1118c2ecf20Sopenharmony_ci
/*
 * A cluster of CPUs sharing frequency/capacity characteristics (WALT).
 * Clusters are kept on a global list and identified by @id.
 */
struct sched_cluster {
	raw_spinlock_t load_lock;	/* protects per-cluster load updates */
	struct list_head list;		/* node on the global cluster list */
	struct cpumask cpus;		/* CPUs belonging to this cluster */
	int id;
	int max_power_cost;
	int min_power_cost;
	int max_possible_capacity;
	int capacity;
	int efficiency; /* Differentiate cpus with different IPC capability */
	int load_scale_factor;
	unsigned int exec_scale_factor;
	/*
	 * max_freq = user maximum
	 * max_possible_freq = maximum supported by hardware
	 */
	unsigned int cur_freq, max_freq, min_freq;
	unsigned int max_possible_freq;
	bool freq_init_done;	/* set once cpufreq limits are known */
};
1328c2ecf20Sopenharmony_ci
1338c2ecf20Sopenharmony_ciextern unsigned int sched_disable_window_stats;
1348c2ecf20Sopenharmony_ci#endif /* CONFIG_SCHED_WALT */
1358c2ecf20Sopenharmony_ci
1368c2ecf20Sopenharmony_ci
1378c2ecf20Sopenharmony_ci/* task_struct::on_rq states: */
1388c2ecf20Sopenharmony_ci#define TASK_ON_RQ_QUEUED	1
1398c2ecf20Sopenharmony_ci#define TASK_ON_RQ_MIGRATING	2
1408c2ecf20Sopenharmony_ci
1418c2ecf20Sopenharmony_ciextern __read_mostly int scheduler_running;
1428c2ecf20Sopenharmony_ci
1438c2ecf20Sopenharmony_ciextern unsigned long calc_load_update;
1448c2ecf20Sopenharmony_ciextern atomic_long_t calc_load_tasks;
1458c2ecf20Sopenharmony_ci
1468c2ecf20Sopenharmony_ciextern void calc_global_load_tick(struct rq *this_rq);
1478c2ecf20Sopenharmony_ciextern long calc_load_fold_active(struct rq *this_rq, long adjust);
1488c2ecf20Sopenharmony_ci
1498c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
1508c2ecf20Sopenharmony_ciextern void init_sched_groups_capacity(int cpu, struct sched_domain *sd);
1518c2ecf20Sopenharmony_ci#endif
1528c2ecf20Sopenharmony_ci
1538c2ecf20Sopenharmony_ciextern void call_trace_sched_update_nr_running(struct rq *rq, int count);
1548c2ecf20Sopenharmony_ci/*
1558c2ecf20Sopenharmony_ci * Helpers for converting nanosecond timing to jiffy resolution
1568c2ecf20Sopenharmony_ci */
1578c2ecf20Sopenharmony_ci#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
1588c2ecf20Sopenharmony_ci
1598c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_LATENCY_NICE
1608c2ecf20Sopenharmony_ci/*
1618c2ecf20Sopenharmony_ci * Latency nice is meant to provide scheduler hints about the relative
1628c2ecf20Sopenharmony_ci * latency requirements of a task with respect to other tasks.
1638c2ecf20Sopenharmony_ci * Thus a task with latency_nice == 19 can be hinted as the task with no
1648c2ecf20Sopenharmony_ci * latency requirements, in contrast to the task with latency_nice == -20
1658c2ecf20Sopenharmony_ci * which should be given priority in terms of lower latency.
1668c2ecf20Sopenharmony_ci */
1678c2ecf20Sopenharmony_ci#define MAX_LATENCY_NICE	19
1688c2ecf20Sopenharmony_ci#define MIN_LATENCY_NICE	-20
1698c2ecf20Sopenharmony_ci
1708c2ecf20Sopenharmony_ci#define LATENCY_NICE_WIDTH	\
1718c2ecf20Sopenharmony_ci	(MAX_LATENCY_NICE - MIN_LATENCY_NICE + 1)
1728c2ecf20Sopenharmony_ci
1738c2ecf20Sopenharmony_ci/*
1748c2ecf20Sopenharmony_ci * Default tasks should be treated as a task with latency_nice = 0.
1758c2ecf20Sopenharmony_ci */
1768c2ecf20Sopenharmony_ci#define DEFAULT_LATENCY_NICE	0
1778c2ecf20Sopenharmony_ci#define DEFAULT_LATENCY_PRIO	(DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2)
1788c2ecf20Sopenharmony_ci
1798c2ecf20Sopenharmony_ci/*
1808c2ecf20Sopenharmony_ci * Convert user-nice values [ -20 ... 0 ... 19 ]
1818c2ecf20Sopenharmony_ci * to static latency [ 0..39 ],
1828c2ecf20Sopenharmony_ci * and back.
1838c2ecf20Sopenharmony_ci */
1848c2ecf20Sopenharmony_ci#define NICE_TO_LATENCY(nice)	((nice) + DEFAULT_LATENCY_PRIO)
1858c2ecf20Sopenharmony_ci#define LATENCY_TO_NICE(prio)	((prio) - DEFAULT_LATENCY_PRIO)
1868c2ecf20Sopenharmony_ci#define NICE_LATENCY_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
1878c2ecf20Sopenharmony_ci#define NICE_LATENCY_WEIGHT_MAX	(1L << NICE_LATENCY_SHIFT)
1888c2ecf20Sopenharmony_ci#endif /* CONFIG_SCHED_LATENCY_NICE */
1898c2ecf20Sopenharmony_ci
1908c2ecf20Sopenharmony_ci/*
1918c2ecf20Sopenharmony_ci * Increase resolution of nice-level calculations for 64-bit architectures.
1928c2ecf20Sopenharmony_ci * The extra resolution improves shares distribution and load balancing of
1938c2ecf20Sopenharmony_ci * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
1948c2ecf20Sopenharmony_ci * hierarchies, especially on larger systems. This is not a user-visible change
1958c2ecf20Sopenharmony_ci * and does not change the user-interface for setting shares/weights.
1968c2ecf20Sopenharmony_ci *
1978c2ecf20Sopenharmony_ci * We increase resolution only if we have enough bits to allow this increased
1988c2ecf20Sopenharmony_ci * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
1998c2ecf20Sopenharmony_ci * are pretty high and the returns do not justify the increased costs.
2008c2ecf20Sopenharmony_ci *
2018c2ecf20Sopenharmony_ci * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
2028c2ecf20Sopenharmony_ci * increase coverage and consistency always enable it on 64-bit platforms.
2038c2ecf20Sopenharmony_ci */
2048c2ecf20Sopenharmony_ci#ifdef CONFIG_64BIT
2058c2ecf20Sopenharmony_ci# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
2068c2ecf20Sopenharmony_ci# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
2078c2ecf20Sopenharmony_ci# define scale_load_down(w) \
2088c2ecf20Sopenharmony_ci({ \
2098c2ecf20Sopenharmony_ci	unsigned long __w = (w); \
2108c2ecf20Sopenharmony_ci	if (__w) \
2118c2ecf20Sopenharmony_ci		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
2128c2ecf20Sopenharmony_ci	__w; \
2138c2ecf20Sopenharmony_ci})
2148c2ecf20Sopenharmony_ci#else
2158c2ecf20Sopenharmony_ci# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
2168c2ecf20Sopenharmony_ci# define scale_load(w)		(w)
2178c2ecf20Sopenharmony_ci# define scale_load_down(w)	(w)
2188c2ecf20Sopenharmony_ci#endif
2198c2ecf20Sopenharmony_ci
2208c2ecf20Sopenharmony_ci/*
2218c2ecf20Sopenharmony_ci * Task weight (visible to users) and its load (invisible to users) have
2228c2ecf20Sopenharmony_ci * independent resolution, but they should be well calibrated. We use
2238c2ecf20Sopenharmony_ci * scale_load() and scale_load_down(w) to convert between them. The
2248c2ecf20Sopenharmony_ci * following must be true:
2258c2ecf20Sopenharmony_ci *
2268c2ecf20Sopenharmony_ci *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
2278c2ecf20Sopenharmony_ci *
2288c2ecf20Sopenharmony_ci */
2298c2ecf20Sopenharmony_ci#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
2308c2ecf20Sopenharmony_ci
2318c2ecf20Sopenharmony_ci/*
2328c2ecf20Sopenharmony_ci * Single value that decides SCHED_DEADLINE internal math precision.
2338c2ecf20Sopenharmony_ci * 10 -> just above 1us
2348c2ecf20Sopenharmony_ci * 9  -> just above 0.5us
2358c2ecf20Sopenharmony_ci */
2368c2ecf20Sopenharmony_ci#define DL_SCALE		10
2378c2ecf20Sopenharmony_ci
2388c2ecf20Sopenharmony_ci/*
2398c2ecf20Sopenharmony_ci * Single value that denotes runtime == period, ie unlimited time.
2408c2ecf20Sopenharmony_ci */
2418c2ecf20Sopenharmony_ci#define RUNTIME_INF		((u64)~0ULL)
2428c2ecf20Sopenharmony_ci
2438c2ecf20Sopenharmony_cistatic inline int idle_policy(int policy)
2448c2ecf20Sopenharmony_ci{
2458c2ecf20Sopenharmony_ci	return policy == SCHED_IDLE;
2468c2ecf20Sopenharmony_ci}
2478c2ecf20Sopenharmony_cistatic inline int fair_policy(int policy)
2488c2ecf20Sopenharmony_ci{
2498c2ecf20Sopenharmony_ci	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
2508c2ecf20Sopenharmony_ci}
2518c2ecf20Sopenharmony_ci
2528c2ecf20Sopenharmony_cistatic inline int rt_policy(int policy)
2538c2ecf20Sopenharmony_ci{
2548c2ecf20Sopenharmony_ci	return policy == SCHED_FIFO || policy == SCHED_RR;
2558c2ecf20Sopenharmony_ci}
2568c2ecf20Sopenharmony_ci
2578c2ecf20Sopenharmony_cistatic inline int dl_policy(int policy)
2588c2ecf20Sopenharmony_ci{
2598c2ecf20Sopenharmony_ci	return policy == SCHED_DEADLINE;
2608c2ecf20Sopenharmony_ci}
2618c2ecf20Sopenharmony_cistatic inline bool valid_policy(int policy)
2628c2ecf20Sopenharmony_ci{
2638c2ecf20Sopenharmony_ci	return idle_policy(policy) || fair_policy(policy) ||
2648c2ecf20Sopenharmony_ci		rt_policy(policy) || dl_policy(policy);
2658c2ecf20Sopenharmony_ci}
2668c2ecf20Sopenharmony_ci
2678c2ecf20Sopenharmony_cistatic inline int task_has_idle_policy(struct task_struct *p)
2688c2ecf20Sopenharmony_ci{
2698c2ecf20Sopenharmony_ci	return idle_policy(p->policy);
2708c2ecf20Sopenharmony_ci}
2718c2ecf20Sopenharmony_ci
2728c2ecf20Sopenharmony_cistatic inline int task_has_rt_policy(struct task_struct *p)
2738c2ecf20Sopenharmony_ci{
2748c2ecf20Sopenharmony_ci	return rt_policy(p->policy);
2758c2ecf20Sopenharmony_ci}
2768c2ecf20Sopenharmony_ci
2778c2ecf20Sopenharmony_cistatic inline int task_has_dl_policy(struct task_struct *p)
2788c2ecf20Sopenharmony_ci{
2798c2ecf20Sopenharmony_ci	return dl_policy(p->policy);
2808c2ecf20Sopenharmony_ci}
2818c2ecf20Sopenharmony_ci
2828c2ecf20Sopenharmony_ci#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2838c2ecf20Sopenharmony_ci
2848c2ecf20Sopenharmony_cistatic inline void update_avg(u64 *avg, u64 sample)
2858c2ecf20Sopenharmony_ci{
2868c2ecf20Sopenharmony_ci	s64 diff = sample - *avg;
2878c2ecf20Sopenharmony_ci	*avg += diff / 8;
2888c2ecf20Sopenharmony_ci}
2898c2ecf20Sopenharmony_ci
2908c2ecf20Sopenharmony_ci/*
2918c2ecf20Sopenharmony_ci * Shifting a value by an exponent greater *or equal* to the size of said value
2928c2ecf20Sopenharmony_ci * is UB; cap at size-1.
2938c2ecf20Sopenharmony_ci */
2948c2ecf20Sopenharmony_ci#define shr_bound(val, shift)							\
2958c2ecf20Sopenharmony_ci	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
2968c2ecf20Sopenharmony_ci
2978c2ecf20Sopenharmony_ci/*
2988c2ecf20Sopenharmony_ci * !! For sched_setattr_nocheck() (kernel) only !!
2998c2ecf20Sopenharmony_ci *
3008c2ecf20Sopenharmony_ci * This is actually gross. :(
3018c2ecf20Sopenharmony_ci *
3028c2ecf20Sopenharmony_ci * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
3038c2ecf20Sopenharmony_ci * tasks, but still be able to sleep. We need this on platforms that cannot
3048c2ecf20Sopenharmony_ci * atomically change clock frequency. Remove once fast switching will be
3058c2ecf20Sopenharmony_ci * available on such platforms.
3068c2ecf20Sopenharmony_ci *
3078c2ecf20Sopenharmony_ci * SUGOV stands for SchedUtil GOVernor.
3088c2ecf20Sopenharmony_ci */
3098c2ecf20Sopenharmony_ci#define SCHED_FLAG_SUGOV	0x10000000
3108c2ecf20Sopenharmony_ci
3118c2ecf20Sopenharmony_ci#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
3128c2ecf20Sopenharmony_ci
/*
 * Is @dl_se a "special" deadline entity, i.e. a schedutil governor
 * kthread flagged with SCHED_FLAG_SUGOV? Always false when the
 * schedutil cpufreq governor is not built in.
 */
static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}
3218c2ecf20Sopenharmony_ci
3228c2ecf20Sopenharmony_ci/*
3238c2ecf20Sopenharmony_ci * Tells if entity @a should preempt entity @b.
3248c2ecf20Sopenharmony_ci */
3258c2ecf20Sopenharmony_cistatic inline bool
3268c2ecf20Sopenharmony_cidl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
3278c2ecf20Sopenharmony_ci{
3288c2ecf20Sopenharmony_ci	return dl_entity_is_special(a) ||
3298c2ecf20Sopenharmony_ci	       dl_time_before(a->deadline, b->deadline);
3308c2ecf20Sopenharmony_ci}
3318c2ecf20Sopenharmony_ci
3328c2ecf20Sopenharmony_ci/*
3338c2ecf20Sopenharmony_ci * This is the priority-queue data structure of the RT scheduling class:
3348c2ecf20Sopenharmony_ci */
/*
 * Priority queue of the RT scheduling class: one FIFO list per RT
 * priority level, plus a bitmap of non-empty levels for O(1) lookup
 * of the highest-priority queued task.
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
3398c2ecf20Sopenharmony_ci
/* RT-throttling state: runtime budget replenished every period. */
struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;	/* replenishment period */
	u64			rt_runtime;	/* budget per period */
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;	/* timer armed? */
};
3488c2ecf20Sopenharmony_ci
3498c2ecf20Sopenharmony_civoid __dl_clear_params(struct task_struct *p);
3508c2ecf20Sopenharmony_ci
/* Global -deadline bandwidth limit: dl_runtime out of every dl_period. */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};
3568c2ecf20Sopenharmony_ci
3578c2ecf20Sopenharmony_cistatic inline int dl_bandwidth_enabled(void)
3588c2ecf20Sopenharmony_ci{
3598c2ecf20Sopenharmony_ci	return sysctl_sched_rt_runtime >= 0;
3608c2ecf20Sopenharmony_ci}
3618c2ecf20Sopenharmony_ci
3628c2ecf20Sopenharmony_ci/*
3638c2ecf20Sopenharmony_ci * To keep the bandwidth of -deadline tasks under control
3648c2ecf20Sopenharmony_ci * we need some place where:
3658c2ecf20Sopenharmony_ci *  - store the maximum -deadline bandwidth of each cpu;
3668c2ecf20Sopenharmony_ci *  - cache the fraction of bandwidth that is currently allocated in
3678c2ecf20Sopenharmony_ci *    each root domain;
3688c2ecf20Sopenharmony_ci *
3698c2ecf20Sopenharmony_ci * This is all done in the data structure below. It is similar to the
3708c2ecf20Sopenharmony_ci * one used for RT-throttling (rt_bandwidth), with the main difference
3718c2ecf20Sopenharmony_ci * that, since here we are only interested in admission control, we
3728c2ecf20Sopenharmony_ci * do not decrease any runtime while the group "executes", neither we
3738c2ecf20Sopenharmony_ci * need a timer to replenish it.
3748c2ecf20Sopenharmony_ci *
3758c2ecf20Sopenharmony_ci * With respect to SMP, bandwidth is given on a per root domain basis,
3768c2ecf20Sopenharmony_ci * meaning that:
3778c2ecf20Sopenharmony_ci *  - bw (< 100%) is the deadline bandwidth of each CPU;
3788c2ecf20Sopenharmony_ci *  - total_bw is the currently allocated bandwidth in each root domain;
3798c2ecf20Sopenharmony_ci */
struct dl_bw {
	raw_spinlock_t		lock;
	/* Maximum -deadline bandwidth of each CPU; -1 means "no limit" */
	u64			bw;
	/* Bandwidth currently allocated in this root domain */
	u64			total_bw;
};
3858c2ecf20Sopenharmony_ci
3868c2ecf20Sopenharmony_cistatic inline void __dl_update(struct dl_bw *dl_b, s64 bw);
3878c2ecf20Sopenharmony_ci
3888c2ecf20Sopenharmony_cistatic inline
3898c2ecf20Sopenharmony_civoid __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
3908c2ecf20Sopenharmony_ci{
3918c2ecf20Sopenharmony_ci	dl_b->total_bw -= tsk_bw;
3928c2ecf20Sopenharmony_ci	__dl_update(dl_b, (s32)tsk_bw / cpus);
3938c2ecf20Sopenharmony_ci}
3948c2ecf20Sopenharmony_ci
3958c2ecf20Sopenharmony_cistatic inline
3968c2ecf20Sopenharmony_civoid __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
3978c2ecf20Sopenharmony_ci{
3988c2ecf20Sopenharmony_ci	dl_b->total_bw += tsk_bw;
3998c2ecf20Sopenharmony_ci	__dl_update(dl_b, -((s32)tsk_bw / cpus));
4008c2ecf20Sopenharmony_ci}
4018c2ecf20Sopenharmony_ci
4028c2ecf20Sopenharmony_cistatic inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
4038c2ecf20Sopenharmony_ci				 u64 old_bw, u64 new_bw)
4048c2ecf20Sopenharmony_ci{
4058c2ecf20Sopenharmony_ci	return dl_b->bw != -1 &&
4068c2ecf20Sopenharmony_ci	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
4078c2ecf20Sopenharmony_ci}
4088c2ecf20Sopenharmony_ci
4098c2ecf20Sopenharmony_ci/*
4108c2ecf20Sopenharmony_ci * Verify the fitness of task @p to run on @cpu taking into account the
4118c2ecf20Sopenharmony_ci * CPU original capacity and the runtime/deadline ratio of the task.
4128c2ecf20Sopenharmony_ci *
4138c2ecf20Sopenharmony_ci * The function will return true if the CPU original capacity of the
4148c2ecf20Sopenharmony_ci * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
4158c2ecf20Sopenharmony_ci * task and false otherwise.
4168c2ecf20Sopenharmony_ci */
4178c2ecf20Sopenharmony_cistatic inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
4188c2ecf20Sopenharmony_ci{
4198c2ecf20Sopenharmony_ci	unsigned long cap = arch_scale_cpu_capacity(cpu);
4208c2ecf20Sopenharmony_ci
4218c2ecf20Sopenharmony_ci	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
4228c2ecf20Sopenharmony_ci}
4238c2ecf20Sopenharmony_ci
4248c2ecf20Sopenharmony_ciextern void init_dl_bw(struct dl_bw *dl_b);
4258c2ecf20Sopenharmony_ciextern int  sched_dl_global_validate(void);
4268c2ecf20Sopenharmony_ciextern void sched_dl_do_global(void);
4278c2ecf20Sopenharmony_ciextern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
4288c2ecf20Sopenharmony_ciextern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
4298c2ecf20Sopenharmony_ciextern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
4308c2ecf20Sopenharmony_ciextern bool __checkparam_dl(const struct sched_attr *attr);
4318c2ecf20Sopenharmony_ciextern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
4328c2ecf20Sopenharmony_ciextern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
4338c2ecf20Sopenharmony_ciextern int  dl_bw_check_overflow(int cpu);
4348c2ecf20Sopenharmony_ci
4358c2ecf20Sopenharmony_ci#ifdef CONFIG_CGROUP_SCHED
4368c2ecf20Sopenharmony_ci
4378c2ecf20Sopenharmony_ci#include <linux/cgroup.h>
4388c2ecf20Sopenharmony_ci#include <linux/psi.h>
4398c2ecf20Sopenharmony_ci
4408c2ecf20Sopenharmony_cistruct cfs_rq;
4418c2ecf20Sopenharmony_cistruct rt_rq;
4428c2ecf20Sopenharmony_ci
4438c2ecf20Sopenharmony_ciextern struct list_head task_groups;
4448c2ecf20Sopenharmony_ci
/*
 * CFS bandwidth control for a task group: @quota of runtime is granted
 * every @period; throttled cfs_rqs wait on @throttled_cfs_rq until the
 * period timer (or the slack timer returning leftover runtime) refills
 * the pool. Empty when CFS_BANDWIDTH is not configured.
 */
struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;		/* refill interval */
	u64			quota;		/* runtime granted per period */
	u64			runtime;	/* runtime remaining in pool */
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;	/* period timer armed? */
	u8			slack_started;	/* slack timer armed? */
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};
4668c2ecf20Sopenharmony_ci
4678c2ecf20Sopenharmony_ci/* Task group related information */
4688c2ecf20Sopenharmony_cistruct task_group {
4698c2ecf20Sopenharmony_ci	struct cgroup_subsys_state css;
4708c2ecf20Sopenharmony_ci
4718c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
4728c2ecf20Sopenharmony_ci	/* schedulable entities of this group on each CPU */
4738c2ecf20Sopenharmony_ci	struct sched_entity	**se;
4748c2ecf20Sopenharmony_ci	/* runqueue "owned" by this group on each CPU */
4758c2ecf20Sopenharmony_ci	struct cfs_rq		**cfs_rq;
4768c2ecf20Sopenharmony_ci	unsigned long		shares;
4778c2ecf20Sopenharmony_ci
4788c2ecf20Sopenharmony_ci#ifdef	CONFIG_SMP
4798c2ecf20Sopenharmony_ci	/*
4808c2ecf20Sopenharmony_ci	 * load_avg can be heavily contended at clock tick time, so put
4818c2ecf20Sopenharmony_ci	 * it in its own cacheline separated from the fields above which
4828c2ecf20Sopenharmony_ci	 * will also be accessed at each tick.
4838c2ecf20Sopenharmony_ci	 */
4848c2ecf20Sopenharmony_ci	atomic_long_t		load_avg ____cacheline_aligned;
4858c2ecf20Sopenharmony_ci#endif
4868c2ecf20Sopenharmony_ci#endif
4878c2ecf20Sopenharmony_ci
4888c2ecf20Sopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
4898c2ecf20Sopenharmony_ci	struct sched_rt_entity	**rt_se;
4908c2ecf20Sopenharmony_ci	struct rt_rq		**rt_rq;
4918c2ecf20Sopenharmony_ci
4928c2ecf20Sopenharmony_ci	struct rt_bandwidth	rt_bandwidth;
4938c2ecf20Sopenharmony_ci#endif
4948c2ecf20Sopenharmony_ci
4958c2ecf20Sopenharmony_ci	struct rcu_head		rcu;
4968c2ecf20Sopenharmony_ci	struct list_head	list;
4978c2ecf20Sopenharmony_ci
4988c2ecf20Sopenharmony_ci	struct task_group	*parent;
4998c2ecf20Sopenharmony_ci	struct list_head	siblings;
5008c2ecf20Sopenharmony_ci	struct list_head	children;
5018c2ecf20Sopenharmony_ci
5028c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_AUTOGROUP
5038c2ecf20Sopenharmony_ci	struct autogroup	*autogroup;
5048c2ecf20Sopenharmony_ci#endif
5058c2ecf20Sopenharmony_ci
5068c2ecf20Sopenharmony_ci	struct cfs_bandwidth	cfs_bandwidth;
5078c2ecf20Sopenharmony_ci
5088c2ecf20Sopenharmony_ci#ifdef CONFIG_UCLAMP_TASK_GROUP
5098c2ecf20Sopenharmony_ci	/* The two decimal precision [%] value requested from user-space */
5108c2ecf20Sopenharmony_ci	unsigned int		uclamp_pct[UCLAMP_CNT];
5118c2ecf20Sopenharmony_ci	/* Clamp values requested for a task group */
5128c2ecf20Sopenharmony_ci	struct uclamp_se	uclamp_req[UCLAMP_CNT];
5138c2ecf20Sopenharmony_ci	/* Effective clamp values used for a task group */
5148c2ecf20Sopenharmony_ci	struct uclamp_se	uclamp[UCLAMP_CNT];
5158c2ecf20Sopenharmony_ci#endif
5168c2ecf20Sopenharmony_ci
5178c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG_CGROUP
5188c2ecf20Sopenharmony_ci	/*
5198c2ecf20Sopenharmony_ci	 * Controls whether tasks of this cgroup should be colocated with each
5208c2ecf20Sopenharmony_ci	 * other and tasks of other cgroups that have the same flag turned on.
5218c2ecf20Sopenharmony_ci	 */
5228c2ecf20Sopenharmony_ci	bool colocate;
5238c2ecf20Sopenharmony_ci
5248c2ecf20Sopenharmony_ci	/* Controls whether further updates are allowed to the colocate flag */
5258c2ecf20Sopenharmony_ci	bool colocate_update_disabled;
5268c2ecf20Sopenharmony_ci#endif
5278c2ecf20Sopenharmony_ci};
5288c2ecf20Sopenharmony_ci
5298c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
5308c2ecf20Sopenharmony_ci#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
5318c2ecf20Sopenharmony_ci
5328c2ecf20Sopenharmony_ci/*
5338c2ecf20Sopenharmony_ci * A weight of 0 or 1 can cause arithmetics problems.
5348c2ecf20Sopenharmony_ci * A weight of a cfs_rq is the sum of weights of which entities
5358c2ecf20Sopenharmony_ci * are queued on this cfs_rq, so a weight of a entity should not be
5368c2ecf20Sopenharmony_ci * too large, so as the shares value of a task group.
5378c2ecf20Sopenharmony_ci * (The default weight is 1024 - so there's no practical
5388c2ecf20Sopenharmony_ci *  limitation from this.)
5398c2ecf20Sopenharmony_ci */
5408c2ecf20Sopenharmony_ci#define MIN_SHARES		(1UL <<  1)
5418c2ecf20Sopenharmony_ci#define MAX_SHARES		(1UL << 18)
5428c2ecf20Sopenharmony_ci#endif
5438c2ecf20Sopenharmony_ci
5448c2ecf20Sopenharmony_citypedef int (*tg_visitor)(struct task_group *, void *);
5458c2ecf20Sopenharmony_ci
5468c2ecf20Sopenharmony_ciextern int walk_tg_tree_from(struct task_group *from,
5478c2ecf20Sopenharmony_ci			     tg_visitor down, tg_visitor up, void *data);
5488c2ecf20Sopenharmony_ci
5498c2ecf20Sopenharmony_ci/*
5508c2ecf20Sopenharmony_ci * Iterate the full tree, calling @down when first entering a node and @up when
5518c2ecf20Sopenharmony_ci * leaving it for the final time.
5528c2ecf20Sopenharmony_ci *
5538c2ecf20Sopenharmony_ci * Caller must hold rcu_lock or sufficient equivalent.
5548c2ecf20Sopenharmony_ci */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	/* Walk from the root, covering every task group in the system. */
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
5598c2ecf20Sopenharmony_ci
5608c2ecf20Sopenharmony_ciextern int tg_nop(struct task_group *tg, void *data);
5618c2ecf20Sopenharmony_ci
5628c2ecf20Sopenharmony_ciextern void free_fair_sched_group(struct task_group *tg);
5638c2ecf20Sopenharmony_ciextern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
5648c2ecf20Sopenharmony_ciextern void online_fair_sched_group(struct task_group *tg);
5658c2ecf20Sopenharmony_ciextern void unregister_fair_sched_group(struct task_group *tg);
5668c2ecf20Sopenharmony_ciextern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
5678c2ecf20Sopenharmony_ci			struct sched_entity *se, int cpu,
5688c2ecf20Sopenharmony_ci			struct sched_entity *parent);
5698c2ecf20Sopenharmony_ciextern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
5708c2ecf20Sopenharmony_ci
5718c2ecf20Sopenharmony_ciextern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
5728c2ecf20Sopenharmony_ciextern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
5738c2ecf20Sopenharmony_ciextern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
5748c2ecf20Sopenharmony_ci
5758c2ecf20Sopenharmony_ciextern void free_rt_sched_group(struct task_group *tg);
5768c2ecf20Sopenharmony_ciextern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
5778c2ecf20Sopenharmony_ciextern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
5788c2ecf20Sopenharmony_ci		struct sched_rt_entity *rt_se, int cpu,
5798c2ecf20Sopenharmony_ci		struct sched_rt_entity *parent);
5808c2ecf20Sopenharmony_ciextern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
5818c2ecf20Sopenharmony_ciextern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
5828c2ecf20Sopenharmony_ciextern long sched_group_rt_runtime(struct task_group *tg);
5838c2ecf20Sopenharmony_ciextern long sched_group_rt_period(struct task_group *tg);
5848c2ecf20Sopenharmony_ciextern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
5858c2ecf20Sopenharmony_ci
5868c2ecf20Sopenharmony_ciextern struct task_group *sched_create_group(struct task_group *parent);
5878c2ecf20Sopenharmony_ciextern void sched_online_group(struct task_group *tg,
5888c2ecf20Sopenharmony_ci			       struct task_group *parent);
5898c2ecf20Sopenharmony_ciextern void sched_destroy_group(struct task_group *tg);
5908c2ecf20Sopenharmony_ciextern void sched_offline_group(struct task_group *tg);
5918c2ecf20Sopenharmony_ci
5928c2ecf20Sopenharmony_ciextern void sched_move_task(struct task_struct *tsk);
5938c2ecf20Sopenharmony_ci
5948c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
5958c2ecf20Sopenharmony_ciextern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
5968c2ecf20Sopenharmony_ci
5978c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
5988c2ecf20Sopenharmony_ciextern void set_task_rq_fair(struct sched_entity *se,
5998c2ecf20Sopenharmony_ci			     struct cfs_rq *prev, struct cfs_rq *next);
6008c2ecf20Sopenharmony_ci#else /* !CONFIG_SMP */
6018c2ecf20Sopenharmony_cistatic inline void set_task_rq_fair(struct sched_entity *se,
6028c2ecf20Sopenharmony_ci			     struct cfs_rq *prev, struct cfs_rq *next) { }
6038c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
6048c2ecf20Sopenharmony_ci#endif /* CONFIG_FAIR_GROUP_SCHED */
6058c2ecf20Sopenharmony_ci
6068c2ecf20Sopenharmony_ci#else /* CONFIG_CGROUP_SCHED */
6078c2ecf20Sopenharmony_ci
6088c2ecf20Sopenharmony_cistruct cfs_bandwidth { };
6098c2ecf20Sopenharmony_ci
6108c2ecf20Sopenharmony_ci#endif	/* CONFIG_CGROUP_SCHED */
6118c2ecf20Sopenharmony_ci
6128c2ecf20Sopenharmony_ci/* CFS-related fields in a runqueue */
6138c2ecf20Sopenharmony_cistruct cfs_rq {
6148c2ecf20Sopenharmony_ci	struct load_weight	load;
6158c2ecf20Sopenharmony_ci	unsigned int		nr_running;
6168c2ecf20Sopenharmony_ci	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
6178c2ecf20Sopenharmony_ci	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
6188c2ecf20Sopenharmony_ci
6198c2ecf20Sopenharmony_ci	u64			exec_clock;
6208c2ecf20Sopenharmony_ci	u64			min_vruntime;
6218c2ecf20Sopenharmony_ci#ifndef CONFIG_64BIT
6228c2ecf20Sopenharmony_ci	u64			min_vruntime_copy;
6238c2ecf20Sopenharmony_ci#endif
6248c2ecf20Sopenharmony_ci
6258c2ecf20Sopenharmony_ci	struct rb_root_cached	tasks_timeline;
6268c2ecf20Sopenharmony_ci
6278c2ecf20Sopenharmony_ci	/*
6288c2ecf20Sopenharmony_ci	 * 'curr' points to currently running entity on this cfs_rq.
6298c2ecf20Sopenharmony_ci	 * It is set to NULL otherwise (i.e when none are currently running).
6308c2ecf20Sopenharmony_ci	 */
6318c2ecf20Sopenharmony_ci	struct sched_entity	*curr;
6328c2ecf20Sopenharmony_ci	struct sched_entity	*next;
6338c2ecf20Sopenharmony_ci	struct sched_entity	*last;
6348c2ecf20Sopenharmony_ci	struct sched_entity	*skip;
6358c2ecf20Sopenharmony_ci
6368c2ecf20Sopenharmony_ci#ifdef	CONFIG_SCHED_DEBUG
6378c2ecf20Sopenharmony_ci	unsigned int		nr_spread_over;
6388c2ecf20Sopenharmony_ci#endif
6398c2ecf20Sopenharmony_ci
6408c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
6418c2ecf20Sopenharmony_ci	/*
6428c2ecf20Sopenharmony_ci	 * CFS load tracking
6438c2ecf20Sopenharmony_ci	 */
6448c2ecf20Sopenharmony_ci	struct sched_avg	avg;
6458c2ecf20Sopenharmony_ci#ifndef CONFIG_64BIT
6468c2ecf20Sopenharmony_ci	u64			load_last_update_time_copy;
6478c2ecf20Sopenharmony_ci#endif
6488c2ecf20Sopenharmony_ci	struct {
6498c2ecf20Sopenharmony_ci		raw_spinlock_t	lock ____cacheline_aligned;
6508c2ecf20Sopenharmony_ci		int		nr;
6518c2ecf20Sopenharmony_ci		unsigned long	load_avg;
6528c2ecf20Sopenharmony_ci		unsigned long	util_avg;
6538c2ecf20Sopenharmony_ci		unsigned long	runnable_avg;
6548c2ecf20Sopenharmony_ci	} removed;
6558c2ecf20Sopenharmony_ci
6568c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
6578c2ecf20Sopenharmony_ci	unsigned long		tg_load_avg_contrib;
6588c2ecf20Sopenharmony_ci	long			propagate;
6598c2ecf20Sopenharmony_ci	long			prop_runnable_sum;
6608c2ecf20Sopenharmony_ci
6618c2ecf20Sopenharmony_ci	/*
6628c2ecf20Sopenharmony_ci	 *   h_load = weight * f(tg)
6638c2ecf20Sopenharmony_ci	 *
6648c2ecf20Sopenharmony_ci	 * Where f(tg) is the recursive weight fraction assigned to
6658c2ecf20Sopenharmony_ci	 * this group.
6668c2ecf20Sopenharmony_ci	 */
6678c2ecf20Sopenharmony_ci	unsigned long		h_load;
6688c2ecf20Sopenharmony_ci	u64			last_h_load_update;
6698c2ecf20Sopenharmony_ci	struct sched_entity	*h_load_next;
6708c2ecf20Sopenharmony_ci#endif /* CONFIG_FAIR_GROUP_SCHED */
6718c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
6728c2ecf20Sopenharmony_ci
6738c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
6748c2ecf20Sopenharmony_ci	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */
6758c2ecf20Sopenharmony_ci
6768c2ecf20Sopenharmony_ci	/*
6778c2ecf20Sopenharmony_ci	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
6788c2ecf20Sopenharmony_ci	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
6798c2ecf20Sopenharmony_ci	 * (like users, containers etc.)
6808c2ecf20Sopenharmony_ci	 *
6818c2ecf20Sopenharmony_ci	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
6828c2ecf20Sopenharmony_ci	 * This list is used during load balance.
6838c2ecf20Sopenharmony_ci	 */
6848c2ecf20Sopenharmony_ci	int			on_list;
6858c2ecf20Sopenharmony_ci	struct list_head	leaf_cfs_rq_list;
6868c2ecf20Sopenharmony_ci	struct task_group	*tg;	/* group that "owns" this runqueue */
6878c2ecf20Sopenharmony_ci
6888c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
6898c2ecf20Sopenharmony_ci	struct walt_sched_stats walt_stats;
6908c2ecf20Sopenharmony_ci#endif
6918c2ecf20Sopenharmony_ci
6928c2ecf20Sopenharmony_ci#ifdef CONFIG_CFS_BANDWIDTH
6938c2ecf20Sopenharmony_ci	int			runtime_enabled;
6948c2ecf20Sopenharmony_ci	s64			runtime_remaining;
6958c2ecf20Sopenharmony_ci
6968c2ecf20Sopenharmony_ci	u64			throttled_clock;
6978c2ecf20Sopenharmony_ci	u64			throttled_clock_pelt;
6988c2ecf20Sopenharmony_ci	u64			throttled_clock_pelt_time;
6998c2ecf20Sopenharmony_ci	int			throttled;
7008c2ecf20Sopenharmony_ci	int			throttle_count;
7018c2ecf20Sopenharmony_ci	struct list_head	throttled_list;
7028c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
7038c2ecf20Sopenharmony_ci	u64 cumulative_runnable_avg;
7048c2ecf20Sopenharmony_ci#endif
7058c2ecf20Sopenharmony_ci#endif /* CONFIG_CFS_BANDWIDTH */
7068c2ecf20Sopenharmony_ci#endif /* CONFIG_FAIR_GROUP_SCHED */
7078c2ecf20Sopenharmony_ci};
7088c2ecf20Sopenharmony_ci
7098c2ecf20Sopenharmony_cistatic inline int rt_bandwidth_enabled(void)
7108c2ecf20Sopenharmony_ci{
7118c2ecf20Sopenharmony_ci	return sysctl_sched_rt_runtime >= 0;
7128c2ecf20Sopenharmony_ci}
7138c2ecf20Sopenharmony_ci
7148c2ecf20Sopenharmony_ci/* RT IPI pull logic requires IRQ_WORK */
7158c2ecf20Sopenharmony_ci#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
7168c2ecf20Sopenharmony_ci# define HAVE_RT_PUSH_IPI
7178c2ecf20Sopenharmony_ci#endif
7188c2ecf20Sopenharmony_ci
7198c2ecf20Sopenharmony_ci/* Real-Time classes' related field in a runqueue: */
7208c2ecf20Sopenharmony_cistruct rt_rq {
7218c2ecf20Sopenharmony_ci	struct rt_prio_array	active;
7228c2ecf20Sopenharmony_ci	unsigned int		rt_nr_running;
7238c2ecf20Sopenharmony_ci	unsigned int		rr_nr_running;
7248c2ecf20Sopenharmony_ci#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
7258c2ecf20Sopenharmony_ci	struct {
7268c2ecf20Sopenharmony_ci		int		curr; /* highest queued rt task prio */
7278c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
7288c2ecf20Sopenharmony_ci		int		next; /* next highest */
7298c2ecf20Sopenharmony_ci#endif
7308c2ecf20Sopenharmony_ci	} highest_prio;
7318c2ecf20Sopenharmony_ci#endif
7328c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
7338c2ecf20Sopenharmony_ci	unsigned long		rt_nr_migratory;
7348c2ecf20Sopenharmony_ci	unsigned long		rt_nr_total;
7358c2ecf20Sopenharmony_ci	int			overloaded;
7368c2ecf20Sopenharmony_ci	struct plist_head	pushable_tasks;
7378c2ecf20Sopenharmony_ci
7388c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
7398c2ecf20Sopenharmony_ci	int			rt_queued;
7408c2ecf20Sopenharmony_ci
7418c2ecf20Sopenharmony_ci	int			rt_throttled;
7428c2ecf20Sopenharmony_ci	u64			rt_time;
7438c2ecf20Sopenharmony_ci	u64			rt_runtime;
7448c2ecf20Sopenharmony_ci	/* Nests inside the rq lock: */
7458c2ecf20Sopenharmony_ci	raw_spinlock_t		rt_runtime_lock;
7468c2ecf20Sopenharmony_ci
7478c2ecf20Sopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
7488c2ecf20Sopenharmony_ci	unsigned long		rt_nr_boosted;
7498c2ecf20Sopenharmony_ci
7508c2ecf20Sopenharmony_ci	struct rq		*rq;
7518c2ecf20Sopenharmony_ci	struct task_group	*tg;
7528c2ecf20Sopenharmony_ci#endif
7538c2ecf20Sopenharmony_ci};
7548c2ecf20Sopenharmony_ci
7558c2ecf20Sopenharmony_cistatic inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
7568c2ecf20Sopenharmony_ci{
7578c2ecf20Sopenharmony_ci	return rt_rq->rt_queued && rt_rq->rt_nr_running;
7588c2ecf20Sopenharmony_ci}
7598c2ecf20Sopenharmony_ci
7608c2ecf20Sopenharmony_ci/* Deadline class' related fields in a runqueue */
7618c2ecf20Sopenharmony_cistruct dl_rq {
7628c2ecf20Sopenharmony_ci	/* runqueue is an rbtree, ordered by deadline */
7638c2ecf20Sopenharmony_ci	struct rb_root_cached	root;
7648c2ecf20Sopenharmony_ci
7658c2ecf20Sopenharmony_ci	unsigned long		dl_nr_running;
7668c2ecf20Sopenharmony_ci
7678c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
7688c2ecf20Sopenharmony_ci	/*
7698c2ecf20Sopenharmony_ci	 * Deadline values of the currently executing and the
7708c2ecf20Sopenharmony_ci	 * earliest ready task on this rq. Caching these facilitates
7718c2ecf20Sopenharmony_ci	 * the decision whether or not a ready but not running task
7728c2ecf20Sopenharmony_ci	 * should migrate somewhere else.
7738c2ecf20Sopenharmony_ci	 */
7748c2ecf20Sopenharmony_ci	struct {
7758c2ecf20Sopenharmony_ci		u64		curr;
7768c2ecf20Sopenharmony_ci		u64		next;
7778c2ecf20Sopenharmony_ci	} earliest_dl;
7788c2ecf20Sopenharmony_ci
7798c2ecf20Sopenharmony_ci	unsigned long		dl_nr_migratory;
7808c2ecf20Sopenharmony_ci	int			overloaded;
7818c2ecf20Sopenharmony_ci
7828c2ecf20Sopenharmony_ci	/*
7838c2ecf20Sopenharmony_ci	 * Tasks on this rq that can be pushed away. They are kept in
7848c2ecf20Sopenharmony_ci	 * an rb-tree, ordered by tasks' deadlines, with caching
7858c2ecf20Sopenharmony_ci	 * of the leftmost (earliest deadline) element.
7868c2ecf20Sopenharmony_ci	 */
7878c2ecf20Sopenharmony_ci	struct rb_root_cached	pushable_dl_tasks_root;
7888c2ecf20Sopenharmony_ci#else
7898c2ecf20Sopenharmony_ci	struct dl_bw		dl_bw;
7908c2ecf20Sopenharmony_ci#endif
7918c2ecf20Sopenharmony_ci	/*
7928c2ecf20Sopenharmony_ci	 * "Active utilization" for this runqueue: increased when a
7938c2ecf20Sopenharmony_ci	 * task wakes up (becomes TASK_RUNNING) and decreased when a
7948c2ecf20Sopenharmony_ci	 * task blocks
7958c2ecf20Sopenharmony_ci	 */
7968c2ecf20Sopenharmony_ci	u64			running_bw;
7978c2ecf20Sopenharmony_ci
7988c2ecf20Sopenharmony_ci	/*
7998c2ecf20Sopenharmony_ci	 * Utilization of the tasks "assigned" to this runqueue (including
8008c2ecf20Sopenharmony_ci	 * the tasks that are in runqueue and the tasks that executed on this
8018c2ecf20Sopenharmony_ci	 * CPU and blocked). Increased when a task moves to this runqueue, and
8028c2ecf20Sopenharmony_ci	 * decreased when the task moves away (migrates, changes scheduling
8038c2ecf20Sopenharmony_ci	 * policy, or terminates).
8048c2ecf20Sopenharmony_ci	 * This is needed to compute the "inactive utilization" for the
8058c2ecf20Sopenharmony_ci	 * runqueue (inactive utilization = this_bw - running_bw).
8068c2ecf20Sopenharmony_ci	 */
8078c2ecf20Sopenharmony_ci	u64			this_bw;
8088c2ecf20Sopenharmony_ci	u64			extra_bw;
8098c2ecf20Sopenharmony_ci
8108c2ecf20Sopenharmony_ci	/*
8118c2ecf20Sopenharmony_ci	 * Inverse of the fraction of CPU utilization that can be reclaimed
8128c2ecf20Sopenharmony_ci	 * by the GRUB algorithm.
8138c2ecf20Sopenharmony_ci	 */
8148c2ecf20Sopenharmony_ci	u64			bw_ratio;
8158c2ecf20Sopenharmony_ci};
8168c2ecf20Sopenharmony_ci
8178c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
8188c2ecf20Sopenharmony_ci/* An entity is a task if it doesn't "own" a runqueue */
8198c2ecf20Sopenharmony_ci#define entity_is_task(se)	(!se->my_q)
8208c2ecf20Sopenharmony_ci
8218c2ecf20Sopenharmony_cistatic inline void se_update_runnable(struct sched_entity *se)
8228c2ecf20Sopenharmony_ci{
8238c2ecf20Sopenharmony_ci	if (!entity_is_task(se))
8248c2ecf20Sopenharmony_ci		se->runnable_weight = se->my_q->h_nr_running;
8258c2ecf20Sopenharmony_ci}
8268c2ecf20Sopenharmony_ci
8278c2ecf20Sopenharmony_cistatic inline long se_runnable(struct sched_entity *se)
8288c2ecf20Sopenharmony_ci{
8298c2ecf20Sopenharmony_ci	if (entity_is_task(se))
8308c2ecf20Sopenharmony_ci		return !!se->on_rq;
8318c2ecf20Sopenharmony_ci	else
8328c2ecf20Sopenharmony_ci		return se->runnable_weight;
8338c2ecf20Sopenharmony_ci}
8348c2ecf20Sopenharmony_ci
8358c2ecf20Sopenharmony_ci#else
8368c2ecf20Sopenharmony_ci#define entity_is_task(se)	1
8378c2ecf20Sopenharmony_ci
8388c2ecf20Sopenharmony_cistatic inline void se_update_runnable(struct sched_entity *se) {}
8398c2ecf20Sopenharmony_ci
8408c2ecf20Sopenharmony_cistatic inline long se_runnable(struct sched_entity *se)
8418c2ecf20Sopenharmony_ci{
8428c2ecf20Sopenharmony_ci	return !!se->on_rq;
8438c2ecf20Sopenharmony_ci}
8448c2ecf20Sopenharmony_ci#endif
8458c2ecf20Sopenharmony_ci
8468c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
8478c2ecf20Sopenharmony_ci/*
8488c2ecf20Sopenharmony_ci * XXX we want to get rid of these helpers and use the full load resolution.
8498c2ecf20Sopenharmony_ci */
8508c2ecf20Sopenharmony_cistatic inline long se_weight(struct sched_entity *se)
8518c2ecf20Sopenharmony_ci{
8528c2ecf20Sopenharmony_ci	return scale_load_down(se->load.weight);
8538c2ecf20Sopenharmony_ci}
8548c2ecf20Sopenharmony_ci
8558c2ecf20Sopenharmony_ci
8568c2ecf20Sopenharmony_cistatic inline bool sched_asym_prefer(int a, int b)
8578c2ecf20Sopenharmony_ci{
8588c2ecf20Sopenharmony_ci	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
8598c2ecf20Sopenharmony_ci}
8608c2ecf20Sopenharmony_ci
8618c2ecf20Sopenharmony_cistruct perf_domain {
8628c2ecf20Sopenharmony_ci	struct em_perf_domain *em_pd;
8638c2ecf20Sopenharmony_ci	struct perf_domain *next;
8648c2ecf20Sopenharmony_ci	struct rcu_head rcu;
8658c2ecf20Sopenharmony_ci};
8668c2ecf20Sopenharmony_ci
8678c2ecf20Sopenharmony_ci/* Scheduling group status flags */
8688c2ecf20Sopenharmony_ci#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
8698c2ecf20Sopenharmony_ci#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */
8708c2ecf20Sopenharmony_ci
8718c2ecf20Sopenharmony_ci/*
8728c2ecf20Sopenharmony_ci * We add the notion of a root-domain which will be used to define per-domain
8738c2ecf20Sopenharmony_ci * variables. Each exclusive cpuset essentially defines an island domain by
8748c2ecf20Sopenharmony_ci * fully partitioning the member CPUs from any other cpuset. Whenever a new
8758c2ecf20Sopenharmony_ci * exclusive cpuset is created, we also create and attach a new root-domain
8768c2ecf20Sopenharmony_ci * object.
8778c2ecf20Sopenharmony_ci *
8788c2ecf20Sopenharmony_ci */
8798c2ecf20Sopenharmony_cistruct root_domain {
8808c2ecf20Sopenharmony_ci	atomic_t		refcount;
8818c2ecf20Sopenharmony_ci	atomic_t		rto_count;
8828c2ecf20Sopenharmony_ci	struct rcu_head		rcu;
8838c2ecf20Sopenharmony_ci	cpumask_var_t		span;
8848c2ecf20Sopenharmony_ci	cpumask_var_t		online;
8858c2ecf20Sopenharmony_ci
8868c2ecf20Sopenharmony_ci	/*
8878c2ecf20Sopenharmony_ci	 * Indicate pullable load on at least one CPU, e.g:
8888c2ecf20Sopenharmony_ci	 * - More than one runnable task
8898c2ecf20Sopenharmony_ci	 * - Running task is misfit
8908c2ecf20Sopenharmony_ci	 */
8918c2ecf20Sopenharmony_ci	int			overload;
8928c2ecf20Sopenharmony_ci
8938c2ecf20Sopenharmony_ci	/* Indicate one or more cpus over-utilized (tipping point) */
8948c2ecf20Sopenharmony_ci	int			overutilized;
8958c2ecf20Sopenharmony_ci
8968c2ecf20Sopenharmony_ci	/*
8978c2ecf20Sopenharmony_ci	 * The bit corresponding to a CPU gets set here if such CPU has more
8988c2ecf20Sopenharmony_ci	 * than one runnable -deadline task (as it is below for RT tasks).
8998c2ecf20Sopenharmony_ci	 */
9008c2ecf20Sopenharmony_ci	cpumask_var_t		dlo_mask;
9018c2ecf20Sopenharmony_ci	atomic_t		dlo_count;
9028c2ecf20Sopenharmony_ci	struct dl_bw		dl_bw;
9038c2ecf20Sopenharmony_ci	struct cpudl		cpudl;
9048c2ecf20Sopenharmony_ci
9058c2ecf20Sopenharmony_ci#ifdef HAVE_RT_PUSH_IPI
9068c2ecf20Sopenharmony_ci	/*
9078c2ecf20Sopenharmony_ci	 * For IPI pull requests, loop across the rto_mask.
9088c2ecf20Sopenharmony_ci	 */
9098c2ecf20Sopenharmony_ci	struct irq_work		rto_push_work;
9108c2ecf20Sopenharmony_ci	raw_spinlock_t		rto_lock;
9118c2ecf20Sopenharmony_ci	/* These are only updated and read within rto_lock */
9128c2ecf20Sopenharmony_ci	int			rto_loop;
9138c2ecf20Sopenharmony_ci	int			rto_cpu;
9148c2ecf20Sopenharmony_ci	/* These atomics are updated outside of a lock */
9158c2ecf20Sopenharmony_ci	atomic_t		rto_loop_next;
9168c2ecf20Sopenharmony_ci	atomic_t		rto_loop_start;
9178c2ecf20Sopenharmony_ci#endif
9188c2ecf20Sopenharmony_ci	/*
9198c2ecf20Sopenharmony_ci	 * The "RT overload" flag: it gets set if a CPU has more than
9208c2ecf20Sopenharmony_ci	 * one runnable RT task.
9218c2ecf20Sopenharmony_ci	 */
9228c2ecf20Sopenharmony_ci	cpumask_var_t		rto_mask;
9238c2ecf20Sopenharmony_ci	struct cpupri		cpupri;
9248c2ecf20Sopenharmony_ci
9258c2ecf20Sopenharmony_ci	unsigned long		max_cpu_capacity;
9268c2ecf20Sopenharmony_ci
9278c2ecf20Sopenharmony_ci	/*
9288c2ecf20Sopenharmony_ci	 * NULL-terminated list of performance domains intersecting with the
9298c2ecf20Sopenharmony_ci	 * CPUs of the rd. Protected by RCU.
9308c2ecf20Sopenharmony_ci	 */
9318c2ecf20Sopenharmony_ci	struct perf_domain __rcu *pd;
9328c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
9338c2ecf20Sopenharmony_ci	int max_cap_orig_cpu;
9348c2ecf20Sopenharmony_ci#endif
9358c2ecf20Sopenharmony_ci};
9368c2ecf20Sopenharmony_ci
9378c2ecf20Sopenharmony_ciextern void init_defrootdomain(void);
9388c2ecf20Sopenharmony_ciextern int sched_init_domains(const struct cpumask *cpu_map);
9398c2ecf20Sopenharmony_ciextern void rq_attach_root(struct rq *rq, struct root_domain *rd);
9408c2ecf20Sopenharmony_ciextern void sched_get_rd(struct root_domain *rd);
9418c2ecf20Sopenharmony_ciextern void sched_put_rd(struct root_domain *rd);
9428c2ecf20Sopenharmony_ci
9438c2ecf20Sopenharmony_ci#ifdef HAVE_RT_PUSH_IPI
9448c2ecf20Sopenharmony_ciextern void rto_push_irq_work_func(struct irq_work *work);
9458c2ecf20Sopenharmony_ci#endif
9468c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
9478c2ecf20Sopenharmony_ci
#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */
9918c2ecf20Sopenharmony_ci
9928c2ecf20Sopenharmony_ci/*
9938c2ecf20Sopenharmony_ci * This is the main, per-CPU runqueue data structure.
9948c2ecf20Sopenharmony_ci *
9958c2ecf20Sopenharmony_ci * Locking rule: those places that want to lock multiple runqueues
9968c2ecf20Sopenharmony_ci * (such as the load balancing or the thread migration code), lock
9978c2ecf20Sopenharmony_ci * acquire operations must be ordered by ascending &runqueue.
9988c2ecf20Sopenharmony_ci */
9998c2ecf20Sopenharmony_cistruct rq {
10008c2ecf20Sopenharmony_ci	/* runqueue lock: */
10018c2ecf20Sopenharmony_ci	raw_spinlock_t		lock;
10028c2ecf20Sopenharmony_ci
10038c2ecf20Sopenharmony_ci	/*
10048c2ecf20Sopenharmony_ci	 * nr_running and cpu_load should be in the same cacheline because
10058c2ecf20Sopenharmony_ci	 * remote CPUs use both these fields when doing load calculation.
10068c2ecf20Sopenharmony_ci	 */
10078c2ecf20Sopenharmony_ci	unsigned int		nr_running;
10088c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA_BALANCING
10098c2ecf20Sopenharmony_ci	unsigned int		nr_numa_running;
10108c2ecf20Sopenharmony_ci	unsigned int		nr_preferred_running;
10118c2ecf20Sopenharmony_ci	unsigned int		numa_migrate_on;
10128c2ecf20Sopenharmony_ci#endif
10138c2ecf20Sopenharmony_ci#ifdef CONFIG_NO_HZ_COMMON
10148c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
10158c2ecf20Sopenharmony_ci	unsigned long		last_blocked_load_update_tick;
10168c2ecf20Sopenharmony_ci	unsigned int		has_blocked_load;
10178c2ecf20Sopenharmony_ci	call_single_data_t	nohz_csd;
10188c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
10198c2ecf20Sopenharmony_ci	unsigned int		nohz_tick_stopped;
10208c2ecf20Sopenharmony_ci	atomic_t		nohz_flags;
10218c2ecf20Sopenharmony_ci#endif /* CONFIG_NO_HZ_COMMON */
10228c2ecf20Sopenharmony_ci
10238c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
10248c2ecf20Sopenharmony_ci	unsigned int		ttwu_pending;
10258c2ecf20Sopenharmony_ci#endif
10268c2ecf20Sopenharmony_ci	u64			nr_switches;
10278c2ecf20Sopenharmony_ci
10288c2ecf20Sopenharmony_ci#ifdef CONFIG_UCLAMP_TASK
10298c2ecf20Sopenharmony_ci	/* Utilization clamp values based on CPU's RUNNABLE tasks */
10308c2ecf20Sopenharmony_ci	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
10318c2ecf20Sopenharmony_ci	unsigned int		uclamp_flags;
10328c2ecf20Sopenharmony_ci#define UCLAMP_FLAG_IDLE 0x01
10338c2ecf20Sopenharmony_ci#endif
10348c2ecf20Sopenharmony_ci
10358c2ecf20Sopenharmony_ci	struct cfs_rq		cfs;
10368c2ecf20Sopenharmony_ci	struct rt_rq		rt;
10378c2ecf20Sopenharmony_ci	struct dl_rq		dl;
10388c2ecf20Sopenharmony_ci
10398c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
10408c2ecf20Sopenharmony_ci	/* list of leaf cfs_rq on this CPU: */
10418c2ecf20Sopenharmony_ci	struct list_head	leaf_cfs_rq_list;
10428c2ecf20Sopenharmony_ci	struct list_head	*tmp_alone_branch;
10438c2ecf20Sopenharmony_ci#endif /* CONFIG_FAIR_GROUP_SCHED */
10448c2ecf20Sopenharmony_ci
10458c2ecf20Sopenharmony_ci	/*
10468c2ecf20Sopenharmony_ci	 * This is part of a global counter where only the total sum
10478c2ecf20Sopenharmony_ci	 * over all CPUs matters. A task can increase this counter on
10488c2ecf20Sopenharmony_ci	 * one CPU and if it got migrated afterwards it may decrease
10498c2ecf20Sopenharmony_ci	 * it on another CPU. Always updated under the runqueue lock:
10508c2ecf20Sopenharmony_ci	 */
10518c2ecf20Sopenharmony_ci	unsigned long		nr_uninterruptible;
10528c2ecf20Sopenharmony_ci
10538c2ecf20Sopenharmony_ci	struct task_struct __rcu	*curr;
10548c2ecf20Sopenharmony_ci	struct task_struct	*idle;
10558c2ecf20Sopenharmony_ci	struct task_struct	*stop;
10568c2ecf20Sopenharmony_ci	unsigned long		next_balance;
10578c2ecf20Sopenharmony_ci	struct mm_struct	*prev_mm;
10588c2ecf20Sopenharmony_ci
10598c2ecf20Sopenharmony_ci	unsigned int		clock_update_flags;
10608c2ecf20Sopenharmony_ci	u64			clock;
10618c2ecf20Sopenharmony_ci	/* Ensure that all clocks are in the same cache line */
10628c2ecf20Sopenharmony_ci	u64			clock_task ____cacheline_aligned;
10638c2ecf20Sopenharmony_ci	u64			clock_pelt;
10648c2ecf20Sopenharmony_ci	unsigned long		lost_idle_time;
10658c2ecf20Sopenharmony_ci
10668c2ecf20Sopenharmony_ci	atomic_t		nr_iowait;
10678c2ecf20Sopenharmony_ci
10688c2ecf20Sopenharmony_ci#ifdef CONFIG_MEMBARRIER
10698c2ecf20Sopenharmony_ci	int membarrier_state;
10708c2ecf20Sopenharmony_ci#endif
10718c2ecf20Sopenharmony_ci
10728c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
10738c2ecf20Sopenharmony_ci	struct root_domain		*rd;
10748c2ecf20Sopenharmony_ci	struct sched_domain __rcu	*sd;
10758c2ecf20Sopenharmony_ci
10768c2ecf20Sopenharmony_ci	unsigned long		cpu_capacity;
10778c2ecf20Sopenharmony_ci	unsigned long		cpu_capacity_orig;
10788c2ecf20Sopenharmony_ci	unsigned long		cpu_capacity_inverted;
10798c2ecf20Sopenharmony_ci
10808c2ecf20Sopenharmony_ci	struct callback_head	*balance_callback;
10818c2ecf20Sopenharmony_ci
10828c2ecf20Sopenharmony_ci	unsigned char		nohz_idle_balance;
10838c2ecf20Sopenharmony_ci	unsigned char		idle_balance;
10848c2ecf20Sopenharmony_ci
10858c2ecf20Sopenharmony_ci	unsigned long		misfit_task_load;
10868c2ecf20Sopenharmony_ci
10878c2ecf20Sopenharmony_ci	/* For active balancing */
10888c2ecf20Sopenharmony_ci	int			active_balance;
10898c2ecf20Sopenharmony_ci	int			push_cpu;
10908c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_EAS
10918c2ecf20Sopenharmony_ci	struct task_struct	*push_task;
10928c2ecf20Sopenharmony_ci#endif
10938c2ecf20Sopenharmony_ci	struct cpu_stop_work	active_balance_work;
10948c2ecf20Sopenharmony_ci
10958c2ecf20Sopenharmony_ci	/* For rt active balancing */
10968c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_ACTIVE_LB
10978c2ecf20Sopenharmony_ci	int rt_active_balance;
10988c2ecf20Sopenharmony_ci	struct task_struct *rt_push_task;
10998c2ecf20Sopenharmony_ci	struct cpu_stop_work rt_active_balance_work;
11008c2ecf20Sopenharmony_ci#endif
11018c2ecf20Sopenharmony_ci
11028c2ecf20Sopenharmony_ci	/* CPU of this runqueue: */
11038c2ecf20Sopenharmony_ci	int			cpu;
11048c2ecf20Sopenharmony_ci	int			online;
11058c2ecf20Sopenharmony_ci
11068c2ecf20Sopenharmony_ci	struct list_head cfs_tasks;
11078c2ecf20Sopenharmony_ci
11088c2ecf20Sopenharmony_ci	struct sched_avg	avg_rt;
11098c2ecf20Sopenharmony_ci	struct sched_avg	avg_dl;
11108c2ecf20Sopenharmony_ci#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
11118c2ecf20Sopenharmony_ci	struct sched_avg	avg_irq;
11128c2ecf20Sopenharmony_ci#endif
11138c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_THERMAL_PRESSURE
11148c2ecf20Sopenharmony_ci	struct sched_avg	avg_thermal;
11158c2ecf20Sopenharmony_ci#endif
11168c2ecf20Sopenharmony_ci	u64			idle_stamp;
11178c2ecf20Sopenharmony_ci	u64			avg_idle;
11188c2ecf20Sopenharmony_ci
11198c2ecf20Sopenharmony_ci	/* This is used to determine avg_idle's max value */
11208c2ecf20Sopenharmony_ci	u64			max_idle_balance_cost;
11218c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
11228c2ecf20Sopenharmony_ci
11238c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
11248c2ecf20Sopenharmony_ci	struct sched_cluster *cluster;
11258c2ecf20Sopenharmony_ci	struct cpumask freq_domain_cpumask;
11268c2ecf20Sopenharmony_ci	struct walt_sched_stats walt_stats;
11278c2ecf20Sopenharmony_ci
11288c2ecf20Sopenharmony_ci	u64 window_start;
11298c2ecf20Sopenharmony_ci	unsigned long walt_flags;
11308c2ecf20Sopenharmony_ci
11318c2ecf20Sopenharmony_ci	u64 cur_irqload;
11328c2ecf20Sopenharmony_ci	u64 avg_irqload;
11338c2ecf20Sopenharmony_ci	u64 irqload_ts;
11348c2ecf20Sopenharmony_ci	u64 curr_runnable_sum;
11358c2ecf20Sopenharmony_ci	u64 prev_runnable_sum;
11368c2ecf20Sopenharmony_ci	u64 nt_curr_runnable_sum;
11378c2ecf20Sopenharmony_ci	u64 nt_prev_runnable_sum;
11388c2ecf20Sopenharmony_ci	u64 cum_window_demand_scaled;
11398c2ecf20Sopenharmony_ci	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
11408c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
11418c2ecf20Sopenharmony_ci	struct group_cpu_time grp_time;
11428c2ecf20Sopenharmony_ci#endif
11438c2ecf20Sopenharmony_ci#endif /* CONFIG_SCHED_WALT */
11448c2ecf20Sopenharmony_ci
11458c2ecf20Sopenharmony_ci#ifdef CONFIG_IRQ_TIME_ACCOUNTING
11468c2ecf20Sopenharmony_ci	u64			prev_irq_time;
11478c2ecf20Sopenharmony_ci#endif
11488c2ecf20Sopenharmony_ci#ifdef CONFIG_PARAVIRT
11498c2ecf20Sopenharmony_ci	u64			prev_steal_time;
11508c2ecf20Sopenharmony_ci#endif
11518c2ecf20Sopenharmony_ci#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
11528c2ecf20Sopenharmony_ci	u64			prev_steal_time_rq;
11538c2ecf20Sopenharmony_ci#endif
11548c2ecf20Sopenharmony_ci
11558c2ecf20Sopenharmony_ci	/* calc_load related fields */
11568c2ecf20Sopenharmony_ci	unsigned long		calc_load_update;
11578c2ecf20Sopenharmony_ci	long			calc_load_active;
11588c2ecf20Sopenharmony_ci
11598c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_HRTICK
11608c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
11618c2ecf20Sopenharmony_ci	call_single_data_t	hrtick_csd;
11628c2ecf20Sopenharmony_ci#endif
11638c2ecf20Sopenharmony_ci	struct hrtimer		hrtick_timer;
11648c2ecf20Sopenharmony_ci	ktime_t 		hrtick_time;
11658c2ecf20Sopenharmony_ci#endif
11668c2ecf20Sopenharmony_ci
11678c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHEDSTATS
11688c2ecf20Sopenharmony_ci	/* latency stats */
11698c2ecf20Sopenharmony_ci	struct sched_info	rq_sched_info;
11708c2ecf20Sopenharmony_ci	unsigned long long	rq_cpu_time;
11718c2ecf20Sopenharmony_ci	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
11728c2ecf20Sopenharmony_ci
11738c2ecf20Sopenharmony_ci	/* sys_sched_yield() stats */
11748c2ecf20Sopenharmony_ci	unsigned int		yld_count;
11758c2ecf20Sopenharmony_ci
11768c2ecf20Sopenharmony_ci	/* schedule() stats */
11778c2ecf20Sopenharmony_ci	unsigned int		sched_count;
11788c2ecf20Sopenharmony_ci	unsigned int		sched_goidle;
11798c2ecf20Sopenharmony_ci
11808c2ecf20Sopenharmony_ci	/* try_to_wake_up() stats */
11818c2ecf20Sopenharmony_ci	unsigned int		ttwu_count;
11828c2ecf20Sopenharmony_ci	unsigned int		ttwu_local;
11838c2ecf20Sopenharmony_ci#endif
11848c2ecf20Sopenharmony_ci
11858c2ecf20Sopenharmony_ci#ifdef CONFIG_CPU_IDLE
11868c2ecf20Sopenharmony_ci	/* Must be inspected within a rcu lock section */
11878c2ecf20Sopenharmony_ci	struct cpuidle_state	*idle_state;
11888c2ecf20Sopenharmony_ci#endif
11898c2ecf20Sopenharmony_ci};
11908c2ecf20Sopenharmony_ci
11918c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
11928c2ecf20Sopenharmony_ci
11938c2ecf20Sopenharmony_ci/* CPU runqueue to which this cfs_rq is attached */
11948c2ecf20Sopenharmony_cistatic inline struct rq *rq_of(struct cfs_rq *cfs_rq)
11958c2ecf20Sopenharmony_ci{
11968c2ecf20Sopenharmony_ci	return cfs_rq->rq;
11978c2ecf20Sopenharmony_ci}
11988c2ecf20Sopenharmony_ci
11998c2ecf20Sopenharmony_ci#else
12008c2ecf20Sopenharmony_ci
12018c2ecf20Sopenharmony_cistatic inline struct rq *rq_of(struct cfs_rq *cfs_rq)
12028c2ecf20Sopenharmony_ci{
12038c2ecf20Sopenharmony_ci	return container_of(cfs_rq, struct rq, cfs);
12048c2ecf20Sopenharmony_ci}
12058c2ecf20Sopenharmony_ci#endif
12068c2ecf20Sopenharmony_ci
12078c2ecf20Sopenharmony_cistatic inline int cpu_of(struct rq *rq)
12088c2ecf20Sopenharmony_ci{
12098c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
12108c2ecf20Sopenharmony_ci	return rq->cpu;
12118c2ecf20Sopenharmony_ci#else
12128c2ecf20Sopenharmony_ci	return 0;
12138c2ecf20Sopenharmony_ci#endif
12148c2ecf20Sopenharmony_ci}
12158c2ecf20Sopenharmony_ci
12168c2ecf20Sopenharmony_ci
12178c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_SMT
12188c2ecf20Sopenharmony_ciextern void __update_idle_core(struct rq *rq);
12198c2ecf20Sopenharmony_ci
12208c2ecf20Sopenharmony_cistatic inline void update_idle_core(struct rq *rq)
12218c2ecf20Sopenharmony_ci{
12228c2ecf20Sopenharmony_ci	if (static_branch_unlikely(&sched_smt_present))
12238c2ecf20Sopenharmony_ci		__update_idle_core(rq);
12248c2ecf20Sopenharmony_ci}
12258c2ecf20Sopenharmony_ci
12268c2ecf20Sopenharmony_ci#else
12278c2ecf20Sopenharmony_cistatic inline void update_idle_core(struct rq *rq) { }
12288c2ecf20Sopenharmony_ci#endif
12298c2ecf20Sopenharmony_ci
12308c2ecf20Sopenharmony_ciDECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
12318c2ecf20Sopenharmony_ci
12328c2ecf20Sopenharmony_ci#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
12338c2ecf20Sopenharmony_ci#define this_rq()		this_cpu_ptr(&runqueues)
12348c2ecf20Sopenharmony_ci#define task_rq(p)		cpu_rq(task_cpu(p))
12358c2ecf20Sopenharmony_ci#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
12368c2ecf20Sopenharmony_ci#define raw_rq()		raw_cpu_ptr(&runqueues)
12378c2ecf20Sopenharmony_ci
12388c2ecf20Sopenharmony_ciextern void update_rq_clock(struct rq *rq);
12398c2ecf20Sopenharmony_ci
12408c2ecf20Sopenharmony_cistatic inline u64 __rq_clock_broken(struct rq *rq)
12418c2ecf20Sopenharmony_ci{
12428c2ecf20Sopenharmony_ci	return READ_ONCE(rq->clock);
12438c2ecf20Sopenharmony_ci}
12448c2ecf20Sopenharmony_ci
12458c2ecf20Sopenharmony_ci/*
12468c2ecf20Sopenharmony_ci * rq::clock_update_flags bits
12478c2ecf20Sopenharmony_ci *
12488c2ecf20Sopenharmony_ci * %RQCF_REQ_SKIP - will request skipping of clock update on the next
12498c2ecf20Sopenharmony_ci *  call to __schedule(). This is an optimisation to avoid
12508c2ecf20Sopenharmony_ci *  neighbouring rq clock updates.
12518c2ecf20Sopenharmony_ci *
12528c2ecf20Sopenharmony_ci * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
12538c2ecf20Sopenharmony_ci *  in effect and calls to update_rq_clock() are being ignored.
12548c2ecf20Sopenharmony_ci *
12558c2ecf20Sopenharmony_ci * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
12568c2ecf20Sopenharmony_ci *  made to update_rq_clock() since the last time rq::lock was pinned.
12578c2ecf20Sopenharmony_ci *
12588c2ecf20Sopenharmony_ci * If inside of __schedule(), clock_update_flags will have been
12598c2ecf20Sopenharmony_ci * shifted left (a left shift is a cheap operation for the fast path
12608c2ecf20Sopenharmony_ci * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
12618c2ecf20Sopenharmony_ci *
12628c2ecf20Sopenharmony_ci *	if (rq-clock_update_flags >= RQCF_UPDATED)
12638c2ecf20Sopenharmony_ci *
12648c2ecf20Sopenharmony_ci * to check if %RQCF_UPADTED is set. It'll never be shifted more than
12658c2ecf20Sopenharmony_ci * one position though, because the next rq_unpin_lock() will shift it
12668c2ecf20Sopenharmony_ci * back.
12678c2ecf20Sopenharmony_ci */
12688c2ecf20Sopenharmony_ci#define RQCF_REQ_SKIP		0x01
12698c2ecf20Sopenharmony_ci#define RQCF_ACT_SKIP		0x02
12708c2ecf20Sopenharmony_ci#define RQCF_UPDATED		0x04
12718c2ecf20Sopenharmony_ci
12728c2ecf20Sopenharmony_cistatic inline void assert_clock_updated(struct rq *rq)
12738c2ecf20Sopenharmony_ci{
12748c2ecf20Sopenharmony_ci	/*
12758c2ecf20Sopenharmony_ci	 * The only reason for not seeing a clock update since the
12768c2ecf20Sopenharmony_ci	 * last rq_pin_lock() is if we're currently skipping updates.
12778c2ecf20Sopenharmony_ci	 */
12788c2ecf20Sopenharmony_ci	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
12798c2ecf20Sopenharmony_ci}
12808c2ecf20Sopenharmony_ci
12818c2ecf20Sopenharmony_cistatic inline u64 rq_clock(struct rq *rq)
12828c2ecf20Sopenharmony_ci{
12838c2ecf20Sopenharmony_ci	lockdep_assert_held(&rq->lock);
12848c2ecf20Sopenharmony_ci	assert_clock_updated(rq);
12858c2ecf20Sopenharmony_ci
12868c2ecf20Sopenharmony_ci	return rq->clock;
12878c2ecf20Sopenharmony_ci}
12888c2ecf20Sopenharmony_ci
12898c2ecf20Sopenharmony_cistatic inline u64 rq_clock_task(struct rq *rq)
12908c2ecf20Sopenharmony_ci{
12918c2ecf20Sopenharmony_ci	lockdep_assert_held(&rq->lock);
12928c2ecf20Sopenharmony_ci	assert_clock_updated(rq);
12938c2ecf20Sopenharmony_ci
12948c2ecf20Sopenharmony_ci	return rq->clock_task;
12958c2ecf20Sopenharmony_ci}
12968c2ecf20Sopenharmony_ci
12978c2ecf20Sopenharmony_ci/**
12988c2ecf20Sopenharmony_ci * By default the decay is the default pelt decay period.
12998c2ecf20Sopenharmony_ci * The decay shift can change the decay period in
13008c2ecf20Sopenharmony_ci * multiples of 32.
13018c2ecf20Sopenharmony_ci *  Decay shift		Decay period(ms)
13028c2ecf20Sopenharmony_ci *	0			32
13038c2ecf20Sopenharmony_ci *	1			64
13048c2ecf20Sopenharmony_ci *	2			128
13058c2ecf20Sopenharmony_ci *	3			256
13068c2ecf20Sopenharmony_ci *	4			512
13078c2ecf20Sopenharmony_ci */
13088c2ecf20Sopenharmony_ciextern int sched_thermal_decay_shift;
13098c2ecf20Sopenharmony_ci
13108c2ecf20Sopenharmony_cistatic inline u64 rq_clock_thermal(struct rq *rq)
13118c2ecf20Sopenharmony_ci{
13128c2ecf20Sopenharmony_ci	return rq_clock_task(rq) >> sched_thermal_decay_shift;
13138c2ecf20Sopenharmony_ci}
13148c2ecf20Sopenharmony_ci
13158c2ecf20Sopenharmony_cistatic inline void rq_clock_skip_update(struct rq *rq)
13168c2ecf20Sopenharmony_ci{
13178c2ecf20Sopenharmony_ci	lockdep_assert_held(&rq->lock);
13188c2ecf20Sopenharmony_ci	rq->clock_update_flags |= RQCF_REQ_SKIP;
13198c2ecf20Sopenharmony_ci}
13208c2ecf20Sopenharmony_ci
13218c2ecf20Sopenharmony_ci/*
13228c2ecf20Sopenharmony_ci * See rt task throttling, which is the only time a skip
13238c2ecf20Sopenharmony_ci * request is cancelled.
13248c2ecf20Sopenharmony_ci */
13258c2ecf20Sopenharmony_cistatic inline void rq_clock_cancel_skipupdate(struct rq *rq)
13268c2ecf20Sopenharmony_ci{
13278c2ecf20Sopenharmony_ci	lockdep_assert_held(&rq->lock);
13288c2ecf20Sopenharmony_ci	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
13298c2ecf20Sopenharmony_ci}
13308c2ecf20Sopenharmony_ci
13318c2ecf20Sopenharmony_cistruct rq_flags {
13328c2ecf20Sopenharmony_ci	unsigned long flags;
13338c2ecf20Sopenharmony_ci	struct pin_cookie cookie;
13348c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
13358c2ecf20Sopenharmony_ci	/*
13368c2ecf20Sopenharmony_ci	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
13378c2ecf20Sopenharmony_ci	 * current pin context is stashed here in case it needs to be
13388c2ecf20Sopenharmony_ci	 * restored in rq_repin_lock().
13398c2ecf20Sopenharmony_ci	 */
13408c2ecf20Sopenharmony_ci	unsigned int clock_update_flags;
13418c2ecf20Sopenharmony_ci#endif
13428c2ecf20Sopenharmony_ci};
13438c2ecf20Sopenharmony_ci
13448c2ecf20Sopenharmony_ci/*
13458c2ecf20Sopenharmony_ci * Lockdep annotation that avoids accidental unlocks; it's like a
13468c2ecf20Sopenharmony_ci * sticky/continuous lockdep_assert_held().
13478c2ecf20Sopenharmony_ci *
13488c2ecf20Sopenharmony_ci * This avoids code that has access to 'struct rq *rq' (basically everything in
13498c2ecf20Sopenharmony_ci * the scheduler) from accidentally unlocking the rq if they do not also have a
13508c2ecf20Sopenharmony_ci * copy of the (on-stack) 'struct rq_flags rf'.
13518c2ecf20Sopenharmony_ci *
13528c2ecf20Sopenharmony_ci * Also see Documentation/locking/lockdep-design.rst.
13538c2ecf20Sopenharmony_ci */
13548c2ecf20Sopenharmony_cistatic inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
13558c2ecf20Sopenharmony_ci{
13568c2ecf20Sopenharmony_ci	rf->cookie = lockdep_pin_lock(&rq->lock);
13578c2ecf20Sopenharmony_ci
13588c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
13598c2ecf20Sopenharmony_ci	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
13608c2ecf20Sopenharmony_ci	rf->clock_update_flags = 0;
13618c2ecf20Sopenharmony_ci#endif
13628c2ecf20Sopenharmony_ci}
13638c2ecf20Sopenharmony_ci
13648c2ecf20Sopenharmony_cistatic inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
13658c2ecf20Sopenharmony_ci{
13668c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
13678c2ecf20Sopenharmony_ci	if (rq->clock_update_flags > RQCF_ACT_SKIP)
13688c2ecf20Sopenharmony_ci		rf->clock_update_flags = RQCF_UPDATED;
13698c2ecf20Sopenharmony_ci#endif
13708c2ecf20Sopenharmony_ci
13718c2ecf20Sopenharmony_ci	lockdep_unpin_lock(&rq->lock, rf->cookie);
13728c2ecf20Sopenharmony_ci}
13738c2ecf20Sopenharmony_ci
13748c2ecf20Sopenharmony_cistatic inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
13758c2ecf20Sopenharmony_ci{
13768c2ecf20Sopenharmony_ci	lockdep_repin_lock(&rq->lock, rf->cookie);
13778c2ecf20Sopenharmony_ci
13788c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
13798c2ecf20Sopenharmony_ci	/*
13808c2ecf20Sopenharmony_ci	 * Restore the value we stashed in @rf for this pin context.
13818c2ecf20Sopenharmony_ci	 */
13828c2ecf20Sopenharmony_ci	rq->clock_update_flags |= rf->clock_update_flags;
13838c2ecf20Sopenharmony_ci#endif
13848c2ecf20Sopenharmony_ci}
13858c2ecf20Sopenharmony_ci
13868c2ecf20Sopenharmony_cistruct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
13878c2ecf20Sopenharmony_ci	__acquires(rq->lock);
13888c2ecf20Sopenharmony_ci
13898c2ecf20Sopenharmony_cistruct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
13908c2ecf20Sopenharmony_ci	__acquires(p->pi_lock)
13918c2ecf20Sopenharmony_ci	__acquires(rq->lock);
13928c2ecf20Sopenharmony_ci
13938c2ecf20Sopenharmony_cistatic inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
13948c2ecf20Sopenharmony_ci	__releases(rq->lock)
13958c2ecf20Sopenharmony_ci{
13968c2ecf20Sopenharmony_ci	rq_unpin_lock(rq, rf);
13978c2ecf20Sopenharmony_ci	raw_spin_unlock(&rq->lock);
13988c2ecf20Sopenharmony_ci}
13998c2ecf20Sopenharmony_ci
14008c2ecf20Sopenharmony_cistatic inline void
14018c2ecf20Sopenharmony_citask_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
14028c2ecf20Sopenharmony_ci	__releases(rq->lock)
14038c2ecf20Sopenharmony_ci	__releases(p->pi_lock)
14048c2ecf20Sopenharmony_ci{
14058c2ecf20Sopenharmony_ci	rq_unpin_lock(rq, rf);
14068c2ecf20Sopenharmony_ci	raw_spin_unlock(&rq->lock);
14078c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
14088c2ecf20Sopenharmony_ci}
14098c2ecf20Sopenharmony_ci
14108c2ecf20Sopenharmony_cistatic inline void
14118c2ecf20Sopenharmony_cirq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
14128c2ecf20Sopenharmony_ci	__acquires(rq->lock)
14138c2ecf20Sopenharmony_ci{
14148c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&rq->lock, rf->flags);
14158c2ecf20Sopenharmony_ci	rq_pin_lock(rq, rf);
14168c2ecf20Sopenharmony_ci}
14178c2ecf20Sopenharmony_ci
14188c2ecf20Sopenharmony_cistatic inline void
14198c2ecf20Sopenharmony_cirq_lock_irq(struct rq *rq, struct rq_flags *rf)
14208c2ecf20Sopenharmony_ci	__acquires(rq->lock)
14218c2ecf20Sopenharmony_ci{
14228c2ecf20Sopenharmony_ci	raw_spin_lock_irq(&rq->lock);
14238c2ecf20Sopenharmony_ci	rq_pin_lock(rq, rf);
14248c2ecf20Sopenharmony_ci}
14258c2ecf20Sopenharmony_ci
14268c2ecf20Sopenharmony_cistatic inline void
14278c2ecf20Sopenharmony_cirq_lock(struct rq *rq, struct rq_flags *rf)
14288c2ecf20Sopenharmony_ci	__acquires(rq->lock)
14298c2ecf20Sopenharmony_ci{
14308c2ecf20Sopenharmony_ci	raw_spin_lock(&rq->lock);
14318c2ecf20Sopenharmony_ci	rq_pin_lock(rq, rf);
14328c2ecf20Sopenharmony_ci}
14338c2ecf20Sopenharmony_ci
14348c2ecf20Sopenharmony_cistatic inline void
14358c2ecf20Sopenharmony_cirq_relock(struct rq *rq, struct rq_flags *rf)
14368c2ecf20Sopenharmony_ci	__acquires(rq->lock)
14378c2ecf20Sopenharmony_ci{
14388c2ecf20Sopenharmony_ci	raw_spin_lock(&rq->lock);
14398c2ecf20Sopenharmony_ci	rq_repin_lock(rq, rf);
14408c2ecf20Sopenharmony_ci}
14418c2ecf20Sopenharmony_ci
14428c2ecf20Sopenharmony_cistatic inline void
14438c2ecf20Sopenharmony_cirq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
14448c2ecf20Sopenharmony_ci	__releases(rq->lock)
14458c2ecf20Sopenharmony_ci{
14468c2ecf20Sopenharmony_ci	rq_unpin_lock(rq, rf);
14478c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
14488c2ecf20Sopenharmony_ci}
14498c2ecf20Sopenharmony_ci
14508c2ecf20Sopenharmony_cistatic inline void
14518c2ecf20Sopenharmony_cirq_unlock_irq(struct rq *rq, struct rq_flags *rf)
14528c2ecf20Sopenharmony_ci	__releases(rq->lock)
14538c2ecf20Sopenharmony_ci{
14548c2ecf20Sopenharmony_ci	rq_unpin_lock(rq, rf);
14558c2ecf20Sopenharmony_ci	raw_spin_unlock_irq(&rq->lock);
14568c2ecf20Sopenharmony_ci}
14578c2ecf20Sopenharmony_ci
14588c2ecf20Sopenharmony_cistatic inline void
14598c2ecf20Sopenharmony_cirq_unlock(struct rq *rq, struct rq_flags *rf)
14608c2ecf20Sopenharmony_ci	__releases(rq->lock)
14618c2ecf20Sopenharmony_ci{
14628c2ecf20Sopenharmony_ci	rq_unpin_lock(rq, rf);
14638c2ecf20Sopenharmony_ci	raw_spin_unlock(&rq->lock);
14648c2ecf20Sopenharmony_ci}
14658c2ecf20Sopenharmony_ci
14668c2ecf20Sopenharmony_cistatic inline struct rq *
14678c2ecf20Sopenharmony_cithis_rq_lock_irq(struct rq_flags *rf)
14688c2ecf20Sopenharmony_ci	__acquires(rq->lock)
14698c2ecf20Sopenharmony_ci{
14708c2ecf20Sopenharmony_ci	struct rq *rq;
14718c2ecf20Sopenharmony_ci
14728c2ecf20Sopenharmony_ci	local_irq_disable();
14738c2ecf20Sopenharmony_ci	rq = this_rq();
14748c2ecf20Sopenharmony_ci	rq_lock(rq, rf);
14758c2ecf20Sopenharmony_ci	return rq;
14768c2ecf20Sopenharmony_ci}
14778c2ecf20Sopenharmony_ci
14788c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
14798c2ecf20Sopenharmony_cienum numa_topology_type {
14808c2ecf20Sopenharmony_ci	NUMA_DIRECT,
14818c2ecf20Sopenharmony_ci	NUMA_GLUELESS_MESH,
14828c2ecf20Sopenharmony_ci	NUMA_BACKPLANE,
14838c2ecf20Sopenharmony_ci};
14848c2ecf20Sopenharmony_ciextern enum numa_topology_type sched_numa_topology_type;
14858c2ecf20Sopenharmony_ciextern int sched_max_numa_distance;
14868c2ecf20Sopenharmony_ciextern bool find_numa_distance(int distance);
14878c2ecf20Sopenharmony_ciextern void sched_init_numa(void);
14888c2ecf20Sopenharmony_ciextern void sched_domains_numa_masks_set(unsigned int cpu);
14898c2ecf20Sopenharmony_ciextern void sched_domains_numa_masks_clear(unsigned int cpu);
14908c2ecf20Sopenharmony_ciextern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
14918c2ecf20Sopenharmony_ci#else
14928c2ecf20Sopenharmony_cistatic inline void sched_init_numa(void) { }
14938c2ecf20Sopenharmony_cistatic inline void sched_domains_numa_masks_set(unsigned int cpu) { }
14948c2ecf20Sopenharmony_cistatic inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
14958c2ecf20Sopenharmony_cistatic inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
14968c2ecf20Sopenharmony_ci{
14978c2ecf20Sopenharmony_ci	return nr_cpu_ids;
14988c2ecf20Sopenharmony_ci}
14998c2ecf20Sopenharmony_ci#endif
15008c2ecf20Sopenharmony_ci
15018c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA_BALANCING
15028c2ecf20Sopenharmony_ci/* The regions in numa_faults array from task_struct */
15038c2ecf20Sopenharmony_cienum numa_faults_stats {
15048c2ecf20Sopenharmony_ci	NUMA_MEM = 0,
15058c2ecf20Sopenharmony_ci	NUMA_CPU,
15068c2ecf20Sopenharmony_ci	NUMA_MEMBUF,
15078c2ecf20Sopenharmony_ci	NUMA_CPUBUF
15088c2ecf20Sopenharmony_ci};
15098c2ecf20Sopenharmony_ciextern void sched_setnuma(struct task_struct *p, int node);
15108c2ecf20Sopenharmony_ciextern int migrate_task_to(struct task_struct *p, int cpu);
15118c2ecf20Sopenharmony_ciextern int migrate_swap(struct task_struct *p, struct task_struct *t,
15128c2ecf20Sopenharmony_ci			int cpu, int scpu);
15138c2ecf20Sopenharmony_ciextern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
15148c2ecf20Sopenharmony_ci#else
15158c2ecf20Sopenharmony_cistatic inline void
15168c2ecf20Sopenharmony_ciinit_numa_balancing(unsigned long clone_flags, struct task_struct *p)
15178c2ecf20Sopenharmony_ci{
15188c2ecf20Sopenharmony_ci}
15198c2ecf20Sopenharmony_ci#endif /* CONFIG_NUMA_BALANCING */
15208c2ecf20Sopenharmony_ci
15218c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
15228c2ecf20Sopenharmony_ci
15238c2ecf20Sopenharmony_cistatic inline void
15248c2ecf20Sopenharmony_ciqueue_balance_callback(struct rq *rq,
15258c2ecf20Sopenharmony_ci		       struct callback_head *head,
15268c2ecf20Sopenharmony_ci		       void (*func)(struct rq *rq))
15278c2ecf20Sopenharmony_ci{
15288c2ecf20Sopenharmony_ci	lockdep_assert_held(&rq->lock);
15298c2ecf20Sopenharmony_ci
15308c2ecf20Sopenharmony_ci	if (unlikely(head->next))
15318c2ecf20Sopenharmony_ci		return;
15328c2ecf20Sopenharmony_ci
15338c2ecf20Sopenharmony_ci	head->func = (void (*)(struct callback_head *))func;
15348c2ecf20Sopenharmony_ci	head->next = rq->balance_callback;
15358c2ecf20Sopenharmony_ci	rq->balance_callback = head;
15368c2ecf20Sopenharmony_ci}
15378c2ecf20Sopenharmony_ci
15388c2ecf20Sopenharmony_ci#define rcu_dereference_check_sched_domain(p) \
15398c2ecf20Sopenharmony_ci	rcu_dereference_check((p), \
15408c2ecf20Sopenharmony_ci			      lockdep_is_held(&sched_domains_mutex))
15418c2ecf20Sopenharmony_ci
15428c2ecf20Sopenharmony_ci/*
15438c2ecf20Sopenharmony_ci * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
15448c2ecf20Sopenharmony_ci * See destroy_sched_domains: call_rcu for details.
15458c2ecf20Sopenharmony_ci *
15468c2ecf20Sopenharmony_ci * The domain tree of any CPU may only be accessed from within
15478c2ecf20Sopenharmony_ci * preempt-disabled sections.
15488c2ecf20Sopenharmony_ci */
15498c2ecf20Sopenharmony_ci#define for_each_domain(cpu, __sd) \
15508c2ecf20Sopenharmony_ci	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
15518c2ecf20Sopenharmony_ci			__sd; __sd = __sd->parent)
15528c2ecf20Sopenharmony_ci
15538c2ecf20Sopenharmony_ci/**
15548c2ecf20Sopenharmony_ci * highest_flag_domain - Return highest sched_domain containing flag.
15558c2ecf20Sopenharmony_ci * @cpu:	The CPU whose highest level of sched domain is to
15568c2ecf20Sopenharmony_ci *		be returned.
15578c2ecf20Sopenharmony_ci * @flag:	The flag to check for the highest sched_domain
15588c2ecf20Sopenharmony_ci *		for the given CPU.
15598c2ecf20Sopenharmony_ci *
15608c2ecf20Sopenharmony_ci * Returns the highest sched_domain of a CPU which contains the given flag.
15618c2ecf20Sopenharmony_ci */
15628c2ecf20Sopenharmony_cistatic inline struct sched_domain *highest_flag_domain(int cpu, int flag)
15638c2ecf20Sopenharmony_ci{
15648c2ecf20Sopenharmony_ci	struct sched_domain *sd, *hsd = NULL;
15658c2ecf20Sopenharmony_ci
15668c2ecf20Sopenharmony_ci	for_each_domain(cpu, sd) {
15678c2ecf20Sopenharmony_ci		if (!(sd->flags & flag))
15688c2ecf20Sopenharmony_ci			break;
15698c2ecf20Sopenharmony_ci		hsd = sd;
15708c2ecf20Sopenharmony_ci	}
15718c2ecf20Sopenharmony_ci
15728c2ecf20Sopenharmony_ci	return hsd;
15738c2ecf20Sopenharmony_ci}
15748c2ecf20Sopenharmony_ci
15758c2ecf20Sopenharmony_cistatic inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
15768c2ecf20Sopenharmony_ci{
15778c2ecf20Sopenharmony_ci	struct sched_domain *sd;
15788c2ecf20Sopenharmony_ci
15798c2ecf20Sopenharmony_ci	for_each_domain(cpu, sd) {
15808c2ecf20Sopenharmony_ci		if (sd->flags & flag)
15818c2ecf20Sopenharmony_ci			break;
15828c2ecf20Sopenharmony_ci	}
15838c2ecf20Sopenharmony_ci
15848c2ecf20Sopenharmony_ci	return sd;
15858c2ecf20Sopenharmony_ci}
15868c2ecf20Sopenharmony_ci
15878c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
15888c2ecf20Sopenharmony_ciDECLARE_PER_CPU(int, sd_llc_size);
15898c2ecf20Sopenharmony_ciDECLARE_PER_CPU(int, sd_llc_id);
15908c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
15918c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
15928c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
15938c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
15948c2ecf20Sopenharmony_ciextern struct static_key_false sched_asym_cpucapacity;
15958c2ecf20Sopenharmony_ci
15968c2ecf20Sopenharmony_cistruct sched_group_capacity {
15978c2ecf20Sopenharmony_ci	atomic_t		ref;
15988c2ecf20Sopenharmony_ci	/*
15998c2ecf20Sopenharmony_ci	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
16008c2ecf20Sopenharmony_ci	 * for a single CPU.
16018c2ecf20Sopenharmony_ci	 */
16028c2ecf20Sopenharmony_ci	unsigned long		capacity;
16038c2ecf20Sopenharmony_ci	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
16048c2ecf20Sopenharmony_ci	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
16058c2ecf20Sopenharmony_ci	unsigned long		next_update;
16068c2ecf20Sopenharmony_ci	int			imbalance;		/* XXX unrelated to capacity but shared group state */
16078c2ecf20Sopenharmony_ci
16088c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
16098c2ecf20Sopenharmony_ci	int			id;
16108c2ecf20Sopenharmony_ci#endif
16118c2ecf20Sopenharmony_ci
16128c2ecf20Sopenharmony_ci	unsigned long		cpumask[];		/* Balance mask */
16138c2ecf20Sopenharmony_ci};
16148c2ecf20Sopenharmony_ci
16158c2ecf20Sopenharmony_cistruct sched_group {
16168c2ecf20Sopenharmony_ci	struct sched_group	*next;			/* Must be a circular list */
16178c2ecf20Sopenharmony_ci	atomic_t		ref;
16188c2ecf20Sopenharmony_ci
16198c2ecf20Sopenharmony_ci	unsigned int		group_weight;
16208c2ecf20Sopenharmony_ci	struct sched_group_capacity *sgc;
16218c2ecf20Sopenharmony_ci	int			asym_prefer_cpu;	/* CPU of highest priority in group */
16228c2ecf20Sopenharmony_ci
16238c2ecf20Sopenharmony_ci	/*
16248c2ecf20Sopenharmony_ci	 * The CPUs this group covers.
16258c2ecf20Sopenharmony_ci	 *
16268c2ecf20Sopenharmony_ci	 * NOTE: this field is variable length. (Allocated dynamically
16278c2ecf20Sopenharmony_ci	 * by attaching extra space to the end of the structure,
16288c2ecf20Sopenharmony_ci	 * depending on how many CPUs the kernel has booted up with)
16298c2ecf20Sopenharmony_ci	 */
16308c2ecf20Sopenharmony_ci	unsigned long		cpumask[];
16318c2ecf20Sopenharmony_ci};
16328c2ecf20Sopenharmony_ci
16338c2ecf20Sopenharmony_cistatic inline struct cpumask *sched_group_span(struct sched_group *sg)
16348c2ecf20Sopenharmony_ci{
16358c2ecf20Sopenharmony_ci	return to_cpumask(sg->cpumask);
16368c2ecf20Sopenharmony_ci}
16378c2ecf20Sopenharmony_ci
16388c2ecf20Sopenharmony_ci/*
16398c2ecf20Sopenharmony_ci * See build_balance_mask().
16408c2ecf20Sopenharmony_ci */
16418c2ecf20Sopenharmony_cistatic inline struct cpumask *group_balance_mask(struct sched_group *sg)
16428c2ecf20Sopenharmony_ci{
16438c2ecf20Sopenharmony_ci	return to_cpumask(sg->sgc->cpumask);
16448c2ecf20Sopenharmony_ci}
16458c2ecf20Sopenharmony_ci
16468c2ecf20Sopenharmony_ci/**
16478c2ecf20Sopenharmony_ci * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
16488c2ecf20Sopenharmony_ci * @group: The group whose first CPU is to be returned.
16498c2ecf20Sopenharmony_ci */
16508c2ecf20Sopenharmony_cistatic inline unsigned int group_first_cpu(struct sched_group *group)
16518c2ecf20Sopenharmony_ci{
16528c2ecf20Sopenharmony_ci	return cpumask_first(sched_group_span(group));
16538c2ecf20Sopenharmony_ci}
16548c2ecf20Sopenharmony_ci
16558c2ecf20Sopenharmony_ciextern int group_balance_cpu(struct sched_group *sg);
16568c2ecf20Sopenharmony_ci
16578c2ecf20Sopenharmony_ci#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
16588c2ecf20Sopenharmony_civoid register_sched_domain_sysctl(void);
16598c2ecf20Sopenharmony_civoid dirty_sched_domain_sysctl(int cpu);
16608c2ecf20Sopenharmony_civoid unregister_sched_domain_sysctl(void);
16618c2ecf20Sopenharmony_ci#else
16628c2ecf20Sopenharmony_cistatic inline void register_sched_domain_sysctl(void)
16638c2ecf20Sopenharmony_ci{
16648c2ecf20Sopenharmony_ci}
16658c2ecf20Sopenharmony_cistatic inline void dirty_sched_domain_sysctl(int cpu)
16668c2ecf20Sopenharmony_ci{
16678c2ecf20Sopenharmony_ci}
16688c2ecf20Sopenharmony_cistatic inline void unregister_sched_domain_sysctl(void)
16698c2ecf20Sopenharmony_ci{
16708c2ecf20Sopenharmony_ci}
16718c2ecf20Sopenharmony_ci#endif
16728c2ecf20Sopenharmony_ci
16738c2ecf20Sopenharmony_ciextern void flush_smp_call_function_from_idle(void);
16748c2ecf20Sopenharmony_ci
16758c2ecf20Sopenharmony_ci#else /* !CONFIG_SMP: */
16768c2ecf20Sopenharmony_cistatic inline void flush_smp_call_function_from_idle(void) { }
16778c2ecf20Sopenharmony_ci#endif
16788c2ecf20Sopenharmony_ci
16798c2ecf20Sopenharmony_ci#include "stats.h"
16808c2ecf20Sopenharmony_ci#include "autogroup.h"
16818c2ecf20Sopenharmony_ci
16828c2ecf20Sopenharmony_ci#ifdef CONFIG_CGROUP_SCHED
16838c2ecf20Sopenharmony_ci
16848c2ecf20Sopenharmony_ci/*
16858c2ecf20Sopenharmony_ci * Return the group to which this tasks belongs.
16868c2ecf20Sopenharmony_ci *
16878c2ecf20Sopenharmony_ci * We cannot use task_css() and friends because the cgroup subsystem
16888c2ecf20Sopenharmony_ci * changes that value before the cgroup_subsys::attach() method is called,
16898c2ecf20Sopenharmony_ci * therefore we cannot pin it and might observe the wrong value.
16908c2ecf20Sopenharmony_ci *
16918c2ecf20Sopenharmony_ci * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
16928c2ecf20Sopenharmony_ci * core changes this before calling sched_move_task().
16938c2ecf20Sopenharmony_ci *
16948c2ecf20Sopenharmony_ci * Instead we use a 'copy' which is updated from sched_move_task() while
16958c2ecf20Sopenharmony_ci * holding both task_struct::pi_lock and rq::lock.
16968c2ecf20Sopenharmony_ci */
16978c2ecf20Sopenharmony_cistatic inline struct task_group *task_group(struct task_struct *p)
16988c2ecf20Sopenharmony_ci{
16998c2ecf20Sopenharmony_ci	return p->sched_task_group;
17008c2ecf20Sopenharmony_ci}
17018c2ecf20Sopenharmony_ci
17028c2ecf20Sopenharmony_ci/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
17038c2ecf20Sopenharmony_cistatic inline void set_task_rq(struct task_struct *p, unsigned int cpu)
17048c2ecf20Sopenharmony_ci{
17058c2ecf20Sopenharmony_ci#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
17068c2ecf20Sopenharmony_ci	struct task_group *tg = task_group(p);
17078c2ecf20Sopenharmony_ci#endif
17088c2ecf20Sopenharmony_ci
17098c2ecf20Sopenharmony_ci#ifdef CONFIG_FAIR_GROUP_SCHED
17108c2ecf20Sopenharmony_ci	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
17118c2ecf20Sopenharmony_ci	p->se.cfs_rq = tg->cfs_rq[cpu];
17128c2ecf20Sopenharmony_ci	p->se.parent = tg->se[cpu];
17138c2ecf20Sopenharmony_ci#endif
17148c2ecf20Sopenharmony_ci
17158c2ecf20Sopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
17168c2ecf20Sopenharmony_ci	p->rt.rt_rq  = tg->rt_rq[cpu];
17178c2ecf20Sopenharmony_ci	p->rt.parent = tg->rt_se[cpu];
17188c2ecf20Sopenharmony_ci#endif
17198c2ecf20Sopenharmony_ci}
17208c2ecf20Sopenharmony_ci
17218c2ecf20Sopenharmony_ci#else /* CONFIG_CGROUP_SCHED */
17228c2ecf20Sopenharmony_ci
17238c2ecf20Sopenharmony_cistatic inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
17248c2ecf20Sopenharmony_cistatic inline struct task_group *task_group(struct task_struct *p)
17258c2ecf20Sopenharmony_ci{
17268c2ecf20Sopenharmony_ci	return NULL;
17278c2ecf20Sopenharmony_ci}
17288c2ecf20Sopenharmony_ci
17298c2ecf20Sopenharmony_ci#endif /* CONFIG_CGROUP_SCHED */
17308c2ecf20Sopenharmony_ci
17318c2ecf20Sopenharmony_cistatic inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
17328c2ecf20Sopenharmony_ci{
17338c2ecf20Sopenharmony_ci	set_task_rq(p, cpu);
17348c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
17358c2ecf20Sopenharmony_ci	/*
17368c2ecf20Sopenharmony_ci	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
17378c2ecf20Sopenharmony_ci	 * successfully executed on another CPU. We must ensure that updates of
17388c2ecf20Sopenharmony_ci	 * per-task data have been completed by this moment.
17398c2ecf20Sopenharmony_ci	 */
17408c2ecf20Sopenharmony_ci	smp_wmb();
17418c2ecf20Sopenharmony_ci#ifdef CONFIG_THREAD_INFO_IN_TASK
17428c2ecf20Sopenharmony_ci	WRITE_ONCE(p->cpu, cpu);
17438c2ecf20Sopenharmony_ci#else
17448c2ecf20Sopenharmony_ci	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
17458c2ecf20Sopenharmony_ci#endif
17468c2ecf20Sopenharmony_ci	p->wake_cpu = cpu;
17478c2ecf20Sopenharmony_ci#endif
17488c2ecf20Sopenharmony_ci}
17498c2ecf20Sopenharmony_ci
17508c2ecf20Sopenharmony_ci/*
17518c2ecf20Sopenharmony_ci * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
17528c2ecf20Sopenharmony_ci */
17538c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
17548c2ecf20Sopenharmony_ci# include <linux/static_key.h>
17558c2ecf20Sopenharmony_ci# define const_debug __read_mostly
17568c2ecf20Sopenharmony_ci#else
17578c2ecf20Sopenharmony_ci# define const_debug const
17588c2ecf20Sopenharmony_ci#endif
17598c2ecf20Sopenharmony_ci
17608c2ecf20Sopenharmony_ci#define SCHED_FEAT(name, enabled)	\
17618c2ecf20Sopenharmony_ci	__SCHED_FEAT_##name ,
17628c2ecf20Sopenharmony_ci
17638c2ecf20Sopenharmony_cienum {
17648c2ecf20Sopenharmony_ci#include "features.h"
17658c2ecf20Sopenharmony_ci	__SCHED_FEAT_NR,
17668c2ecf20Sopenharmony_ci};
17678c2ecf20Sopenharmony_ci
17688c2ecf20Sopenharmony_ci#undef SCHED_FEAT
17698c2ecf20Sopenharmony_ci
17708c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
17718c2ecf20Sopenharmony_ci
17728c2ecf20Sopenharmony_ci/*
17738c2ecf20Sopenharmony_ci * To support run-time toggling of sched features, all the translation units
17748c2ecf20Sopenharmony_ci * (but core.c) reference the sysctl_sched_features defined in core.c.
17758c2ecf20Sopenharmony_ci */
17768c2ecf20Sopenharmony_ciextern const_debug unsigned int sysctl_sched_features;
17778c2ecf20Sopenharmony_ci
17788c2ecf20Sopenharmony_ci#ifdef CONFIG_JUMP_LABEL
17798c2ecf20Sopenharmony_ci#define SCHED_FEAT(name, enabled)					\
17808c2ecf20Sopenharmony_cistatic __always_inline bool static_branch_##name(struct static_key *key) \
17818c2ecf20Sopenharmony_ci{									\
17828c2ecf20Sopenharmony_ci	return static_key_##enabled(key);				\
17838c2ecf20Sopenharmony_ci}
17848c2ecf20Sopenharmony_ci
17858c2ecf20Sopenharmony_ci#include "features.h"
17868c2ecf20Sopenharmony_ci#undef SCHED_FEAT
17878c2ecf20Sopenharmony_ci
17888c2ecf20Sopenharmony_ciextern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
17898c2ecf20Sopenharmony_ci#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
17908c2ecf20Sopenharmony_ci
17918c2ecf20Sopenharmony_ci#else /* !CONFIG_JUMP_LABEL */
17928c2ecf20Sopenharmony_ci
17938c2ecf20Sopenharmony_ci#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
17948c2ecf20Sopenharmony_ci
17958c2ecf20Sopenharmony_ci#endif /* CONFIG_JUMP_LABEL */
17968c2ecf20Sopenharmony_ci
17978c2ecf20Sopenharmony_ci#else /* !SCHED_DEBUG */
17988c2ecf20Sopenharmony_ci
17998c2ecf20Sopenharmony_ci/*
18008c2ecf20Sopenharmony_ci * Each translation unit has its own copy of sysctl_sched_features to allow
18018c2ecf20Sopenharmony_ci * constants propagation at compile time and compiler optimization based on
18028c2ecf20Sopenharmony_ci * features default.
18038c2ecf20Sopenharmony_ci */
18048c2ecf20Sopenharmony_ci#define SCHED_FEAT(name, enabled)	\
18058c2ecf20Sopenharmony_ci	(1UL << __SCHED_FEAT_##name) * enabled |
18068c2ecf20Sopenharmony_cistatic const_debug __maybe_unused unsigned int sysctl_sched_features =
18078c2ecf20Sopenharmony_ci#include "features.h"
18088c2ecf20Sopenharmony_ci	0;
18098c2ecf20Sopenharmony_ci#undef SCHED_FEAT
18108c2ecf20Sopenharmony_ci
18118c2ecf20Sopenharmony_ci#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
18128c2ecf20Sopenharmony_ci
18138c2ecf20Sopenharmony_ci#endif /* SCHED_DEBUG */
18148c2ecf20Sopenharmony_ci
18158c2ecf20Sopenharmony_ciextern struct static_key_false sched_numa_balancing;
18168c2ecf20Sopenharmony_ciextern struct static_key_false sched_schedstats;
18178c2ecf20Sopenharmony_ci
18188c2ecf20Sopenharmony_cistatic inline u64 global_rt_period(void)
18198c2ecf20Sopenharmony_ci{
18208c2ecf20Sopenharmony_ci	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
18218c2ecf20Sopenharmony_ci}
18228c2ecf20Sopenharmony_ci
18238c2ecf20Sopenharmony_cistatic inline u64 global_rt_runtime(void)
18248c2ecf20Sopenharmony_ci{
18258c2ecf20Sopenharmony_ci	if (sysctl_sched_rt_runtime < 0)
18268c2ecf20Sopenharmony_ci		return RUNTIME_INF;
18278c2ecf20Sopenharmony_ci
18288c2ecf20Sopenharmony_ci	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
18298c2ecf20Sopenharmony_ci}
18308c2ecf20Sopenharmony_ci
/* Is @p the task currently installed as @rq->curr? */
static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}
18358c2ecf20Sopenharmony_ci
/*
 * Is @p still running on a CPU?
 *
 * On SMP this checks p->on_cpu, which stays set until the task has fully
 * context-switched off its CPU; on UP being rq->curr is the same thing.
 */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}
18448c2ecf20Sopenharmony_ci
18458c2ecf20Sopenharmony_cistatic inline int task_on_rq_queued(struct task_struct *p)
18468c2ecf20Sopenharmony_ci{
18478c2ecf20Sopenharmony_ci	return p->on_rq == TASK_ON_RQ_QUEUED;
18488c2ecf20Sopenharmony_ci}
18498c2ecf20Sopenharmony_ci
/*
 * Is @p currently mid-migration between runqueues?  READ_ONCE() because
 * p->on_rq may be updated concurrently by the migration path.
 */
static inline int task_on_rq_migrating(struct task_struct *p)
{
	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}
18548c2ecf20Sopenharmony_ci
18558c2ecf20Sopenharmony_ci/*
18568c2ecf20Sopenharmony_ci * wake flags
18578c2ecf20Sopenharmony_ci */
18588c2ecf20Sopenharmony_ci#define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
18598c2ecf20Sopenharmony_ci#define WF_FORK			0x02		/* Child wakeup after fork */
18608c2ecf20Sopenharmony_ci#define WF_MIGRATED		0x04		/* Internal use, task got migrated */
18618c2ecf20Sopenharmony_ci#define WF_ON_CPU		0x08		/* Wakee is on_cpu */
18628c2ecf20Sopenharmony_ci
18638c2ecf20Sopenharmony_ci/*
18648c2ecf20Sopenharmony_ci * To aid in avoiding the subversion of "niceness" due to uneven distribution
18658c2ecf20Sopenharmony_ci * of tasks with abnormal "nice" values across CPUs the contribution that
18668c2ecf20Sopenharmony_ci * each task makes to its run queue's load is weighted according to its
18678c2ecf20Sopenharmony_ci * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
18688c2ecf20Sopenharmony_ci * scaled version of the new time slice allocation that they receive on time
18698c2ecf20Sopenharmony_ci * slice expiry etc.
18708c2ecf20Sopenharmony_ci */
18718c2ecf20Sopenharmony_ci
18728c2ecf20Sopenharmony_ci#define WEIGHT_IDLEPRIO		3
18738c2ecf20Sopenharmony_ci#define WMULT_IDLEPRIO		1431655765
18748c2ecf20Sopenharmony_ci
18758c2ecf20Sopenharmony_ciextern const int		sched_prio_to_weight[40];
18768c2ecf20Sopenharmony_ciextern const u32		sched_prio_to_wmult[40];
18778c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_LATENCY_NICE
18788c2ecf20Sopenharmony_ciextern const int		sched_latency_to_weight[40];
18798c2ecf20Sopenharmony_ci#endif
18808c2ecf20Sopenharmony_ci
18818c2ecf20Sopenharmony_ci/*
18828c2ecf20Sopenharmony_ci * {de,en}queue flags:
18838c2ecf20Sopenharmony_ci *
18848c2ecf20Sopenharmony_ci * DEQUEUE_SLEEP  - task is no longer runnable
18858c2ecf20Sopenharmony_ci * ENQUEUE_WAKEUP - task just became runnable
18868c2ecf20Sopenharmony_ci *
18878c2ecf20Sopenharmony_ci * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
18888c2ecf20Sopenharmony_ci *                are in a known state which allows modification. Such pairs
18898c2ecf20Sopenharmony_ci *                should preserve as much state as possible.
18908c2ecf20Sopenharmony_ci *
18918c2ecf20Sopenharmony_ci * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
18928c2ecf20Sopenharmony_ci *        in the runqueue.
18938c2ecf20Sopenharmony_ci *
18948c2ecf20Sopenharmony_ci * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
18958c2ecf20Sopenharmony_ci * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
18968c2ecf20Sopenharmony_ci * ENQUEUE_MIGRATED  - the task was migrated during wakeup
18978c2ecf20Sopenharmony_ci *
18988c2ecf20Sopenharmony_ci */
18998c2ecf20Sopenharmony_ci
#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01 /* Matches DEQUEUE_SLEEP */
#define ENQUEUE_RESTORE		0x02 /* Matches DEQUEUE_SAVE */
#define ENQUEUE_MOVE		0x04 /* Matches DEQUEUE_MOVE */
#define ENQUEUE_NOCLOCK		0x08 /* Matches DEQUEUE_NOCLOCK */

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00 /* No migration possible on UP */
#endif
19178c2ecf20Sopenharmony_ci
19188c2ecf20Sopenharmony_ci#define RETRY_TASK		((void *)-1UL)
19198c2ecf20Sopenharmony_ci
/*
 * Per-scheduling-class method table.  Instances (stop, dl, rt, fair, idle)
 * are placed by the linker into a priority-ordered array; see the
 * __begin_sched_classes/__end_sched_classes declarations below.
 */
struct sched_class {

#ifdef CONFIG_UCLAMP_TASK
	/* Non-zero when this class participates in utilization clamping */
	int uclamp_enabled;
#endif

	/* Add/remove @p to/from the runqueue; flags are ENQUEUE_*/DEQUEUE_* */
	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);

	/* Should the newly-woken @p preempt rq->curr? */
	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	/* Select the next task of this class to run on @rq */
	struct task_struct *(*pick_next_task)(struct rq *rq);

	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	void (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	/* Called when @p is moved to @new_cpu (migration accounting) */
	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	/* Periodic (or hrtick) tick hook; @queued is non-zero for hrtick */
	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	/* Update current task's runtime statistics */
	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP		0
#define TASK_MOVE_GROUP		1

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* @type is TASK_SET_GROUP or TASK_MOVE_GROUP */
	void (*task_change_group)(struct task_struct *p, int type);
#endif
#ifdef CONFIG_SCHED_WALT
	void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p,
					u16 updated_demand_scaled);
#endif
#ifdef CONFIG_SCHED_EAS
	void  (*check_for_migration)(struct rq *rq, struct task_struct *p);
#endif
} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
19858c2ecf20Sopenharmony_ci
/*
 * Hand @prev back to its class before another task is picked.
 * Must be called for the task that is currently rq->curr.
 */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	WARN_ON_ONCE(rq->curr != prev);
	prev->sched_class->put_prev_task(rq, prev);
}
19918c2ecf20Sopenharmony_ci
/*
 * Tell @next's class it is becoming the running task.  'first' is false:
 * this path is not the first pick after a class change.
 */
static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
	WARN_ON_ONCE(rq->curr != next);
	next->sched_class->set_next_task(rq, next, false);
}
19978c2ecf20Sopenharmony_ci
/* Defined in include/asm-generic/vmlinux.lds.h */
extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];

/* Highest-priority class is the last element of the linker-ordered array */
#define sched_class_highest (__end_sched_classes - 1)
/* One before the first element: exclusive lower bound for iteration */
#define sched_class_lowest  (__begin_sched_classes - 1)

/* Walk classes from _from down to, but excluding, _to */
#define for_class_range(class, _from, _to) \
	for (class = (_from); class != (_to); class--)

/* Walk all scheduling classes, highest priority first */
#define for_each_class(class) \
	for_class_range(class, sched_class_highest, sched_class_lowest)
20108c2ecf20Sopenharmony_ci
20118c2ecf20Sopenharmony_ciextern const struct sched_class stop_sched_class;
20128c2ecf20Sopenharmony_ciextern const struct sched_class dl_sched_class;
20138c2ecf20Sopenharmony_ciextern const struct sched_class rt_sched_class;
20148c2ecf20Sopenharmony_ciextern const struct sched_class fair_sched_class;
20158c2ecf20Sopenharmony_ciextern const struct sched_class idle_sched_class;
20168c2ecf20Sopenharmony_ci
/* Does @rq have its per-CPU stopper task queued and runnable? */
static inline bool sched_stop_runnable(struct rq *rq)
{
	return rq->stop && task_on_rq_queued(rq->stop);
}
20218c2ecf20Sopenharmony_ci
/* Any SCHED_DEADLINE tasks runnable on @rq? */
static inline bool sched_dl_runnable(struct rq *rq)
{
	return rq->dl.dl_nr_running > 0;
}
20268c2ecf20Sopenharmony_ci
/* Any SCHED_FIFO/SCHED_RR tasks runnable on @rq? */
static inline bool sched_rt_runnable(struct rq *rq)
{
	return rq->rt.rt_queued > 0;
}
20318c2ecf20Sopenharmony_ci
/* Any CFS tasks runnable on @rq? */
static inline bool sched_fair_runnable(struct rq *rq)
{
	return rq->cfs.nr_running > 0;
}
20368c2ecf20Sopenharmony_ci
20378c2ecf20Sopenharmony_ciextern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
20388c2ecf20Sopenharmony_ciextern struct task_struct *pick_next_task_idle(struct rq *rq);
20398c2ecf20Sopenharmony_ci
20408c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
20418c2ecf20Sopenharmony_ci
20428c2ecf20Sopenharmony_ciextern void update_group_capacity(struct sched_domain *sd, int cpu);
20438c2ecf20Sopenharmony_ci
20448c2ecf20Sopenharmony_ciextern void trigger_load_balance(struct rq *rq);
20458c2ecf20Sopenharmony_ci
20468c2ecf20Sopenharmony_ciextern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
20478c2ecf20Sopenharmony_ci
20488c2ecf20Sopenharmony_ci#endif
20498c2ecf20Sopenharmony_ci
#ifdef CONFIG_CPU_IDLE
/* Record the cpuidle state @rq's CPU is entering (NULL when exiting) */
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

/*
 * Read the cpuidle state of @rq's CPU.  Callers must be in an RCU read
 * section, since the state may be cleared concurrently on wakeup.
 */
static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
/* !CONFIG_CPU_IDLE: no idle-state bookkeeping */
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif
20748c2ecf20Sopenharmony_ci
20758c2ecf20Sopenharmony_ciextern void schedule_idle(void);
20768c2ecf20Sopenharmony_ci
20778c2ecf20Sopenharmony_ciextern void sysrq_sched_debug_show(void);
20788c2ecf20Sopenharmony_ciextern void sched_init_granularity(void);
20798c2ecf20Sopenharmony_ciextern void update_max_interval(void);
20808c2ecf20Sopenharmony_ci
20818c2ecf20Sopenharmony_ciextern void init_sched_dl_class(void);
20828c2ecf20Sopenharmony_ciextern void init_sched_rt_class(void);
20838c2ecf20Sopenharmony_ciextern void init_sched_fair_class(void);
20848c2ecf20Sopenharmony_ci
20858c2ecf20Sopenharmony_ciextern void reweight_task(struct task_struct *p, int prio);
20868c2ecf20Sopenharmony_ci
20878c2ecf20Sopenharmony_ciextern void resched_curr(struct rq *rq);
20888c2ecf20Sopenharmony_ciextern void resched_cpu(int cpu);
20898c2ecf20Sopenharmony_ci
20908c2ecf20Sopenharmony_ciextern struct rt_bandwidth def_rt_bandwidth;
20918c2ecf20Sopenharmony_ciextern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
20928c2ecf20Sopenharmony_ci
20938c2ecf20Sopenharmony_ciextern struct dl_bandwidth def_dl_bandwidth;
20948c2ecf20Sopenharmony_ciextern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
20958c2ecf20Sopenharmony_ciextern void init_dl_task_timer(struct sched_dl_entity *dl_se);
20968c2ecf20Sopenharmony_ciextern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
20978c2ecf20Sopenharmony_ci
20988c2ecf20Sopenharmony_ci#define BW_SHIFT		20
20998c2ecf20Sopenharmony_ci#define BW_UNIT			(1 << BW_SHIFT)
21008c2ecf20Sopenharmony_ci#define RATIO_SHIFT		8
21018c2ecf20Sopenharmony_ci#define MAX_BW_BITS		(64 - BW_SHIFT)
21028c2ecf20Sopenharmony_ci#define MAX_BW			((1ULL << MAX_BW_BITS) - 1)
21038c2ecf20Sopenharmony_ciunsigned long to_ratio(u64 period, u64 runtime);
21048c2ecf20Sopenharmony_ci
21058c2ecf20Sopenharmony_ciextern void init_entity_runnable_average(struct sched_entity *se);
21068c2ecf20Sopenharmony_ciextern void post_init_entity_util_avg(struct task_struct *p);
21078c2ecf20Sopenharmony_ci
#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, lets send the target an IPI to kick it out of
 * nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu = cpu_of(rq);

	/* Only nohz_full CPUs ever stop their tick */
	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
/* !CONFIG_NO_HZ_FULL: the tick never stops, nothing to track */
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif
21338c2ecf20Sopenharmony_ci
/*
 * Account @count newly-runnable tasks on @rq and update derived state:
 * the tracepoint, the root-domain overload flag and the nohz tick
 * dependency.
 */
static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, count);
	}

#ifdef CONFIG_SMP
	/* Crossing the 1 -> 2+ boundary makes this rq a load-balance source */
	if (prev_nr < 2 && rq->nr_running >= 2) {
		if (!READ_ONCE(rq->rd->overload))
			WRITE_ONCE(rq->rd->overload, 1);
	}
#endif

	sched_update_tick_dependency(rq);
}
21528c2ecf20Sopenharmony_ci
/*
 * Account @count tasks leaving @rq's runnable set; the overload flag is
 * deliberately left to the load balancer to clear.
 */
static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, -count);
	}

	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}
21638c2ecf20Sopenharmony_ci
21648c2ecf20Sopenharmony_ciextern void activate_task(struct rq *rq, struct task_struct *p, int flags);
21658c2ecf20Sopenharmony_ciextern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
21668c2ecf20Sopenharmony_ci
21678c2ecf20Sopenharmony_ciextern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
21688c2ecf20Sopenharmony_ci
21698c2ecf20Sopenharmony_ciextern const_debug unsigned int sysctl_sched_nr_migrate;
21708c2ecf20Sopenharmony_ciextern const_debug unsigned int sysctl_sched_migration_cost;
21718c2ecf20Sopenharmony_ci
#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	/* An offline/offlining CPU must not arm new timers */
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

/* Arm @rq's hrtick timer to fire in @delay nanoseconds */
void hrtick_start(struct rq *rq, u64 delay);

#else

/* !CONFIG_SCHED_HRTICK: hrtick never available */
static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */
21988c2ecf20Sopenharmony_ci
#ifdef CONFIG_SCHED_WALT
/* WALT provides its own clock source; defined in the WALT code */
u64 sched_ktime_clock(void);
#else
/* Without WALT, fall back to the regular scheduler clock */
static inline u64 sched_ktime_clock(void)
{
	return sched_clock();
}
#endif
22078c2ecf20Sopenharmony_ci
#ifndef arch_scale_freq_tick
/*
 * Default no-op: architectures that track frequency invariance per tick
 * override this.
 */
static __always_inline
void arch_scale_freq_tick(void)
{
}
#endif
22148c2ecf20Sopenharmony_ci
#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *     f_curr
 *     ------ * SCHED_CAPACITY_SCALE
 *     f_max
 */
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	/* Default: assume the CPU always runs at maximum frequency */
	return SCHED_CAPACITY_SCALE;
}
#endif
22328c2ecf20Sopenharmony_ci
22338c2ecf20Sopenharmony_ciunsigned long capacity_curr_of(int cpu);
22348c2ecf20Sopenharmony_ciunsigned long cpu_util(int cpu);
22358c2ecf20Sopenharmony_ci
22368c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
22378c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
22388c2ecf20Sopenharmony_ciextern unsigned int sysctl_sched_use_walt_cpu_util;
22398c2ecf20Sopenharmony_ciextern unsigned int walt_disabled;
22408c2ecf20Sopenharmony_ci#endif
22418c2ecf20Sopenharmony_ci#ifdef CONFIG_PREEMPTION
22428c2ecf20Sopenharmony_ci
22438c2ecf20Sopenharmony_cistatic inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
22448c2ecf20Sopenharmony_ci
22458c2ecf20Sopenharmony_ci/*
22468c2ecf20Sopenharmony_ci * fair double_lock_balance: Safely acquires both rq->locks in a fair
22478c2ecf20Sopenharmony_ci * way at the expense of forcing extra atomic operations in all
22488c2ecf20Sopenharmony_ci * invocations.  This assures that the double_lock is acquired using the
22498c2ecf20Sopenharmony_ci * same underlying policy as the spinlock_t on this architecture, which
22508c2ecf20Sopenharmony_ci * reduces latency compared to the unfair variant below.  However, it
22518c2ecf20Sopenharmony_ci * also adds more overhead and therefore may reduce throughput.
22528c2ecf20Sopenharmony_ci */
/*
 * PREEMPTION variant: always drop this_rq->lock and retake both via
 * double_rq_lock() in canonical order.  Returns 1 to signal that
 * this_rq->lock was released (callers may need to revalidate state).
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}
22638c2ecf20Sopenharmony_ci
22648c2ecf20Sopenharmony_ci#else
22658c2ecf20Sopenharmony_ci/*
22668c2ecf20Sopenharmony_ci * Unfair double_lock_balance: Optimizes throughput at the expense of
22678c2ecf20Sopenharmony_ci * latency by eliminating extra atomic operations when the locks are
22688c2ecf20Sopenharmony_ci * already in proper order on entry.  This favors lower CPU-ids and will
22698c2ecf20Sopenharmony_ci * grant the double lock to lower CPUs over higher ids under contention,
22708c2ecf20Sopenharmony_ci * regardless of entry order into the function.
22718c2ecf20Sopenharmony_ci */
/*
 * Returns 1 iff this_rq->lock was dropped and reacquired (so callers must
 * revalidate any state read under it); 0 when busiest was taken without
 * releasing this_rq->lock.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	/* Fast path: try to grab busiest without dropping anything */
	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			/* Wrong order: release and retake by ascending address */
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			/* Already in canonical order: safe to block on busiest */
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}
22928c2ecf20Sopenharmony_ci
22938c2ecf20Sopenharmony_ci#endif /* CONFIG_PREEMPTION */
22948c2ecf20Sopenharmony_ci
22958c2ecf20Sopenharmony_ci/*
22968c2ecf20Sopenharmony_ci * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
22978c2ecf20Sopenharmony_ci */
/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 *
 * Returns non-zero if this_rq->lock was dropped in the process.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	/* rq locks must be taken with IRQs disabled */
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}
23088c2ecf20Sopenharmony_ci
/*
 * Release busiest->lock taken by double_lock_balance(); this_rq->lock stays
 * held.  The lockdep subclass is reset since this_rq is the sole lock again.
 */
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
23158c2ecf20Sopenharmony_ci
23168c2ecf20Sopenharmony_cistatic inline void double_lock(spinlock_t *l1, spinlock_t *l2)
23178c2ecf20Sopenharmony_ci{
23188c2ecf20Sopenharmony_ci	if (l1 > l2)
23198c2ecf20Sopenharmony_ci		swap(l1, l2);
23208c2ecf20Sopenharmony_ci
23218c2ecf20Sopenharmony_ci	spin_lock(l1);
23228c2ecf20Sopenharmony_ci	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
23238c2ecf20Sopenharmony_ci}
23248c2ecf20Sopenharmony_ci
/*
 * Like double_lock() but disables IRQs with the first acquisition;
 * address order prevents AB-BA deadlock.
 */
static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
23338c2ecf20Sopenharmony_ci
/* raw_spinlock_t variant of double_lock(): lock the lower address first */
static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
23428c2ecf20Sopenharmony_ci
23438c2ecf20Sopenharmony_ci/*
23448c2ecf20Sopenharmony_ci * double_rq_lock - safely lock two runqueues
23458c2ecf20Sopenharmony_ci *
23468c2ecf20Sopenharmony_ci * Note this does not disable interrupts like task_rq_lock,
23478c2ecf20Sopenharmony_ci * you need to do so manually before calling.
23488c2ecf20Sopenharmony_ci */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		/* Same rq: take the single lock once */
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		/* Lock by ascending address to avoid AB-BA deadlock */
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}
23678c2ecf20Sopenharmony_ci
23688c2ecf20Sopenharmony_ci/*
23698c2ecf20Sopenharmony_ci * double_rq_unlock - safely unlock two runqueues
23708c2ecf20Sopenharmony_ci *
23718c2ecf20Sopenharmony_ci * Note this does not restore interrupts like task_rq_unlock,
23728c2ecf20Sopenharmony_ci * you need to do so manually after calling.
23738c2ecf20Sopenharmony_ci */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		/* Same rq: balance the fake __acquire() from double_rq_lock() */
		__release(rq2->lock);
}
23848c2ecf20Sopenharmony_ci
23858c2ecf20Sopenharmony_ciextern void set_rq_online (struct rq *rq);
23868c2ecf20Sopenharmony_ciextern void set_rq_offline(struct rq *rq);
23878c2ecf20Sopenharmony_ciextern bool sched_smp_initialized;
23888c2ecf20Sopenharmony_ci
23898c2ecf20Sopenharmony_ci#else /* CONFIG_SMP */
23908c2ecf20Sopenharmony_ci
23918c2ecf20Sopenharmony_ci/*
23928c2ecf20Sopenharmony_ci * double_rq_lock - safely lock two runqueues
23938c2ecf20Sopenharmony_ci *
23948c2ecf20Sopenharmony_ci * Note this does not disable interrupts like task_rq_lock,
23958c2ecf20Sopenharmony_ci * you need to do so manually before calling.
23968c2ecf20Sopenharmony_ci */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	/* UP: there is only one runqueue, both arguments must be it */
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}
24068c2ecf20Sopenharmony_ci
24078c2ecf20Sopenharmony_ci/*
24088c2ecf20Sopenharmony_ci * double_rq_unlock - safely unlock two runqueues
24098c2ecf20Sopenharmony_ci *
24108c2ecf20Sopenharmony_ci * Note this does not restore interrupts like task_rq_unlock,
24118c2ecf20Sopenharmony_ci * you need to do so manually after calling.
24128c2ecf20Sopenharmony_ci */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	/* UP: there is only one runqueue, both arguments must be it */
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}
24218c2ecf20Sopenharmony_ci
24228c2ecf20Sopenharmony_ci#endif
24238c2ecf20Sopenharmony_ci
24248c2ecf20Sopenharmony_ciextern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
24258c2ecf20Sopenharmony_ciextern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
24268c2ecf20Sopenharmony_ci
24278c2ecf20Sopenharmony_ci#ifdef	CONFIG_SCHED_DEBUG
24288c2ecf20Sopenharmony_ciextern bool sched_debug_enabled;
24298c2ecf20Sopenharmony_ci
24308c2ecf20Sopenharmony_ciextern void print_cfs_stats(struct seq_file *m, int cpu);
24318c2ecf20Sopenharmony_ciextern void print_rt_stats(struct seq_file *m, int cpu);
24328c2ecf20Sopenharmony_ciextern void print_dl_stats(struct seq_file *m, int cpu);
24338c2ecf20Sopenharmony_ciextern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
24348c2ecf20Sopenharmony_ciextern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
24358c2ecf20Sopenharmony_ciextern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
24368c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA_BALANCING
24378c2ecf20Sopenharmony_ciextern void
24388c2ecf20Sopenharmony_cishow_numa_stats(struct task_struct *p, struct seq_file *m);
24398c2ecf20Sopenharmony_ciextern void
24408c2ecf20Sopenharmony_ciprint_numa_stats(struct seq_file *m, int node, unsigned long tsf,
24418c2ecf20Sopenharmony_ci	unsigned long tpf, unsigned long gsf, unsigned long gpf);
24428c2ecf20Sopenharmony_ci#endif /* CONFIG_NUMA_BALANCING */
24438c2ecf20Sopenharmony_ci#endif /* CONFIG_SCHED_DEBUG */
24448c2ecf20Sopenharmony_ci
24458c2ecf20Sopenharmony_ciextern void init_cfs_rq(struct cfs_rq *cfs_rq);
24468c2ecf20Sopenharmony_ciextern void init_rt_rq(struct rt_rq *rt_rq);
24478c2ecf20Sopenharmony_ciextern void init_dl_rq(struct dl_rq *dl_rq);
24488c2ecf20Sopenharmony_ci
24498c2ecf20Sopenharmony_ciextern void cfs_bandwidth_usage_inc(void);
24508c2ecf20Sopenharmony_ciextern void cfs_bandwidth_usage_dec(void);
24518c2ecf20Sopenharmony_ci
24528c2ecf20Sopenharmony_ci#ifdef CONFIG_NO_HZ_COMMON
24538c2ecf20Sopenharmony_ci#define NOHZ_BALANCE_KICK_BIT	0
24548c2ecf20Sopenharmony_ci#define NOHZ_STATS_KICK_BIT	1
24558c2ecf20Sopenharmony_ci
24568c2ecf20Sopenharmony_ci#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
24578c2ecf20Sopenharmony_ci#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
24588c2ecf20Sopenharmony_ci
24598c2ecf20Sopenharmony_ci#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
24608c2ecf20Sopenharmony_ci
24618c2ecf20Sopenharmony_ci#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
24628c2ecf20Sopenharmony_ci
24638c2ecf20Sopenharmony_ciextern void nohz_balance_exit_idle(struct rq *rq);
24648c2ecf20Sopenharmony_ci#else
24658c2ecf20Sopenharmony_cistatic inline void nohz_balance_exit_idle(struct rq *rq) { }
24668c2ecf20Sopenharmony_ci#endif
24678c2ecf20Sopenharmony_ci
24688c2ecf20Sopenharmony_ci
24698c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
24708c2ecf20Sopenharmony_cistatic inline
24718c2ecf20Sopenharmony_civoid __dl_update(struct dl_bw *dl_b, s64 bw)
24728c2ecf20Sopenharmony_ci{
24738c2ecf20Sopenharmony_ci	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
24748c2ecf20Sopenharmony_ci	int i;
24758c2ecf20Sopenharmony_ci
24768c2ecf20Sopenharmony_ci	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
24778c2ecf20Sopenharmony_ci			 "sched RCU must be held");
24788c2ecf20Sopenharmony_ci	for_each_cpu_and(i, rd->span, cpu_active_mask) {
24798c2ecf20Sopenharmony_ci		struct rq *rq = cpu_rq(i);
24808c2ecf20Sopenharmony_ci
24818c2ecf20Sopenharmony_ci		rq->dl.extra_bw += bw;
24828c2ecf20Sopenharmony_ci	}
24838c2ecf20Sopenharmony_ci}
24848c2ecf20Sopenharmony_ci#else
24858c2ecf20Sopenharmony_cistatic inline
24868c2ecf20Sopenharmony_civoid __dl_update(struct dl_bw *dl_b, s64 bw)
24878c2ecf20Sopenharmony_ci{
24888c2ecf20Sopenharmony_ci	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
24898c2ecf20Sopenharmony_ci
24908c2ecf20Sopenharmony_ci	dl->extra_bw += bw;
24918c2ecf20Sopenharmony_ci}
24928c2ecf20Sopenharmony_ci#endif
24938c2ecf20Sopenharmony_ci
24948c2ecf20Sopenharmony_ci
24958c2ecf20Sopenharmony_ci#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/* Per-CPU IRQ time accounting state (CONFIG_IRQ_TIME_ACCOUNTING). */
struct irqtime {
	u64			total;		/* accumulated IRQ time, read via irq_time_read() */
	u64			tick_delta;	/* NOTE(review): presumably IRQ time accrued since last tick — confirm */
	u64			irq_start_time;	/* NOTE(review): presumably last accounting timestamp — confirm */
	struct u64_stats_sync	sync;		/* makes 64-bit reads of the above consistent */
};
25028c2ecf20Sopenharmony_ci
25038c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct irqtime, cpu_irqtime);
25048c2ecf20Sopenharmony_ci
/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and could never move forward.
 */
/* Snapshot @cpu's accumulated IRQ time using the u64_stats retry loop. */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	/* Retry until a consistent 64-bit value is observed (32-bit safe). */
	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
25238c2ecf20Sopenharmony_ci#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
25248c2ecf20Sopenharmony_ci
25258c2ecf20Sopenharmony_ci#ifdef CONFIG_CPU_FREQ
25268c2ecf20Sopenharmony_ciDECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
25278c2ecf20Sopenharmony_ci
25288c2ecf20Sopenharmony_ci/**
25298c2ecf20Sopenharmony_ci * cpufreq_update_util - Take a note about CPU utilization changes.
25308c2ecf20Sopenharmony_ci * @rq: Runqueue to carry out the update for.
25318c2ecf20Sopenharmony_ci * @flags: Update reason flags.
25328c2ecf20Sopenharmony_ci *
25338c2ecf20Sopenharmony_ci * This function is called by the scheduler on the CPU whose utilization is
25348c2ecf20Sopenharmony_ci * being updated.
25358c2ecf20Sopenharmony_ci *
25368c2ecf20Sopenharmony_ci * It can only be called from RCU-sched read-side critical sections.
25378c2ecf20Sopenharmony_ci *
25388c2ecf20Sopenharmony_ci * The way cpufreq is currently arranged requires it to evaluate the CPU
25398c2ecf20Sopenharmony_ci * performance state (frequency/voltage) on a regular basis to prevent it from
25408c2ecf20Sopenharmony_ci * being stuck in a completely inadequate performance level for too long.
25418c2ecf20Sopenharmony_ci * That is not guaranteed to happen if the updates are only triggered from CFS
25428c2ecf20Sopenharmony_ci * and DL, though, because they may not be coming in if only RT tasks are
25438c2ecf20Sopenharmony_ci * active all the time (or there are RT tasks only).
25448c2ecf20Sopenharmony_ci *
25458c2ecf20Sopenharmony_ci * As a workaround for that issue, this function is called periodically by the
25468c2ecf20Sopenharmony_ci * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
25478c2ecf20Sopenharmony_ci * but that really is a band-aid.  Going forward it should be replaced with
25488c2ecf20Sopenharmony_ci * solutions targeted more specifically at RT tasks.
25498c2ecf20Sopenharmony_ci */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;
	u64 clock;

#ifdef CONFIG_SCHED_WALT
	/* With WALT, only WALT-originated updates are forwarded to cpufreq. */
	if (!(flags & SCHED_CPUFREQ_WALT))
		return;

	clock = sched_ktime_clock();
#else
	clock = rq_clock(rq);
#endif
	/* Callback installed by the cpufreq governor, if any (RCU-sched). */
	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, clock, flags);
}
25688c2ecf20Sopenharmony_ci#else
/* CONFIG_CPU_FREQ=n: no governor to notify. */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
25708c2ecf20Sopenharmony_ci#endif /* CONFIG_CPU_FREQ */
25718c2ecf20Sopenharmony_ci
25728c2ecf20Sopenharmony_ci#ifdef CONFIG_UCLAMP_TASK
25738c2ecf20Sopenharmony_ciunsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
25748c2ecf20Sopenharmony_ci
/* Read the rq-wide effective clamp value for @clamp_id. */
static inline unsigned long uclamp_rq_get(struct rq *rq,
					  enum uclamp_id clamp_id)
{
	return READ_ONCE(rq->uclamp[clamp_id].value);
}

/* Publish a new rq-wide clamp value for @clamp_id. */
static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int value)
{
	WRITE_ONCE(rq->uclamp[clamp_id].value, value);
}

/* True if UCLAMP_FLAG_IDLE is set on the rq. */
static inline bool uclamp_rq_is_idle(struct rq *rq)
{
	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
}
25918c2ecf20Sopenharmony_ci
25928c2ecf20Sopenharmony_ci/**
25938c2ecf20Sopenharmony_ci * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
25948c2ecf20Sopenharmony_ci * @rq:		The rq to clamp against. Must not be NULL.
25958c2ecf20Sopenharmony_ci * @util:	The util value to clamp.
25968c2ecf20Sopenharmony_ci * @p:		The task to clamp against. Can be NULL if you want to clamp
25978c2ecf20Sopenharmony_ci *		against @rq only.
25988c2ecf20Sopenharmony_ci *
25998c2ecf20Sopenharmony_ci * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
26008c2ecf20Sopenharmony_ci *
26018c2ecf20Sopenharmony_ci * If sched_uclamp_used static key is disabled, then just return the util
26028c2ecf20Sopenharmony_ci * without any clamping since uclamp aggregation at the rq level in the fast
26038c2ecf20Sopenharmony_ci * path is disabled, rendering this operation a NOP.
26048c2ecf20Sopenharmony_ci *
26058c2ecf20Sopenharmony_ci * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
26068c2ecf20Sopenharmony_ci * will return the correct effective uclamp value of the task even if the
26078c2ecf20Sopenharmony_ci * static key is disabled.
26088c2ecf20Sopenharmony_ci */
26098c2ecf20Sopenharmony_cistatic __always_inline
26108c2ecf20Sopenharmony_ciunsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
26118c2ecf20Sopenharmony_ci				  struct task_struct *p)
26128c2ecf20Sopenharmony_ci{
26138c2ecf20Sopenharmony_ci	unsigned long min_util = 0;
26148c2ecf20Sopenharmony_ci	unsigned long max_util = 0;
26158c2ecf20Sopenharmony_ci
26168c2ecf20Sopenharmony_ci	if (!static_branch_likely(&sched_uclamp_used))
26178c2ecf20Sopenharmony_ci		return util;
26188c2ecf20Sopenharmony_ci
26198c2ecf20Sopenharmony_ci	if (p) {
26208c2ecf20Sopenharmony_ci		min_util = uclamp_eff_value(p, UCLAMP_MIN);
26218c2ecf20Sopenharmony_ci		max_util = uclamp_eff_value(p, UCLAMP_MAX);
26228c2ecf20Sopenharmony_ci
26238c2ecf20Sopenharmony_ci		/*
26248c2ecf20Sopenharmony_ci		 * Ignore last runnable task's max clamp, as this task will
26258c2ecf20Sopenharmony_ci		 * reset it. Similarly, no need to read the rq's min clamp.
26268c2ecf20Sopenharmony_ci		 */
26278c2ecf20Sopenharmony_ci		if (uclamp_rq_is_idle(rq))
26288c2ecf20Sopenharmony_ci			goto out;
26298c2ecf20Sopenharmony_ci	}
26308c2ecf20Sopenharmony_ci
26318c2ecf20Sopenharmony_ci	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
26328c2ecf20Sopenharmony_ci	max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
26338c2ecf20Sopenharmony_ciout:
26348c2ecf20Sopenharmony_ci	/*
26358c2ecf20Sopenharmony_ci	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
26368c2ecf20Sopenharmony_ci	 * RUNNABLE tasks with _different_ clamps, we can end up with an
26378c2ecf20Sopenharmony_ci	 * inversion. Fix it now when the clamps are applied.
26388c2ecf20Sopenharmony_ci	 */
26398c2ecf20Sopenharmony_ci	if (unlikely(min_util >= max_util))
26408c2ecf20Sopenharmony_ci		return min_util;
26418c2ecf20Sopenharmony_ci
26428c2ecf20Sopenharmony_ci	return clamp(util, min_util, max_util);
26438c2ecf20Sopenharmony_ci}
26448c2ecf20Sopenharmony_ci
26458c2ecf20Sopenharmony_cistatic inline bool uclamp_boosted(struct task_struct *p)
26468c2ecf20Sopenharmony_ci{
26478c2ecf20Sopenharmony_ci	return uclamp_eff_value(p, UCLAMP_MIN) > 0;
26488c2ecf20Sopenharmony_ci}
26498c2ecf20Sopenharmony_ci
26508c2ecf20Sopenharmony_ci/*
26518c2ecf20Sopenharmony_ci * When uclamp is compiled in, the aggregation at rq level is 'turned off'
26528c2ecf20Sopenharmony_ci * by default in the fast path and only gets turned on once userspace performs
26538c2ecf20Sopenharmony_ci * an operation that requires it.
26548c2ecf20Sopenharmony_ci *
26558c2ecf20Sopenharmony_ci * Returns true if userspace opted-in to use uclamp and aggregation at rq level
26568c2ecf20Sopenharmony_ci * hence is active.
26578c2ecf20Sopenharmony_ci */
static inline bool uclamp_is_used(void)
{
	/* Mirrors the sched_uclamp_used static key state. */
	return static_branch_likely(&sched_uclamp_used);
}
26628c2ecf20Sopenharmony_ci#else /* CONFIG_UCLAMP_TASK */
/* uclamp disabled: min clamps to 0, max clamps to full capacity. */
static inline unsigned long uclamp_eff_value(struct task_struct *p,
					     enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;

	return SCHED_CAPACITY_SCALE;
}

/* uclamp disabled: utilization is passed through unclamped. */
static inline
unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
				  struct task_struct *p)
{
	return util;
}

/* uclamp disabled: no task is ever boosted. */
static inline bool uclamp_boosted(struct task_struct *p)
{
	return false;
}

/* uclamp disabled: rq-level aggregation is never active. */
static inline bool uclamp_is_used(void)
{
	return false;
}

/* uclamp disabled: report the widest possible clamp range. */
static inline unsigned long uclamp_rq_get(struct rq *rq,
					  enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;

	return SCHED_CAPACITY_SCALE;
}

/* uclamp disabled: rq clamp updates are no-ops. */
static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int value)
{
}

/* uclamp disabled: the rq is never considered uclamp-idle. */
static inline bool uclamp_rq_is_idle(struct rq *rq)
{
	return false;
}
27078c2ecf20Sopenharmony_ci#endif /* CONFIG_UCLAMP_TASK */
27088c2ecf20Sopenharmony_ci
27098c2ecf20Sopenharmony_ci#ifdef arch_scale_freq_capacity
27108c2ecf20Sopenharmony_ci# ifndef arch_scale_freq_invariant
27118c2ecf20Sopenharmony_ci#  define arch_scale_freq_invariant()	true
27128c2ecf20Sopenharmony_ci# endif
27138c2ecf20Sopenharmony_ci#else
27148c2ecf20Sopenharmony_ci# define arch_scale_freq_invariant()	false
27158c2ecf20Sopenharmony_ci#endif
27168c2ecf20Sopenharmony_ci
27178c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
/* Current capacity of @cpu (rq->cpu_capacity). */
static inline unsigned long capacity_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity;
}

/* Original capacity of @cpu before any runtime adjustment (cpu_capacity_orig). */
static inline unsigned long capacity_orig_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity_orig;
}
27278c2ecf20Sopenharmony_ci
27288c2ecf20Sopenharmony_ci/*
27298c2ecf20Sopenharmony_ci * Returns inverted capacity if the CPU is in capacity inversion state.
27308c2ecf20Sopenharmony_ci * 0 otherwise.
27318c2ecf20Sopenharmony_ci *
27328c2ecf20Sopenharmony_ci * Capacity inversion detection only considers thermal impact where actual
27338c2ecf20Sopenharmony_ci * performance points (OPPs) gets dropped.
27348c2ecf20Sopenharmony_ci *
27358c2ecf20Sopenharmony_ci * Capacity inversion state happens when another performance domain that has
27368c2ecf20Sopenharmony_ci * equal or lower capacity_orig_of() becomes effectively larger than the perf
27378c2ecf20Sopenharmony_ci * domain this CPU belongs to due to thermal pressure throttling it hard.
27388c2ecf20Sopenharmony_ci *
27398c2ecf20Sopenharmony_ci * See comment in update_cpu_capacity().
27408c2ecf20Sopenharmony_ci */
static inline unsigned long cpu_in_capacity_inversion(int cpu)
{
	/* Cached by update_cpu_capacity(); 0 when not inverted. */
	return cpu_rq(cpu)->cpu_capacity_inverted;
}
27458c2ecf20Sopenharmony_ci#endif
27468c2ecf20Sopenharmony_ci
27478c2ecf20Sopenharmony_ci/**
27488c2ecf20Sopenharmony_ci * enum schedutil_type - CPU utilization type
27498c2ecf20Sopenharmony_ci * @FREQUENCY_UTIL:	Utilization used to select frequency
27508c2ecf20Sopenharmony_ci * @ENERGY_UTIL:	Utilization used during energy calculation
27518c2ecf20Sopenharmony_ci *
27528c2ecf20Sopenharmony_ci * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
27538c2ecf20Sopenharmony_ci * need to be aggregated differently depending on the usage made of them. This
27548c2ecf20Sopenharmony_ci * enum is used within schedutil_freq_util() to differentiate the types of
27558c2ecf20Sopenharmony_ci * utilization expected by the callers, and adjust the aggregation accordingly.
27568c2ecf20Sopenharmony_ci */
enum schedutil_type {
	FREQUENCY_UTIL,		/* aggregate utilization for frequency selection */
	ENERGY_UTIL,		/* aggregate utilization for energy calculation */
};
27618c2ecf20Sopenharmony_ci
27628c2ecf20Sopenharmony_ci#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
27638c2ecf20Sopenharmony_ci
27648c2ecf20Sopenharmony_ciunsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
27658c2ecf20Sopenharmony_ci				 unsigned long max, enum schedutil_type type,
27668c2ecf20Sopenharmony_ci				 struct task_struct *p);
27678c2ecf20Sopenharmony_ci
/*
 * Reserved deadline bandwidth of @rq, rescaled from BW_SHIFT fixed point
 * to SCHED_CAPACITY_SCALE units.
 */
static inline unsigned long cpu_bw_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

/* Average utilization (util_avg) of the DL rq. */
static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return READ_ONCE(rq->avg_dl.util_avg);
}
27778c2ecf20Sopenharmony_ci
27788c2ecf20Sopenharmony_cistatic inline unsigned long cpu_util_cfs(struct rq *rq)
27798c2ecf20Sopenharmony_ci{
27808c2ecf20Sopenharmony_ci	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
27818c2ecf20Sopenharmony_ci
27828c2ecf20Sopenharmony_ci	if (sched_feat(UTIL_EST)) {
27838c2ecf20Sopenharmony_ci		util = max_t(unsigned long, util,
27848c2ecf20Sopenharmony_ci			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
27858c2ecf20Sopenharmony_ci	}
27868c2ecf20Sopenharmony_ci
27878c2ecf20Sopenharmony_ci	return util;
27888c2ecf20Sopenharmony_ci}
27898c2ecf20Sopenharmony_ci
/* Average utilization (util_avg) of the RT rq. */
static inline unsigned long cpu_util_rt(struct rq *rq)
{
	return READ_ONCE(rq->avg_rt.util_avg);
}
27948c2ecf20Sopenharmony_ci#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
/* schedutil not built in: no cross-class utilization aggregation. */
static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	return 0;
}
28018c2ecf20Sopenharmony_ci#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
28028c2ecf20Sopenharmony_ci
28038c2ecf20Sopenharmony_ci#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/* Average utilization (util_avg) consumed by IRQ handling on @rq's CPU. */
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return rq->avg_irq.util_avg;
}
28088c2ecf20Sopenharmony_ci
/*
 * Scale @util by the fraction of @max capacity left over after @irq time:
 * util * (max - irq) / max.
 */
static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	return util * (max - irq) / max;
}
28188c2ecf20Sopenharmony_ci#else
/* No IRQ time averaging: report zero IRQ utilization. */
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return 0;
}

/* No IRQ time averaging: nothing to scale away. */
static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	return util;
}
28298c2ecf20Sopenharmony_ci#endif
28308c2ecf20Sopenharmony_ci
28318c2ecf20Sopenharmony_ci#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
28328c2ecf20Sopenharmony_ci
28338c2ecf20Sopenharmony_ci#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
28348c2ecf20Sopenharmony_ci
28358c2ecf20Sopenharmony_ciDECLARE_STATIC_KEY_FALSE(sched_energy_present);
28368c2ecf20Sopenharmony_ci
static inline bool sched_energy_enabled(void)
{
	/* Tracks the sched_energy_present static key. */
	return static_branch_unlikely(&sched_energy_present);
}
28418c2ecf20Sopenharmony_ci
28428c2ecf20Sopenharmony_ci#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
28438c2ecf20Sopenharmony_ci
28448c2ecf20Sopenharmony_ci#define perf_domain_span(pd) NULL
/* Energy model / schedutil support compiled out. */
static inline bool sched_energy_enabled(void) { return false; }
28468c2ecf20Sopenharmony_ci
28478c2ecf20Sopenharmony_ci#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
28488c2ecf20Sopenharmony_ci
28498c2ecf20Sopenharmony_ci#ifdef CONFIG_MEMBARRIER
28508c2ecf20Sopenharmony_ci/*
28518c2ecf20Sopenharmony_ci * The scheduler provides memory barriers required by membarrier between:
28528c2ecf20Sopenharmony_ci * - prior user-space memory accesses and store to rq->membarrier_state,
28538c2ecf20Sopenharmony_ci * - store to rq->membarrier_state and following user-space memory accesses.
28548c2ecf20Sopenharmony_ci * In the same way it provides those guarantees around store to rq->curr.
28558c2ecf20Sopenharmony_ci */
/* Refresh rq->membarrier_state when switching to a different mm. */
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
	int membarrier_state;

	/* Same mm: the cached state is already correct. */
	if (prev_mm == next_mm)
		return;

	membarrier_state = atomic_read(&next_mm->membarrier_state);
	/* Avoid dirtying the rq cacheline when nothing changed. */
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;

	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}
28718c2ecf20Sopenharmony_ci#else
/* CONFIG_MEMBARRIER=n: no per-rq membarrier state to maintain. */
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
}
28778c2ecf20Sopenharmony_ci#endif
28788c2ecf20Sopenharmony_ci
28798c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
28808c2ecf20Sopenharmony_cistatic inline bool is_per_cpu_kthread(struct task_struct *p)
28818c2ecf20Sopenharmony_ci{
28828c2ecf20Sopenharmony_ci	if (!(p->flags & PF_KTHREAD))
28838c2ecf20Sopenharmony_ci		return false;
28848c2ecf20Sopenharmony_ci
28858c2ecf20Sopenharmony_ci	if (p->nr_cpus_allowed != 1)
28868c2ecf20Sopenharmony_ci		return false;
28878c2ecf20Sopenharmony_ci
28888c2ecf20Sopenharmony_ci	return true;
28898c2ecf20Sopenharmony_ci}
28908c2ecf20Sopenharmony_ci#endif
28918c2ecf20Sopenharmony_ci
28928c2ecf20Sopenharmony_civoid swake_up_all_locked(struct swait_queue_head *q);
28938c2ecf20Sopenharmony_civoid __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
28948c2ecf20Sopenharmony_ci
28958c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
28968c2ecf20Sopenharmony_ciextern bool task_fits_max(struct task_struct *p, int cpu);
28978c2ecf20Sopenharmony_ciextern unsigned long capacity_spare_without(int cpu, struct task_struct *p);
28988c2ecf20Sopenharmony_ciextern int update_preferred_cluster(struct related_thread_group *grp,
28998c2ecf20Sopenharmony_ci			struct task_struct *p, u32 old_load, bool from_tick);
29008c2ecf20Sopenharmony_ciextern struct cpumask *find_rtg_target(struct task_struct *p);
29018c2ecf20Sopenharmony_ci#endif
29028c2ecf20Sopenharmony_ci
29038c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
/* First CPU (lowest-numbered) in @cluster's cpumask. */
static inline int cluster_first_cpu(struct sched_cluster *cluster)
{
	return cpumask_first(&cluster->cpus);
}
29088c2ecf20Sopenharmony_ci
29098c2ecf20Sopenharmony_ciextern struct list_head cluster_head;
29108c2ecf20Sopenharmony_ciextern struct sched_cluster *sched_cluster[NR_CPUS];
29118c2ecf20Sopenharmony_ci
29128c2ecf20Sopenharmony_ci#define for_each_sched_cluster(cluster) \
29138c2ecf20Sopenharmony_ci	list_for_each_entry_rcu(cluster, &cluster_head, list)
29148c2ecf20Sopenharmony_ci
29158c2ecf20Sopenharmony_ciextern struct mutex policy_mutex;
29168c2ecf20Sopenharmony_ciextern unsigned int sched_disable_window_stats;
29178c2ecf20Sopenharmony_ciextern unsigned int max_possible_freq;
29188c2ecf20Sopenharmony_ciextern unsigned int min_max_freq;
29198c2ecf20Sopenharmony_ciextern unsigned int max_possible_efficiency;
29208c2ecf20Sopenharmony_ciextern unsigned int min_possible_efficiency;
29218c2ecf20Sopenharmony_ciextern unsigned int max_capacity;
29228c2ecf20Sopenharmony_ciextern unsigned int min_capacity;
29238c2ecf20Sopenharmony_ciextern unsigned int max_load_scale_factor;
29248c2ecf20Sopenharmony_ciextern unsigned int max_possible_capacity;
29258c2ecf20Sopenharmony_ciextern unsigned int min_max_possible_capacity;
29268c2ecf20Sopenharmony_ciextern unsigned int max_power_cost;
29278c2ecf20Sopenharmony_ciextern unsigned int __read_mostly sched_init_task_load_windows;
29288c2ecf20Sopenharmony_ciextern unsigned int sysctl_sched_restrict_cluster_spill;
29298c2ecf20Sopenharmony_ciextern unsigned int sched_pred_alert_load;
29308c2ecf20Sopenharmony_ciextern struct sched_cluster init_cluster;
29318c2ecf20Sopenharmony_ci
/*
 * Adjust the rq's cumulative window demand by @scaled_delta, clamping at
 * zero so a negative delta cannot drive the counter below zero.
 */
static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta)
{
	rq->cum_window_demand_scaled += scaled_delta;
	if (unlikely((s64)rq->cum_window_demand_scaled < 0))
		rq->cum_window_demand_scaled = 0;
}
29388c2ecf20Sopenharmony_ci
29398c2ecf20Sopenharmony_ci/* Is frequency of two cpus synchronized with each other? */
29408c2ecf20Sopenharmony_cistatic inline int same_freq_domain(int src_cpu, int dst_cpu)
29418c2ecf20Sopenharmony_ci{
29428c2ecf20Sopenharmony_ci	struct rq *rq = cpu_rq(src_cpu);
29438c2ecf20Sopenharmony_ci
29448c2ecf20Sopenharmony_ci	if (src_cpu == dst_cpu)
29458c2ecf20Sopenharmony_ci		return 1;
29468c2ecf20Sopenharmony_ci
29478c2ecf20Sopenharmony_ci	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
29488c2ecf20Sopenharmony_ci}
29498c2ecf20Sopenharmony_ci
29508c2ecf20Sopenharmony_ciextern void reset_task_stats(struct task_struct *p);
29518c2ecf20Sopenharmony_ci
29528c2ecf20Sopenharmony_ci#define CPU_RESERVED	1
/* Test the CPU_RESERVED bit in @cpu's rq->walt_flags. */
static inline int is_reserved(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return test_bit(CPU_RESERVED, &rq->walt_flags);
}

/* Atomically set CPU_RESERVED; returns non-zero if it was already set. */
static inline int mark_reserved(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return test_and_set_bit(CPU_RESERVED, &rq->walt_flags);
}

/* Clear the CPU_RESERVED bit for @cpu. */
static inline void clear_reserved(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	clear_bit(CPU_RESERVED, &rq->walt_flags);
}
29738c2ecf20Sopenharmony_ci
/* Capacity of the cluster @cpu belongs to. */
static inline int cpu_capacity(int cpu)
{
	return cpu_rq(cpu)->cluster->capacity;
}

/* Maximum possible capacity of @cpu's cluster. */
static inline int cpu_max_possible_capacity(int cpu)
{
	return cpu_rq(cpu)->cluster->max_possible_capacity;
}

/* Load scale factor of @cpu's cluster. */
static inline int cpu_load_scale_factor(int cpu)
{
	return cpu_rq(cpu)->cluster->load_scale_factor;
}
29888c2ecf20Sopenharmony_ci
static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
{
	/*
	 * Governor and thermal driver don't know the other party's mitigation
	 * voting. So struct cluster saves both and return min() for current
	 * cluster fmax.
	 *
	 * NOTE(review): the code below returns cluster->max_freq directly;
	 * presumably the min() described above is applied where max_freq is
	 * updated — confirm against the cluster update path.
	 */
	return cluster->max_freq;
}
29988c2ecf20Sopenharmony_ci
29998c2ecf20Sopenharmony_ci/* Keep track of max/min capacity possible across CPUs "currently" */
30008c2ecf20Sopenharmony_cistatic inline void __update_min_max_capacity(void)
30018c2ecf20Sopenharmony_ci{
30028c2ecf20Sopenharmony_ci	int i;
30038c2ecf20Sopenharmony_ci	int max_cap = 0, min_cap = INT_MAX;
30048c2ecf20Sopenharmony_ci
30058c2ecf20Sopenharmony_ci	for_each_possible_cpu(i) {
30068c2ecf20Sopenharmony_ci		if (!cpu_active(i))
30078c2ecf20Sopenharmony_ci			continue;
30088c2ecf20Sopenharmony_ci
30098c2ecf20Sopenharmony_ci		max_cap = max(max_cap, cpu_capacity(i));
30108c2ecf20Sopenharmony_ci		min_cap = min(min_cap, cpu_capacity(i));
30118c2ecf20Sopenharmony_ci	}
30128c2ecf20Sopenharmony_ci
30138c2ecf20Sopenharmony_ci	max_capacity = max_cap;
30148c2ecf20Sopenharmony_ci	min_capacity = min_cap;
30158c2ecf20Sopenharmony_ci}
30168c2ecf20Sopenharmony_ci
30178c2ecf20Sopenharmony_ci/*
30188c2ecf20Sopenharmony_ci * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
30198c2ecf20Sopenharmony_ci * that "most" efficient cpu gets a load_scale_factor of 1
30208c2ecf20Sopenharmony_ci */
30218c2ecf20Sopenharmony_cistatic inline unsigned long
30228c2ecf20Sopenharmony_ciload_scale_cpu_efficiency(struct sched_cluster *cluster)
30238c2ecf20Sopenharmony_ci{
30248c2ecf20Sopenharmony_ci	return DIV_ROUND_UP(1024 * max_possible_efficiency,
30258c2ecf20Sopenharmony_ci			    cluster->efficiency);
30268c2ecf20Sopenharmony_ci}
30278c2ecf20Sopenharmony_ci
30288c2ecf20Sopenharmony_ci/*
30298c2ecf20Sopenharmony_ci * Return load_scale_factor of a cpu in reference to cpu with best max_freq
30308c2ecf20Sopenharmony_ci * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
30318c2ecf20Sopenharmony_ci * of 1.
30328c2ecf20Sopenharmony_ci */
30338c2ecf20Sopenharmony_cistatic inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
30348c2ecf20Sopenharmony_ci{
30358c2ecf20Sopenharmony_ci	return DIV_ROUND_UP(1024 * max_possible_freq,
30368c2ecf20Sopenharmony_ci			   cluster_max_freq(cluster));
30378c2ecf20Sopenharmony_ci}
30388c2ecf20Sopenharmony_ci
30398c2ecf20Sopenharmony_cistatic inline int compute_load_scale_factor(struct sched_cluster *cluster)
30408c2ecf20Sopenharmony_ci{
30418c2ecf20Sopenharmony_ci	int load_scale = 1024;
30428c2ecf20Sopenharmony_ci
30438c2ecf20Sopenharmony_ci	/*
30448c2ecf20Sopenharmony_ci	 * load_scale_factor accounts for the fact that task load
30458c2ecf20Sopenharmony_ci	 * is in reference to "best" performing cpu. Task's load will need to be
30468c2ecf20Sopenharmony_ci	 * scaled (up) by a factor to determine suitability to be placed on a
30478c2ecf20Sopenharmony_ci	 * (little) cpu.
30488c2ecf20Sopenharmony_ci	 */
30498c2ecf20Sopenharmony_ci	load_scale *= load_scale_cpu_efficiency(cluster);
30508c2ecf20Sopenharmony_ci	load_scale >>= 10;
30518c2ecf20Sopenharmony_ci
30528c2ecf20Sopenharmony_ci	load_scale *= load_scale_cpu_freq(cluster);
30538c2ecf20Sopenharmony_ci	load_scale >>= 10;
30548c2ecf20Sopenharmony_ci
30558c2ecf20Sopenharmony_ci	return load_scale;
30568c2ecf20Sopenharmony_ci}
30578c2ecf20Sopenharmony_ci
30588c2ecf20Sopenharmony_cistatic inline bool is_max_capacity_cpu(int cpu)
30598c2ecf20Sopenharmony_ci{
30608c2ecf20Sopenharmony_ci	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
30618c2ecf20Sopenharmony_ci}
30628c2ecf20Sopenharmony_ci
30638c2ecf20Sopenharmony_cistatic inline bool is_min_capacity_cpu(int cpu)
30648c2ecf20Sopenharmony_ci{
30658c2ecf20Sopenharmony_ci	return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
30668c2ecf20Sopenharmony_ci}
30678c2ecf20Sopenharmony_ci
30688c2ecf20Sopenharmony_ci/*
30698c2ecf20Sopenharmony_ci * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
30708c2ecf20Sopenharmony_ci * least efficient cpu gets capacity of 1024
30718c2ecf20Sopenharmony_ci */
30728c2ecf20Sopenharmony_cistatic unsigned long
30738c2ecf20Sopenharmony_cicapacity_scale_cpu_efficiency(struct sched_cluster *cluster)
30748c2ecf20Sopenharmony_ci{
30758c2ecf20Sopenharmony_ci	return (1024 * cluster->efficiency) / min_possible_efficiency;
30768c2ecf20Sopenharmony_ci}
30778c2ecf20Sopenharmony_ci
30788c2ecf20Sopenharmony_ci/*
30798c2ecf20Sopenharmony_ci * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
30808c2ecf20Sopenharmony_ci * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
30818c2ecf20Sopenharmony_ci */
30828c2ecf20Sopenharmony_cistatic unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
30838c2ecf20Sopenharmony_ci{
30848c2ecf20Sopenharmony_ci	return (1024 * cluster_max_freq(cluster)) / min_max_freq;
30858c2ecf20Sopenharmony_ci}
30868c2ecf20Sopenharmony_ci
30878c2ecf20Sopenharmony_cistatic inline int compute_capacity(struct sched_cluster *cluster)
30888c2ecf20Sopenharmony_ci{
30898c2ecf20Sopenharmony_ci	int capacity = 1024;
30908c2ecf20Sopenharmony_ci
30918c2ecf20Sopenharmony_ci	capacity *= capacity_scale_cpu_efficiency(cluster);
30928c2ecf20Sopenharmony_ci	capacity >>= 10;
30938c2ecf20Sopenharmony_ci
30948c2ecf20Sopenharmony_ci	capacity *= capacity_scale_cpu_freq(cluster);
30958c2ecf20Sopenharmony_ci	capacity >>= 10;
30968c2ecf20Sopenharmony_ci
30978c2ecf20Sopenharmony_ci	return capacity;
30988c2ecf20Sopenharmony_ci}
30998c2ecf20Sopenharmony_ci
31008c2ecf20Sopenharmony_cistatic inline unsigned int power_cost(int cpu, u64 demand)
31018c2ecf20Sopenharmony_ci{
31028c2ecf20Sopenharmony_ci	return cpu_max_possible_capacity(cpu);
31038c2ecf20Sopenharmony_ci}
31048c2ecf20Sopenharmony_ci
31058c2ecf20Sopenharmony_cistatic inline unsigned long cpu_util_freq_walt(int cpu)
31068c2ecf20Sopenharmony_ci{
31078c2ecf20Sopenharmony_ci	u64 util;
31088c2ecf20Sopenharmony_ci	struct rq *rq = cpu_rq(cpu);
31098c2ecf20Sopenharmony_ci	unsigned long capacity = capacity_orig_of(cpu);
31108c2ecf20Sopenharmony_ci
31118c2ecf20Sopenharmony_ci	if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util))
31128c2ecf20Sopenharmony_ci		return cpu_util(cpu);
31138c2ecf20Sopenharmony_ci
31148c2ecf20Sopenharmony_ci	util = rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
31158c2ecf20Sopenharmony_ci	util = div_u64(util, sched_ravg_window);
31168c2ecf20Sopenharmony_ci
31178c2ecf20Sopenharmony_ci	return (util >= capacity) ? capacity : util;
31188c2ecf20Sopenharmony_ci}
31198c2ecf20Sopenharmony_ci
31208c2ecf20Sopenharmony_cistatic inline bool hmp_capable(void)
31218c2ecf20Sopenharmony_ci{
31228c2ecf20Sopenharmony_ci	return max_possible_capacity != min_max_possible_capacity;
31238c2ecf20Sopenharmony_ci}
31248c2ecf20Sopenharmony_ci#else /* CONFIG_SCHED_WALT */
31258c2ecf20Sopenharmony_cistatic inline void walt_fixup_cum_window_demand(struct rq *rq,
31268c2ecf20Sopenharmony_ci						s64 scaled_delta) { }
31278c2ecf20Sopenharmony_ci
31288c2ecf20Sopenharmony_cistatic inline int same_freq_domain(int src_cpu, int dst_cpu)
31298c2ecf20Sopenharmony_ci{
31308c2ecf20Sopenharmony_ci	return 1;
31318c2ecf20Sopenharmony_ci}
31328c2ecf20Sopenharmony_ci
31338c2ecf20Sopenharmony_cistatic inline int is_reserved(int cpu)
31348c2ecf20Sopenharmony_ci{
31358c2ecf20Sopenharmony_ci	return 0;
31368c2ecf20Sopenharmony_ci}
31378c2ecf20Sopenharmony_ci
31388c2ecf20Sopenharmony_cistatic inline void clear_reserved(int cpu) { }
31398c2ecf20Sopenharmony_ci
31408c2ecf20Sopenharmony_cistatic inline bool hmp_capable(void)
31418c2ecf20Sopenharmony_ci{
31428c2ecf20Sopenharmony_ci	return false;
31438c2ecf20Sopenharmony_ci}
31448c2ecf20Sopenharmony_ci#endif /* CONFIG_SCHED_WALT */
31458c2ecf20Sopenharmony_ci
31468c2ecf20Sopenharmony_cistruct sched_avg_stats {
31478c2ecf20Sopenharmony_ci	int nr;
31488c2ecf20Sopenharmony_ci	int nr_misfit;
31498c2ecf20Sopenharmony_ci	int nr_max;
31508c2ecf20Sopenharmony_ci	int nr_scaled;
31518c2ecf20Sopenharmony_ci};
31528c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RUNNING_AVG
31538c2ecf20Sopenharmony_ciextern void sched_get_nr_running_avg(struct sched_avg_stats *stats);
31548c2ecf20Sopenharmony_ci#else
31558c2ecf20Sopenharmony_cistatic inline void sched_get_nr_running_avg(struct sched_avg_stats *stats) { }
31568c2ecf20Sopenharmony_ci#endif
31578c2ecf20Sopenharmony_ci
31588c2ecf20Sopenharmony_ci#ifdef CONFIG_CPU_ISOLATION_OPT
31598c2ecf20Sopenharmony_ciextern int group_balance_cpu_not_isolated(struct sched_group *sg);
31608c2ecf20Sopenharmony_ci#else
31618c2ecf20Sopenharmony_cistatic inline int group_balance_cpu_not_isolated(struct sched_group *sg)
31628c2ecf20Sopenharmony_ci{
31638c2ecf20Sopenharmony_ci	return group_balance_cpu(sg);
31648c2ecf20Sopenharmony_ci}
31658c2ecf20Sopenharmony_ci#endif /* CONFIG_CPU_ISOLATION_OPT */
31668c2ecf20Sopenharmony_ci
31678c2ecf20Sopenharmony_ci#ifdef CONFIG_HOTPLUG_CPU
31688c2ecf20Sopenharmony_ciextern void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
31698c2ecf20Sopenharmony_ci					bool migrate_pinned_tasks);
31708c2ecf20Sopenharmony_ci#endif
3171