// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"
#include "walt.h"

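/*
 * The RR timeslice is tracked in jiffies internally; the sysctl below
 * mirrors it in milliseconds for userspace.
 */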
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

#ifdef CONFIG_SCHED_RT_CAS
unsigned int sysctl_sched_enable_rt_cas = 1;
#endif

#ifdef CONFIG_SCHED_RT_ACTIVE_LB
unsigned int sysctl_sched_enable_rt_active_lb = 1;
#endif

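/*
 * hrtimer callback for the RT period timer: for each period that has
 * elapsed, replenish runtime via do_sched_rt_period_timer() and rearm
 * the timer unless all runqueues went idle.
 */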
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task alongside a DL task could hog a CPU. But DL
		 * does not reset the period. If a deadline task was
		 * running without an RT task running, it can cause RT
		 * tasks to throttle when they start up. Kick the timer
		 * right away to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

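/*
 * Initialize a per-CPU RT runqueue: empty priority array, no pushable
 * tasks, throttling state cleared. The actual runtime budget is
 * assigned later by the bandwidth code.
 */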
void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/*
	 * Try to pull RT tasks here if we lower this rq's prio and cpu is not
	 * isolated
	 */
	return rq->rt.highest_prio.curr > prev->prio &&
	       !cpu_isolated(cpu_of(rq));
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

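/*
 * An rq is "RT overloaded" when it has more than one RT task queued and
 * at least one of them can migrate; flag that in the root domain so
 * other CPUs know they may pull from here.
 */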
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

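/*
 * Track tasks that could run elsewhere: the pushable list is ordered by
 * priority so push_rt_tasks() can offload the most urgent waiter first.
 */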
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value can be higher than the capacity of a @cpu. For non-heterogeneous
 * systems this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

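/*
 * Walk every rt_rq attached to @rq, one per task group, skipping
 * autogroups (see next_task_group() above).
 */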
#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

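/*
 * Called from the period timer: refill rt_time for every rt_rq in the
 * period mask, unthrottle runqueues that regained runtime, and report
 * whether everything went idle so the timer can stop.
 */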
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * A task group's tasks may run on a mix of isolated and
	 * non-isolated CPUs (whether marked isolcpus or isolated via
	 * cpusets), so check every online rt_rq lest the timer run on
	 * a CPU which does not service all runqueues, potentially
	 * leaving other CPUs indefinitely throttled.
	 */
	span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		raw_spin_lock(&rq->lock);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

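/*
 * Arm the RT period timer if it is not already active; callers have
 * already established that bandwidth control applies here.
 */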
static inline void try_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				try_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
}

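/*
 * The root rt_rq contributes its rt_nr_running to rq->nr_running only
 * while "queued"; these two helpers add and remove that contribution
 * when the whole RT class is throttled or replenished.
 */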
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, count);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
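/*
 * Maintain the cached highest priority of an rt_rq as entities come and
 * go; on SMP this is also propagated to cpupri for push/pull decisions.
 */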
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

11928c2ecf20Sopenharmony_cistatic void
11938c2ecf20Sopenharmony_ciinc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
11948c2ecf20Sopenharmony_ci{
11958c2ecf20Sopenharmony_ci	if (rt_se_boosted(rt_se))
11968c2ecf20Sopenharmony_ci		rt_rq->rt_nr_boosted++;
11978c2ecf20Sopenharmony_ci
11988c2ecf20Sopenharmony_ci	if (rt_rq->tg)
11998c2ecf20Sopenharmony_ci		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
12008c2ecf20Sopenharmony_ci}
12018c2ecf20Sopenharmony_ci
12028c2ecf20Sopenharmony_cistatic void
12038c2ecf20Sopenharmony_cidec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12048c2ecf20Sopenharmony_ci{
12058c2ecf20Sopenharmony_ci	if (rt_se_boosted(rt_se))
12068c2ecf20Sopenharmony_ci		rt_rq->rt_nr_boosted--;
12078c2ecf20Sopenharmony_ci
12088c2ecf20Sopenharmony_ci	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
12098c2ecf20Sopenharmony_ci}
12108c2ecf20Sopenharmony_ci
12118c2ecf20Sopenharmony_ci#else /* CONFIG_RT_GROUP_SCHED */
12128c2ecf20Sopenharmony_ci
12138c2ecf20Sopenharmony_cistatic void
12148c2ecf20Sopenharmony_ciinc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12158c2ecf20Sopenharmony_ci{
12168c2ecf20Sopenharmony_ci	start_rt_bandwidth(&def_rt_bandwidth);
12178c2ecf20Sopenharmony_ci}
12188c2ecf20Sopenharmony_ci
12198c2ecf20Sopenharmony_cistatic inline
12208c2ecf20Sopenharmony_civoid dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
12218c2ecf20Sopenharmony_ci
12228c2ecf20Sopenharmony_ci#endif /* CONFIG_RT_GROUP_SCHED */
12238c2ecf20Sopenharmony_ci
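/*
 * An rt_se either represents a single task (it counts as 1) or a whole
 * child rt_rq, in which case it contributes that group's rt_nr_running.
 */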
12248c2ecf20Sopenharmony_cistatic inline
12258c2ecf20Sopenharmony_ciunsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
12268c2ecf20Sopenharmony_ci{
12278c2ecf20Sopenharmony_ci	struct rt_rq *group_rq = group_rt_rq(rt_se);
12288c2ecf20Sopenharmony_ci
12298c2ecf20Sopenharmony_ci	if (group_rq)
12308c2ecf20Sopenharmony_ci		return group_rq->rt_nr_running;
12318c2ecf20Sopenharmony_ci	else
12328c2ecf20Sopenharmony_ci		return 1;
12338c2ecf20Sopenharmony_ci}
12348c2ecf20Sopenharmony_ci
12358c2ecf20Sopenharmony_cistatic inline
12368c2ecf20Sopenharmony_ciunsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
12378c2ecf20Sopenharmony_ci{
12388c2ecf20Sopenharmony_ci	struct rt_rq *group_rq = group_rt_rq(rt_se);
12398c2ecf20Sopenharmony_ci	struct task_struct *tsk;
12408c2ecf20Sopenharmony_ci
12418c2ecf20Sopenharmony_ci	if (group_rq)
12428c2ecf20Sopenharmony_ci		return group_rq->rr_nr_running;
12438c2ecf20Sopenharmony_ci
12448c2ecf20Sopenharmony_ci	tsk = rt_task_of(rt_se);
12458c2ecf20Sopenharmony_ci
12468c2ecf20Sopenharmony_ci	return (tsk->policy == SCHED_RR) ? 1 : 0;
12478c2ecf20Sopenharmony_ci}
12488c2ecf20Sopenharmony_ci
12498c2ecf20Sopenharmony_cistatic inline
12508c2ecf20Sopenharmony_civoid inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12518c2ecf20Sopenharmony_ci{
12528c2ecf20Sopenharmony_ci	int prio = rt_se_prio(rt_se);
12538c2ecf20Sopenharmony_ci
12548c2ecf20Sopenharmony_ci	WARN_ON(!rt_prio(prio));
12558c2ecf20Sopenharmony_ci	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
12568c2ecf20Sopenharmony_ci	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
12578c2ecf20Sopenharmony_ci
12588c2ecf20Sopenharmony_ci	inc_rt_prio(rt_rq, prio);
12598c2ecf20Sopenharmony_ci	inc_rt_migration(rt_se, rt_rq);
12608c2ecf20Sopenharmony_ci	inc_rt_group(rt_se, rt_rq);
12618c2ecf20Sopenharmony_ci}
12628c2ecf20Sopenharmony_ci
12638c2ecf20Sopenharmony_cistatic inline
12648c2ecf20Sopenharmony_civoid dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12658c2ecf20Sopenharmony_ci{
12668c2ecf20Sopenharmony_ci	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
12678c2ecf20Sopenharmony_ci	WARN_ON(!rt_rq->rt_nr_running);
12688c2ecf20Sopenharmony_ci	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
12698c2ecf20Sopenharmony_ci	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
12708c2ecf20Sopenharmony_ci
12718c2ecf20Sopenharmony_ci	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
12728c2ecf20Sopenharmony_ci	dec_rt_migration(rt_se, rt_rq);
12738c2ecf20Sopenharmony_ci	dec_rt_group(rt_se, rt_rq);
12748c2ecf20Sopenharmony_ci}
12758c2ecf20Sopenharmony_ci
12768c2ecf20Sopenharmony_ci/*
12778c2ecf20Sopenharmony_ci * Change rt_se->run_list location unless SAVE && !MOVE
12788c2ecf20Sopenharmony_ci *
12798c2ecf20Sopenharmony_ci * assumes ENQUEUE/DEQUEUE flags match
12808c2ecf20Sopenharmony_ci */
12818c2ecf20Sopenharmony_cistatic inline bool move_entity(unsigned int flags)
12828c2ecf20Sopenharmony_ci{
12838c2ecf20Sopenharmony_ci	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
12848c2ecf20Sopenharmony_ci		return false;
12858c2ecf20Sopenharmony_ci
12868c2ecf20Sopenharmony_ci	return true;
12878c2ecf20Sopenharmony_ci}
12888c2ecf20Sopenharmony_ci
12898c2ecf20Sopenharmony_cistatic void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
12908c2ecf20Sopenharmony_ci{
12918c2ecf20Sopenharmony_ci	list_del_init(&rt_se->run_list);
12928c2ecf20Sopenharmony_ci
12938c2ecf20Sopenharmony_ci	if (list_empty(array->queue + rt_se_prio(rt_se)))
12948c2ecf20Sopenharmony_ci		__clear_bit(rt_se_prio(rt_se), array->bitmap);
12958c2ecf20Sopenharmony_ci
12968c2ecf20Sopenharmony_ci	rt_se->on_list = 0;
12978c2ecf20Sopenharmony_ci}
12988c2ecf20Sopenharmony_ci
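/*
 * Link @rt_se into the run list of its priority (head or tail depending
 * on ENQUEUE_HEAD), keep the priority bitmap in sync, and fold the
 * entity into the rt_rq accounting via inc_rt_tasks().
 */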
12998c2ecf20Sopenharmony_cistatic void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
13008c2ecf20Sopenharmony_ci{
13018c2ecf20Sopenharmony_ci	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
13028c2ecf20Sopenharmony_ci	struct rt_prio_array *array = &rt_rq->active;
13038c2ecf20Sopenharmony_ci	struct rt_rq *group_rq = group_rt_rq(rt_se);
13048c2ecf20Sopenharmony_ci	struct list_head *queue = array->queue + rt_se_prio(rt_se);
13058c2ecf20Sopenharmony_ci
13068c2ecf20Sopenharmony_ci	/*
13078c2ecf20Sopenharmony_ci	 * Don't enqueue the group if it's throttled, or when empty.
13088c2ecf20Sopenharmony_ci	 * The latter is a consequence of the former when a child group
13098c2ecf20Sopenharmony_ci	 * gets throttled and the current group doesn't have any other
13108c2ecf20Sopenharmony_ci	 * active members.
13118c2ecf20Sopenharmony_ci	 */
13128c2ecf20Sopenharmony_ci	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
13138c2ecf20Sopenharmony_ci		if (rt_se->on_list)
13148c2ecf20Sopenharmony_ci			__delist_rt_entity(rt_se, array);
13158c2ecf20Sopenharmony_ci		return;
13168c2ecf20Sopenharmony_ci	}
13178c2ecf20Sopenharmony_ci
13188c2ecf20Sopenharmony_ci	if (move_entity(flags)) {
13198c2ecf20Sopenharmony_ci		WARN_ON_ONCE(rt_se->on_list);
13208c2ecf20Sopenharmony_ci		if (flags & ENQUEUE_HEAD)
13218c2ecf20Sopenharmony_ci			list_add(&rt_se->run_list, queue);
13228c2ecf20Sopenharmony_ci		else
13238c2ecf20Sopenharmony_ci			list_add_tail(&rt_se->run_list, queue);
13248c2ecf20Sopenharmony_ci
13258c2ecf20Sopenharmony_ci		__set_bit(rt_se_prio(rt_se), array->bitmap);
13268c2ecf20Sopenharmony_ci		rt_se->on_list = 1;
13278c2ecf20Sopenharmony_ci	}
13288c2ecf20Sopenharmony_ci	rt_se->on_rq = 1;
13298c2ecf20Sopenharmony_ci
13308c2ecf20Sopenharmony_ci	inc_rt_tasks(rt_se, rt_rq);
13318c2ecf20Sopenharmony_ci}
13328c2ecf20Sopenharmony_ci
13338c2ecf20Sopenharmony_cistatic void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
13348c2ecf20Sopenharmony_ci{
13358c2ecf20Sopenharmony_ci	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
13368c2ecf20Sopenharmony_ci	struct rt_prio_array *array = &rt_rq->active;
13378c2ecf20Sopenharmony_ci
13388c2ecf20Sopenharmony_ci	if (move_entity(flags)) {
13398c2ecf20Sopenharmony_ci		WARN_ON_ONCE(!rt_se->on_list);
13408c2ecf20Sopenharmony_ci		__delist_rt_entity(rt_se, array);
13418c2ecf20Sopenharmony_ci	}
13428c2ecf20Sopenharmony_ci	rt_se->on_rq = 0;
13438c2ecf20Sopenharmony_ci
13448c2ecf20Sopenharmony_ci	dec_rt_tasks(rt_se, rt_rq);
13458c2ecf20Sopenharmony_ci}
13468c2ecf20Sopenharmony_ci
13478c2ecf20Sopenharmony_ci/*
13488c2ecf20Sopenharmony_ci * Because the prio of an upper entry depends on the lower
13498c2ecf20Sopenharmony_ci * entries, we must remove entries top-down.
13508c2ecf20Sopenharmony_ci */
13518c2ecf20Sopenharmony_cistatic void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
13528c2ecf20Sopenharmony_ci{
13538c2ecf20Sopenharmony_ci	struct sched_rt_entity *back = NULL;
13548c2ecf20Sopenharmony_ci	unsigned int rt_nr_running;
13558c2ecf20Sopenharmony_ci
13568c2ecf20Sopenharmony_ci	for_each_sched_rt_entity(rt_se) {
13578c2ecf20Sopenharmony_ci		rt_se->back = back;
13588c2ecf20Sopenharmony_ci		back = rt_se;
13598c2ecf20Sopenharmony_ci	}
13608c2ecf20Sopenharmony_ci
13618c2ecf20Sopenharmony_ci	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
13628c2ecf20Sopenharmony_ci
13638c2ecf20Sopenharmony_ci	for (rt_se = back; rt_se; rt_se = rt_se->back) {
13648c2ecf20Sopenharmony_ci		if (on_rt_rq(rt_se))
13658c2ecf20Sopenharmony_ci			__dequeue_rt_entity(rt_se, flags);
13668c2ecf20Sopenharmony_ci	}
13678c2ecf20Sopenharmony_ci
13688c2ecf20Sopenharmony_ci	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
13698c2ecf20Sopenharmony_ci}
13708c2ecf20Sopenharmony_ci
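/*
 * Enqueue works in two phases: dequeue_rt_stack() tears the hierarchy
 * down top-down, then every level is re-enqueued from the leaf upwards
 * so group priorities are recomputed consistently.
 */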
13718c2ecf20Sopenharmony_cistatic void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
13728c2ecf20Sopenharmony_ci{
13738c2ecf20Sopenharmony_ci	struct rq *rq = rq_of_rt_se(rt_se);
13748c2ecf20Sopenharmony_ci
13758c2ecf20Sopenharmony_ci	dequeue_rt_stack(rt_se, flags);
13768c2ecf20Sopenharmony_ci	for_each_sched_rt_entity(rt_se)
13778c2ecf20Sopenharmony_ci		__enqueue_rt_entity(rt_se, flags);
13788c2ecf20Sopenharmony_ci	enqueue_top_rt_rq(&rq->rt);
13798c2ecf20Sopenharmony_ci}
13808c2ecf20Sopenharmony_ci
13818c2ecf20Sopenharmony_cistatic void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
13828c2ecf20Sopenharmony_ci{
13838c2ecf20Sopenharmony_ci	struct rq *rq = rq_of_rt_se(rt_se);
13848c2ecf20Sopenharmony_ci
13858c2ecf20Sopenharmony_ci	dequeue_rt_stack(rt_se, flags);
13868c2ecf20Sopenharmony_ci
13878c2ecf20Sopenharmony_ci	for_each_sched_rt_entity(rt_se) {
13888c2ecf20Sopenharmony_ci		struct rt_rq *rt_rq = group_rt_rq(rt_se);
13898c2ecf20Sopenharmony_ci
13908c2ecf20Sopenharmony_ci		if (rt_rq && rt_rq->rt_nr_running)
13918c2ecf20Sopenharmony_ci			__enqueue_rt_entity(rt_se, flags);
13928c2ecf20Sopenharmony_ci	}
13938c2ecf20Sopenharmony_ci	enqueue_top_rt_rq(&rq->rt);
13948c2ecf20Sopenharmony_ci}
13958c2ecf20Sopenharmony_ci
13968c2ecf20Sopenharmony_ci/*
13978c2ecf20Sopenharmony_ci * Adding/removing a task to/from a priority array:
13988c2ecf20Sopenharmony_ci */
13998c2ecf20Sopenharmony_cistatic void
14008c2ecf20Sopenharmony_cienqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
14018c2ecf20Sopenharmony_ci{
14028c2ecf20Sopenharmony_ci	struct sched_rt_entity *rt_se = &p->rt;
14038c2ecf20Sopenharmony_ci
14048c2ecf20Sopenharmony_ci	if (flags & ENQUEUE_WAKEUP)
14058c2ecf20Sopenharmony_ci		rt_se->timeout = 0;
14068c2ecf20Sopenharmony_ci
14078c2ecf20Sopenharmony_ci	enqueue_rt_entity(rt_se, flags);
14088c2ecf20Sopenharmony_ci	walt_inc_cumulative_runnable_avg(rq, p);
14098c2ecf20Sopenharmony_ci
14108c2ecf20Sopenharmony_ci	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
14118c2ecf20Sopenharmony_ci		enqueue_pushable_task(rq, p);
14128c2ecf20Sopenharmony_ci}
14138c2ecf20Sopenharmony_ci
14148c2ecf20Sopenharmony_cistatic void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
14158c2ecf20Sopenharmony_ci{
14168c2ecf20Sopenharmony_ci	struct sched_rt_entity *rt_se = &p->rt;
14178c2ecf20Sopenharmony_ci
14188c2ecf20Sopenharmony_ci	update_curr_rt(rq);
14198c2ecf20Sopenharmony_ci	dequeue_rt_entity(rt_se, flags);
14208c2ecf20Sopenharmony_ci	walt_dec_cumulative_runnable_avg(rq, p);
14218c2ecf20Sopenharmony_ci
14228c2ecf20Sopenharmony_ci	dequeue_pushable_task(rq, p);
14238c2ecf20Sopenharmony_ci}
14248c2ecf20Sopenharmony_ci
14258c2ecf20Sopenharmony_ci/*
14268c2ecf20Sopenharmony_ci * Put the task at the head or the tail of the run list, without the
14278c2ecf20Sopenharmony_ci * overhead of a dequeue followed by an enqueue.
14288c2ecf20Sopenharmony_ci */
14298c2ecf20Sopenharmony_cistatic void
14308c2ecf20Sopenharmony_cirequeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
14318c2ecf20Sopenharmony_ci{
14328c2ecf20Sopenharmony_ci	if (on_rt_rq(rt_se)) {
14338c2ecf20Sopenharmony_ci		struct rt_prio_array *array = &rt_rq->active;
14348c2ecf20Sopenharmony_ci		struct list_head *queue = array->queue + rt_se_prio(rt_se);
14358c2ecf20Sopenharmony_ci
14368c2ecf20Sopenharmony_ci		if (head)
14378c2ecf20Sopenharmony_ci			list_move(&rt_se->run_list, queue);
14388c2ecf20Sopenharmony_ci		else
14398c2ecf20Sopenharmony_ci			list_move_tail(&rt_se->run_list, queue);
14408c2ecf20Sopenharmony_ci	}
14418c2ecf20Sopenharmony_ci}
14428c2ecf20Sopenharmony_ci
14438c2ecf20Sopenharmony_cistatic void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
14448c2ecf20Sopenharmony_ci{
14458c2ecf20Sopenharmony_ci	struct sched_rt_entity *rt_se = &p->rt;
14468c2ecf20Sopenharmony_ci	struct rt_rq *rt_rq;
14478c2ecf20Sopenharmony_ci
14488c2ecf20Sopenharmony_ci	for_each_sched_rt_entity(rt_se) {
14498c2ecf20Sopenharmony_ci		rt_rq = rt_rq_of_se(rt_se);
14508c2ecf20Sopenharmony_ci		requeue_rt_entity(rt_rq, rt_se, head);
14518c2ecf20Sopenharmony_ci	}
14528c2ecf20Sopenharmony_ci}
14538c2ecf20Sopenharmony_ci
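/* sched_yield() for an RT task: rotate it to the tail of its priority queue. */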
14548c2ecf20Sopenharmony_cistatic void yield_task_rt(struct rq *rq)
14558c2ecf20Sopenharmony_ci{
14568c2ecf20Sopenharmony_ci	requeue_task_rt(rq, rq->curr, 0);
14578c2ecf20Sopenharmony_ci}
14588c2ecf20Sopenharmony_ci
14598c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
14608c2ecf20Sopenharmony_cistatic int find_lowest_rq(struct task_struct *task);
14618c2ecf20Sopenharmony_ci
14628c2ecf20Sopenharmony_cistatic int
14638c2ecf20Sopenharmony_ciselect_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
14648c2ecf20Sopenharmony_ci{
14658c2ecf20Sopenharmony_ci	struct task_struct *curr;
14668c2ecf20Sopenharmony_ci	struct rq *rq;
14678c2ecf20Sopenharmony_ci	bool test;
14688c2ecf20Sopenharmony_ci
14698c2ecf20Sopenharmony_ci	/* For anything but wake ups, just return the task_cpu */
14708c2ecf20Sopenharmony_ci	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
14718c2ecf20Sopenharmony_ci		goto out;
14728c2ecf20Sopenharmony_ci
14738c2ecf20Sopenharmony_ci	rq = cpu_rq(cpu);
14748c2ecf20Sopenharmony_ci
14758c2ecf20Sopenharmony_ci	rcu_read_lock();
14768c2ecf20Sopenharmony_ci	curr = READ_ONCE(rq->curr); /* unlocked access */
14778c2ecf20Sopenharmony_ci
14788c2ecf20Sopenharmony_ci	/*
14798c2ecf20Sopenharmony_ci	 * If the current task on @p's runqueue is an RT task, then
14808c2ecf20Sopenharmony_ci	 * try to see if we can wake this RT task up on another
14818c2ecf20Sopenharmony_ci	 * runqueue. Otherwise simply start this RT task
14828c2ecf20Sopenharmony_ci	 * on its current runqueue.
14838c2ecf20Sopenharmony_ci	 *
14848c2ecf20Sopenharmony_ci	 * We want to avoid overloading runqueues. If the woken
14858c2ecf20Sopenharmony_ci	 * task is a higher priority, then it will stay on this CPU
14868c2ecf20Sopenharmony_ci	 * and the lower prio task should be moved to another CPU.
14878c2ecf20Sopenharmony_ci	 * Even though this will probably make the lower prio task
14888c2ecf20Sopenharmony_ci	 * lose its cache, we do not want to bounce a higher task
14898c2ecf20Sopenharmony_ci	 * around just because it gave up its CPU, perhaps for a
14908c2ecf20Sopenharmony_ci	 * lock?
14918c2ecf20Sopenharmony_ci	 *
14928c2ecf20Sopenharmony_ci	 * For equal prio tasks, we just let the scheduler sort it out.
14938c2ecf20Sopenharmony_ci	 *
14948c2ecf20Sopenharmony_ci	 * Otherwise, just let it ride on the affined RQ and the
14958c2ecf20Sopenharmony_ci	 * post-schedule router will push the preempted task away
14968c2ecf20Sopenharmony_ci	 *
14978c2ecf20Sopenharmony_ci	 * This test is optimistic, if we get it wrong the load-balancer
14988c2ecf20Sopenharmony_ci	 * will have to sort it out.
14998c2ecf20Sopenharmony_ci	 *
15008c2ecf20Sopenharmony_ci	 * We take into account the capacity of the CPU to ensure it fits the
15018c2ecf20Sopenharmony_ci	 * requirement of the task - which is only important on heterogeneous
15028c2ecf20Sopenharmony_ci	 * systems like big.LITTLE.
15038c2ecf20Sopenharmony_ci	 */
15048c2ecf20Sopenharmony_ci	test = curr &&
15058c2ecf20Sopenharmony_ci	       unlikely(rt_task(curr)) &&
15068c2ecf20Sopenharmony_ci	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
15078c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
15088c2ecf20Sopenharmony_ci	test |= sysctl_sched_enable_rt_cas;
15098c2ecf20Sopenharmony_ci#endif
15108c2ecf20Sopenharmony_ci
15118c2ecf20Sopenharmony_ci	if (test || !rt_task_fits_capacity(p, cpu)) {
15128c2ecf20Sopenharmony_ci		int target = find_lowest_rq(p);
15138c2ecf20Sopenharmony_ci
15148c2ecf20Sopenharmony_ci		/*
15158c2ecf20Sopenharmony_ci		 * Bail out if we were forcing a migration to find a better
15168c2ecf20Sopenharmony_ci		 * fitting CPU but our search failed.
15178c2ecf20Sopenharmony_ci		 */
15188c2ecf20Sopenharmony_ci		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
15198c2ecf20Sopenharmony_ci			goto out_unlock;
15208c2ecf20Sopenharmony_ci
15218c2ecf20Sopenharmony_ci		/*
15228c2ecf20Sopenharmony_ci		 * Don't bother moving it if the destination CPU is
15238c2ecf20Sopenharmony_ci		 * not running a lower priority task.
15248c2ecf20Sopenharmony_ci		 */
15258c2ecf20Sopenharmony_ci		if (target != -1 && (
15268c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
15278c2ecf20Sopenharmony_ci		    sysctl_sched_enable_rt_cas ||
15288c2ecf20Sopenharmony_ci#endif
15298c2ecf20Sopenharmony_ci		    p->prio < cpu_rq(target)->rt.highest_prio.curr))
15308c2ecf20Sopenharmony_ci			cpu = target;
15318c2ecf20Sopenharmony_ci	}
15328c2ecf20Sopenharmony_ci
15338c2ecf20Sopenharmony_ciout_unlock:
15348c2ecf20Sopenharmony_ci	rcu_read_unlock();
15358c2ecf20Sopenharmony_ci
15368c2ecf20Sopenharmony_ciout:
15378c2ecf20Sopenharmony_ci	return cpu;
15388c2ecf20Sopenharmony_ci}
15398c2ecf20Sopenharmony_ci
15408c2ecf20Sopenharmony_cistatic void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
15418c2ecf20Sopenharmony_ci{
15428c2ecf20Sopenharmony_ci	/*
15438c2ecf20Sopenharmony_ci	 * Current can't be migrated, useless to reschedule,
15448c2ecf20Sopenharmony_ci	 * let's hope p can move out.
15458c2ecf20Sopenharmony_ci	 */
15468c2ecf20Sopenharmony_ci	if (rq->curr->nr_cpus_allowed == 1 ||
15478c2ecf20Sopenharmony_ci	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
15488c2ecf20Sopenharmony_ci		return;
15498c2ecf20Sopenharmony_ci
15508c2ecf20Sopenharmony_ci	/*
15518c2ecf20Sopenharmony_ci	 * p is migratable, so let's not schedule it and
15528c2ecf20Sopenharmony_ci	 * see if it is pushed or pulled somewhere else.
15538c2ecf20Sopenharmony_ci	 */
15548c2ecf20Sopenharmony_ci	if (p->nr_cpus_allowed != 1 &&
15558c2ecf20Sopenharmony_ci	    cpupri_find(&rq->rd->cpupri, p, NULL))
15568c2ecf20Sopenharmony_ci		return;
15578c2ecf20Sopenharmony_ci
15588c2ecf20Sopenharmony_ci	/*
15598c2ecf20Sopenharmony_ci	 * There appear to be other CPUs that can accept
15608c2ecf20Sopenharmony_ci	 * the current task but none can run 'p', so let's reschedule
15618c2ecf20Sopenharmony_ci	 * to try and push the current task away:
15628c2ecf20Sopenharmony_ci	 */
15638c2ecf20Sopenharmony_ci	requeue_task_rt(rq, p, 1);
15648c2ecf20Sopenharmony_ci	resched_curr(rq);
15658c2ecf20Sopenharmony_ci}
15668c2ecf20Sopenharmony_ci
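/*
 * Balance hook run before picking the next task: give this CPU a chance
 * to pull a higher-priority RT task from an overloaded runqueue.
 */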
15678c2ecf20Sopenharmony_cistatic int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
15688c2ecf20Sopenharmony_ci{
15698c2ecf20Sopenharmony_ci	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
15708c2ecf20Sopenharmony_ci		/*
15718c2ecf20Sopenharmony_ci		 * This is OK, because current is on_cpu, which avoids it being
15728c2ecf20Sopenharmony_ci		 * picked for load-balance and preemption/IRQs are still
15738c2ecf20Sopenharmony_ci		 * disabled avoiding further scheduler activity on it and we've
15748c2ecf20Sopenharmony_ci		 * not yet started the picking loop.
15758c2ecf20Sopenharmony_ci		 */
15768c2ecf20Sopenharmony_ci		rq_unpin_lock(rq, rf);
15778c2ecf20Sopenharmony_ci		pull_rt_task(rq);
15788c2ecf20Sopenharmony_ci		rq_repin_lock(rq, rf);
15798c2ecf20Sopenharmony_ci	}
15808c2ecf20Sopenharmony_ci
15818c2ecf20Sopenharmony_ci	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
15828c2ecf20Sopenharmony_ci}
15838c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
15848c2ecf20Sopenharmony_ci
15858c2ecf20Sopenharmony_ci/*
15868c2ecf20Sopenharmony_ci * Preempt the current task with a newly woken task if needed:
15878c2ecf20Sopenharmony_ci */
15888c2ecf20Sopenharmony_cistatic void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
15898c2ecf20Sopenharmony_ci{
15908c2ecf20Sopenharmony_ci	if (p->prio < rq->curr->prio) {
15918c2ecf20Sopenharmony_ci		resched_curr(rq);
15928c2ecf20Sopenharmony_ci		return;
15938c2ecf20Sopenharmony_ci	}
15948c2ecf20Sopenharmony_ci
15958c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
15968c2ecf20Sopenharmony_ci	/*
15978c2ecf20Sopenharmony_ci	 * If:
15988c2ecf20Sopenharmony_ci	 *
15998c2ecf20Sopenharmony_ci	 * - the newly woken task is of equal priority to the current task
16008c2ecf20Sopenharmony_ci	 * - the newly woken task is non-migratable while current is migratable
16018c2ecf20Sopenharmony_ci	 * - current will be preempted on the next reschedule
16028c2ecf20Sopenharmony_ci	 *
16038c2ecf20Sopenharmony_ci	 * we should check to see if current can readily move to a different
16048c2ecf20Sopenharmony_ci	 * cpu.  If so, we will reschedule to allow the push logic to try
16058c2ecf20Sopenharmony_ci	 * to move current somewhere else, making room for our non-migratable
16068c2ecf20Sopenharmony_ci	 * task.
16078c2ecf20Sopenharmony_ci	 */
16088c2ecf20Sopenharmony_ci	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
16098c2ecf20Sopenharmony_ci		check_preempt_equal_prio(rq, p);
16108c2ecf20Sopenharmony_ci#endif
16118c2ecf20Sopenharmony_ci}
16128c2ecf20Sopenharmony_ci
16138c2ecf20Sopenharmony_cistatic inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
16148c2ecf20Sopenharmony_ci{
16158c2ecf20Sopenharmony_ci	p->se.exec_start = rq_clock_task(rq);
16168c2ecf20Sopenharmony_ci
16178c2ecf20Sopenharmony_ci	/* The running task is never eligible for pushing */
16188c2ecf20Sopenharmony_ci	dequeue_pushable_task(rq, p);
16198c2ecf20Sopenharmony_ci
16208c2ecf20Sopenharmony_ci	if (!first)
16218c2ecf20Sopenharmony_ci		return;
16228c2ecf20Sopenharmony_ci
16238c2ecf20Sopenharmony_ci	/*
16248c2ecf20Sopenharmony_ci	 * If prev task was rt, put_prev_task() has already updated the
16258c2ecf20Sopenharmony_ci	 * utilization. We only care about the case where we start to
16268c2ecf20Sopenharmony_ci	 * schedule an RT task.
16278c2ecf20Sopenharmony_ci	 */
16288c2ecf20Sopenharmony_ci	if (rq->curr->sched_class != &rt_sched_class)
16298c2ecf20Sopenharmony_ci		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
16308c2ecf20Sopenharmony_ci
16318c2ecf20Sopenharmony_ci	rt_queue_push_tasks(rq);
16328c2ecf20Sopenharmony_ci}
16338c2ecf20Sopenharmony_ci
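/*
 * Pick the first entity on the highest-priority non-empty queue;
 * sched_find_first_bit() returns the lowest set bit, i.e. the best
 * priority, in O(1).
 */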
16348c2ecf20Sopenharmony_cistatic struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
16358c2ecf20Sopenharmony_ci{
16368c2ecf20Sopenharmony_ci	struct rt_prio_array *array = &rt_rq->active;
16378c2ecf20Sopenharmony_ci	struct sched_rt_entity *next = NULL;
16388c2ecf20Sopenharmony_ci	struct list_head *queue;
16398c2ecf20Sopenharmony_ci	int idx;
16408c2ecf20Sopenharmony_ci
16418c2ecf20Sopenharmony_ci	idx = sched_find_first_bit(array->bitmap);
16428c2ecf20Sopenharmony_ci	BUG_ON(idx >= MAX_RT_PRIO);
16438c2ecf20Sopenharmony_ci
16448c2ecf20Sopenharmony_ci	queue = array->queue + idx;
16458c2ecf20Sopenharmony_ci	if (SCHED_WARN_ON(list_empty(queue)))
16468c2ecf20Sopenharmony_ci		return NULL;
16478c2ecf20Sopenharmony_ci	next = list_entry(queue->next, struct sched_rt_entity, run_list);
16488c2ecf20Sopenharmony_ci
16498c2ecf20Sopenharmony_ci	return next;
16508c2ecf20Sopenharmony_ci}
16518c2ecf20Sopenharmony_ci
16528c2ecf20Sopenharmony_cistatic struct task_struct *_pick_next_task_rt(struct rq *rq)
16538c2ecf20Sopenharmony_ci{
16548c2ecf20Sopenharmony_ci	struct sched_rt_entity *rt_se;
16558c2ecf20Sopenharmony_ci	struct rt_rq *rt_rq  = &rq->rt;
16568c2ecf20Sopenharmony_ci
16578c2ecf20Sopenharmony_ci	do {
16588c2ecf20Sopenharmony_ci		rt_se = pick_next_rt_entity(rt_rq);
16598c2ecf20Sopenharmony_ci		if (unlikely(!rt_se))
16608c2ecf20Sopenharmony_ci			return NULL;
16618c2ecf20Sopenharmony_ci		rt_rq = group_rt_rq(rt_se);
16628c2ecf20Sopenharmony_ci	} while (rt_rq);
16638c2ecf20Sopenharmony_ci
16648c2ecf20Sopenharmony_ci	return rt_task_of(rt_se);
16658c2ecf20Sopenharmony_ci}
16668c2ecf20Sopenharmony_ci
16678c2ecf20Sopenharmony_cistatic struct task_struct *pick_next_task_rt(struct rq *rq)
16688c2ecf20Sopenharmony_ci{
16698c2ecf20Sopenharmony_ci	struct task_struct *p;
16708c2ecf20Sopenharmony_ci
16718c2ecf20Sopenharmony_ci	if (!sched_rt_runnable(rq))
16728c2ecf20Sopenharmony_ci		return NULL;
16738c2ecf20Sopenharmony_ci
16748c2ecf20Sopenharmony_ci	p = _pick_next_task_rt(rq);
16758c2ecf20Sopenharmony_ci	set_next_task_rt(rq, p, true);
16768c2ecf20Sopenharmony_ci	return p;
16778c2ecf20Sopenharmony_ci}
16788c2ecf20Sopenharmony_ci
16798c2ecf20Sopenharmony_cistatic void put_prev_task_rt(struct rq *rq, struct task_struct *p)
16808c2ecf20Sopenharmony_ci{
16818c2ecf20Sopenharmony_ci	update_curr_rt(rq);
16828c2ecf20Sopenharmony_ci
16838c2ecf20Sopenharmony_ci	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
16848c2ecf20Sopenharmony_ci
16858c2ecf20Sopenharmony_ci	/*
16868c2ecf20Sopenharmony_ci	 * The previous task needs to be made eligible for pushing
16878c2ecf20Sopenharmony_ci	 * if it is still active
16888c2ecf20Sopenharmony_ci	 */
16898c2ecf20Sopenharmony_ci	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
16908c2ecf20Sopenharmony_ci		enqueue_pushable_task(rq, p);
16918c2ecf20Sopenharmony_ci}
16928c2ecf20Sopenharmony_ci
16938c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
16948c2ecf20Sopenharmony_ci
16958c2ecf20Sopenharmony_ci/* Only try algorithms three times */
16968c2ecf20Sopenharmony_ci#define RT_MAX_TRIES 3
16978c2ecf20Sopenharmony_ci
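/* A task is a push/pull candidate if it isn't running and may run on @cpu. */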
16988c2ecf20Sopenharmony_cistatic int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
16998c2ecf20Sopenharmony_ci{
17008c2ecf20Sopenharmony_ci	if (!task_running(rq, p) &&
17018c2ecf20Sopenharmony_ci	    cpumask_test_cpu(cpu, p->cpus_ptr))
17028c2ecf20Sopenharmony_ci		return 1;
17038c2ecf20Sopenharmony_ci
17048c2ecf20Sopenharmony_ci	return 0;
17058c2ecf20Sopenharmony_ci}
17068c2ecf20Sopenharmony_ci
17078c2ecf20Sopenharmony_ci/*
17088c2ecf20Sopenharmony_ci * Return the highest-priority pushable task on the rq that is suitable
17098c2ecf20Sopenharmony_ci * for execution on @cpu, or NULL if there is none.
17108c2ecf20Sopenharmony_ci */
17118c2ecf20Sopenharmony_cistatic struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
17128c2ecf20Sopenharmony_ci{
17138c2ecf20Sopenharmony_ci	struct plist_head *head = &rq->rt.pushable_tasks;
17148c2ecf20Sopenharmony_ci	struct task_struct *p;
17158c2ecf20Sopenharmony_ci
17168c2ecf20Sopenharmony_ci	if (!has_pushable_tasks(rq))
17178c2ecf20Sopenharmony_ci		return NULL;
17188c2ecf20Sopenharmony_ci
17198c2ecf20Sopenharmony_ci	plist_for_each_entry(p, head, pushable_tasks) {
17208c2ecf20Sopenharmony_ci		if (pick_rt_task(rq, p, cpu))
17218c2ecf20Sopenharmony_ci			return p;
17228c2ecf20Sopenharmony_ci	}
17238c2ecf20Sopenharmony_ci
17248c2ecf20Sopenharmony_ci	return NULL;
17258c2ecf20Sopenharmony_ci}
17268c2ecf20Sopenharmony_ci
17278c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
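/*
 * Capacity-aware CPU selection for RT tasks: walk the asym-capacity
 * sched groups, prefer the smallest-capacity group that still fits the
 * task's uclamp-boosted utilization, then pick the least-utilized
 * allowed CPU inside it, falling back to the remaining lowest_mask CPUs.
 */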
17288c2ecf20Sopenharmony_cistatic int find_cas_cpu(struct sched_domain *sd,
17298c2ecf20Sopenharmony_ci		 struct task_struct *task, struct cpumask *lowest_mask)
17308c2ecf20Sopenharmony_ci{
17318c2ecf20Sopenharmony_ci	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
17328c2ecf20Sopenharmony_ci	struct sched_group *sg = NULL;
17338c2ecf20Sopenharmony_ci	struct sched_group *sg_target = NULL;
17348c2ecf20Sopenharmony_ci	struct sched_group *sg_backup = NULL;
17358c2ecf20Sopenharmony_ci	struct cpumask search_cpu, backup_search_cpu;
17368c2ecf20Sopenharmony_ci	int cpu = -1;
17378c2ecf20Sopenharmony_ci	int target_cpu = -1;
17388c2ecf20Sopenharmony_ci	unsigned long cpu_capacity;
17398c2ecf20Sopenharmony_ci	unsigned long boosted_tutil = uclamp_task_util(task, uclamp_eff_value(task, UCLAMP_MIN), uclamp_eff_value(task, UCLAMP_MAX));
17408c2ecf20Sopenharmony_ci	unsigned long target_capacity = ULONG_MAX;
17418c2ecf20Sopenharmony_ci	unsigned long util;
17428c2ecf20Sopenharmony_ci	unsigned long target_cpu_util = ULONG_MAX;
17438c2ecf20Sopenharmony_ci	int prev_cpu = task_cpu(task);
17448c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
17458c2ecf20Sopenharmony_ci	struct cpumask *rtg_target = NULL;
17468c2ecf20Sopenharmony_ci#endif
17478c2ecf20Sopenharmony_ci	bool boosted = uclamp_boosted(task);
17488c2ecf20Sopenharmony_ci
17498c2ecf20Sopenharmony_ci	if (!sysctl_sched_enable_rt_cas)
17508c2ecf20Sopenharmony_ci		return -1;
17518c2ecf20Sopenharmony_ci
17528c2ecf20Sopenharmony_ci	rcu_read_lock();
17538c2ecf20Sopenharmony_ci
17548c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
17558c2ecf20Sopenharmony_ci	rtg_target = find_rtg_target(task);
17568c2ecf20Sopenharmony_ci#endif
17578c2ecf20Sopenharmony_ci
17588c2ecf20Sopenharmony_ci	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, 0));
17598c2ecf20Sopenharmony_ci	if (!sd) {
17608c2ecf20Sopenharmony_ci		rcu_read_unlock();
17618c2ecf20Sopenharmony_ci		return -1;
17628c2ecf20Sopenharmony_ci	}
17638c2ecf20Sopenharmony_ci
17648c2ecf20Sopenharmony_ci	sg = sd->groups;
17658c2ecf20Sopenharmony_ci	do {
17668c2ecf20Sopenharmony_ci		if (!cpumask_intersects(lowest_mask, sched_group_span(sg)))
17678c2ecf20Sopenharmony_ci			continue;
17688c2ecf20Sopenharmony_ci
17698c2ecf20Sopenharmony_ci		if (boosted) {
17708c2ecf20Sopenharmony_ci			if (cpumask_test_cpu(rd->max_cap_orig_cpu,
17718c2ecf20Sopenharmony_ci					     sched_group_span(sg))) {
17728c2ecf20Sopenharmony_ci				sg_target = sg;
17738c2ecf20Sopenharmony_ci				break;
17748c2ecf20Sopenharmony_ci			}
17758c2ecf20Sopenharmony_ci		}
17768c2ecf20Sopenharmony_ci
17778c2ecf20Sopenharmony_ci		cpu = group_first_cpu(sg);
17788c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
17798c2ecf20Sopenharmony_ci		/* honor the rtg tasks */
17808c2ecf20Sopenharmony_ci		if (rtg_target) {
17818c2ecf20Sopenharmony_ci			if (cpumask_test_cpu(cpu, rtg_target)) {
17828c2ecf20Sopenharmony_ci				sg_target = sg;
17838c2ecf20Sopenharmony_ci				break;
17848c2ecf20Sopenharmony_ci			}
17858c2ecf20Sopenharmony_ci
17868c2ecf20Sopenharmony_ci			/* active LB or big_task favor cpus with more capacity */
17878c2ecf20Sopenharmony_ci			if (task->state == TASK_RUNNING || boosted) {
17888c2ecf20Sopenharmony_ci				if (capacity_orig_of(cpu) >
17898c2ecf20Sopenharmony_ci				    capacity_orig_of(cpumask_any(rtg_target))) {
17908c2ecf20Sopenharmony_ci					sg_target = sg;
17918c2ecf20Sopenharmony_ci					break;
17928c2ecf20Sopenharmony_ci				}
17938c2ecf20Sopenharmony_ci
17948c2ecf20Sopenharmony_ci				sg_backup = sg;
17958c2ecf20Sopenharmony_ci				continue;
17968c2ecf20Sopenharmony_ci			}
17978c2ecf20Sopenharmony_ci		}
17988c2ecf20Sopenharmony_ci#endif
17998c2ecf20Sopenharmony_ci		/*
18008c2ecf20Sopenharmony_ci		 * 1. add margin to support task migration
18018c2ecf20Sopenharmony_ci		 * 2. if task_util is higher than every cpu's capacity, make
18028c2ecf20Sopenharmony_ci		 * sure sg_backup keeps the group with the most powerful cpus
18038c2ecf20Sopenharmony_ci		 */
18048c2ecf20Sopenharmony_ci		if (!rt_task_fits_capacity(task, cpu)) {
18058c2ecf20Sopenharmony_ci			sg_backup = sg;
18068c2ecf20Sopenharmony_ci			continue;
18078c2ecf20Sopenharmony_ci		}
18088c2ecf20Sopenharmony_ci
18098c2ecf20Sopenharmony_ci		/* support task boost */
18108c2ecf20Sopenharmony_ci		cpu_capacity = capacity_orig_of(cpu);
18118c2ecf20Sopenharmony_ci		if (boosted_tutil > cpu_capacity) {
18128c2ecf20Sopenharmony_ci			sg_backup = sg;
18138c2ecf20Sopenharmony_ci			continue;
18148c2ecf20Sopenharmony_ci		}
18158c2ecf20Sopenharmony_ci
18168c2ecf20Sopenharmony_ci		/* sg_target: select the sg with the smallest capacity */
18178c2ecf20Sopenharmony_ci		if (cpu_capacity < target_capacity) {
18188c2ecf20Sopenharmony_ci			target_capacity = cpu_capacity;
18198c2ecf20Sopenharmony_ci			sg_target = sg;
18208c2ecf20Sopenharmony_ci		}
18218c2ecf20Sopenharmony_ci	} while (sg = sg->next, sg != sd->groups);
18228c2ecf20Sopenharmony_ci
18238c2ecf20Sopenharmony_ci	if (!sg_target)
18248c2ecf20Sopenharmony_ci		sg_target = sg_backup;
18258c2ecf20Sopenharmony_ci
18268c2ecf20Sopenharmony_ci	if (sg_target) {
18278c2ecf20Sopenharmony_ci		cpumask_and(&search_cpu, lowest_mask, sched_group_span(sg_target));
18288c2ecf20Sopenharmony_ci		cpumask_copy(&backup_search_cpu, lowest_mask);
18298c2ecf20Sopenharmony_ci		cpumask_andnot(&backup_search_cpu, &backup_search_cpu, &search_cpu);
18308c2ecf20Sopenharmony_ci	} else {
18318c2ecf20Sopenharmony_ci		cpumask_copy(&search_cpu, lowest_mask);
18328c2ecf20Sopenharmony_ci		cpumask_clear(&backup_search_cpu);
18338c2ecf20Sopenharmony_ci	}
18348c2ecf20Sopenharmony_ci
18358c2ecf20Sopenharmony_ciretry:
18368c2ecf20Sopenharmony_ci	cpu = cpumask_first(&search_cpu);
18378c2ecf20Sopenharmony_ci	do {
18388c2ecf20Sopenharmony_ci		trace_sched_find_cas_cpu_each(task, cpu, target_cpu,
18398c2ecf20Sopenharmony_ci			cpu_isolated(cpu),
18408c2ecf20Sopenharmony_ci			idle_cpu(cpu), boosted_tutil, cpu_util(cpu),
18418c2ecf20Sopenharmony_ci			capacity_orig_of(cpu));
18428c2ecf20Sopenharmony_ci
18438c2ecf20Sopenharmony_ci		if (cpu_isolated(cpu))
18448c2ecf20Sopenharmony_ci			continue;
18458c2ecf20Sopenharmony_ci
18468c2ecf20Sopenharmony_ci		if (!cpumask_test_cpu(cpu, task->cpus_ptr))
18478c2ecf20Sopenharmony_ci			continue;
18488c2ecf20Sopenharmony_ci
18498c2ecf20Sopenharmony_ci		/* find best cpu with smallest max_capacity */
18508c2ecf20Sopenharmony_ci		if (target_cpu != -1 &&
18518c2ecf20Sopenharmony_ci		    capacity_orig_of(cpu) > capacity_orig_of(target_cpu))
18528c2ecf20Sopenharmony_ci			continue;
18538c2ecf20Sopenharmony_ci
18548c2ecf20Sopenharmony_ci		util = cpu_util(cpu);
18558c2ecf20Sopenharmony_ci
18568c2ecf20Sopenharmony_ci		/* Find the least loaded CPU */
18578c2ecf20Sopenharmony_ci		if (util > target_cpu_util)
18588c2ecf20Sopenharmony_ci			continue;
18598c2ecf20Sopenharmony_ci
18608c2ecf20Sopenharmony_ci		/*
18618c2ecf20Sopenharmony_ci		 * If the previous CPU has the same load, keep it as
18628c2ecf20Sopenharmony_ci		 * target_cpu
18638c2ecf20Sopenharmony_ci		 */
18648c2ecf20Sopenharmony_ci		if (target_cpu_util == util && target_cpu == prev_cpu)
18658c2ecf20Sopenharmony_ci			continue;
18668c2ecf20Sopenharmony_ci
18678c2ecf20Sopenharmony_ci		/*
18688c2ecf20Sopenharmony_ci		 * This candidate passed every filter: adopt it as the new
18698c2ecf20Sopenharmony_ci		 * target (utilization ties were already resolved above in
18708c2ecf20Sopenharmony_ci		 * favor of the previous CPU).
18718c2ecf20Sopenharmony_ci		 */
18728c2ecf20Sopenharmony_ci		target_cpu_util = util;
18738c2ecf20Sopenharmony_ci		target_cpu = cpu;
18748c2ecf20Sopenharmony_ci	} while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);
18758c2ecf20Sopenharmony_ci
18768c2ecf20Sopenharmony_ci	if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) {
18778c2ecf20Sopenharmony_ci		goto done;
18788c2ecf20Sopenharmony_ci	} else if (!cpumask_empty(&backup_search_cpu)) {
18798c2ecf20Sopenharmony_ci		cpumask_copy(&search_cpu, &backup_search_cpu);
18808c2ecf20Sopenharmony_ci		cpumask_clear(&backup_search_cpu);
18818c2ecf20Sopenharmony_ci		goto retry;
18828c2ecf20Sopenharmony_ci	}
18838c2ecf20Sopenharmony_ci
18848c2ecf20Sopenharmony_cidone:
18858c2ecf20Sopenharmony_ci	trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu);
18868c2ecf20Sopenharmony_ci	rcu_read_unlock();
18878c2ecf20Sopenharmony_ci	return target_cpu;
18888c2ecf20Sopenharmony_ci}
18898c2ecf20Sopenharmony_ci#endif
18908c2ecf20Sopenharmony_ci
18918c2ecf20Sopenharmony_cistatic DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
18928c2ecf20Sopenharmony_ci
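/*
 * Find a CPU running at a lower priority where @task could run,
 * preferring cache-hot and topologically close candidates.
 */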
18938c2ecf20Sopenharmony_cistatic int find_lowest_rq(struct task_struct *task)
18948c2ecf20Sopenharmony_ci{
18958c2ecf20Sopenharmony_ci	struct sched_domain *sd = NULL;
18968c2ecf20Sopenharmony_ci	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
18978c2ecf20Sopenharmony_ci	int this_cpu = smp_processor_id();
18988c2ecf20Sopenharmony_ci	int cpu      = task_cpu(task);
18998c2ecf20Sopenharmony_ci	int ret;
19008c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
19018c2ecf20Sopenharmony_ci	int cas_cpu;
19028c2ecf20Sopenharmony_ci#endif
19038c2ecf20Sopenharmony_ci
19048c2ecf20Sopenharmony_ci	/* Make sure the mask is initialized first */
19058c2ecf20Sopenharmony_ci	if (unlikely(!lowest_mask))
19068c2ecf20Sopenharmony_ci		return -1;
19078c2ecf20Sopenharmony_ci
19088c2ecf20Sopenharmony_ci	if (task->nr_cpus_allowed == 1)
19098c2ecf20Sopenharmony_ci		return -1; /* No other targets possible */
19108c2ecf20Sopenharmony_ci
19118c2ecf20Sopenharmony_ci	/*
19128c2ecf20Sopenharmony_ci	 * If we're on asym system ensure we consider the different capacities
19138c2ecf20Sopenharmony_ci	 * of the CPUs when searching for the lowest_mask.
19148c2ecf20Sopenharmony_ci	 */
19158c2ecf20Sopenharmony_ci	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
19168c2ecf20Sopenharmony_ci
19178c2ecf20Sopenharmony_ci		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
19188c2ecf20Sopenharmony_ci					  task, lowest_mask,
19198c2ecf20Sopenharmony_ci					  rt_task_fits_capacity);
19208c2ecf20Sopenharmony_ci	} else {
19218c2ecf20Sopenharmony_ci
19228c2ecf20Sopenharmony_ci		ret = cpupri_find(&task_rq(task)->rd->cpupri,
19238c2ecf20Sopenharmony_ci				  task, lowest_mask);
19248c2ecf20Sopenharmony_ci	}
19258c2ecf20Sopenharmony_ci
19268c2ecf20Sopenharmony_ci	if (!ret)
19278c2ecf20Sopenharmony_ci		return -1; /* No targets found */
19288c2ecf20Sopenharmony_ci
19298c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
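	/* sd is only a pass-through here; find_cas_cpu() re-derives it. */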
19308c2ecf20Sopenharmony_ci	cas_cpu = find_cas_cpu(sd, task, lowest_mask);
19318c2ecf20Sopenharmony_ci	if (cas_cpu != -1)
19328c2ecf20Sopenharmony_ci		return cas_cpu;
19338c2ecf20Sopenharmony_ci#endif
19348c2ecf20Sopenharmony_ci
19358c2ecf20Sopenharmony_ci	/*
19368c2ecf20Sopenharmony_ci	 * At this point we have built a mask of CPUs representing the
19378c2ecf20Sopenharmony_ci	 * lowest priority tasks in the system.  Now we want to elect
19388c2ecf20Sopenharmony_ci	 * the best one based on our affinity and topology.
19398c2ecf20Sopenharmony_ci	 *
19408c2ecf20Sopenharmony_ci	 * We prioritize the last CPU that the task executed on since
19418c2ecf20Sopenharmony_ci	 * it is most likely cache-hot in that location.
19428c2ecf20Sopenharmony_ci	 */
19438c2ecf20Sopenharmony_ci	if (cpumask_test_cpu(cpu, lowest_mask))
19448c2ecf20Sopenharmony_ci		return cpu;
19458c2ecf20Sopenharmony_ci
19468c2ecf20Sopenharmony_ci	/*
19478c2ecf20Sopenharmony_ci	 * Otherwise, we consult the sched_domains span maps to figure
19488c2ecf20Sopenharmony_ci	 * out which CPU is logically closest to our hot cache data.
19498c2ecf20Sopenharmony_ci	 */
19508c2ecf20Sopenharmony_ci	if (!cpumask_test_cpu(this_cpu, lowest_mask))
19518c2ecf20Sopenharmony_ci		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
19528c2ecf20Sopenharmony_ci
19538c2ecf20Sopenharmony_ci	rcu_read_lock();
19548c2ecf20Sopenharmony_ci	for_each_domain(cpu, sd) {
19558c2ecf20Sopenharmony_ci		if (sd->flags & SD_WAKE_AFFINE) {
19568c2ecf20Sopenharmony_ci			int best_cpu;
19578c2ecf20Sopenharmony_ci
19588c2ecf20Sopenharmony_ci			/*
19598c2ecf20Sopenharmony_ci			 * "this_cpu" is cheaper to preempt than a
19608c2ecf20Sopenharmony_ci			 * remote processor.
19618c2ecf20Sopenharmony_ci			 */
19628c2ecf20Sopenharmony_ci			if (this_cpu != -1 &&
19638c2ecf20Sopenharmony_ci			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
19648c2ecf20Sopenharmony_ci				rcu_read_unlock();
19658c2ecf20Sopenharmony_ci				return this_cpu;
19668c2ecf20Sopenharmony_ci			}
19678c2ecf20Sopenharmony_ci
19688c2ecf20Sopenharmony_ci			best_cpu = cpumask_first_and(lowest_mask,
19698c2ecf20Sopenharmony_ci						     sched_domain_span(sd));
19708c2ecf20Sopenharmony_ci			if (best_cpu < nr_cpu_ids) {
19718c2ecf20Sopenharmony_ci				rcu_read_unlock();
19728c2ecf20Sopenharmony_ci				return best_cpu;
19738c2ecf20Sopenharmony_ci			}
19748c2ecf20Sopenharmony_ci		}
19758c2ecf20Sopenharmony_ci	}
19768c2ecf20Sopenharmony_ci	rcu_read_unlock();
19778c2ecf20Sopenharmony_ci
19788c2ecf20Sopenharmony_ci	/*
19798c2ecf20Sopenharmony_ci	 * And finally, if there were no matches within the domains
19808c2ecf20Sopenharmony_ci	 * just give the caller *something* to work with from the compatible
19818c2ecf20Sopenharmony_ci	 * locations.
19828c2ecf20Sopenharmony_ci	 */
19838c2ecf20Sopenharmony_ci	if (this_cpu != -1)
19848c2ecf20Sopenharmony_ci		return this_cpu;
19858c2ecf20Sopenharmony_ci
19868c2ecf20Sopenharmony_ci	cpu = cpumask_any(lowest_mask);
19878c2ecf20Sopenharmony_ci	if (cpu < nr_cpu_ids)
19888c2ecf20Sopenharmony_ci		return cpu;
19898c2ecf20Sopenharmony_ci
19908c2ecf20Sopenharmony_ci	return -1;
19918c2ecf20Sopenharmony_ci}
19928c2ecf20Sopenharmony_ci
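/*
 * The pushable list is a plist ordered by priority, so its first entry
 * is always the best candidate to push away.
 */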
19938c2ecf20Sopenharmony_cistatic struct task_struct *pick_next_pushable_task(struct rq *rq)
19948c2ecf20Sopenharmony_ci{
19958c2ecf20Sopenharmony_ci	struct task_struct *p;
19968c2ecf20Sopenharmony_ci
19978c2ecf20Sopenharmony_ci	if (!has_pushable_tasks(rq))
19988c2ecf20Sopenharmony_ci		return NULL;
19998c2ecf20Sopenharmony_ci
20008c2ecf20Sopenharmony_ci	p = plist_first_entry(&rq->rt.pushable_tasks,
20018c2ecf20Sopenharmony_ci			      struct task_struct, pushable_tasks);
20028c2ecf20Sopenharmony_ci
20038c2ecf20Sopenharmony_ci	BUG_ON(rq->cpu != task_cpu(p));
20048c2ecf20Sopenharmony_ci	BUG_ON(task_current(rq, p));
20058c2ecf20Sopenharmony_ci	BUG_ON(p->nr_cpus_allowed <= 1);
20068c2ecf20Sopenharmony_ci
20078c2ecf20Sopenharmony_ci	BUG_ON(!task_on_rq_queued(p));
20088c2ecf20Sopenharmony_ci	BUG_ON(!rt_task(p));
20098c2ecf20Sopenharmony_ci
20108c2ecf20Sopenharmony_ci	return p;
20118c2ecf20Sopenharmony_ci}
20128c2ecf20Sopenharmony_ci
20138c2ecf20Sopenharmony_ci/* Will lock the rq it finds */
20148c2ecf20Sopenharmony_cistatic struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
20158c2ecf20Sopenharmony_ci{
20168c2ecf20Sopenharmony_ci	struct rq *lowest_rq = NULL;
20178c2ecf20Sopenharmony_ci	int tries;
20188c2ecf20Sopenharmony_ci	int cpu;
20198c2ecf20Sopenharmony_ci
20208c2ecf20Sopenharmony_ci	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
20218c2ecf20Sopenharmony_ci		cpu = find_lowest_rq(task);
20228c2ecf20Sopenharmony_ci
20238c2ecf20Sopenharmony_ci		if ((cpu == -1) || (cpu == rq->cpu))
20248c2ecf20Sopenharmony_ci			break;
20258c2ecf20Sopenharmony_ci
20268c2ecf20Sopenharmony_ci		lowest_rq = cpu_rq(cpu);
20278c2ecf20Sopenharmony_ci
20288c2ecf20Sopenharmony_ci		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
20298c2ecf20Sopenharmony_ci			/*
20308c2ecf20Sopenharmony_ci			 * Target rq has tasks of equal or higher priority,
20318c2ecf20Sopenharmony_ci			 * retrying does not release any lock and is unlikely
20328c2ecf20Sopenharmony_ci			 * to yield a different result.
20338c2ecf20Sopenharmony_ci			 */
20348c2ecf20Sopenharmony_ci			lowest_rq = NULL;
20358c2ecf20Sopenharmony_ci			break;
20368c2ecf20Sopenharmony_ci		}
20378c2ecf20Sopenharmony_ci
20388c2ecf20Sopenharmony_ci		/* if the prio of this runqueue changed, try again */
20398c2ecf20Sopenharmony_ci		if (double_lock_balance(rq, lowest_rq)) {
20408c2ecf20Sopenharmony_ci			/*
20418c2ecf20Sopenharmony_ci			 * We had to unlock the run queue. In the
20428c2ecf20Sopenharmony_ci			 * meantime, the task could have migrated
20438c2ecf20Sopenharmony_ci			 * already or had its affinity changed.
20448c2ecf20Sopenharmony_ci			 */
20458c2ecf20Sopenharmony_ci			struct task_struct *next_task = pick_next_pushable_task(rq);
20468c2ecf20Sopenharmony_ci			if (unlikely(next_task != task ||
20478c2ecf20Sopenharmony_ci				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {
20488c2ecf20Sopenharmony_ci				double_unlock_balance(rq, lowest_rq);
20498c2ecf20Sopenharmony_ci				lowest_rq = NULL;
20508c2ecf20Sopenharmony_ci				break;
20518c2ecf20Sopenharmony_ci			}
20528c2ecf20Sopenharmony_ci		}
20538c2ecf20Sopenharmony_ci
20548c2ecf20Sopenharmony_ci		/* If this rq is still suitable use it. */
20558c2ecf20Sopenharmony_ci		if (lowest_rq->rt.highest_prio.curr > task->prio)
20568c2ecf20Sopenharmony_ci			break;
20578c2ecf20Sopenharmony_ci
20588c2ecf20Sopenharmony_ci		/* try again */
20598c2ecf20Sopenharmony_ci		double_unlock_balance(rq, lowest_rq);
20608c2ecf20Sopenharmony_ci		lowest_rq = NULL;
20618c2ecf20Sopenharmony_ci	}
20628c2ecf20Sopenharmony_ci
20638c2ecf20Sopenharmony_ci	return lowest_rq;
20648c2ecf20Sopenharmony_ci}
20658c2ecf20Sopenharmony_ci
20668c2ecf20Sopenharmony_ci/*
20678c2ecf20Sopenharmony_ci * If the current CPU has more than one RT task, see if the non-running
20688c2ecf20Sopenharmony_ci * task can migrate over to a CPU that is running a task
20698c2ecf20Sopenharmony_ci * of lesser priority.
20708c2ecf20Sopenharmony_ci */
20718c2ecf20Sopenharmony_cistatic int push_rt_task(struct rq *rq)
20728c2ecf20Sopenharmony_ci{
20738c2ecf20Sopenharmony_ci	struct task_struct *next_task;
20748c2ecf20Sopenharmony_ci	struct rq *lowest_rq;
20758c2ecf20Sopenharmony_ci	int ret = 0;
20768c2ecf20Sopenharmony_ci
20778c2ecf20Sopenharmony_ci	if (!rq->rt.overloaded)
20788c2ecf20Sopenharmony_ci		return 0;
20798c2ecf20Sopenharmony_ci
20808c2ecf20Sopenharmony_ci	next_task = pick_next_pushable_task(rq);
20818c2ecf20Sopenharmony_ci	if (!next_task)
20828c2ecf20Sopenharmony_ci		return 0;
20838c2ecf20Sopenharmony_ci
20848c2ecf20Sopenharmony_ciretry:
20858c2ecf20Sopenharmony_ci	if (WARN_ON(next_task == rq->curr))
20868c2ecf20Sopenharmony_ci		return 0;
20878c2ecf20Sopenharmony_ci
20888c2ecf20Sopenharmony_ci	/*
20898c2ecf20Sopenharmony_ci	 * It's possible that the next_task slipped in with a
20908c2ecf20Sopenharmony_ci	 * higher priority than current. If that's the case
20918c2ecf20Sopenharmony_ci	 * just reschedule current.
20928c2ecf20Sopenharmony_ci	 */
20938c2ecf20Sopenharmony_ci	if (unlikely(next_task->prio < rq->curr->prio)) {
20948c2ecf20Sopenharmony_ci		resched_curr(rq);
20958c2ecf20Sopenharmony_ci		return 0;
20968c2ecf20Sopenharmony_ci	}
20978c2ecf20Sopenharmony_ci
20988c2ecf20Sopenharmony_ci	/* We might release rq lock */
20998c2ecf20Sopenharmony_ci	get_task_struct(next_task);
21008c2ecf20Sopenharmony_ci
21018c2ecf20Sopenharmony_ci	/* find_lock_lowest_rq locks the rq if found */
21028c2ecf20Sopenharmony_ci	lowest_rq = find_lock_lowest_rq(next_task, rq);
21038c2ecf20Sopenharmony_ci	if (!lowest_rq) {
21048c2ecf20Sopenharmony_ci		struct task_struct *task;
21058c2ecf20Sopenharmony_ci		/*
21068c2ecf20Sopenharmony_ci		 * find_lock_lowest_rq releases rq->lock
21078c2ecf20Sopenharmony_ci		 * so it is possible that next_task has migrated.
21088c2ecf20Sopenharmony_ci		 *
21098c2ecf20Sopenharmony_ci		 * We need to make sure that the task is still on the same
21108c2ecf20Sopenharmony_ci		 * run-queue and is also still the next task eligible for
21118c2ecf20Sopenharmony_ci		 * pushing.
21128c2ecf20Sopenharmony_ci		 */
21138c2ecf20Sopenharmony_ci		task = pick_next_pushable_task(rq);
21148c2ecf20Sopenharmony_ci		if (task == next_task) {
21158c2ecf20Sopenharmony_ci			/*
21168c2ecf20Sopenharmony_ci			 * The task hasn't migrated, and is still the next
21178c2ecf20Sopenharmony_ci			 * eligible task, but we failed to find a run-queue
21188c2ecf20Sopenharmony_ci			 * to push it to.  Do not retry in this case, since
21198c2ecf20Sopenharmony_ci			 * other CPUs will pull from us when ready.
21208c2ecf20Sopenharmony_ci			 */
21218c2ecf20Sopenharmony_ci			goto out;
21228c2ecf20Sopenharmony_ci		}
21238c2ecf20Sopenharmony_ci
21248c2ecf20Sopenharmony_ci		if (!task)
21258c2ecf20Sopenharmony_ci			/* No more tasks, just exit */
21268c2ecf20Sopenharmony_ci			goto out;
21278c2ecf20Sopenharmony_ci
21288c2ecf20Sopenharmony_ci		/*
21298c2ecf20Sopenharmony_ci		 * Something has shifted, try again.
21308c2ecf20Sopenharmony_ci		 */
21318c2ecf20Sopenharmony_ci		put_task_struct(next_task);
21328c2ecf20Sopenharmony_ci		next_task = task;
21338c2ecf20Sopenharmony_ci		goto retry;
21348c2ecf20Sopenharmony_ci	}
21358c2ecf20Sopenharmony_ci
21368c2ecf20Sopenharmony_ci	deactivate_task(rq, next_task, 0);
21378c2ecf20Sopenharmony_ci	set_task_cpu(next_task, lowest_rq->cpu);
21388c2ecf20Sopenharmony_ci	activate_task(lowest_rq, next_task, 0);
21398c2ecf20Sopenharmony_ci	ret = 1;
21408c2ecf20Sopenharmony_ci
21418c2ecf20Sopenharmony_ci	resched_curr(lowest_rq);
21428c2ecf20Sopenharmony_ci
21438c2ecf20Sopenharmony_ci	double_unlock_balance(rq, lowest_rq);
21448c2ecf20Sopenharmony_ci
21458c2ecf20Sopenharmony_ciout:
21468c2ecf20Sopenharmony_ci	put_task_struct(next_task);
21478c2ecf20Sopenharmony_ci
21488c2ecf20Sopenharmony_ci	return ret;
21498c2ecf20Sopenharmony_ci}
21508c2ecf20Sopenharmony_ci
21518c2ecf20Sopenharmony_cistatic void push_rt_tasks(struct rq *rq)
21528c2ecf20Sopenharmony_ci{
21538c2ecf20Sopenharmony_ci	/* push_rt_task() will return true if it moved an RT task */
21548c2ecf20Sopenharmony_ci	while (push_rt_task(rq))
21558c2ecf20Sopenharmony_ci		;
21568c2ecf20Sopenharmony_ci}
21578c2ecf20Sopenharmony_ci
21588c2ecf20Sopenharmony_ci#ifdef HAVE_RT_PUSH_IPI
21598c2ecf20Sopenharmony_ci
21608c2ecf20Sopenharmony_ci/*
21618c2ecf20Sopenharmony_ci * When a high priority task schedules out from a CPU and a lower priority
21628c2ecf20Sopenharmony_ci * task is scheduled in, a check is made to see if there's any RT tasks
21638c2ecf20Sopenharmony_ci * on other CPUs that are waiting to run because a higher priority RT task
21648c2ecf20Sopenharmony_ci * is currently running on its CPU. In this case, the CPU with multiple RT
21658c2ecf20Sopenharmony_ci * tasks queued on it (overloaded) needs to be notified that a CPU has opened
21668c2ecf20Sopenharmony_ci * up that may be able to run one of its non-running queued RT tasks.
21678c2ecf20Sopenharmony_ci *
21688c2ecf20Sopenharmony_ci * All CPUs with overloaded RT tasks need to be notified as there is currently
21698c2ecf20Sopenharmony_ci * no way to know which of these CPUs have the highest priority task waiting
21708c2ecf20Sopenharmony_ci * to run. Instead of trying to take a spinlock on each of these CPUs,
21718c2ecf20Sopenharmony_ci * which has been shown to cause large latency when done on machines with
21728c2ecf20Sopenharmony_ci * many CPUs, an IPI is sent to the CPUs to have them push off the
21738c2ecf20Sopenharmony_ci * overloaded RT tasks waiting to run.
21748c2ecf20Sopenharmony_ci *
21758c2ecf20Sopenharmony_ci * Just sending an IPI to each of the CPUs is also an issue: on machines
21768c2ecf20Sopenharmony_ci * with a large CPU count this can cause an IPI storm on a CPU, especially
21778c2ecf20Sopenharmony_ci * if it's the only CPU with multiple RT tasks queued while a large number
21788c2ecf20Sopenharmony_ci * of CPUs are scheduling a lower priority task at the same time.
21798c2ecf20Sopenharmony_ci *
21808c2ecf20Sopenharmony_ci * Each root domain has its own irq work function that can iterate over
21818c2ecf20Sopenharmony_ci * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
21828c2ecf20Sopenharmony_ci * tasks must be checked whether one or many CPUs are lowering their
21838c2ecf20Sopenharmony_ci * priority, there's a single irq work iterator that will try to
21848c2ecf20Sopenharmony_ci * push off RT tasks that are waiting to run.
21858c2ecf20Sopenharmony_ci *
21868c2ecf20Sopenharmony_ci * When a CPU schedules a lower priority task, it will kick off the
21878c2ecf20Sopenharmony_ci * irq work iterator that will jump to each CPU with overloaded RT tasks.
21888c2ecf20Sopenharmony_ci * As it only takes the first CPU that schedules a lower priority task
21898c2ecf20Sopenharmony_ci * to start the process, the rto_start variable is incremented and if
21908c2ecf20Sopenharmony_ci * the atomic result is one, then that CPU will try to take the rto_lock.
21918c2ecf20Sopenharmony_ci * This prevents high contention on the lock as the process handles all
21928c2ecf20Sopenharmony_ci * CPUs scheduling lower priority tasks.
21938c2ecf20Sopenharmony_ci *
21948c2ecf20Sopenharmony_ci * All CPUs that are scheduling a lower priority task will increment the
21958c2ecf20Sopenharmony_ci * rto_loop_next variable. This will make sure that the irq work iterator
21968c2ecf20Sopenharmony_ci * checks all RT overloaded CPUs whenever a CPU schedules a new lower
21978c2ecf20Sopenharmony_ci * priority task, even if the iterator is in the middle of a scan. Incrementing
21988c2ecf20Sopenharmony_ci * the rto_loop_next will cause the iterator to perform another scan.
21998c2ecf20Sopenharmony_ci *
22008c2ecf20Sopenharmony_ci */
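/*
 * Illustrative sequence (a sketch, not from the source): CPUs A and B
 * are RT overloaded and CPU C schedules a lower-priority task. C bumps
 * rto_loop_next, wins rto_loop_start and queues rto_push_work on A;
 * A pushes what it can and forwards the IPI to B; B pushes and, seeing
 * no further CPU and no newer rto_loop_next, lets the chain die out.
 */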
22018c2ecf20Sopenharmony_cistatic int rto_next_cpu(struct root_domain *rd)
22028c2ecf20Sopenharmony_ci{
22038c2ecf20Sopenharmony_ci	int next;
22048c2ecf20Sopenharmony_ci	int cpu;
22058c2ecf20Sopenharmony_ci
22068c2ecf20Sopenharmony_ci	/*
22078c2ecf20Sopenharmony_ci	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
22088c2ecf20Sopenharmony_ci	 * rto_next_cpu() will simply return the first CPU found in
22098c2ecf20Sopenharmony_ci	 * the rto_mask.
22108c2ecf20Sopenharmony_ci	 *
22118c2ecf20Sopenharmony_ci	 * If rto_next_cpu() is called while rto_cpu is a valid CPU, it
22128c2ecf20Sopenharmony_ci	 * will return the next CPU found in the rto_mask.
22138c2ecf20Sopenharmony_ci	 *
22148c2ecf20Sopenharmony_ci	 * If there are no more CPUs left in the rto_mask, then a check is made
22158c2ecf20Sopenharmony_ci	 * against rto_loop and rto_loop_next. rto_loop is only updated with
22168c2ecf20Sopenharmony_ci	 * the rto_lock held, but any CPU may increment the rto_loop_next
22178c2ecf20Sopenharmony_ci	 * without any locking.
22188c2ecf20Sopenharmony_ci	 */
22198c2ecf20Sopenharmony_ci	for (;;) {
22208c2ecf20Sopenharmony_ci
22218c2ecf20Sopenharmony_ci		/* When rto_cpu is -1 this acts like cpumask_first() */
22228c2ecf20Sopenharmony_ci		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
22238c2ecf20Sopenharmony_ci
22248c2ecf20Sopenharmony_ci		rd->rto_cpu = cpu;
22258c2ecf20Sopenharmony_ci
22268c2ecf20Sopenharmony_ci		if (cpu < nr_cpu_ids)
22278c2ecf20Sopenharmony_ci			return cpu;
22288c2ecf20Sopenharmony_ci
22298c2ecf20Sopenharmony_ci		rd->rto_cpu = -1;
22308c2ecf20Sopenharmony_ci
22318c2ecf20Sopenharmony_ci		/*
22328c2ecf20Sopenharmony_ci		 * ACQUIRE ensures we see the @rto_mask changes
22338c2ecf20Sopenharmony_ci		 * made prior to the @next value observed.
22348c2ecf20Sopenharmony_ci		 *
22358c2ecf20Sopenharmony_ci		 * Matches WMB in rt_set_overload().
22368c2ecf20Sopenharmony_ci		 */
22378c2ecf20Sopenharmony_ci		next = atomic_read_acquire(&rd->rto_loop_next);
22388c2ecf20Sopenharmony_ci
22398c2ecf20Sopenharmony_ci		if (rd->rto_loop == next)
22408c2ecf20Sopenharmony_ci			break;
22418c2ecf20Sopenharmony_ci
22428c2ecf20Sopenharmony_ci		rd->rto_loop = next;
22438c2ecf20Sopenharmony_ci	}
22448c2ecf20Sopenharmony_ci
22458c2ecf20Sopenharmony_ci	return -1;
22468c2ecf20Sopenharmony_ci}
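
#if 0
/*
 * Stand-alone sketch of the rto_loop/rto_loop_next generation counter
 * used above, written with C11 atomics (illustrative only, excluded
 * from the build; all demo_* names are hypothetical). Producers may
 * bump "next" at any time; the scanner keeps rescanning until it
 * completes one full pass during which nobody bumped the counter.
 */
#include <stdatomic.h>

static atomic_int demo_next;	/* models rto_loop_next */
static int demo_loop;		/* models rto_loop, scanner-private */

static void demo_request_rescan(void)
{
	atomic_fetch_add_explicit(&demo_next, 1, memory_order_release);
}

static void demo_scan_until_quiescent(void (*scan_once)(void))
{
	for (;;) {
		int next;

		scan_once();	/* one pass over all overloaded CPUs */

		/* Pairs with the release in demo_request_rescan(). */
		next = atomic_load_explicit(&demo_next, memory_order_acquire);
		if (demo_loop == next)
			break;	/* nothing new arrived during the pass */
		demo_loop = next;
	}
}
#endif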
22478c2ecf20Sopenharmony_ci
22488c2ecf20Sopenharmony_cistatic inline bool rto_start_trylock(atomic_t *v)
22498c2ecf20Sopenharmony_ci{
22508c2ecf20Sopenharmony_ci	return !atomic_cmpxchg_acquire(v, 0, 1);
22518c2ecf20Sopenharmony_ci}
22528c2ecf20Sopenharmony_ci
22538c2ecf20Sopenharmony_cistatic inline void rto_start_unlock(atomic_t *v)
22548c2ecf20Sopenharmony_ci{
22558c2ecf20Sopenharmony_ci	atomic_set_release(v, 0);
22568c2ecf20Sopenharmony_ci}
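
#if 0
/*
 * Userspace model of the rto_start_trylock()/rto_start_unlock() pair
 * above, using C11 atomics (illustrative only, excluded from the build;
 * names are hypothetical). Only the caller that flips the flag from 0
 * to 1 wins; losers back off instead of spinning, which is what keeps
 * contention on rto_lock low.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool demo_start_trylock(atomic_int *v)
{
	int expected = 0;

	/* Acquire on success, like atomic_cmpxchg_acquire(v, 0, 1). */
	return atomic_compare_exchange_strong_explicit(v, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void demo_start_unlock(atomic_int *v)
{
	/* Release, like atomic_set_release(v, 0). */
	atomic_store_explicit(v, 0, memory_order_release);
}
#endif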
22578c2ecf20Sopenharmony_ci
22588c2ecf20Sopenharmony_cistatic void tell_cpu_to_push(struct rq *rq)
22598c2ecf20Sopenharmony_ci{
22608c2ecf20Sopenharmony_ci	int cpu = -1;
22618c2ecf20Sopenharmony_ci
22628c2ecf20Sopenharmony_ci	/* Keep the loop going if the IPI is currently active */
22638c2ecf20Sopenharmony_ci	atomic_inc(&rq->rd->rto_loop_next);
22648c2ecf20Sopenharmony_ci
22658c2ecf20Sopenharmony_ci	/* Only one CPU can initiate a loop at a time */
22668c2ecf20Sopenharmony_ci	if (!rto_start_trylock(&rq->rd->rto_loop_start))
22678c2ecf20Sopenharmony_ci		return;
22688c2ecf20Sopenharmony_ci
22698c2ecf20Sopenharmony_ci	raw_spin_lock(&rq->rd->rto_lock);
22708c2ecf20Sopenharmony_ci
22718c2ecf20Sopenharmony_ci	/*
22728c2ecf20Sopenharmony_ci	 * The rto_cpu is updated under the lock; if it has a valid CPU
22738c2ecf20Sopenharmony_ci	 * then the IPI is still running and will continue due to the
22748c2ecf20Sopenharmony_ci	 * update to loop_next, and nothing needs to be done here.
22758c2ecf20Sopenharmony_ci	 * Otherwise it is finishing up and an IPI needs to be sent.
22768c2ecf20Sopenharmony_ci	 */
22778c2ecf20Sopenharmony_ci	if (rq->rd->rto_cpu < 0)
22788c2ecf20Sopenharmony_ci		cpu = rto_next_cpu(rq->rd);
22798c2ecf20Sopenharmony_ci
22808c2ecf20Sopenharmony_ci	raw_spin_unlock(&rq->rd->rto_lock);
22818c2ecf20Sopenharmony_ci
22828c2ecf20Sopenharmony_ci	rto_start_unlock(&rq->rd->rto_loop_start);
22838c2ecf20Sopenharmony_ci
22848c2ecf20Sopenharmony_ci	if (cpu >= 0) {
22858c2ecf20Sopenharmony_ci		/* Make sure the rd does not get freed while pushing */
22868c2ecf20Sopenharmony_ci		sched_get_rd(rq->rd);
22878c2ecf20Sopenharmony_ci		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
22888c2ecf20Sopenharmony_ci	}
22898c2ecf20Sopenharmony_ci}
22908c2ecf20Sopenharmony_ci
22918c2ecf20Sopenharmony_ci/* Called from hardirq context */
22928c2ecf20Sopenharmony_civoid rto_push_irq_work_func(struct irq_work *work)
22938c2ecf20Sopenharmony_ci{
22948c2ecf20Sopenharmony_ci	struct root_domain *rd =
22958c2ecf20Sopenharmony_ci		container_of(work, struct root_domain, rto_push_work);
22968c2ecf20Sopenharmony_ci	struct rq *rq;
22978c2ecf20Sopenharmony_ci	int cpu;
22988c2ecf20Sopenharmony_ci
22998c2ecf20Sopenharmony_ci	rq = this_rq();
23008c2ecf20Sopenharmony_ci
23018c2ecf20Sopenharmony_ci	/*
23028c2ecf20Sopenharmony_ci	 * We do not need to grab the lock to check has_pushable_tasks:
23038c2ecf20Sopenharmony_ci	 * whenever it is updated, a check is made whether a push is possible.
23048c2ecf20Sopenharmony_ci	 */
23058c2ecf20Sopenharmony_ci	if (has_pushable_tasks(rq)) {
23068c2ecf20Sopenharmony_ci		raw_spin_lock(&rq->lock);
23078c2ecf20Sopenharmony_ci		push_rt_tasks(rq);
23088c2ecf20Sopenharmony_ci		raw_spin_unlock(&rq->lock);
23098c2ecf20Sopenharmony_ci	}
23108c2ecf20Sopenharmony_ci
23118c2ecf20Sopenharmony_ci	raw_spin_lock(&rd->rto_lock);
23128c2ecf20Sopenharmony_ci
23138c2ecf20Sopenharmony_ci	/* Pass the IPI to the next rt overloaded queue */
23148c2ecf20Sopenharmony_ci	cpu = rto_next_cpu(rd);
23158c2ecf20Sopenharmony_ci
23168c2ecf20Sopenharmony_ci	raw_spin_unlock(&rd->rto_lock);
23178c2ecf20Sopenharmony_ci
23188c2ecf20Sopenharmony_ci	if (cpu < 0) {
23198c2ecf20Sopenharmony_ci		sched_put_rd(rd);
23208c2ecf20Sopenharmony_ci		return;
23218c2ecf20Sopenharmony_ci	}
23228c2ecf20Sopenharmony_ci
23238c2ecf20Sopenharmony_ci	/* Try the next RT overloaded CPU */
23248c2ecf20Sopenharmony_ci	irq_work_queue_on(&rd->rto_push_work, cpu);
23258c2ecf20Sopenharmony_ci}
23268c2ecf20Sopenharmony_ci#endif /* HAVE_RT_PUSH_IPI */
23278c2ecf20Sopenharmony_ci
23288c2ecf20Sopenharmony_cistatic void pull_rt_task(struct rq *this_rq)
23298c2ecf20Sopenharmony_ci{
23308c2ecf20Sopenharmony_ci	int this_cpu = this_rq->cpu, cpu;
23318c2ecf20Sopenharmony_ci	bool resched = false;
23328c2ecf20Sopenharmony_ci	struct task_struct *p;
23338c2ecf20Sopenharmony_ci	struct rq *src_rq;
23348c2ecf20Sopenharmony_ci	int rt_overload_count = rt_overloaded(this_rq);
23358c2ecf20Sopenharmony_ci
23368c2ecf20Sopenharmony_ci	if (likely(!rt_overload_count))
23378c2ecf20Sopenharmony_ci		return;
23388c2ecf20Sopenharmony_ci
23398c2ecf20Sopenharmony_ci	/*
23408c2ecf20Sopenharmony_ci	 * Match the barrier from rt_set_overload(); this guarantees that if we
23418c2ecf20Sopenharmony_ci	 * see overloaded we must also see the rto_mask bit.
23428c2ecf20Sopenharmony_ci	 */
23438c2ecf20Sopenharmony_ci	smp_rmb();
23448c2ecf20Sopenharmony_ci
23458c2ecf20Sopenharmony_ci	/* If we are the only overloaded CPU, do nothing */
23468c2ecf20Sopenharmony_ci	if (rt_overload_count == 1 &&
23478c2ecf20Sopenharmony_ci	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
23488c2ecf20Sopenharmony_ci		return;
23498c2ecf20Sopenharmony_ci
23508c2ecf20Sopenharmony_ci#ifdef HAVE_RT_PUSH_IPI
23518c2ecf20Sopenharmony_ci	if (sched_feat(RT_PUSH_IPI)) {
23528c2ecf20Sopenharmony_ci		tell_cpu_to_push(this_rq);
23538c2ecf20Sopenharmony_ci		return;
23548c2ecf20Sopenharmony_ci	}
23558c2ecf20Sopenharmony_ci#endif
23568c2ecf20Sopenharmony_ci
23578c2ecf20Sopenharmony_ci	for_each_cpu(cpu, this_rq->rd->rto_mask) {
23588c2ecf20Sopenharmony_ci		if (this_cpu == cpu)
23598c2ecf20Sopenharmony_ci			continue;
23608c2ecf20Sopenharmony_ci
23618c2ecf20Sopenharmony_ci		src_rq = cpu_rq(cpu);
23628c2ecf20Sopenharmony_ci
23638c2ecf20Sopenharmony_ci		/*
23648c2ecf20Sopenharmony_ci		 * Don't bother taking the src_rq->lock if the next highest
23658c2ecf20Sopenharmony_ci		 * task is known to be lower-priority than our current task.
23668c2ecf20Sopenharmony_ci		 * This may look racy, but if this value is about to go
23678c2ecf20Sopenharmony_ci		 * logically higher, the src_rq will push this task away.
23688c2ecf20Sopenharmony_ci		 * And if it's going logically lower, we do not care.
23698c2ecf20Sopenharmony_ci		 */
23708c2ecf20Sopenharmony_ci		if (src_rq->rt.highest_prio.next >=
23718c2ecf20Sopenharmony_ci		    this_rq->rt.highest_prio.curr)
23728c2ecf20Sopenharmony_ci			continue;
23738c2ecf20Sopenharmony_ci
23748c2ecf20Sopenharmony_ci		/*
23758c2ecf20Sopenharmony_ci		 * We can potentially drop this_rq's lock in
23768c2ecf20Sopenharmony_ci		 * double_lock_balance, and another CPU could
23778c2ecf20Sopenharmony_ci		 * alter this_rq
23788c2ecf20Sopenharmony_ci		 */
23798c2ecf20Sopenharmony_ci		double_lock_balance(this_rq, src_rq);
23808c2ecf20Sopenharmony_ci
23818c2ecf20Sopenharmony_ci		/*
23828c2ecf20Sopenharmony_ci		 * We can only pull a task that is pushable
23838c2ecf20Sopenharmony_ci		 * on its rq, and no others.
23848c2ecf20Sopenharmony_ci		 */
23858c2ecf20Sopenharmony_ci		p = pick_highest_pushable_task(src_rq, this_cpu);
23868c2ecf20Sopenharmony_ci
23878c2ecf20Sopenharmony_ci		/*
23888c2ecf20Sopenharmony_ci		 * Do we have an RT task that preempts
23898c2ecf20Sopenharmony_ci		 * the to-be-scheduled task?
23908c2ecf20Sopenharmony_ci		 */
23918c2ecf20Sopenharmony_ci		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
23928c2ecf20Sopenharmony_ci			WARN_ON(p == src_rq->curr);
23938c2ecf20Sopenharmony_ci			WARN_ON(!task_on_rq_queued(p));
23948c2ecf20Sopenharmony_ci
23958c2ecf20Sopenharmony_ci			/*
23968c2ecf20Sopenharmony_ci			 * There's a chance that p is higher in priority
23978c2ecf20Sopenharmony_ci			 * than what's currently running on its CPU.
23988c2ecf20Sopenharmony_ci			 * This can happen when p is waking up and hasn't
23998c2ecf20Sopenharmony_ci			 * had a chance to schedule yet. We only pull
24008c2ecf20Sopenharmony_ci			 * p if it is lower in priority than the
24018c2ecf20Sopenharmony_ci			 * current task on its run queue.
24028c2ecf20Sopenharmony_ci			 */
24038c2ecf20Sopenharmony_ci			if (p->prio < src_rq->curr->prio)
24048c2ecf20Sopenharmony_ci				goto skip;
24058c2ecf20Sopenharmony_ci
24068c2ecf20Sopenharmony_ci			resched = true;
24078c2ecf20Sopenharmony_ci
24088c2ecf20Sopenharmony_ci			deactivate_task(src_rq, p, 0);
24098c2ecf20Sopenharmony_ci			set_task_cpu(p, this_cpu);
24108c2ecf20Sopenharmony_ci			activate_task(this_rq, p, 0);
24118c2ecf20Sopenharmony_ci			/*
24128c2ecf20Sopenharmony_ci			 * We continue with the search, just in
24138c2ecf20Sopenharmony_ci			 * case there's an even higher prio task
24148c2ecf20Sopenharmony_ci			 * in another runqueue. (low likelihood
24158c2ecf20Sopenharmony_ci			 * but possible)
24168c2ecf20Sopenharmony_ci			 */
24178c2ecf20Sopenharmony_ci		}
24188c2ecf20Sopenharmony_ciskip:
24198c2ecf20Sopenharmony_ci		double_unlock_balance(this_rq, src_rq);
24208c2ecf20Sopenharmony_ci	}
24218c2ecf20Sopenharmony_ci
24228c2ecf20Sopenharmony_ci	if (resched)
24238c2ecf20Sopenharmony_ci		resched_curr(this_rq);
24248c2ecf20Sopenharmony_ci}
24258c2ecf20Sopenharmony_ci
24268c2ecf20Sopenharmony_ci/*
24278c2ecf20Sopenharmony_ci * If we are not running and we are not going to reschedule soon, we should
24288c2ecf20Sopenharmony_ci * try to push tasks away now
24298c2ecf20Sopenharmony_ci */
24308c2ecf20Sopenharmony_cistatic void task_woken_rt(struct rq *rq, struct task_struct *p)
24318c2ecf20Sopenharmony_ci{
24328c2ecf20Sopenharmony_ci	bool need_to_push = !task_running(rq, p) &&
24338c2ecf20Sopenharmony_ci			    !test_tsk_need_resched(rq->curr) &&
24348c2ecf20Sopenharmony_ci			    p->nr_cpus_allowed > 1 &&
24358c2ecf20Sopenharmony_ci			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
24368c2ecf20Sopenharmony_ci			    (rq->curr->nr_cpus_allowed < 2 ||
24378c2ecf20Sopenharmony_ci			     rq->curr->prio <= p->prio);
24388c2ecf20Sopenharmony_ci
24398c2ecf20Sopenharmony_ci	if (need_to_push)
24408c2ecf20Sopenharmony_ci		push_rt_tasks(rq);
24418c2ecf20Sopenharmony_ci}
24428c2ecf20Sopenharmony_ci
24438c2ecf20Sopenharmony_ci/* Assumes rq->lock is held */
24448c2ecf20Sopenharmony_cistatic void rq_online_rt(struct rq *rq)
24458c2ecf20Sopenharmony_ci{
24468c2ecf20Sopenharmony_ci	if (rq->rt.overloaded)
24478c2ecf20Sopenharmony_ci		rt_set_overload(rq);
24488c2ecf20Sopenharmony_ci
24498c2ecf20Sopenharmony_ci	__enable_runtime(rq);
24508c2ecf20Sopenharmony_ci
24518c2ecf20Sopenharmony_ci	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
24528c2ecf20Sopenharmony_ci}
24538c2ecf20Sopenharmony_ci
24548c2ecf20Sopenharmony_ci/* Assumes rq->lock is held */
24558c2ecf20Sopenharmony_cistatic void rq_offline_rt(struct rq *rq)
24568c2ecf20Sopenharmony_ci{
24578c2ecf20Sopenharmony_ci	if (rq->rt.overloaded)
24588c2ecf20Sopenharmony_ci		rt_clear_overload(rq);
24598c2ecf20Sopenharmony_ci
24608c2ecf20Sopenharmony_ci	__disable_runtime(rq);
24618c2ecf20Sopenharmony_ci
24628c2ecf20Sopenharmony_ci	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
24638c2ecf20Sopenharmony_ci}
24648c2ecf20Sopenharmony_ci
24658c2ecf20Sopenharmony_ci/*
24668c2ecf20Sopenharmony_ci * When switching from the RT queue, we bring ourselves to a position
24678c2ecf20Sopenharmony_ci * where we might want to pull RT tasks from other runqueues.
24688c2ecf20Sopenharmony_ci */
24698c2ecf20Sopenharmony_cistatic void switched_from_rt(struct rq *rq, struct task_struct *p)
24708c2ecf20Sopenharmony_ci{
24718c2ecf20Sopenharmony_ci	/*
24728c2ecf20Sopenharmony_ci	 * If there are other RT tasks then we will reschedule
24738c2ecf20Sopenharmony_ci	 * and the scheduling of the other RT tasks will handle
24748c2ecf20Sopenharmony_ci	 * the balancing. But if we are the last RT task
24758c2ecf20Sopenharmony_ci	 * we may need to handle the pulling of RT tasks
24768c2ecf20Sopenharmony_ci	 * now.
24778c2ecf20Sopenharmony_ci	 */
24788c2ecf20Sopenharmony_ci	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
24798c2ecf20Sopenharmony_ci		cpu_isolated(cpu_of(rq)))
24808c2ecf20Sopenharmony_ci		return;
24818c2ecf20Sopenharmony_ci
24828c2ecf20Sopenharmony_ci	rt_queue_pull_task(rq);
24838c2ecf20Sopenharmony_ci}
24848c2ecf20Sopenharmony_ci
24858c2ecf20Sopenharmony_civoid __init init_sched_rt_class(void)
24868c2ecf20Sopenharmony_ci{
24878c2ecf20Sopenharmony_ci	unsigned int i;
24888c2ecf20Sopenharmony_ci
24898c2ecf20Sopenharmony_ci	for_each_possible_cpu(i) {
24908c2ecf20Sopenharmony_ci		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
24918c2ecf20Sopenharmony_ci					GFP_KERNEL, cpu_to_node(i));
24928c2ecf20Sopenharmony_ci	}
24938c2ecf20Sopenharmony_ci}
24948c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
24958c2ecf20Sopenharmony_ci
24968c2ecf20Sopenharmony_ci/*
24978c2ecf20Sopenharmony_ci * When switching a task to RT, we may overload the runqueue
24988c2ecf20Sopenharmony_ci * with RT tasks. In this case we try to push them off to
24998c2ecf20Sopenharmony_ci * other runqueues.
25008c2ecf20Sopenharmony_ci */
25018c2ecf20Sopenharmony_cistatic void switched_to_rt(struct rq *rq, struct task_struct *p)
25028c2ecf20Sopenharmony_ci{
25038c2ecf20Sopenharmony_ci	/*
25048c2ecf20Sopenharmony_ci	 * If we are running, update the avg_rt tracking, as the running time
25058c2ecf20Sopenharmony_ci	 * will from now on be accounted to it.
25068c2ecf20Sopenharmony_ci	 */
25078c2ecf20Sopenharmony_ci	if (task_current(rq, p)) {
25088c2ecf20Sopenharmony_ci		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
25098c2ecf20Sopenharmony_ci		return;
25108c2ecf20Sopenharmony_ci	}
25118c2ecf20Sopenharmony_ci
25128c2ecf20Sopenharmony_ci	/*
25138c2ecf20Sopenharmony_ci	 * If we are not running we may need to preempt the current
25148c2ecf20Sopenharmony_ci	 * running task. If that current running task is also an RT task
25158c2ecf20Sopenharmony_ci	 * then see if we can move to another run queue.
25168c2ecf20Sopenharmony_ci	 */
25178c2ecf20Sopenharmony_ci	if (task_on_rq_queued(p)) {
25188c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
25198c2ecf20Sopenharmony_ci		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
25208c2ecf20Sopenharmony_ci			rt_queue_push_tasks(rq);
25218c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
25228c2ecf20Sopenharmony_ci		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
25238c2ecf20Sopenharmony_ci			resched_curr(rq);
25248c2ecf20Sopenharmony_ci	}
25258c2ecf20Sopenharmony_ci}
25268c2ecf20Sopenharmony_ci
25278c2ecf20Sopenharmony_ci/*
25288c2ecf20Sopenharmony_ci * Priority of the task has changed. This may cause
25298c2ecf20Sopenharmony_ci * us to initiate a push or pull.
25308c2ecf20Sopenharmony_ci */
25318c2ecf20Sopenharmony_cistatic void
25328c2ecf20Sopenharmony_ciprio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
25338c2ecf20Sopenharmony_ci{
25348c2ecf20Sopenharmony_ci	if (!task_on_rq_queued(p))
25358c2ecf20Sopenharmony_ci		return;
25368c2ecf20Sopenharmony_ci
25378c2ecf20Sopenharmony_ci	if (rq->curr == p) {
25388c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
25398c2ecf20Sopenharmony_ci		/*
25408c2ecf20Sopenharmony_ci		 * If our priority decreases while running, we
25418c2ecf20Sopenharmony_ci		 * may need to pull tasks to this runqueue.
25428c2ecf20Sopenharmony_ci		 */
25438c2ecf20Sopenharmony_ci		if (oldprio < p->prio)
25448c2ecf20Sopenharmony_ci			rt_queue_pull_task(rq);
25458c2ecf20Sopenharmony_ci
25468c2ecf20Sopenharmony_ci		/*
25478c2ecf20Sopenharmony_ci		 * If there's a higher priority task waiting to run
25488c2ecf20Sopenharmony_ci		 * then reschedule.
25498c2ecf20Sopenharmony_ci		 */
25508c2ecf20Sopenharmony_ci		if (p->prio > rq->rt.highest_prio.curr)
25518c2ecf20Sopenharmony_ci			resched_curr(rq);
25528c2ecf20Sopenharmony_ci#else
25538c2ecf20Sopenharmony_ci		/* For UP simply resched on drop of prio */
25548c2ecf20Sopenharmony_ci		if (oldprio < p->prio)
25558c2ecf20Sopenharmony_ci			resched_curr(rq);
25568c2ecf20Sopenharmony_ci#endif /* CONFIG_SMP */
25578c2ecf20Sopenharmony_ci	} else {
25588c2ecf20Sopenharmony_ci		/*
25598c2ecf20Sopenharmony_ci		 * This task is not running, but if its priority is
25608c2ecf20Sopenharmony_ci		 * higher than that of the current running task,
25618c2ecf20Sopenharmony_ci		 * then reschedule.
25628c2ecf20Sopenharmony_ci		 */
25638c2ecf20Sopenharmony_ci		if (p->prio < rq->curr->prio)
25648c2ecf20Sopenharmony_ci			resched_curr(rq);
25658c2ecf20Sopenharmony_ci	}
25668c2ecf20Sopenharmony_ci}
25678c2ecf20Sopenharmony_ci
25688c2ecf20Sopenharmony_ci#ifdef CONFIG_POSIX_TIMERS
25698c2ecf20Sopenharmony_cistatic void watchdog(struct rq *rq, struct task_struct *p)
25708c2ecf20Sopenharmony_ci{
25718c2ecf20Sopenharmony_ci	unsigned long soft, hard;
25728c2ecf20Sopenharmony_ci
25738c2ecf20Sopenharmony_ci	/* max may change after cur was read; this will be fixed next tick */
25748c2ecf20Sopenharmony_ci	soft = task_rlimit(p, RLIMIT_RTTIME);
25758c2ecf20Sopenharmony_ci	hard = task_rlimit_max(p, RLIMIT_RTTIME);
25768c2ecf20Sopenharmony_ci
25778c2ecf20Sopenharmony_ci	if (soft != RLIM_INFINITY) {
25788c2ecf20Sopenharmony_ci		unsigned long next;
25798c2ecf20Sopenharmony_ci
25808c2ecf20Sopenharmony_ci		if (p->rt.watchdog_stamp != jiffies) {
25818c2ecf20Sopenharmony_ci			p->rt.timeout++;
25828c2ecf20Sopenharmony_ci			p->rt.watchdog_stamp = jiffies;
25838c2ecf20Sopenharmony_ci		}
25848c2ecf20Sopenharmony_ci
25858c2ecf20Sopenharmony_ci		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
25868c2ecf20Sopenharmony_ci		if (p->rt.timeout > next) {
25878c2ecf20Sopenharmony_ci			posix_cputimers_rt_watchdog(&p->posix_cputimers,
25888c2ecf20Sopenharmony_ci						    p->se.sum_exec_runtime);
25898c2ecf20Sopenharmony_ci		}
25908c2ecf20Sopenharmony_ci	}
25918c2ecf20Sopenharmony_ci}
25928c2ecf20Sopenharmony_ci#else
25938c2ecf20Sopenharmony_cistatic inline void watchdog(struct rq *rq, struct task_struct *p) { }
25948c2ecf20Sopenharmony_ci#endif
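
#if 0
/*
 * Worked example of the watchdog() tick arithmetic above (illustrative
 * only, excluded from the build; HZ == 250 is an assumed config). One
 * tick spans USEC_PER_SEC/HZ == 4000us, so a soft RLIMIT_RTTIME of
 * 950000us maps to DIV_ROUND_UP(950000, 4000) == 238 ticks before the
 * posix-cputimers watchdog is armed.
 */
#include <stdio.h>

#define DEMO_USEC_PER_SEC	1000000UL
#define DEMO_HZ			250UL		/* assumed */
#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long soft_us = 950000UL;	/* hypothetical rlimit */

	printf("watchdog threshold: %lu ticks\n",
	       DEMO_DIV_ROUND_UP(soft_us, DEMO_USEC_PER_SEC / DEMO_HZ));
	return 0;
}
#endif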
25958c2ecf20Sopenharmony_ci
25968c2ecf20Sopenharmony_ci/*
25978c2ecf20Sopenharmony_ci * scheduler tick hitting a task of our scheduling class.
25988c2ecf20Sopenharmony_ci *
25998c2ecf20Sopenharmony_ci * NOTE: This function can be called remotely by the tick offload that
26008c2ecf20Sopenharmony_ci * goes along full dynticks. Therefore no local assumption can be made
26018c2ecf20Sopenharmony_ci * and everything must be accessed through the @rq and @curr passed in
26028c2ecf20Sopenharmony_ci * parameters.
26038c2ecf20Sopenharmony_ci */
26048c2ecf20Sopenharmony_cistatic void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
26058c2ecf20Sopenharmony_ci{
26068c2ecf20Sopenharmony_ci	struct sched_rt_entity *rt_se = &p->rt;
26078c2ecf20Sopenharmony_ci
26088c2ecf20Sopenharmony_ci	update_curr_rt(rq);
26098c2ecf20Sopenharmony_ci	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
26108c2ecf20Sopenharmony_ci
26118c2ecf20Sopenharmony_ci	watchdog(rq, p);
26128c2ecf20Sopenharmony_ci
26138c2ecf20Sopenharmony_ci	/*
26148c2ecf20Sopenharmony_ci	 * RR tasks need a special form of timeslice management.
26158c2ecf20Sopenharmony_ci	 * FIFO tasks have no timeslices.
26168c2ecf20Sopenharmony_ci	 */
26178c2ecf20Sopenharmony_ci	if (p->policy != SCHED_RR)
26188c2ecf20Sopenharmony_ci		return;
26198c2ecf20Sopenharmony_ci
26208c2ecf20Sopenharmony_ci	if (--p->rt.time_slice)
26218c2ecf20Sopenharmony_ci		return;
26228c2ecf20Sopenharmony_ci
26238c2ecf20Sopenharmony_ci	p->rt.time_slice = sched_rr_timeslice;
26248c2ecf20Sopenharmony_ci
26258c2ecf20Sopenharmony_ci	/*
26268c2ecf20Sopenharmony_ci	 * Requeue to the end of the queue if we (and all of our ancestors)
26278c2ecf20Sopenharmony_ci	 * are not the only element on the queue
26288c2ecf20Sopenharmony_ci	 */
26298c2ecf20Sopenharmony_ci	for_each_sched_rt_entity(rt_se) {
26308c2ecf20Sopenharmony_ci		if (rt_se->run_list.prev != rt_se->run_list.next) {
26318c2ecf20Sopenharmony_ci			requeue_task_rt(rq, p, 0);
26328c2ecf20Sopenharmony_ci			resched_curr(rq);
26338c2ecf20Sopenharmony_ci			return;
26348c2ecf20Sopenharmony_ci		}
26358c2ecf20Sopenharmony_ci	}
26368c2ecf20Sopenharmony_ci}
26378c2ecf20Sopenharmony_ci
26388c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_ACTIVE_LB
26398c2ecf20Sopenharmony_cistatic int rt_active_load_balance_cpu_stop(void *data)
26408c2ecf20Sopenharmony_ci{
26418c2ecf20Sopenharmony_ci	struct rq *busiest_rq = data;
26428c2ecf20Sopenharmony_ci	struct task_struct *next_task = busiest_rq->rt_push_task;
26438c2ecf20Sopenharmony_ci	struct rq *lowest_rq = NULL;
26448c2ecf20Sopenharmony_ci	unsigned long flags;
26458c2ecf20Sopenharmony_ci
26468c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&busiest_rq->lock, flags);
26478c2ecf20Sopenharmony_ci	busiest_rq->rt_active_balance = 0;
26488c2ecf20Sopenharmony_ci
26498c2ecf20Sopenharmony_ci	if (!task_on_rq_queued(next_task) ||
26508c2ecf20Sopenharmony_ci	    task_cpu(next_task) != cpu_of(busiest_rq))
26518c2ecf20Sopenharmony_ci		goto out;
26528c2ecf20Sopenharmony_ci
26538c2ecf20Sopenharmony_ci	/* find_lock_lowest_rq locks the rq if found */
26548c2ecf20Sopenharmony_ci	lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
26558c2ecf20Sopenharmony_ci	if (!lowest_rq)
26568c2ecf20Sopenharmony_ci		goto out;
26578c2ecf20Sopenharmony_ci
26588c2ecf20Sopenharmony_ci	if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))
26598c2ecf20Sopenharmony_ci		goto unlock;
26608c2ecf20Sopenharmony_ci
26618c2ecf20Sopenharmony_ci	deactivate_task(busiest_rq, next_task, 0);
26628c2ecf20Sopenharmony_ci	set_task_cpu(next_task, lowest_rq->cpu);
26638c2ecf20Sopenharmony_ci	activate_task(lowest_rq, next_task, 0);
26648c2ecf20Sopenharmony_ci
26658c2ecf20Sopenharmony_ci	resched_curr(lowest_rq);
26668c2ecf20Sopenharmony_ciunlock:
26678c2ecf20Sopenharmony_ci	double_unlock_balance(busiest_rq, lowest_rq);
26688c2ecf20Sopenharmony_ciout:
26698c2ecf20Sopenharmony_ci	put_task_struct(next_task);
26708c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);
26718c2ecf20Sopenharmony_ci
26728c2ecf20Sopenharmony_ci	return 0;
26738c2ecf20Sopenharmony_ci}
26748c2ecf20Sopenharmony_ci
26758c2ecf20Sopenharmony_cistatic void check_for_migration_rt(struct rq *rq, struct task_struct *p)
26768c2ecf20Sopenharmony_ci{
26778c2ecf20Sopenharmony_ci	bool need_active_lb = false;
26788c2ecf20Sopenharmony_ci	bool misfit_task = false;
26798c2ecf20Sopenharmony_ci	int cpu = task_cpu(p);
26808c2ecf20Sopenharmony_ci	unsigned long cpu_orig_cap;
26818c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
26828c2ecf20Sopenharmony_ci	struct cpumask *rtg_target = NULL;
26838c2ecf20Sopenharmony_ci#endif
26848c2ecf20Sopenharmony_ci
26858c2ecf20Sopenharmony_ci	if (!sysctl_sched_enable_rt_active_lb)
26868c2ecf20Sopenharmony_ci		return;
26878c2ecf20Sopenharmony_ci
26888c2ecf20Sopenharmony_ci	if (p->nr_cpus_allowed == 1)
26898c2ecf20Sopenharmony_ci		return;
26908c2ecf20Sopenharmony_ci
26918c2ecf20Sopenharmony_ci	cpu_orig_cap = capacity_orig_of(cpu);
26928c2ecf20Sopenharmony_ci	/* cpu has max capacity, no need to balance */
26938c2ecf20Sopenharmony_ci	if (cpu_orig_cap == rq->rd->max_cpu_capacity)
26948c2ecf20Sopenharmony_ci		return;
26958c2ecf20Sopenharmony_ci
26968c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RTG
26978c2ecf20Sopenharmony_ci	rtg_target = find_rtg_target(p);
26988c2ecf20Sopenharmony_ci	if (rtg_target)
26998c2ecf20Sopenharmony_ci		misfit_task = capacity_orig_of(cpumask_first(rtg_target)) >
27008c2ecf20Sopenharmony_ci				cpu_orig_cap;
27018c2ecf20Sopenharmony_ci	else
27028c2ecf20Sopenharmony_ci		misfit_task = !rt_task_fits_capacity(p, cpu);
27038c2ecf20Sopenharmony_ci#else
27048c2ecf20Sopenharmony_ci	misfit_task = !rt_task_fits_capacity(p, cpu);
27058c2ecf20Sopenharmony_ci#endif
27068c2ecf20Sopenharmony_ci
27078c2ecf20Sopenharmony_ci	if (misfit_task) {
27088c2ecf20Sopenharmony_ci		raw_spin_lock(&rq->lock);
27098c2ecf20Sopenharmony_ci		if (!rq->active_balance && !rq->rt_active_balance) {
27108c2ecf20Sopenharmony_ci			rq->rt_active_balance = 1;
27118c2ecf20Sopenharmony_ci			rq->rt_push_task = p;
27128c2ecf20Sopenharmony_ci			get_task_struct(p);
27138c2ecf20Sopenharmony_ci			need_active_lb = true;
27148c2ecf20Sopenharmony_ci		}
27158c2ecf20Sopenharmony_ci		raw_spin_unlock(&rq->lock);
27168c2ecf20Sopenharmony_ci
27178c2ecf20Sopenharmony_ci		if (need_active_lb)
27188c2ecf20Sopenharmony_ci			stop_one_cpu_nowait(task_cpu(p),
27198c2ecf20Sopenharmony_ci					    rt_active_load_balance_cpu_stop,
27208c2ecf20Sopenharmony_ci					    rq, &rq->rt_active_balance_work);
27218c2ecf20Sopenharmony_ci	}
27228c2ecf20Sopenharmony_ci}
27238c2ecf20Sopenharmony_ci#endif
27248c2ecf20Sopenharmony_ci
27258c2ecf20Sopenharmony_cistatic unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
27268c2ecf20Sopenharmony_ci{
27278c2ecf20Sopenharmony_ci	/*
27288c2ecf20Sopenharmony_ci	 * Time slice is 0 for SCHED_FIFO tasks
27298c2ecf20Sopenharmony_ci	 */
27308c2ecf20Sopenharmony_ci	if (task->policy == SCHED_RR)
27318c2ecf20Sopenharmony_ci		return sched_rr_timeslice;
27328c2ecf20Sopenharmony_ci	else
27338c2ecf20Sopenharmony_ci		return 0;
27348c2ecf20Sopenharmony_ci}
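
#if 0
/*
 * Userspace view of the hook above (illustrative only, excluded from
 * the build): sched_rr_get_interval() reports this value, so a SCHED_RR
 * task sees the RR timeslice while a SCHED_FIFO task reads back zero.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0 /* self */, &ts) == 0)
		printf("RR timeslice: %ld.%09lds\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif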
27358c2ecf20Sopenharmony_ci
27368c2ecf20Sopenharmony_ciconst struct sched_class rt_sched_class
27378c2ecf20Sopenharmony_ci	__section("__rt_sched_class") = {
27388c2ecf20Sopenharmony_ci	.enqueue_task		= enqueue_task_rt,
27398c2ecf20Sopenharmony_ci	.dequeue_task		= dequeue_task_rt,
27408c2ecf20Sopenharmony_ci	.yield_task		= yield_task_rt,
27418c2ecf20Sopenharmony_ci
27428c2ecf20Sopenharmony_ci	.check_preempt_curr	= check_preempt_curr_rt,
27438c2ecf20Sopenharmony_ci
27448c2ecf20Sopenharmony_ci	.pick_next_task		= pick_next_task_rt,
27458c2ecf20Sopenharmony_ci	.put_prev_task		= put_prev_task_rt,
27468c2ecf20Sopenharmony_ci	.set_next_task          = set_next_task_rt,
27478c2ecf20Sopenharmony_ci
27488c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
27498c2ecf20Sopenharmony_ci	.balance		= balance_rt,
27508c2ecf20Sopenharmony_ci	.select_task_rq		= select_task_rq_rt,
27518c2ecf20Sopenharmony_ci	.set_cpus_allowed       = set_cpus_allowed_common,
27528c2ecf20Sopenharmony_ci	.rq_online              = rq_online_rt,
27538c2ecf20Sopenharmony_ci	.rq_offline             = rq_offline_rt,
27548c2ecf20Sopenharmony_ci	.task_woken		= task_woken_rt,
27558c2ecf20Sopenharmony_ci	.switched_from		= switched_from_rt,
27568c2ecf20Sopenharmony_ci#endif
27578c2ecf20Sopenharmony_ci
27588c2ecf20Sopenharmony_ci	.task_tick		= task_tick_rt,
27598c2ecf20Sopenharmony_ci
27608c2ecf20Sopenharmony_ci	.get_rr_interval	= get_rr_interval_rt,
27618c2ecf20Sopenharmony_ci
27628c2ecf20Sopenharmony_ci	.prio_changed		= prio_changed_rt,
27638c2ecf20Sopenharmony_ci	.switched_to		= switched_to_rt,
27648c2ecf20Sopenharmony_ci
27658c2ecf20Sopenharmony_ci	.update_curr		= update_curr_rt,
27668c2ecf20Sopenharmony_ci
27678c2ecf20Sopenharmony_ci#ifdef CONFIG_UCLAMP_TASK
27688c2ecf20Sopenharmony_ci	.uclamp_enabled		= 1,
27698c2ecf20Sopenharmony_ci#endif
27708c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_WALT
27718c2ecf20Sopenharmony_ci	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
27728c2ecf20Sopenharmony_ci#endif
27738c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_RT_ACTIVE_LB
27748c2ecf20Sopenharmony_ci	.check_for_migration	= check_for_migration_rt,
27758c2ecf20Sopenharmony_ci#endif
27768c2ecf20Sopenharmony_ci};
27778c2ecf20Sopenharmony_ci
27788c2ecf20Sopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
27798c2ecf20Sopenharmony_ci/*
27808c2ecf20Sopenharmony_ci * Ensure that the real time constraints are schedulable.
27818c2ecf20Sopenharmony_ci */
27828c2ecf20Sopenharmony_cistatic DEFINE_MUTEX(rt_constraints_mutex);
27838c2ecf20Sopenharmony_ci
27848c2ecf20Sopenharmony_cistatic inline int tg_has_rt_tasks(struct task_group *tg)
27858c2ecf20Sopenharmony_ci{
27868c2ecf20Sopenharmony_ci	struct task_struct *task;
27878c2ecf20Sopenharmony_ci	struct css_task_iter it;
27888c2ecf20Sopenharmony_ci	int ret = 0;
27898c2ecf20Sopenharmony_ci
27908c2ecf20Sopenharmony_ci	/*
27918c2ecf20Sopenharmony_ci	 * Autogroups do not have RT tasks; see autogroup_create().
27928c2ecf20Sopenharmony_ci	 */
27938c2ecf20Sopenharmony_ci	if (task_group_is_autogroup(tg))
27948c2ecf20Sopenharmony_ci		return 0;
27958c2ecf20Sopenharmony_ci
27968c2ecf20Sopenharmony_ci	css_task_iter_start(&tg->css, 0, &it);
27978c2ecf20Sopenharmony_ci	while (!ret && (task = css_task_iter_next(&it)))
27988c2ecf20Sopenharmony_ci		ret |= rt_task(task);
27998c2ecf20Sopenharmony_ci	css_task_iter_end(&it);
28008c2ecf20Sopenharmony_ci
28018c2ecf20Sopenharmony_ci	return ret;
28028c2ecf20Sopenharmony_ci}
28038c2ecf20Sopenharmony_ci
28048c2ecf20Sopenharmony_cistruct rt_schedulable_data {
28058c2ecf20Sopenharmony_ci	struct task_group *tg;
28068c2ecf20Sopenharmony_ci	u64 rt_period;
28078c2ecf20Sopenharmony_ci	u64 rt_runtime;
28088c2ecf20Sopenharmony_ci};
28098c2ecf20Sopenharmony_ci
28108c2ecf20Sopenharmony_cistatic int tg_rt_schedulable(struct task_group *tg, void *data)
28118c2ecf20Sopenharmony_ci{
28128c2ecf20Sopenharmony_ci	struct rt_schedulable_data *d = data;
28138c2ecf20Sopenharmony_ci	struct task_group *child;
28148c2ecf20Sopenharmony_ci	unsigned long total, sum = 0;
28158c2ecf20Sopenharmony_ci	u64 period, runtime;
28168c2ecf20Sopenharmony_ci
28178c2ecf20Sopenharmony_ci	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
28188c2ecf20Sopenharmony_ci	runtime = tg->rt_bandwidth.rt_runtime;
28198c2ecf20Sopenharmony_ci
28208c2ecf20Sopenharmony_ci	if (tg == d->tg) {
28218c2ecf20Sopenharmony_ci		period = d->rt_period;
28228c2ecf20Sopenharmony_ci		runtime = d->rt_runtime;
28238c2ecf20Sopenharmony_ci	}
28248c2ecf20Sopenharmony_ci
28258c2ecf20Sopenharmony_ci	/*
28268c2ecf20Sopenharmony_ci	 * Cannot have more runtime than the period.
28278c2ecf20Sopenharmony_ci	 */
28288c2ecf20Sopenharmony_ci	if (runtime > period && runtime != RUNTIME_INF)
28298c2ecf20Sopenharmony_ci		return -EINVAL;
28308c2ecf20Sopenharmony_ci
28318c2ecf20Sopenharmony_ci	/*
28328c2ecf20Sopenharmony_ci	 * Ensure we don't starve existing RT tasks if runtime turns zero.
28338c2ecf20Sopenharmony_ci	 */
28348c2ecf20Sopenharmony_ci	if (rt_bandwidth_enabled() && !runtime &&
28358c2ecf20Sopenharmony_ci	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
28368c2ecf20Sopenharmony_ci		return -EBUSY;
28378c2ecf20Sopenharmony_ci
28388c2ecf20Sopenharmony_ci	total = to_ratio(period, runtime);
28398c2ecf20Sopenharmony_ci
28408c2ecf20Sopenharmony_ci	/*
28418c2ecf20Sopenharmony_ci	 * Nobody can have more than the global setting allows.
28428c2ecf20Sopenharmony_ci	 */
28438c2ecf20Sopenharmony_ci	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
28448c2ecf20Sopenharmony_ci		return -EINVAL;
28458c2ecf20Sopenharmony_ci
28468c2ecf20Sopenharmony_ci	/*
28478c2ecf20Sopenharmony_ci	 * The sum of our children's runtime should not exceed our own.
28488c2ecf20Sopenharmony_ci	 */
28498c2ecf20Sopenharmony_ci	list_for_each_entry_rcu(child, &tg->children, siblings) {
28508c2ecf20Sopenharmony_ci		period = ktime_to_ns(child->rt_bandwidth.rt_period);
28518c2ecf20Sopenharmony_ci		runtime = child->rt_bandwidth.rt_runtime;
28528c2ecf20Sopenharmony_ci
28538c2ecf20Sopenharmony_ci		if (child == d->tg) {
28548c2ecf20Sopenharmony_ci			period = d->rt_period;
28558c2ecf20Sopenharmony_ci			runtime = d->rt_runtime;
28568c2ecf20Sopenharmony_ci		}
28578c2ecf20Sopenharmony_ci
28588c2ecf20Sopenharmony_ci		sum += to_ratio(period, runtime);
28598c2ecf20Sopenharmony_ci	}
28608c2ecf20Sopenharmony_ci
28618c2ecf20Sopenharmony_ci	if (sum > total)
28628c2ecf20Sopenharmony_ci		return -EINVAL;
28638c2ecf20Sopenharmony_ci
28648c2ecf20Sopenharmony_ci	return 0;
28658c2ecf20Sopenharmony_ci}
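
#if 0
/*
 * Numeric model of the admission test above (illustrative only,
 * excluded from the build). to_ratio() is modelled as the kernel's
 * fixed-point utilization, (runtime << BW_SHIFT) / period with
 * BW_SHIFT == 20. Under the default global cap of 950000us runtime per
 * 1000000us period, two sibling groups each requesting 500000/1000000
 * sum past the cap and the second write fails with -EINVAL.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BW_SHIFT 20

static uint64_t demo_to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << DEMO_BW_SHIFT) / period;
}

int main(void)
{
	uint64_t cap = demo_to_ratio(1000000, 950000);	/* global limit */
	uint64_t sum = demo_to_ratio(1000000, 500000) +
		       demo_to_ratio(1000000, 500000);	/* two children */

	printf("cap=%llu sum=%llu -> %s\n",
	       (unsigned long long)cap, (unsigned long long)sum,
	       sum > cap ? "-EINVAL" : "admitted");
	return 0;
}
#endif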
28668c2ecf20Sopenharmony_ci
28678c2ecf20Sopenharmony_cistatic int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
28688c2ecf20Sopenharmony_ci{
28698c2ecf20Sopenharmony_ci	int ret;
28708c2ecf20Sopenharmony_ci
28718c2ecf20Sopenharmony_ci	struct rt_schedulable_data data = {
28728c2ecf20Sopenharmony_ci		.tg = tg,
28738c2ecf20Sopenharmony_ci		.rt_period = period,
28748c2ecf20Sopenharmony_ci		.rt_runtime = runtime,
28758c2ecf20Sopenharmony_ci	};
28768c2ecf20Sopenharmony_ci
28778c2ecf20Sopenharmony_ci	rcu_read_lock();
28788c2ecf20Sopenharmony_ci	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
28798c2ecf20Sopenharmony_ci	rcu_read_unlock();
28808c2ecf20Sopenharmony_ci
28818c2ecf20Sopenharmony_ci	return ret;
28828c2ecf20Sopenharmony_ci}
28838c2ecf20Sopenharmony_ci
28848c2ecf20Sopenharmony_cistatic int tg_set_rt_bandwidth(struct task_group *tg,
28858c2ecf20Sopenharmony_ci		u64 rt_period, u64 rt_runtime)
28868c2ecf20Sopenharmony_ci{
28878c2ecf20Sopenharmony_ci	int i, err = 0;
28888c2ecf20Sopenharmony_ci
28898c2ecf20Sopenharmony_ci	/*
28908c2ecf20Sopenharmony_ci	 * Disallowing the root group RT runtime is BAD; it would disallow the
28918c2ecf20Sopenharmony_ci	 * kernel from creating (and/or operating) RT threads.
28928c2ecf20Sopenharmony_ci	 */
28938c2ecf20Sopenharmony_ci	if (tg == &root_task_group && rt_runtime == 0)
28948c2ecf20Sopenharmony_ci		return -EINVAL;
28958c2ecf20Sopenharmony_ci
28968c2ecf20Sopenharmony_ci	/* A period of zero doesn't make any sense. */
28978c2ecf20Sopenharmony_ci	if (rt_period == 0)
28988c2ecf20Sopenharmony_ci		return -EINVAL;
28998c2ecf20Sopenharmony_ci
29008c2ecf20Sopenharmony_ci	/*
29018c2ecf20Sopenharmony_ci	 * Bound the runtime to protect against overflow during the bandwidth shift.
29028c2ecf20Sopenharmony_ci	 */
29038c2ecf20Sopenharmony_ci	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
29048c2ecf20Sopenharmony_ci		return -EINVAL;
29058c2ecf20Sopenharmony_ci
29068c2ecf20Sopenharmony_ci	mutex_lock(&rt_constraints_mutex);
29078c2ecf20Sopenharmony_ci	err = __rt_schedulable(tg, rt_period, rt_runtime);
29088c2ecf20Sopenharmony_ci	if (err)
29098c2ecf20Sopenharmony_ci		goto unlock;
29108c2ecf20Sopenharmony_ci
29118c2ecf20Sopenharmony_ci	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
29128c2ecf20Sopenharmony_ci	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
29138c2ecf20Sopenharmony_ci	tg->rt_bandwidth.rt_runtime = rt_runtime;
29148c2ecf20Sopenharmony_ci
29158c2ecf20Sopenharmony_ci	for_each_possible_cpu(i) {
29168c2ecf20Sopenharmony_ci		struct rt_rq *rt_rq = tg->rt_rq[i];
29178c2ecf20Sopenharmony_ci
29188c2ecf20Sopenharmony_ci		raw_spin_lock(&rt_rq->rt_runtime_lock);
29198c2ecf20Sopenharmony_ci		rt_rq->rt_runtime = rt_runtime;
29208c2ecf20Sopenharmony_ci		raw_spin_unlock(&rt_rq->rt_runtime_lock);
29218c2ecf20Sopenharmony_ci	}
29228c2ecf20Sopenharmony_ci	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
29238c2ecf20Sopenharmony_ciunlock:
29248c2ecf20Sopenharmony_ci	mutex_unlock(&rt_constraints_mutex);
29258c2ecf20Sopenharmony_ci
29268c2ecf20Sopenharmony_ci	return err;
29278c2ecf20Sopenharmony_ci}
29288c2ecf20Sopenharmony_ci
29298c2ecf20Sopenharmony_ciint sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
29308c2ecf20Sopenharmony_ci{
29318c2ecf20Sopenharmony_ci	u64 rt_runtime, rt_period;
29328c2ecf20Sopenharmony_ci
29338c2ecf20Sopenharmony_ci	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
29348c2ecf20Sopenharmony_ci	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
29358c2ecf20Sopenharmony_ci	if (rt_runtime_us < 0)
29368c2ecf20Sopenharmony_ci		rt_runtime = RUNTIME_INF;
29378c2ecf20Sopenharmony_ci	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
29388c2ecf20Sopenharmony_ci		return -EINVAL;
29398c2ecf20Sopenharmony_ci
29408c2ecf20Sopenharmony_ci	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
29418c2ecf20Sopenharmony_ci}
29428c2ecf20Sopenharmony_ci
29438c2ecf20Sopenharmony_cilong sched_group_rt_runtime(struct task_group *tg)
29448c2ecf20Sopenharmony_ci{
29458c2ecf20Sopenharmony_ci	u64 rt_runtime_us;
29468c2ecf20Sopenharmony_ci
29478c2ecf20Sopenharmony_ci	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
29488c2ecf20Sopenharmony_ci		return -1;
29498c2ecf20Sopenharmony_ci
29508c2ecf20Sopenharmony_ci	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
29518c2ecf20Sopenharmony_ci	do_div(rt_runtime_us, NSEC_PER_USEC);
29528c2ecf20Sopenharmony_ci	return rt_runtime_us;
29538c2ecf20Sopenharmony_ci}
29548c2ecf20Sopenharmony_ci
29558c2ecf20Sopenharmony_ciint sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
29568c2ecf20Sopenharmony_ci{
29578c2ecf20Sopenharmony_ci	u64 rt_runtime, rt_period;
29588c2ecf20Sopenharmony_ci
29598c2ecf20Sopenharmony_ci	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
29608c2ecf20Sopenharmony_ci		return -EINVAL;
29618c2ecf20Sopenharmony_ci
29628c2ecf20Sopenharmony_ci	rt_period = rt_period_us * NSEC_PER_USEC;
29638c2ecf20Sopenharmony_ci	rt_runtime = tg->rt_bandwidth.rt_runtime;
29648c2ecf20Sopenharmony_ci
29658c2ecf20Sopenharmony_ci	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
29668c2ecf20Sopenharmony_ci}
29678c2ecf20Sopenharmony_ci
29688c2ecf20Sopenharmony_cilong sched_group_rt_period(struct task_group *tg)
29698c2ecf20Sopenharmony_ci{
29708c2ecf20Sopenharmony_ci	u64 rt_period_us;
29718c2ecf20Sopenharmony_ci
29728c2ecf20Sopenharmony_ci	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
29738c2ecf20Sopenharmony_ci	do_div(rt_period_us, NSEC_PER_USEC);
29748c2ecf20Sopenharmony_ci	return rt_period_us;
29758c2ecf20Sopenharmony_ci}
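
/*
 * The four helpers above back the cgroup-v1 cpu controller files
 * cpu.rt_runtime_us and cpu.rt_period_us; e.g. (illustrative paths):
 *
 *	# echo 300000 > /sys/fs/cgroup/cpu/mygrp/cpu.rt_runtime_us
 *	# cat /sys/fs/cgroup/cpu/mygrp/cpu.rt_period_us
 *	1000000
 *
 * A runtime of -1 selects RUNTIME_INF and is reported back as -1.
 */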
29768c2ecf20Sopenharmony_ci
29778c2ecf20Sopenharmony_cistatic int sched_rt_global_constraints(void)
29788c2ecf20Sopenharmony_ci{
29798c2ecf20Sopenharmony_ci	int ret = 0;
29808c2ecf20Sopenharmony_ci
29818c2ecf20Sopenharmony_ci	mutex_lock(&rt_constraints_mutex);
29828c2ecf20Sopenharmony_ci	ret = __rt_schedulable(NULL, 0, 0);
29838c2ecf20Sopenharmony_ci	mutex_unlock(&rt_constraints_mutex);
29848c2ecf20Sopenharmony_ci
29858c2ecf20Sopenharmony_ci	return ret;
29868c2ecf20Sopenharmony_ci}
29878c2ecf20Sopenharmony_ci
29888c2ecf20Sopenharmony_ciint sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
29898c2ecf20Sopenharmony_ci{
29908c2ecf20Sopenharmony_ci	/* Don't accept realtime tasks when there is no way for them to run */
29918c2ecf20Sopenharmony_ci	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
29928c2ecf20Sopenharmony_ci		return 0;
29938c2ecf20Sopenharmony_ci
29948c2ecf20Sopenharmony_ci	return 1;
29958c2ecf20Sopenharmony_ci}
29968c2ecf20Sopenharmony_ci
29978c2ecf20Sopenharmony_ci#else /* !CONFIG_RT_GROUP_SCHED */
29988c2ecf20Sopenharmony_cistatic int sched_rt_global_constraints(void)
29998c2ecf20Sopenharmony_ci{
30008c2ecf20Sopenharmony_ci	unsigned long flags;
30018c2ecf20Sopenharmony_ci	int i;
30028c2ecf20Sopenharmony_ci
30038c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
30048c2ecf20Sopenharmony_ci	for_each_possible_cpu(i) {
30058c2ecf20Sopenharmony_ci		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
30068c2ecf20Sopenharmony_ci
30078c2ecf20Sopenharmony_ci		raw_spin_lock(&rt_rq->rt_runtime_lock);
30088c2ecf20Sopenharmony_ci		rt_rq->rt_runtime = global_rt_runtime();
30098c2ecf20Sopenharmony_ci		raw_spin_unlock(&rt_rq->rt_runtime_lock);
30108c2ecf20Sopenharmony_ci	}
30118c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
30128c2ecf20Sopenharmony_ci
30138c2ecf20Sopenharmony_ci	return 0;
30148c2ecf20Sopenharmony_ci}
30158c2ecf20Sopenharmony_ci#endif /* CONFIG_RT_GROUP_SCHED */
30168c2ecf20Sopenharmony_ci
30178c2ecf20Sopenharmony_cistatic int sched_rt_global_validate(void)
30188c2ecf20Sopenharmony_ci{
30198c2ecf20Sopenharmony_ci	if (sysctl_sched_rt_period <= 0)
30208c2ecf20Sopenharmony_ci		return -EINVAL;
30218c2ecf20Sopenharmony_ci
30228c2ecf20Sopenharmony_ci	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
30238c2ecf20Sopenharmony_ci		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
30248c2ecf20Sopenharmony_ci		 ((u64)sysctl_sched_rt_runtime *
30258c2ecf20Sopenharmony_ci			NSEC_PER_USEC > max_rt_runtime)))
30268c2ecf20Sopenharmony_ci		return -EINVAL;
30278c2ecf20Sopenharmony_ci
30288c2ecf20Sopenharmony_ci	return 0;
30298c2ecf20Sopenharmony_ci}
30308c2ecf20Sopenharmony_ci
30318c2ecf20Sopenharmony_cistatic void sched_rt_do_global(void)
30328c2ecf20Sopenharmony_ci{
30338c2ecf20Sopenharmony_ci	unsigned long flags;
30348c2ecf20Sopenharmony_ci
30358c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
30368c2ecf20Sopenharmony_ci	def_rt_bandwidth.rt_runtime = global_rt_runtime();
30378c2ecf20Sopenharmony_ci	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
30388c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
30398c2ecf20Sopenharmony_ci}
30408c2ecf20Sopenharmony_ci
30418c2ecf20Sopenharmony_ciint sched_rt_handler(struct ctl_table *table, int write, void *buffer,
30428c2ecf20Sopenharmony_ci		size_t *lenp, loff_t *ppos)
30438c2ecf20Sopenharmony_ci{
30448c2ecf20Sopenharmony_ci	int old_period, old_runtime;
30458c2ecf20Sopenharmony_ci	static DEFINE_MUTEX(mutex);
30468c2ecf20Sopenharmony_ci	int ret;
30478c2ecf20Sopenharmony_ci
30488c2ecf20Sopenharmony_ci	mutex_lock(&mutex);
30498c2ecf20Sopenharmony_ci	old_period = sysctl_sched_rt_period;
30508c2ecf20Sopenharmony_ci	old_runtime = sysctl_sched_rt_runtime;
30518c2ecf20Sopenharmony_ci
30528c2ecf20Sopenharmony_ci	ret = proc_dointvec(table, write, buffer, lenp, ppos);
30538c2ecf20Sopenharmony_ci
30548c2ecf20Sopenharmony_ci	if (!ret && write) {
30558c2ecf20Sopenharmony_ci		ret = sched_rt_global_validate();
30568c2ecf20Sopenharmony_ci		if (ret)
30578c2ecf20Sopenharmony_ci			goto undo;
30588c2ecf20Sopenharmony_ci
30598c2ecf20Sopenharmony_ci		ret = sched_dl_global_validate();
30608c2ecf20Sopenharmony_ci		if (ret)
30618c2ecf20Sopenharmony_ci			goto undo;
30628c2ecf20Sopenharmony_ci
30638c2ecf20Sopenharmony_ci		ret = sched_rt_global_constraints();
30648c2ecf20Sopenharmony_ci		if (ret)
30658c2ecf20Sopenharmony_ci			goto undo;
30668c2ecf20Sopenharmony_ci
30678c2ecf20Sopenharmony_ci		sched_rt_do_global();
30688c2ecf20Sopenharmony_ci		sched_dl_do_global();
30698c2ecf20Sopenharmony_ci	}
30708c2ecf20Sopenharmony_ci	if (0) {
30718c2ecf20Sopenharmony_ciundo:
30728c2ecf20Sopenharmony_ci		sysctl_sched_rt_period = old_period;
30738c2ecf20Sopenharmony_ci		sysctl_sched_rt_runtime = old_runtime;
30748c2ecf20Sopenharmony_ci	}
30758c2ecf20Sopenharmony_ci	mutex_unlock(&mutex);
30768c2ecf20Sopenharmony_ci
30778c2ecf20Sopenharmony_ci	return ret;
30788c2ecf20Sopenharmony_ci}
30798c2ecf20Sopenharmony_ci
30808c2ecf20Sopenharmony_ciint sched_rr_handler(struct ctl_table *table, int write, void *buffer,
30818c2ecf20Sopenharmony_ci		size_t *lenp, loff_t *ppos)
30828c2ecf20Sopenharmony_ci{
30838c2ecf20Sopenharmony_ci	int ret;
30848c2ecf20Sopenharmony_ci	static DEFINE_MUTEX(mutex);
30858c2ecf20Sopenharmony_ci
30868c2ecf20Sopenharmony_ci	mutex_lock(&mutex);
30878c2ecf20Sopenharmony_ci	ret = proc_dointvec(table, write, buffer, lenp, ppos);
30888c2ecf20Sopenharmony_ci	/*
30898c2ecf20Sopenharmony_ci	 * Make sure that internally we keep jiffies.
30908c2ecf20Sopenharmony_ci	 * Also, writing zero resets the timeslice to the default:
30918c2ecf20Sopenharmony_ci	 */
30928c2ecf20Sopenharmony_ci	if (!ret && write) {
30938c2ecf20Sopenharmony_ci		sched_rr_timeslice =
30948c2ecf20Sopenharmony_ci			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
30958c2ecf20Sopenharmony_ci			msecs_to_jiffies(sysctl_sched_rr_timeslice);
30968c2ecf20Sopenharmony_ci	}
30978c2ecf20Sopenharmony_ci	mutex_unlock(&mutex);
30988c2ecf20Sopenharmony_ci
30998c2ecf20Sopenharmony_ci	return ret;
31008c2ecf20Sopenharmony_ci}
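
/*
 * Userspace view of the two handlers above (illustrative, assuming the
 * usual procfs layout):
 *
 *	# cat /proc/sys/kernel/sched_rt_period_us	-> 1000000
 *	# cat /proc/sys/kernel/sched_rt_runtime_us	-> 950000
 *	# echo 30 > /proc/sys/kernel/sched_rr_timeslice_ms
 *
 * Writing -1 to sched_rt_runtime_us selects RUNTIME_INF (RT throttling
 * off); writing zero or a negative value to sched_rr_timeslice_ms
 * resets the RR timeslice to RR_TIMESLICE.
 */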
31018c2ecf20Sopenharmony_ci
31028c2ecf20Sopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
31038c2ecf20Sopenharmony_civoid print_rt_stats(struct seq_file *m, int cpu)
31048c2ecf20Sopenharmony_ci{
31058c2ecf20Sopenharmony_ci	rt_rq_iter_t iter;
31068c2ecf20Sopenharmony_ci	struct rt_rq *rt_rq;
31078c2ecf20Sopenharmony_ci
31088c2ecf20Sopenharmony_ci	rcu_read_lock();
31098c2ecf20Sopenharmony_ci	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
31108c2ecf20Sopenharmony_ci		print_rt_rq(m, cpu, rt_rq);
31118c2ecf20Sopenharmony_ci	rcu_read_unlock();
31128c2ecf20Sopenharmony_ci}
31138c2ecf20Sopenharmony_ci#endif /* CONFIG_SCHED_DEBUG */