// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"
#include "walt.h"

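/*
 * sched_rr_timeslice is kept in jiffies; sysctl_sched_rr_timeslice mirrors it
 * in milliseconds. Assuming the usual RR_TIMESLICE of 100ms worth of jiffies,
 * with HZ=250 that is 25 jiffies and the sysctl value is (1000/250) * 25 = 100.
 */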
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

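/*
 * OpenHarmony-specific knob; judging by its name it toggles capacity-aware
 * candidate selection for RT tasks.
 */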
#ifdef CONFIG_SCHED_RT_CAS
unsigned int sysctl_sched_enable_rt_cas = 1;
#endif

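/*
 * OpenHarmony-specific knob; judging by its name it toggles active load
 * balancing (migrating a currently running task) for RT tasks.
 */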
#ifdef CONFIG_SCHED_RT_ACTIVE_LB
unsigned int sysctl_sched_enable_rt_active_lb = 1;
#endif

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
    struct rt_bandwidth *rt_b = container_of(timer, struct rt_bandwidth, rt_period_timer);
    int idle = 0;
    int overrun;

    raw_spin_lock(&rt_b->rt_runtime_lock);
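    /*
     * hrtimer_forward_now() returns how many full periods have elapsed;
     * replenish once per elapsed period until none remain.
     */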
    for (;;) {
        overrun = hrtimer_forward_now(timer, rt_b->rt_period);
        if (!overrun) {
            break;
        }

        raw_spin_unlock(&rt_b->rt_runtime_lock);
        idle = do_sched_rt_period_timer(rt_b, overrun);
        raw_spin_lock(&rt_b->rt_runtime_lock);
    }
    if (idle) {
        rt_b->rt_period_active = 0;
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);

    return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
    rt_b->rt_period = ns_to_ktime(period);
    rt_b->rt_runtime = runtime;

    raw_spin_lock_init(&rt_b->rt_runtime_lock);

    hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
    rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) {
        return;
    }

    raw_spin_lock(&rt_b->rt_runtime_lock);
    if (!rt_b->rt_period_active) {
        rt_b->rt_period_active = 1;
        /*
         * SCHED_DEADLINE updates the bandwidth, as a run away
         * RT task with a DL task could hog a CPU. But DL does
         * not reset the period. If a deadline task was running
         * without an RT task running, it can cause RT tasks to
         * throttle when they start up. Kick the timer right away
         * to update the period.
         */
        hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
        hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED_HARD);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
    struct rt_prio_array *array;
    int i;

    array = &rt_rq->active;
    for (i = 0; i < MAX_RT_PRIO; i++) {
        INIT_LIST_HEAD(array->queue + i);
        __clear_bit(i, array->bitmap);
    }
    /* delimiter for bitsearch: */
    __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
    rt_rq->highest_prio.curr = MAX_RT_PRIO;
    rt_rq->highest_prio.next = MAX_RT_PRIO;
    rt_rq->rt_nr_migratory = 0;
    rt_rq->overloaded = 0;
    plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
    /* We start in dequeued state, because no RT tasks are queued */
    rt_rq->rt_queued = 0;

    rt_rq->rt_time = 0;
    rt_rq->rt_throttled = 0;
    rt_rq->rt_runtime = 0;
    raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
    WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
    return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = rt_se->rt_rq;

    return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
    int i;

    if (tg->rt_se) {
        destroy_rt_bandwidth(&tg->rt_bandwidth);
    }

    for_each_possible_cpu(i)
    {
        if (tg->rt_rq) {
            kfree(tg->rt_rq[i]);
        }
        if (tg->rt_se) {
            kfree(tg->rt_se[i]);
        }
    }

    kfree(tg->rt_rq);
    kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu,
                      struct sched_rt_entity *parent)
{
    struct rq *rq = cpu_rq(cpu);

    rt_rq->highest_prio.curr = MAX_RT_PRIO;
    rt_rq->rt_nr_boosted = 0;
    rt_rq->rq = rq;
    rt_rq->tg = tg;

    tg->rt_rq[cpu] = rt_rq;
    tg->rt_se[cpu] = rt_se;

    if (!rt_se) {
        return;
    }

    if (!parent) {
        rt_se->rt_rq = &rq->rt;
    } else {
        rt_se->rt_rq = parent->my_q;
    }

    rt_se->my_q = rt_rq;
    rt_se->parent = parent;
    INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
    struct rt_rq *rt_rq;
    struct sched_rt_entity *rt_se;
    int i;

    tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
    if (!tg->rt_rq) {
        goto err;
    }
    tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
    if (!tg->rt_se) {
        goto err;
    }

    init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(def_rt_bandwidth.rt_period), 0);

    for_each_possible_cpu(i)
    {
        rt_rq = kzalloc_node(sizeof(struct rt_rq), GFP_KERNEL, cpu_to_node(i));
        if (!rt_rq) {
            goto err;
        }

        rt_se = kzalloc_node(sizeof(struct sched_rt_entity), GFP_KERNEL, cpu_to_node(i));
        if (!rt_se) {
            goto err_free_rq;
        }

        init_rt_rq(rt_rq);
        rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
        init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
    }

    return 1;

err_free_rq:
    kfree(rt_rq);
err:
    return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
    return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    struct task_struct *p = rt_task_of(rt_se);

    return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg)
{
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
    return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
    /*
     * Try to pull RT tasks here if we lower this rq's prio and cpu is not
     * isolated
     */
    return rq->rt.highest_prio.curr > prev->prio && !cpu_isolated(cpu_of(rq));
}

static inline int rt_overloaded(struct rq *rq)
{
    return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
    if (!rq->online) {
        return;
    }

    cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
    /*
     * Make sure the mask is visible before we set
     * the overload count. That is checked to determine
     * if we should look at the mask. It would be a shame
     * if we looked at the mask, but the mask was not
     * updated yet.
     *
     * Matched by the barrier in pull_rt_task().
     */
    smp_wmb();
    atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
    if (!rq->online) {
        return;
    }

    /* the order here really doesn't matter */
    atomic_dec(&rq->rd->rto_count);
    cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
    if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
        if (!rt_rq->overloaded) {
            rt_set_overload(rq_of_rt_rq(rt_rq));
            rt_rq->overloaded = 1;
        }
    } else if (rt_rq->overloaded) {
        rt_clear_overload(rq_of_rt_rq(rt_rq));
        rt_rq->overloaded = 0;
    }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    struct task_struct *p;

    if (!rt_entity_is_task(rt_se)) {
        return;
    }

    p = rt_task_of(rt_se);
    rt_rq = &rq_of_rt_rq(rt_rq)->rt;

    rt_rq->rt_nr_total++;
    if (p->nr_cpus_allowed > 1) {
        rt_rq->rt_nr_migratory++;
    }

    update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    struct task_struct *p;

    if (!rt_entity_is_task(rt_se)) {
        return;
    }

    p = rt_task_of(rt_se);
    rt_rq = &rq_of_rt_rq(rt_rq)->rt;

    rt_rq->rt_nr_total--;
    if (p->nr_cpus_allowed > 1) {
        rt_rq->rt_nr_migratory--;
    }

    update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
    return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

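/*
 * The two helpers below only queue a balance callback; the actual push/pull
 * runs later, once the rq lock can be dropped safely.
 */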
static inline void rt_queue_push_tasks(struct rq *rq)
{
    if (!has_pushable_tasks(rq)) {
        return;
    }

    queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
    queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
    plist_node_init(&p->pushable_tasks, p->prio);
    plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

    /* Update the highest prio pushable task */
    if (p->prio < rq->rt.highest_prio.next) {
        rq->rt.highest_prio.next = p->prio;
    }
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

    /* Update the new highest prio pushable task */
    if (has_pushable_tasks(rq)) {
        p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
        rq->rt.highest_prio.next = p->prio;
    } else {
        rq->rt.highest_prio.next = MAX_RT_PRIO;
    }
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
    return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
    return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of @cpu. For non-heterogeneous systems this
 * function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
    unsigned int min_cap;
    unsigned int max_cap;
    unsigned int cpu_cap;

    /* Only heterogeneous systems can benefit from this check */
    if (!static_branch_unlikely(&sched_asym_cpucapacity)) {
        return true;
    }

    min_cap = uclamp_eff_value(p, UCLAMP_MIN);
    max_cap = uclamp_eff_value(p, UCLAMP_MAX);

    cpu_cap = capacity_orig_of(cpu);

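    /*
     * Illustrative example: a task with an effective uclamp_min of 768 does
     * not fit a CPU whose capacity_orig_of() is 512, unless uclamp_max clamps
     * the request down to 512 or below.
     */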
    return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
    return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
    if (!rt_rq->tg) {
        return RUNTIME_INF;
    }

    return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
    return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
    do {
        tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
    } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

    if (&tg->list == &task_groups) {
        tg = NULL;
    }

    return tg;
}

#define cycle_each_rt_rq(rt_rq, iter, rq)                                                       \
    for (iter = container_of(&task_groups, typeof(*iter), list);                               \
         (iter = next_task_group(iter)) && (rt_rq = iter->rt_rq[cpu_of(rq)]);)
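/*
 * Intended usage, as in __disable_runtime()/__enable_runtime() below:
 *
 *     rt_rq_iter_t iter;
 *     struct rt_rq *rt_rq;
 *
 *     cycle_each_rt_rq(rt_rq, iter, rq) {
 *         ... operate on each task group's rt_rq for cpu_of(rq) ...
 *     }
 */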

#define cycle_each_sched_rt_entity(rt_se) for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
    struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
    struct rq *rq = rq_of_rt_rq(rt_rq);
    struct sched_rt_entity *rt_se;

    int cpu = cpu_of(rq);

    rt_se = rt_rq->tg->rt_se[cpu];

    if (rt_rq->rt_nr_running) {
        if (!rt_se) {
            enqueue_top_rt_rq(rt_rq);
        } else if (!on_rt_rq(rt_se)) {
            enqueue_rt_entity(rt_se, 0);
        }

        if (rt_rq->highest_prio.curr < curr->prio) {
            resched_curr(rq);
        }
    }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
    struct sched_rt_entity *rt_se;
    int cpu = cpu_of(rq_of_rt_rq(rt_rq));

    rt_se = rt_rq->tg->rt_se[cpu];

    if (!rt_se) {
        dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
    } else if (on_rt_rq(rt_se)) {
        dequeue_rt_entity(rt_se, 0);
    }
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
    return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = group_rt_rq(rt_se);
    struct task_struct *p;

    if (rt_rq) {
        return !!rt_rq->rt_nr_boosted;
    }

    p = rt_task_of(rt_se);
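    /* A task entity is boosted when PI has raised its prio above normal_prio. */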
    return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
    return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
    return cpu_online_mask;
}
#endif

static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
    return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
    return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
    return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
    return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define cycle_each_rt_rq(rt_rq, iter, rq) for ((void)(iter), (rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)

#define cycle_each_sched_rt_entity(rt_se) for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    if (!rt_rq->rt_nr_running) {
        return;
    }

    enqueue_top_rt_rq(rt_rq);
    resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
    dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
    return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
    return cpu_online_mask;
}

static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
    return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
    return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

    return (hrtimer_active(&rt_b->rt_period_timer) || rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
    struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
    int i, weight;
    u64 rt_period;

    weight = cpumask_weight(rd->span);

    raw_spin_lock(&rt_b->rt_runtime_lock);
    rt_period = ktime_to_ns(rt_b->rt_period);
    for_each_cpu(i, rd->span)
    {
        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
        s64 diff;

        if (iter == rt_rq) {
            continue;
        }

        raw_spin_lock(&iter->rt_runtime_lock);
        /*
         * Either all rqs have inf runtime and there's nothing to steal
         * or __disable_runtime() below sets a specific rq to inf to
         * indicate it's been disabled and disallow stealing.
         */
        if (iter->rt_runtime == RUNTIME_INF) {
            goto next;
        }

        /*
         * From runqueues with spare time, take 1/n part of their
         * spare time, but no more than our period.
         */
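        /* E.g. with 4 CPUs in rd->span, at most a quarter of a neighbour's slack is taken. */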
        diff = iter->rt_runtime - iter->rt_time;
        if (diff > 0) {
            diff = div_u64((u64)diff, weight);
            if (rt_rq->rt_runtime + diff > rt_period) {
                diff = rt_period - rt_rq->rt_runtime;
            }
            iter->rt_runtime -= diff;
            rt_rq->rt_runtime += diff;
            if (rt_rq->rt_runtime == rt_period) {
                raw_spin_unlock(&iter->rt_runtime_lock);
                break;
            }
        }
    next:
        raw_spin_unlock(&iter->rt_runtime_lock);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
    struct root_domain *rd = rq->rd;
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    if (unlikely(!scheduler_running)) {
        return;
    }

    cycle_each_rt_rq(rt_rq, iter, rq) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        s64 want;
        int i;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * Either we're all inf and nobody needs to borrow, or we're
         * already disabled and thus have nothing to do, or we have
         * exactly the right amount of runtime to take out.
         */
        if (rt_rq->rt_runtime == RUNTIME_INF || rt_rq->rt_runtime == rt_b->rt_runtime) {
            goto balanced;
        }
        raw_spin_unlock(&rt_rq->rt_runtime_lock);

        /*
         * Calculate the difference between what we started out with
         * and what we currently have; that's the amount of runtime
         * we lent out and now have to reclaim.
         */
        want = rt_b->rt_runtime - rt_rq->rt_runtime;

        /*
         * Greedy reclaim, take back as much as we can.
         */
        for_each_cpu(i, rd->span)
        {
            struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
            s64 diff;

            /*
             * Can't reclaim from ourselves or disabled runqueues.
             */
            if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) {
                continue;
            }

            raw_spin_lock(&iter->rt_runtime_lock);
            if (want > 0) {
                diff = min_t(s64, iter->rt_runtime, want);
                iter->rt_runtime -= diff;
                want -= diff;
            } else {
                iter->rt_runtime -= want;
                want -= want;
            }
            raw_spin_unlock(&iter->rt_runtime_lock);

            if (!want) {
                break;
            }
        }

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * We cannot be left wanting - that would mean some runtime
         * leaked out of the system.
         */
        BUG_ON(want);
    balanced:
        /*
         * Disable all the borrow logic by pretending we have inf
         * runtime - in which case borrowing doesn't make sense.
         */
        rt_rq->rt_runtime = RUNTIME_INF;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        /* Make rt_rq available for pick_next_task() */
        sched_rt_rq_enqueue(rt_rq);
    }
}

static void __enable_runtime(struct rq *rq)
{
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    if (unlikely(!scheduler_running)) {
        return;
    }

    /*
     * Reset each runqueue's bandwidth settings
     */
    cycle_each_rt_rq(rt_rq, iter, rq) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_b->rt_runtime;
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
    }
}

static void balance_runtime(struct rt_rq *rt_rq)
{
    if (!sched_feat(RT_RUNTIME_SHARE)) {
        return;
    }

    if (rt_rq->rt_time > rt_rq->rt_runtime) {
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        do_balance_runtime(rt_rq);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
    }
}
#else  /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq)
{
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
    int i, idle = 1, throttled = 0;
    const struct cpumask *span;

    span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * Whether the tasks in this task_group run on isolated CPUs
     * (isolcpus or CPUs isolated via cpusets) or on non-isolated
     * CPUs, check all the online rt_rqs, lest the timer run on a
     * CPU which does not service all runqueues, potentially
     * leaving other CPUs indefinitely throttled.
     */
    span = cpu_online_mask;
#endif
    for_each_cpu(i, span)
    {
        int enqueue = 0;
        struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
        struct rq *rq = rq_of_rt_rq(rt_rq);
        int skip;

        /*
         * When span == cpu_online_mask, taking each rq->lock
         * can be time-consuming. Try to avoid it when possible.
         */
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) {
            rt_rq->rt_runtime = rt_b->rt_runtime;
        }
        skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        if (skip) {
            continue;
        }

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);

        if (rt_rq->rt_time) {
            u64 runtime;

            raw_spin_lock(&rt_rq->rt_runtime_lock);
            if (rt_rq->rt_throttled) {
                balance_runtime(rt_rq);
            }
            runtime = rt_rq->rt_runtime;
            rt_rq->rt_time -= min(rt_rq->rt_time, overrun * runtime);
            if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                rt_rq->rt_throttled = 0;
                enqueue = 1;

                /*
                 * When we're idle and a woken (rt) task is
                 * throttled, check_preempt_curr() will set
                 * skip_update and the time between the wakeup
                 * and this unthrottle will get accounted as
                 * 'runtime'.
                 */
9543d0407baSopenharmony_ci                if (rt_rq->rt_nr_running && rq->curr == rq->idle) {
9553d0407baSopenharmony_ci                    rq_clock_cancel_skipupdate(rq);
9563d0407baSopenharmony_ci                }
9573d0407baSopenharmony_ci            }
9583d0407baSopenharmony_ci            if (rt_rq->rt_time || rt_rq->rt_nr_running) {
9593d0407baSopenharmony_ci                idle = 0;
9603d0407baSopenharmony_ci            }
9613d0407baSopenharmony_ci            raw_spin_unlock(&rt_rq->rt_runtime_lock);
9623d0407baSopenharmony_ci        } else if (rt_rq->rt_nr_running) {
9633d0407baSopenharmony_ci            idle = 0;
9643d0407baSopenharmony_ci            if (!rt_rq_throttled(rt_rq)) {
9653d0407baSopenharmony_ci                enqueue = 1;
9663d0407baSopenharmony_ci            }
9673d0407baSopenharmony_ci        }
9683d0407baSopenharmony_ci        if (rt_rq->rt_throttled) {
9693d0407baSopenharmony_ci            throttled = 1;
9703d0407baSopenharmony_ci        }
9713d0407baSopenharmony_ci
9723d0407baSopenharmony_ci        if (enqueue) {
9733d0407baSopenharmony_ci            sched_rt_rq_enqueue(rt_rq);
9743d0407baSopenharmony_ci        }
9753d0407baSopenharmony_ci        raw_spin_unlock(&rq->lock);
9763d0407baSopenharmony_ci    }
9773d0407baSopenharmony_ci
9783d0407baSopenharmony_ci    if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) {
9793d0407baSopenharmony_ci        return 1;
9803d0407baSopenharmony_ci    }
9813d0407baSopenharmony_ci
9823d0407baSopenharmony_ci    return idle;
9833d0407baSopenharmony_ci}
9843d0407baSopenharmony_ci
9853d0407baSopenharmony_cistatic inline int rt_se_prio(struct sched_rt_entity *rt_se)
9863d0407baSopenharmony_ci{
9873d0407baSopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
9883d0407baSopenharmony_ci    struct rt_rq *rt_rq = group_rt_rq(rt_se);
9893d0407baSopenharmony_ci
9903d0407baSopenharmony_ci    if (rt_rq) {
9913d0407baSopenharmony_ci        return rt_rq->highest_prio.curr;
9923d0407baSopenharmony_ci    }
9933d0407baSopenharmony_ci#endif
9943d0407baSopenharmony_ci
9953d0407baSopenharmony_ci    return rt_task_of(rt_se)->prio;
9963d0407baSopenharmony_ci}
9973d0407baSopenharmony_ci
9983d0407baSopenharmony_cistatic inline void try_start_rt_bandwidth(struct rt_bandwidth *rt_b)
9993d0407baSopenharmony_ci{
10003d0407baSopenharmony_ci    raw_spin_lock(&rt_b->rt_runtime_lock);
10013d0407baSopenharmony_ci    if (!rt_b->rt_period_active) {
10023d0407baSopenharmony_ci        rt_b->rt_period_active = 1;
10033d0407baSopenharmony_ci        hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
10043d0407baSopenharmony_ci        hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED_HARD);
10053d0407baSopenharmony_ci    }
10063d0407baSopenharmony_ci    raw_spin_unlock(&rt_b->rt_runtime_lock);
10073d0407baSopenharmony_ci}
10083d0407baSopenharmony_ci
10093d0407baSopenharmony_cistatic int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
10103d0407baSopenharmony_ci{
10113d0407baSopenharmony_ci    u64 runtime = sched_rt_runtime(rt_rq);
10123d0407baSopenharmony_ci
10133d0407baSopenharmony_ci    if (rt_rq->rt_throttled) {
10143d0407baSopenharmony_ci        return rt_rq_throttled(rt_rq);
10153d0407baSopenharmony_ci    }
10163d0407baSopenharmony_ci
10173d0407baSopenharmony_ci    if (runtime >= sched_rt_period(rt_rq)) {
10183d0407baSopenharmony_ci        return 0;
10193d0407baSopenharmony_ci    }
10203d0407baSopenharmony_ci
10213d0407baSopenharmony_ci    balance_runtime(rt_rq);
10223d0407baSopenharmony_ci    runtime = sched_rt_runtime(rt_rq);
10233d0407baSopenharmony_ci    if (runtime == RUNTIME_INF) {
10243d0407baSopenharmony_ci        return 0;
10253d0407baSopenharmony_ci    }
10263d0407baSopenharmony_ci
10273d0407baSopenharmony_ci    if (rt_rq->rt_time > runtime) {
10283d0407baSopenharmony_ci        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
10293d0407baSopenharmony_ci
10303d0407baSopenharmony_ci        /*
10313d0407baSopenharmony_ci         * Don't actually throttle groups that have no runtime assigned
10323d0407baSopenharmony_ci         * but accrue some time due to boosting.
10333d0407baSopenharmony_ci         */
10343d0407baSopenharmony_ci        if (likely(rt_b->rt_runtime)) {
10353d0407baSopenharmony_ci            rt_rq->rt_throttled = 1;
10363d0407baSopenharmony_ci            printk_deferred_once("sched: RT throttling activated\n");
10373d0407baSopenharmony_ci        } else {
10383d0407baSopenharmony_ci            /*
10393d0407baSopenharmony_ci             * In case we did anyway, make it go away,
10403d0407baSopenharmony_ci             * replenishment is a joke, since it will replenish us
10413d0407baSopenharmony_ci             * with exactly 0 ns.
10423d0407baSopenharmony_ci             */
10433d0407baSopenharmony_ci            rt_rq->rt_time = 0;
10443d0407baSopenharmony_ci        }
10453d0407baSopenharmony_ci
10463d0407baSopenharmony_ci        if (rt_rq_throttled(rt_rq)) {
10473d0407baSopenharmony_ci            sched_rt_rq_dequeue(rt_rq);
10483d0407baSopenharmony_ci            return 1;
10493d0407baSopenharmony_ci        }
10503d0407baSopenharmony_ci    }
10513d0407baSopenharmony_ci
10523d0407baSopenharmony_ci    return 0;
10533d0407baSopenharmony_ci}
10543d0407baSopenharmony_ci
10553d0407baSopenharmony_ci/*
10563d0407baSopenharmony_ci * Update the current task's runtime statistics. Skip current tasks that
10573d0407baSopenharmony_ci * are not in our scheduling class.
10583d0407baSopenharmony_ci */
10593d0407baSopenharmony_cistatic void update_curr_rt(struct rq *rq)
10603d0407baSopenharmony_ci{
10613d0407baSopenharmony_ci    struct task_struct *curr = rq->curr;
10623d0407baSopenharmony_ci    struct sched_rt_entity *rt_se = &curr->rt;
10633d0407baSopenharmony_ci    u64 delta_exec;
10643d0407baSopenharmony_ci    u64 now;
10653d0407baSopenharmony_ci
10663d0407baSopenharmony_ci    if (curr->sched_class != &rt_sched_class) {
10673d0407baSopenharmony_ci        return;
10683d0407baSopenharmony_ci    }
10693d0407baSopenharmony_ci
10703d0407baSopenharmony_ci    now = rq_clock_task(rq);
10713d0407baSopenharmony_ci    delta_exec = now - curr->se.exec_start;
10723d0407baSopenharmony_ci    if (unlikely((s64)delta_exec <= 0)) {
10733d0407baSopenharmony_ci        return;
10743d0407baSopenharmony_ci    }
10753d0407baSopenharmony_ci
10763d0407baSopenharmony_ci    schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
10773d0407baSopenharmony_ci
10783d0407baSopenharmony_ci    curr->se.sum_exec_runtime += delta_exec;
10793d0407baSopenharmony_ci    account_group_exec_runtime(curr, delta_exec);
10803d0407baSopenharmony_ci
10813d0407baSopenharmony_ci    curr->se.exec_start = now;
10823d0407baSopenharmony_ci    cgroup_account_cputime(curr, delta_exec);
10833d0407baSopenharmony_ci
10843d0407baSopenharmony_ci    if (!rt_bandwidth_enabled()) {
10853d0407baSopenharmony_ci        return;
10863d0407baSopenharmony_ci    }
10873d0407baSopenharmony_ci
10883d0407baSopenharmony_ci    cycle_each_sched_rt_entity(rt_se) {
10893d0407baSopenharmony_ci        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
10903d0407baSopenharmony_ci        int exceeded;
10913d0407baSopenharmony_ci
10923d0407baSopenharmony_ci        if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
10933d0407baSopenharmony_ci            raw_spin_lock(&rt_rq->rt_runtime_lock);
10943d0407baSopenharmony_ci            rt_rq->rt_time += delta_exec;
10953d0407baSopenharmony_ci            exceeded = sched_rt_runtime_exceeded(rt_rq);
10963d0407baSopenharmony_ci            if (exceeded) {
10973d0407baSopenharmony_ci                resched_curr(rq);
10983d0407baSopenharmony_ci            }
10993d0407baSopenharmony_ci            raw_spin_unlock(&rt_rq->rt_runtime_lock);
11003d0407baSopenharmony_ci            if (exceeded) {
11013d0407baSopenharmony_ci                try_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
11023d0407baSopenharmony_ci            }
11033d0407baSopenharmony_ci        }
11043d0407baSopenharmony_ci    }
11053d0407baSopenharmony_ci}
11063d0407baSopenharmony_ci
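/*
 * dequeue_top_rt_rq() / enqueue_top_rt_rq() keep rq->nr_running in sync with
 * the root rt_rq: when the top-level RT queue is throttled or emptied, its
 * tasks stop counting towards the rq's load, and once it becomes runnable
 * again they are added back and cpufreq is nudged to re-evaluate.
 */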
11073d0407baSopenharmony_cistatic void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
11083d0407baSopenharmony_ci{
11093d0407baSopenharmony_ci    struct rq *rq = rq_of_rt_rq(rt_rq);
11103d0407baSopenharmony_ci
11113d0407baSopenharmony_ci    BUG_ON(&rq->rt != rt_rq);
11123d0407baSopenharmony_ci
11133d0407baSopenharmony_ci    if (!rt_rq->rt_queued) {
11143d0407baSopenharmony_ci        return;
11153d0407baSopenharmony_ci    }
11163d0407baSopenharmony_ci
11173d0407baSopenharmony_ci    BUG_ON(!rq->nr_running);
11183d0407baSopenharmony_ci
11193d0407baSopenharmony_ci    sub_nr_running(rq, count);
11203d0407baSopenharmony_ci    rt_rq->rt_queued = 0;
11213d0407baSopenharmony_ci}
11223d0407baSopenharmony_ci
11233d0407baSopenharmony_cistatic void enqueue_top_rt_rq(struct rt_rq *rt_rq)
11243d0407baSopenharmony_ci{
11253d0407baSopenharmony_ci    struct rq *rq = rq_of_rt_rq(rt_rq);
11263d0407baSopenharmony_ci
11273d0407baSopenharmony_ci    BUG_ON(&rq->rt != rt_rq);
11283d0407baSopenharmony_ci
11293d0407baSopenharmony_ci    if (rt_rq->rt_queued) {
11303d0407baSopenharmony_ci        return;
11313d0407baSopenharmony_ci    }
11323d0407baSopenharmony_ci
11333d0407baSopenharmony_ci    if (rt_rq_throttled(rt_rq)) {
11343d0407baSopenharmony_ci        return;
11353d0407baSopenharmony_ci    }
11363d0407baSopenharmony_ci
11373d0407baSopenharmony_ci    if (rt_rq->rt_nr_running) {
11383d0407baSopenharmony_ci        add_nr_running(rq, rt_rq->rt_nr_running);
11393d0407baSopenharmony_ci        rt_rq->rt_queued = 1;
11403d0407baSopenharmony_ci    }
11413d0407baSopenharmony_ci
11423d0407baSopenharmony_ci    /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
11433d0407baSopenharmony_ci    cpufreq_update_util(rq, 0);
11443d0407baSopenharmony_ci}
11453d0407baSopenharmony_ci
11463d0407baSopenharmony_ci#if defined CONFIG_SMP
11473d0407baSopenharmony_ci
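/*
 * The root domain's cpupri structure mirrors each CPU's highest runnable RT
 * priority so that wakeup placement and push/pull can quickly find CPUs
 * running lower-priority work. Only the top-level rt_rq is allowed to update
 * it; with RT group scheduling, child rt_rqs are filtered out below.
 */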
11483d0407baSopenharmony_cistatic void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
11493d0407baSopenharmony_ci{
11503d0407baSopenharmony_ci    struct rq *rq = rq_of_rt_rq(rt_rq);
11513d0407baSopenharmony_ci
11523d0407baSopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
11533d0407baSopenharmony_ci    /*
11543d0407baSopenharmony_ci     * Change rq's cpupri only if rt_rq is the top queue.
11553d0407baSopenharmony_ci     */
11563d0407baSopenharmony_ci    if (&rq->rt != rt_rq) {
11573d0407baSopenharmony_ci        return;
11583d0407baSopenharmony_ci    }
11593d0407baSopenharmony_ci#endif
11603d0407baSopenharmony_ci    if (rq->online && prio < prev_prio) {
11613d0407baSopenharmony_ci        cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
11623d0407baSopenharmony_ci    }
11633d0407baSopenharmony_ci}
11643d0407baSopenharmony_ci
11653d0407baSopenharmony_cistatic void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
11663d0407baSopenharmony_ci{
11673d0407baSopenharmony_ci    struct rq *rq = rq_of_rt_rq(rt_rq);
11683d0407baSopenharmony_ci
11693d0407baSopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
11703d0407baSopenharmony_ci    /*
11713d0407baSopenharmony_ci     * Change rq's cpupri only if rt_rq is the top queue.
11723d0407baSopenharmony_ci     */
11733d0407baSopenharmony_ci    if (&rq->rt != rt_rq) {
11743d0407baSopenharmony_ci        return;
11753d0407baSopenharmony_ci    }
11763d0407baSopenharmony_ci#endif
11773d0407baSopenharmony_ci    if (rq->online && rt_rq->highest_prio.curr != prev_prio) {
11783d0407baSopenharmony_ci        cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
11793d0407baSopenharmony_ci    }
11803d0407baSopenharmony_ci}
11813d0407baSopenharmony_ci
11823d0407baSopenharmony_ci#else /* CONFIG_SMP */
11833d0407baSopenharmony_ci
11843d0407baSopenharmony_cistatic inline void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
11853d0407baSopenharmony_ci{
11863d0407baSopenharmony_ci}
11873d0407baSopenharmony_cistatic inline void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
11883d0407baSopenharmony_ci{
11893d0407baSopenharmony_ci}
11903d0407baSopenharmony_ci
11913d0407baSopenharmony_ci#endif /* CONFIG_SMP */
11923d0407baSopenharmony_ci
11933d0407baSopenharmony_ci#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
11943d0407baSopenharmony_cistatic void inc_rt_prio(struct rt_rq *rt_rq, int prio)
11953d0407baSopenharmony_ci{
11963d0407baSopenharmony_ci    int prev_prio = rt_rq->highest_prio.curr;
11973d0407baSopenharmony_ci
11983d0407baSopenharmony_ci    if (prio < prev_prio) {
11993d0407baSopenharmony_ci        rt_rq->highest_prio.curr = prio;
12003d0407baSopenharmony_ci    }
12013d0407baSopenharmony_ci
12023d0407baSopenharmony_ci    inc_rt_prio_smp(rt_rq, prio, prev_prio);
12033d0407baSopenharmony_ci}
12043d0407baSopenharmony_ci
12053d0407baSopenharmony_cistatic void dec_rt_prio(struct rt_rq *rt_rq, int prio)
12063d0407baSopenharmony_ci{
12073d0407baSopenharmony_ci    int prev_prio = rt_rq->highest_prio.curr;
12083d0407baSopenharmony_ci
12093d0407baSopenharmony_ci    if (rt_rq->rt_nr_running) {
12103d0407baSopenharmony_ci        WARN_ON(prio < prev_prio);
12113d0407baSopenharmony_ci
12123d0407baSopenharmony_ci        /*
12133d0407baSopenharmony_ci         * This may have been our highest task, and therefore
12143d0407baSopenharmony_ci         * we may have some recomputation to do
12153d0407baSopenharmony_ci         */
12163d0407baSopenharmony_ci        if (prio == prev_prio) {
12173d0407baSopenharmony_ci            struct rt_prio_array *array = &rt_rq->active;
12183d0407baSopenharmony_ci
12193d0407baSopenharmony_ci            rt_rq->highest_prio.curr = sched_find_first_bit(array->bitmap);
12203d0407baSopenharmony_ci        }
12213d0407baSopenharmony_ci    } else {
12223d0407baSopenharmony_ci        rt_rq->highest_prio.curr = MAX_RT_PRIO;
12233d0407baSopenharmony_ci    }
12243d0407baSopenharmony_ci
12253d0407baSopenharmony_ci    dec_rt_prio_smp(rt_rq, prio, prev_prio);
12263d0407baSopenharmony_ci}
12273d0407baSopenharmony_ci
12283d0407baSopenharmony_ci#else
12293d0407baSopenharmony_ci
12303d0407baSopenharmony_cistatic inline void inc_rt_prio(struct rt_rq *rt_rq, int prio)
12313d0407baSopenharmony_ci{
12323d0407baSopenharmony_ci}
12333d0407baSopenharmony_cistatic inline void dec_rt_prio(struct rt_rq *rt_rq, int prio)
12343d0407baSopenharmony_ci{
12353d0407baSopenharmony_ci}
12363d0407baSopenharmony_ci
12373d0407baSopenharmony_ci#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
12383d0407baSopenharmony_ci
12393d0407baSopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
12403d0407baSopenharmony_ci
12413d0407baSopenharmony_cistatic void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12423d0407baSopenharmony_ci{
12433d0407baSopenharmony_ci    if (rt_se_boosted(rt_se)) {
12443d0407baSopenharmony_ci        rt_rq->rt_nr_boosted++;
12453d0407baSopenharmony_ci    }
12463d0407baSopenharmony_ci
12473d0407baSopenharmony_ci    if (rt_rq->tg) {
12483d0407baSopenharmony_ci        start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
12493d0407baSopenharmony_ci    }
12503d0407baSopenharmony_ci}
12513d0407baSopenharmony_ci
12523d0407baSopenharmony_cistatic void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12533d0407baSopenharmony_ci{
12543d0407baSopenharmony_ci    if (rt_se_boosted(rt_se)) {
12553d0407baSopenharmony_ci        rt_rq->rt_nr_boosted--;
12563d0407baSopenharmony_ci    }
12573d0407baSopenharmony_ci
12583d0407baSopenharmony_ci    WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
12593d0407baSopenharmony_ci}
12603d0407baSopenharmony_ci
12613d0407baSopenharmony_ci#else /* CONFIG_RT_GROUP_SCHED */
12623d0407baSopenharmony_ci
12633d0407baSopenharmony_cistatic void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12643d0407baSopenharmony_ci{
12653d0407baSopenharmony_ci    start_rt_bandwidth(&def_rt_bandwidth);
12663d0407baSopenharmony_ci}
12673d0407baSopenharmony_ci
12683d0407baSopenharmony_cistatic inline void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
12693d0407baSopenharmony_ci{
12703d0407baSopenharmony_ci}
12713d0407baSopenharmony_ci
12723d0407baSopenharmony_ci#endif /* CONFIG_RT_GROUP_SCHED */
12733d0407baSopenharmony_ci
12743d0407baSopenharmony_cistatic inline unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
12753d0407baSopenharmony_ci{
12763d0407baSopenharmony_ci    struct rt_rq *group_rq = group_rt_rq(rt_se);
12773d0407baSopenharmony_ci
12783d0407baSopenharmony_ci    if (group_rq) {
12793d0407baSopenharmony_ci        return group_rq->rt_nr_running;
12803d0407baSopenharmony_ci    } else {
12813d0407baSopenharmony_ci        return 1;
12823d0407baSopenharmony_ci    }
12833d0407baSopenharmony_ci}
12843d0407baSopenharmony_ci
12853d0407baSopenharmony_cistatic inline unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
12863d0407baSopenharmony_ci{
12873d0407baSopenharmony_ci    struct rt_rq *group_rq = group_rt_rq(rt_se);
12883d0407baSopenharmony_ci    struct task_struct *tsk;
12893d0407baSopenharmony_ci
12903d0407baSopenharmony_ci    if (group_rq) {
12913d0407baSopenharmony_ci        return group_rq->rr_nr_running;
12923d0407baSopenharmony_ci    }
12933d0407baSopenharmony_ci
12943d0407baSopenharmony_ci    tsk = rt_task_of(rt_se);
12953d0407baSopenharmony_ci
12963d0407baSopenharmony_ci    return (tsk->policy == SCHED_RR) ? 1 : 0;
12973d0407baSopenharmony_ci}
12983d0407baSopenharmony_ci
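/*
 * inc_rt_tasks() / dec_rt_tasks() maintain the aggregate state of an rt_rq
 * as entities are queued and dequeued: the runnable and SCHED_RR counts, the
 * cached highest priority, the migration statistics and the group bandwidth
 * state all follow the weight returned by rt_se_nr_running().
 */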
12993d0407baSopenharmony_cistatic inline void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
13003d0407baSopenharmony_ci{
13013d0407baSopenharmony_ci    int prio = rt_se_prio(rt_se);
13023d0407baSopenharmony_ci
13033d0407baSopenharmony_ci    WARN_ON(!rt_prio(prio));
13043d0407baSopenharmony_ci    rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
13053d0407baSopenharmony_ci    rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
13063d0407baSopenharmony_ci
13073d0407baSopenharmony_ci    inc_rt_prio(rt_rq, prio);
13083d0407baSopenharmony_ci    inc_rt_migration(rt_se, rt_rq);
13093d0407baSopenharmony_ci    inc_rt_group(rt_se, rt_rq);
13103d0407baSopenharmony_ci}
13113d0407baSopenharmony_ci
13123d0407baSopenharmony_cistatic inline void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
13133d0407baSopenharmony_ci{
13143d0407baSopenharmony_ci    WARN_ON(!rt_prio(rt_se_prio(rt_se)));
13153d0407baSopenharmony_ci    WARN_ON(!rt_rq->rt_nr_running);
13163d0407baSopenharmony_ci    rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
13173d0407baSopenharmony_ci    rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
13183d0407baSopenharmony_ci
13193d0407baSopenharmony_ci    dec_rt_prio(rt_rq, rt_se_prio(rt_se));
13203d0407baSopenharmony_ci    dec_rt_migration(rt_se, rt_rq);
13213d0407baSopenharmony_ci    dec_rt_group(rt_se, rt_rq);
13223d0407baSopenharmony_ci}
13233d0407baSopenharmony_ci
13243d0407baSopenharmony_ci/*
13253d0407baSopenharmony_ci * Change rt_se->run_list location unless SAVE && !MOVE
13263d0407baSopenharmony_ci *
13273d0407baSopenharmony_ci * assumes ENQUEUE/DEQUEUE flags match
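 *
 * SAVE without MOVE means the entity is only being dequeued temporarily
 * (e.g. around an attribute change) and will be put back where it was,
 * so its position in the run list must be preserved.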
13283d0407baSopenharmony_ci */
13293d0407baSopenharmony_cistatic inline bool move_entity(unsigned int flags)
13303d0407baSopenharmony_ci{
13313d0407baSopenharmony_ci    if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) {
13323d0407baSopenharmony_ci        return false;
13333d0407baSopenharmony_ci    }
13343d0407baSopenharmony_ci
13353d0407baSopenharmony_ci    return true;
13363d0407baSopenharmony_ci}
13373d0407baSopenharmony_ci
13383d0407baSopenharmony_cistatic void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
13393d0407baSopenharmony_ci{
13403d0407baSopenharmony_ci    list_del_init(&rt_se->run_list);
13413d0407baSopenharmony_ci
13423d0407baSopenharmony_ci    if (list_empty(array->queue + rt_se_prio(rt_se))) {
13433d0407baSopenharmony_ci        __clear_bit(rt_se_prio(rt_se), array->bitmap);
13443d0407baSopenharmony_ci    }
13453d0407baSopenharmony_ci
13463d0407baSopenharmony_ci    rt_se->on_list = 0;
13473d0407baSopenharmony_ci}
13483d0407baSopenharmony_ci
13493d0407baSopenharmony_cistatic void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
13503d0407baSopenharmony_ci{
13513d0407baSopenharmony_ci    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
13523d0407baSopenharmony_ci    struct rt_prio_array *array = &rt_rq->active;
13533d0407baSopenharmony_ci    struct rt_rq *group_rq = group_rt_rq(rt_se);
13543d0407baSopenharmony_ci    struct list_head *queue = array->queue + rt_se_prio(rt_se);
13553d0407baSopenharmony_ci
13563d0407baSopenharmony_ci    /*
13573d0407baSopenharmony_ci     * Don't enqueue the group if it's throttled, or when it is empty.
13583d0407baSopenharmony_ci     * The latter is a consequence of the former when a child group
13593d0407baSopenharmony_ci     * gets throttled and the current group doesn't have any other
13603d0407baSopenharmony_ci     * active members.
13613d0407baSopenharmony_ci     */
13623d0407baSopenharmony_ci    if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
13633d0407baSopenharmony_ci        if (rt_se->on_list) {
13643d0407baSopenharmony_ci            __delist_rt_entity(rt_se, array);
13653d0407baSopenharmony_ci        }
13663d0407baSopenharmony_ci        return;
13673d0407baSopenharmony_ci    }
13683d0407baSopenharmony_ci
13693d0407baSopenharmony_ci    if (move_entity(flags)) {
13703d0407baSopenharmony_ci        WARN_ON_ONCE(rt_se->on_list);
13713d0407baSopenharmony_ci        if (flags & ENQUEUE_HEAD) {
13723d0407baSopenharmony_ci            list_add(&rt_se->run_list, queue);
13733d0407baSopenharmony_ci        } else {
13743d0407baSopenharmony_ci            list_add_tail(&rt_se->run_list, queue);
13753d0407baSopenharmony_ci        }
13763d0407baSopenharmony_ci
13773d0407baSopenharmony_ci        __set_bit(rt_se_prio(rt_se), array->bitmap);
13783d0407baSopenharmony_ci        rt_se->on_list = 1;
13793d0407baSopenharmony_ci    }
13803d0407baSopenharmony_ci    rt_se->on_rq = 1;
13813d0407baSopenharmony_ci
13823d0407baSopenharmony_ci    inc_rt_tasks(rt_se, rt_rq);
13833d0407baSopenharmony_ci}
13843d0407baSopenharmony_ci
13853d0407baSopenharmony_cistatic void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
13863d0407baSopenharmony_ci{
13873d0407baSopenharmony_ci    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
13883d0407baSopenharmony_ci    struct rt_prio_array *array = &rt_rq->active;
13893d0407baSopenharmony_ci
13903d0407baSopenharmony_ci    if (move_entity(flags)) {
13913d0407baSopenharmony_ci        WARN_ON_ONCE(!rt_se->on_list);
13923d0407baSopenharmony_ci        __delist_rt_entity(rt_se, array);
13933d0407baSopenharmony_ci    }
13943d0407baSopenharmony_ci    rt_se->on_rq = 0;
13953d0407baSopenharmony_ci
13963d0407baSopenharmony_ci    dec_rt_tasks(rt_se, rt_rq);
13973d0407baSopenharmony_ci}
13983d0407baSopenharmony_ci
13993d0407baSopenharmony_ci/*
14003d0407baSopenharmony_ci * Because the prio of an upper entry depends on the lower
14013d0407baSopenharmony_ci * entries, we must remove entries top-down.
14023d0407baSopenharmony_ci */
14033d0407baSopenharmony_cistatic void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
14043d0407baSopenharmony_ci{
14053d0407baSopenharmony_ci    struct sched_rt_entity *back = NULL;
14063d0407baSopenharmony_ci    unsigned int rt_nr_running;
14073d0407baSopenharmony_ci
14083d0407baSopenharmony_ci    cycle_each_sched_rt_entity(rt_se) {
14093d0407baSopenharmony_ci        rt_se->back = back;
14103d0407baSopenharmony_ci        back = rt_se;
14113d0407baSopenharmony_ci    }
14123d0407baSopenharmony_ci
14133d0407baSopenharmony_ci    rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
14143d0407baSopenharmony_ci
14153d0407baSopenharmony_ci    for (rt_se = back; rt_se; rt_se = rt_se->back) {
14163d0407baSopenharmony_ci        if (on_rt_rq(rt_se)) {
14173d0407baSopenharmony_ci            __dequeue_rt_entity(rt_se, flags);
14183d0407baSopenharmony_ci        }
14193d0407baSopenharmony_ci    }
14203d0407baSopenharmony_ci    dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
14213d0407baSopenharmony_ci}
14223d0407baSopenharmony_ci
14233d0407baSopenharmony_cistatic void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
14243d0407baSopenharmony_ci{
14253d0407baSopenharmony_ci    struct rq *rq = rq_of_rt_se(rt_se);
14263d0407baSopenharmony_ci
14273d0407baSopenharmony_ci    dequeue_rt_stack(rt_se, flags);
14283d0407baSopenharmony_ci    cycle_each_sched_rt_entity(rt_se) __enqueue_rt_entity(rt_se, flags);
14293d0407baSopenharmony_ci    enqueue_top_rt_rq(&rq->rt);
14303d0407baSopenharmony_ci}
14313d0407baSopenharmony_ci
14323d0407baSopenharmony_cistatic void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
14333d0407baSopenharmony_ci{
14343d0407baSopenharmony_ci    struct rq *rq = rq_of_rt_se(rt_se);
14353d0407baSopenharmony_ci
14363d0407baSopenharmony_ci    dequeue_rt_stack(rt_se, flags);
14373d0407baSopenharmony_ci
14383d0407baSopenharmony_ci    cycle_each_sched_rt_entity(rt_se) {
14393d0407baSopenharmony_ci        struct rt_rq *rt_rq = group_rt_rq(rt_se);
14403d0407baSopenharmony_ci
14413d0407baSopenharmony_ci        if (rt_rq && rt_rq->rt_nr_running) {
14423d0407baSopenharmony_ci            __enqueue_rt_entity(rt_se, flags);
14433d0407baSopenharmony_ci        }
14443d0407baSopenharmony_ci    }
14453d0407baSopenharmony_ci    enqueue_top_rt_rq(&rq->rt);
14463d0407baSopenharmony_ci}
14473d0407baSopenharmony_ci
14483d0407baSopenharmony_ci#ifdef CONFIG_SMP
14493d0407baSopenharmony_cistatic inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync)
14503d0407baSopenharmony_ci{
14513d0407baSopenharmony_ci    /*
14523d0407baSopenharmony_ci     * If the waker is CFS, then an RT sync wakeup would preempt the waker
14533d0407baSopenharmony_ci     * and force it to run for a likely small time after the RT wakee is
14543d0407baSopenharmony_ci     * done. So, only honor RT sync wakeups from RT wakers.
14553d0407baSopenharmony_ci     */
14563d0407baSopenharmony_ci    return sync && task_has_rt_policy(rq->curr) && p->prio <= rq->rt.highest_prio.next && rq->rt.rt_nr_running <= 0x2;
14573d0407baSopenharmony_ci}
14583d0407baSopenharmony_ci#else
14593d0407baSopenharmony_cistatic inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync)
14603d0407baSopenharmony_ci{
14613d0407baSopenharmony_ci    return false;
14623d0407baSopenharmony_ci}
14633d0407baSopenharmony_ci#endif
14643d0407baSopenharmony_ci
14653d0407baSopenharmony_ci/*
14663d0407baSopenharmony_ci * Adding/removing a task to/from a priority array:
14673d0407baSopenharmony_ci */
14683d0407baSopenharmony_cistatic void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
14693d0407baSopenharmony_ci{
14703d0407baSopenharmony_ci    struct sched_rt_entity *rt_se = &p->rt;
14713d0407baSopenharmony_ci    bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
14723d0407baSopenharmony_ci
14733d0407baSopenharmony_ci    if (flags & ENQUEUE_WAKEUP) {
14743d0407baSopenharmony_ci        rt_se->timeout = 0;
14753d0407baSopenharmony_ci    }
14763d0407baSopenharmony_ci
14773d0407baSopenharmony_ci    enqueue_rt_entity(rt_se, flags);
14783d0407baSopenharmony_ci    walt_inc_cumulative_runnable_avg(rq, p);
14793d0407baSopenharmony_ci
14803d0407baSopenharmony_ci    if (!task_current(rq, p) && p->nr_cpus_allowed > 1 && !should_honor_rt_sync(rq, p, sync)) {
14813d0407baSopenharmony_ci        enqueue_pushable_task(rq, p);
14823d0407baSopenharmony_ci    }
14833d0407baSopenharmony_ci}
14843d0407baSopenharmony_ci
14853d0407baSopenharmony_cistatic void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
14863d0407baSopenharmony_ci{
14873d0407baSopenharmony_ci    struct sched_rt_entity *rt_se = &p->rt;
14883d0407baSopenharmony_ci
14893d0407baSopenharmony_ci    update_curr_rt(rq);
14903d0407baSopenharmony_ci    dequeue_rt_entity(rt_se, flags);
14913d0407baSopenharmony_ci    walt_dec_cumulative_runnable_avg(rq, p);
14923d0407baSopenharmony_ci
14933d0407baSopenharmony_ci    dequeue_pushable_task(rq, p);
14943d0407baSopenharmony_ci}
14953d0407baSopenharmony_ci
14963d0407baSopenharmony_ci/*
14973d0407baSopenharmony_ci * Put the task at the head or the tail of the run list without the overhead
14983d0407baSopenharmony_ci * of a dequeue followed by an enqueue.
14993d0407baSopenharmony_ci */
15003d0407baSopenharmony_cistatic void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
15013d0407baSopenharmony_ci{
15023d0407baSopenharmony_ci    if (on_rt_rq(rt_se)) {
15033d0407baSopenharmony_ci        struct rt_prio_array *array = &rt_rq->active;
15043d0407baSopenharmony_ci        struct list_head *queue = array->queue + rt_se_prio(rt_se);
15053d0407baSopenharmony_ci
15063d0407baSopenharmony_ci        if (head) {
15073d0407baSopenharmony_ci            list_move(&rt_se->run_list, queue);
15083d0407baSopenharmony_ci        } else {
15093d0407baSopenharmony_ci            list_move_tail(&rt_se->run_list, queue);
15103d0407baSopenharmony_ci        }
15113d0407baSopenharmony_ci    }
15123d0407baSopenharmony_ci}
15133d0407baSopenharmony_ci
15143d0407baSopenharmony_cistatic void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
15153d0407baSopenharmony_ci{
15163d0407baSopenharmony_ci    struct sched_rt_entity *rt_se = &p->rt;
15173d0407baSopenharmony_ci    struct rt_rq *rt_rq;
15183d0407baSopenharmony_ci
15193d0407baSopenharmony_ci    cycle_each_sched_rt_entity(rt_se) {
15203d0407baSopenharmony_ci        rt_rq = rt_rq_of_se(rt_se);
15213d0407baSopenharmony_ci        requeue_rt_entity(rt_rq, rt_se, head);
15223d0407baSopenharmony_ci    }
15233d0407baSopenharmony_ci}
15243d0407baSopenharmony_ci
15253d0407baSopenharmony_cistatic void yield_task_rt(struct rq *rq)
15263d0407baSopenharmony_ci{
15273d0407baSopenharmony_ci    requeue_task_rt(rq, rq->curr, 0);
15283d0407baSopenharmony_ci}
15293d0407baSopenharmony_ci
15303d0407baSopenharmony_ci#ifdef CONFIG_SMP
15313d0407baSopenharmony_cistatic int find_lowest_rq(struct task_struct *task);
15323d0407baSopenharmony_ci
15333d0407baSopenharmony_cistatic int select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
15343d0407baSopenharmony_ci{
15353d0407baSopenharmony_ci    struct task_struct *curr;
15363d0407baSopenharmony_ci    struct rq *rq;
15373d0407baSopenharmony_ci    struct rq *this_cpu_rq;
15383d0407baSopenharmony_ci    bool test;
15393d0407baSopenharmony_ci    int target_cpu = -1;
15403d0407baSopenharmony_ci    bool sync = !!(flags & WF_SYNC);
15413d0407baSopenharmony_ci    int this_cpu;
15423d0407baSopenharmony_ci
15433d0407baSopenharmony_ci    /* For anything but wake ups, just return the task_cpu */
15443d0407baSopenharmony_ci    if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) {
15453d0407baSopenharmony_ci        goto out;
15463d0407baSopenharmony_ci    }
15473d0407baSopenharmony_ci
15483d0407baSopenharmony_ci    rq = cpu_rq(cpu);
15493d0407baSopenharmony_ci
15503d0407baSopenharmony_ci    rcu_read_lock();
15513d0407baSopenharmony_ci    curr = READ_ONCE(rq->curr); /* unlocked access */
15523d0407baSopenharmony_ci    this_cpu = smp_processor_id();
15533d0407baSopenharmony_ci    this_cpu_rq = cpu_rq(this_cpu);
15543d0407baSopenharmony_ci
15553d0407baSopenharmony_ci    /*
15563d0407baSopenharmony_ci     * If the current task on @p's runqueue is an RT task, then
15573d0407baSopenharmony_ci     * try to see if we can wake this RT task up on another
15583d0407baSopenharmony_ci     * runqueue. Otherwise simply start this RT task
15593d0407baSopenharmony_ci     * on its current runqueue.
15603d0407baSopenharmony_ci     *
15613d0407baSopenharmony_ci     * We want to avoid overloading runqueues. If the woken
15623d0407baSopenharmony_ci     * task is a higher priority, then it will stay on this CPU
15633d0407baSopenharmony_ci     * and the lower prio task should be moved to another CPU.
15643d0407baSopenharmony_ci     * Even though this will probably make the lower prio task
15653d0407baSopenharmony_ci     * lose its cache, we do not want to bounce a higher priority task
15663d0407baSopenharmony_ci     * around just because it gave up its CPU, perhaps for a
15673d0407baSopenharmony_ci     * lock?
15683d0407baSopenharmony_ci     *
15693d0407baSopenharmony_ci     * For equal prio tasks, we just let the scheduler sort it out.
15703d0407baSopenharmony_ci     *
15713d0407baSopenharmony_ci     * Otherwise, just let it ride on the affined RQ and the
15723d0407baSopenharmony_ci     * post-schedule router will push the preempted task away
15733d0407baSopenharmony_ci     *
15743d0407baSopenharmony_ci     * This test is optimistic, if we get it wrong the load-balancer
15753d0407baSopenharmony_ci     * will have to sort it out.
15763d0407baSopenharmony_ci     *
15773d0407baSopenharmony_ci     * We take into account the capacity of the CPU to ensure it fits the
15783d0407baSopenharmony_ci     * requirement of the task - which is only important on heterogeneous
15793d0407baSopenharmony_ci     * systems like big.LITTLE.
15803d0407baSopenharmony_ci     */
15813d0407baSopenharmony_ci    test = curr && unlikely(rt_task(curr)) && (curr->nr_cpus_allowed < 0x2 || curr->prio <= p->prio);
15823d0407baSopenharmony_ci
15833d0407baSopenharmony_ci    /*
15843d0407baSopenharmony_ci     * Respect the sync flag as long as the task can run on this CPU.
15853d0407baSopenharmony_ci     */
15863d0407baSopenharmony_ci    if (should_honor_rt_sync(this_cpu_rq, p, sync) && cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
15873d0407baSopenharmony_ci        cpu = this_cpu;
15883d0407baSopenharmony_ci        goto out_unlock;
15893d0407baSopenharmony_ci    }
15903d0407baSopenharmony_ci
15913d0407baSopenharmony_ci    if (test || !rt_task_fits_capacity(p, cpu)) {
15923d0407baSopenharmony_ci        int target = find_lowest_rq(p);
15933d0407baSopenharmony_ci        /*
15943d0407baSopenharmony_ci         * Bail out if we were forcing a migration to find a better
15953d0407baSopenharmony_ci         * fitting CPU but our search failed.
15963d0407baSopenharmony_ci         */
15973d0407baSopenharmony_ci        if (!test && target != -1 && !rt_task_fits_capacity(p, target)) {
15983d0407baSopenharmony_ci            goto out_unlock;
15993d0407baSopenharmony_ci        }
16003d0407baSopenharmony_ci
16013d0407baSopenharmony_ci        /*
16023d0407baSopenharmony_ci         * Don't bother moving it if the destination CPU is
16033d0407baSopenharmony_ci         * not running a lower priority task.
16043d0407baSopenharmony_ci         */
16053d0407baSopenharmony_ci        if (target != -1 && p->prio < cpu_rq(target)->rt.highest_prio.curr) {
16063d0407baSopenharmony_ci            cpu = target;
16073d0407baSopenharmony_ci        }
16083d0407baSopenharmony_ci    }
16093d0407baSopenharmony_ci
16103d0407baSopenharmony_ciout_unlock:
16113d0407baSopenharmony_ci    rcu_read_unlock();
16123d0407baSopenharmony_ci
16133d0407baSopenharmony_ciout:
16143d0407baSopenharmony_ci    return cpu;
16153d0407baSopenharmony_ci}
16163d0407baSopenharmony_ci
16173d0407baSopenharmony_cistatic void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
16183d0407baSopenharmony_ci{
16193d0407baSopenharmony_ci    /*
16203d0407baSopenharmony_ci     * Current can't be migrated, useless to reschedule,
16213d0407baSopenharmony_ci     * let's hope p can move out.
16223d0407baSopenharmony_ci     */
16233d0407baSopenharmony_ci    if (rq->curr->nr_cpus_allowed == 1 || !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) {
16243d0407baSopenharmony_ci        return;
16253d0407baSopenharmony_ci    }
16263d0407baSopenharmony_ci
16273d0407baSopenharmony_ci    /*
16283d0407baSopenharmony_ci     * p is migratable, so let's not schedule it and
16293d0407baSopenharmony_ci     * see if it is pushed or pulled somewhere else.
16303d0407baSopenharmony_ci     */
16313d0407baSopenharmony_ci    if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) {
16323d0407baSopenharmony_ci        return;
16333d0407baSopenharmony_ci    }
16343d0407baSopenharmony_ci
16353d0407baSopenharmony_ci    /*
16363d0407baSopenharmony_ci     * There appear to be other CPUs that can accept
16373d0407baSopenharmony_ci     * the current task but none can run 'p', so let's reschedule
16383d0407baSopenharmony_ci     * to try and push the current task away:
16393d0407baSopenharmony_ci     */
16403d0407baSopenharmony_ci    requeue_task_rt(rq, p, 1);
16413d0407baSopenharmony_ci    resched_curr(rq);
16423d0407baSopenharmony_ci}
16433d0407baSopenharmony_ci
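/*
 * balance_rt() is this class' balance callback, invoked from the pick path:
 * before the CPU settles for lower-priority work it gets a chance to pull
 * queued RT tasks over from other runqueues (see need_pull_rt_task()).
 */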
16443d0407baSopenharmony_cistatic int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
16453d0407baSopenharmony_ci{
16463d0407baSopenharmony_ci    if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
16473d0407baSopenharmony_ci        /*
16483d0407baSopenharmony_ci         * This is OK, because current is on_cpu, which avoids it being
16493d0407baSopenharmony_ci         * picked for load-balance and preemption/IRQs are still
16503d0407baSopenharmony_ci         * disabled avoiding further scheduler activity on it and we've
16513d0407baSopenharmony_ci         * not yet started the picking loop.
16523d0407baSopenharmony_ci         */
16533d0407baSopenharmony_ci        rq_unpin_lock(rq, rf);
16543d0407baSopenharmony_ci        pull_rt_task(rq);
16553d0407baSopenharmony_ci        rq_repin_lock(rq, rf);
16563d0407baSopenharmony_ci    }
16573d0407baSopenharmony_ci
16583d0407baSopenharmony_ci    return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
16593d0407baSopenharmony_ci}
16603d0407baSopenharmony_ci#endif /* CONFIG_SMP */
16613d0407baSopenharmony_ci
16623d0407baSopenharmony_ci/*
16633d0407baSopenharmony_ci * Preempt the current task with a newly woken task if needed:
16643d0407baSopenharmony_ci */
16653d0407baSopenharmony_cistatic void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
16663d0407baSopenharmony_ci{
16673d0407baSopenharmony_ci    if (p->prio < rq->curr->prio) {
16683d0407baSopenharmony_ci        resched_curr(rq);
16693d0407baSopenharmony_ci        return;
16703d0407baSopenharmony_ci    }
16713d0407baSopenharmony_ci
16723d0407baSopenharmony_ci#ifdef CONFIG_SMP
16733d0407baSopenharmony_ci    /*
16743d0407baSopenharmony_ci     * If:
16753d0407baSopenharmony_ci     *
16763d0407baSopenharmony_ci     * - the newly woken task is of equal priority to the current task
16773d0407baSopenharmony_ci     * - the newly woken task is non-migratable while current is migratable
16783d0407baSopenharmony_ci     * - current will be preempted on the next reschedule
16793d0407baSopenharmony_ci     *
16803d0407baSopenharmony_ci     * we should check to see if current can readily move to a different
16813d0407baSopenharmony_ci     * cpu.  If so, we will reschedule to allow the push logic to try
16823d0407baSopenharmony_ci     * to move current somewhere else, making room for our non-migratable
16833d0407baSopenharmony_ci     * task.
16843d0407baSopenharmony_ci     */
16853d0407baSopenharmony_ci    if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) {
16863d0407baSopenharmony_ci        check_preempt_equal_prio(rq, p);
16873d0407baSopenharmony_ci    }
16883d0407baSopenharmony_ci#endif
16893d0407baSopenharmony_ci}
16903d0407baSopenharmony_ci
16913d0407baSopenharmony_cistatic inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
16923d0407baSopenharmony_ci{
16933d0407baSopenharmony_ci    p->se.exec_start = rq_clock_task(rq);
16943d0407baSopenharmony_ci
16953d0407baSopenharmony_ci    /* The running task is never eligible for pushing */
16963d0407baSopenharmony_ci    dequeue_pushable_task(rq, p);
16973d0407baSopenharmony_ci
16983d0407baSopenharmony_ci    if (!first) {
16993d0407baSopenharmony_ci        return;
17003d0407baSopenharmony_ci    }
17013d0407baSopenharmony_ci
17023d0407baSopenharmony_ci    /*
17033d0407baSopenharmony_ci     * If the previous task was RT, put_prev_task() has already updated the
17043d0407baSopenharmony_ci     * utilization. We only care about the case where we start to schedule
17053d0407baSopenharmony_ci     * an RT task.
17063d0407baSopenharmony_ci     */
17073d0407baSopenharmony_ci    if (rq->curr->sched_class != &rt_sched_class) {
17083d0407baSopenharmony_ci        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
17093d0407baSopenharmony_ci    }
17103d0407baSopenharmony_ci
17113d0407baSopenharmony_ci    rt_queue_push_tasks(rq);
17123d0407baSopenharmony_ci}
17133d0407baSopenharmony_ci
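/*
 * Pick the entity to run next from @rt_rq: the priority bitmap yields the
 * highest-priority non-empty queue in O(1), and the head of that queue's
 * list is the next entity.
 */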
17143d0407baSopenharmony_cistatic struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq)
17153d0407baSopenharmony_ci{
17163d0407baSopenharmony_ci    struct rt_prio_array *array = &rt_rq->active;
17173d0407baSopenharmony_ci    struct sched_rt_entity *next = NULL;
17183d0407baSopenharmony_ci    struct list_head *queue;
17193d0407baSopenharmony_ci    int idx;
17203d0407baSopenharmony_ci
17213d0407baSopenharmony_ci    idx = sched_find_first_bit(array->bitmap);
17223d0407baSopenharmony_ci    BUG_ON(idx >= MAX_RT_PRIO);
17233d0407baSopenharmony_ci
17243d0407baSopenharmony_ci    queue = array->queue + idx;
17253d0407baSopenharmony_ci    next = list_entry(queue->next, struct sched_rt_entity, run_list);
17263d0407baSopenharmony_ci
17273d0407baSopenharmony_ci    return next;
17283d0407baSopenharmony_ci}
17293d0407baSopenharmony_ci
17303d0407baSopenharmony_cistatic struct task_struct *_pick_next_task_rt(struct rq *rq)
17313d0407baSopenharmony_ci{
17323d0407baSopenharmony_ci    struct sched_rt_entity *rt_se;
17333d0407baSopenharmony_ci    struct rt_rq *rt_rq = &rq->rt;
17343d0407baSopenharmony_ci
17353d0407baSopenharmony_ci    do {
17363d0407baSopenharmony_ci        rt_se = pick_next_rt_entity(rq, rt_rq);
17373d0407baSopenharmony_ci        BUG_ON(!rt_se);
17383d0407baSopenharmony_ci        rt_rq = group_rt_rq(rt_se);
17393d0407baSopenharmony_ci    } while (rt_rq);
17403d0407baSopenharmony_ci
17413d0407baSopenharmony_ci    return rt_task_of(rt_se);
17423d0407baSopenharmony_ci}
17433d0407baSopenharmony_ci
17443d0407baSopenharmony_cistatic struct task_struct *pick_next_task_rt(struct rq *rq)
17453d0407baSopenharmony_ci{
17463d0407baSopenharmony_ci    struct task_struct *p;
17473d0407baSopenharmony_ci
17483d0407baSopenharmony_ci    if (!sched_rt_runnable(rq)) {
17493d0407baSopenharmony_ci        return NULL;
17503d0407baSopenharmony_ci    }
17513d0407baSopenharmony_ci
17523d0407baSopenharmony_ci    p = _pick_next_task_rt(rq);
17533d0407baSopenharmony_ci    set_next_task_rt(rq, p, true);
17543d0407baSopenharmony_ci    return p;
17553d0407baSopenharmony_ci}
17563d0407baSopenharmony_ci
17573d0407baSopenharmony_cistatic void put_prev_task_rt(struct rq *rq, struct task_struct *p)
17583d0407baSopenharmony_ci{
17593d0407baSopenharmony_ci    update_curr_rt(rq);
17603d0407baSopenharmony_ci
17613d0407baSopenharmony_ci    update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
17623d0407baSopenharmony_ci
17633d0407baSopenharmony_ci    /*
17643d0407baSopenharmony_ci     * The previous task needs to be made eligible for pushing
17653d0407baSopenharmony_ci     * if it is still active
17663d0407baSopenharmony_ci     */
17673d0407baSopenharmony_ci    if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
17683d0407baSopenharmony_ci        enqueue_pushable_task(rq, p);
17693d0407baSopenharmony_ci    }
17703d0407baSopenharmony_ci}
17713d0407baSopenharmony_ci
17723d0407baSopenharmony_ci#ifdef CONFIG_SMP
17733d0407baSopenharmony_ci
17743d0407baSopenharmony_ci/* Only try algorithms three times */
17753d0407baSopenharmony_ci#define RT_MAX_TRIES 3
17763d0407baSopenharmony_ci
17773d0407baSopenharmony_cistatic int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
17783d0407baSopenharmony_ci{
17793d0407baSopenharmony_ci    if (!task_running(rq, p) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
17803d0407baSopenharmony_ci        return 1;
17813d0407baSopenharmony_ci    }
17823d0407baSopenharmony_ci
17833d0407baSopenharmony_ci    return 0;
17843d0407baSopenharmony_ci}
17853d0407baSopenharmony_ci
17863d0407baSopenharmony_ci/*
17873d0407baSopenharmony_ci * Return the highest pushable rq's task, which is suitable to be executed
17883d0407baSopenharmony_ci * on the CPU, NULL otherwise
17893d0407baSopenharmony_ci */
17903d0407baSopenharmony_cistatic struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
17913d0407baSopenharmony_ci{
17923d0407baSopenharmony_ci    struct plist_head *head = &rq->rt.pushable_tasks;
17933d0407baSopenharmony_ci    struct task_struct *p;
17943d0407baSopenharmony_ci
17953d0407baSopenharmony_ci    if (!has_pushable_tasks(rq)) {
17963d0407baSopenharmony_ci        return NULL;
17973d0407baSopenharmony_ci    }
17983d0407baSopenharmony_ci
17993d0407baSopenharmony_ci    plist_for_each_entry(p, head, pushable_tasks)
18003d0407baSopenharmony_ci    {
18013d0407baSopenharmony_ci        if (pick_rt_task(rq, p, cpu)) {
18023d0407baSopenharmony_ci            return p;
18033d0407baSopenharmony_ci        }
18043d0407baSopenharmony_ci    }
18053d0407baSopenharmony_ci
18063d0407baSopenharmony_ci    return NULL;
18073d0407baSopenharmony_ci}
18083d0407baSopenharmony_ci
18093d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
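/*
 * Capacity-aware CPU selection for RT tasks, gated by CONFIG_SCHED_RT_CAS
 * and the sysctl_sched_enable_rt_cas knob. Among the CPUs in @lowest_mask it
 * prefers a sched_group whose CPUs can accommodate the task's clamped
 * utilization (falling back to the most capable group otherwise), then picks
 * the least-utilized candidate within it, breaking ties in favour of the
 * task's previous CPU. Returns the chosen CPU, or -1 if none is suitable.
 */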
18103d0407baSopenharmony_cistatic int find_cas_cpu(struct sched_domain *sd, struct task_struct *task, struct cpumask *lowest_mask)
18113d0407baSopenharmony_ci{
18123d0407baSopenharmony_ci    struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
18133d0407baSopenharmony_ci    struct sched_group *sg = NULL;
18143d0407baSopenharmony_ci    struct sched_group *sg_target = NULL;
18153d0407baSopenharmony_ci    struct sched_group *sg_backup = NULL;
18163d0407baSopenharmony_ci    struct cpumask search_cpu, backup_search_cpu;
18173d0407baSopenharmony_ci    int cpu = -1;
18183d0407baSopenharmony_ci    int target_cpu = -1;
18193d0407baSopenharmony_ci    unsigned long cpu_capacity;
18203d0407baSopenharmony_ci    unsigned long boosted_tutil = uclamp_task_util(task);
18213d0407baSopenharmony_ci    unsigned long target_capacity = ULONG_MAX;
18223d0407baSopenharmony_ci    unsigned long util;
18233d0407baSopenharmony_ci    unsigned long target_cpu_util = ULONG_MAX;
18243d0407baSopenharmony_ci    int prev_cpu = task_cpu(task);
18253d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RTG
18263d0407baSopenharmony_ci    struct cpumask *rtg_target = NULL;
18273d0407baSopenharmony_ci#endif
18283d0407baSopenharmony_ci    bool boosted = uclamp_boosted(task);
18293d0407baSopenharmony_ci
18303d0407baSopenharmony_ci    if (!sysctl_sched_enable_rt_cas) {
18313d0407baSopenharmony_ci        return -1;
18323d0407baSopenharmony_ci    }
18333d0407baSopenharmony_ci
18343d0407baSopenharmony_ci    rcu_read_lock();
18353d0407baSopenharmony_ci
18363d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RTG
18373d0407baSopenharmony_ci    rtg_target = find_rtg_target(task);
18383d0407baSopenharmony_ci#endif
18393d0407baSopenharmony_ci
18403d0407baSopenharmony_ci    sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, 0));
18413d0407baSopenharmony_ci    if (!sd) {
18423d0407baSopenharmony_ci        rcu_read_unlock();
18433d0407baSopenharmony_ci        return -1;
18443d0407baSopenharmony_ci    }
18453d0407baSopenharmony_ci
18463d0407baSopenharmony_ci    sg = sd->groups;
18473d0407baSopenharmony_ci    do {
18483d0407baSopenharmony_ci        if (!cpumask_intersects(lowest_mask, sched_group_span(sg))) {
18493d0407baSopenharmony_ci            continue;
18503d0407baSopenharmony_ci        }
18513d0407baSopenharmony_ci
18523d0407baSopenharmony_ci        if (boosted) {
18533d0407baSopenharmony_ci            if (cpumask_test_cpu(rd->max_cap_orig_cpu, sched_group_span(sg))) {
18543d0407baSopenharmony_ci                sg_target = sg;
18553d0407baSopenharmony_ci                break;
18563d0407baSopenharmony_ci            }
18573d0407baSopenharmony_ci        }
18583d0407baSopenharmony_ci
18593d0407baSopenharmony_ci        cpu = group_first_cpu(sg);
18603d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RTG
18613d0407baSopenharmony_ci        /* honor the rtg tasks */
18623d0407baSopenharmony_ci        if (rtg_target) {
18633d0407baSopenharmony_ci            if (cpumask_test_cpu(cpu, rtg_target)) {
18643d0407baSopenharmony_ci                sg_target = sg;
18653d0407baSopenharmony_ci                break;
18663d0407baSopenharmony_ci            }
18673d0407baSopenharmony_ci
18683d0407baSopenharmony_ci            /* active LB or big_task favor cpus with more capacity */
18693d0407baSopenharmony_ci            if (task->state == TASK_RUNNING || boosted) {
18703d0407baSopenharmony_ci                if (capacity_orig_of(cpu) > capacity_orig_of(cpumask_any(rtg_target))) {
18713d0407baSopenharmony_ci                    sg_target = sg;
18723d0407baSopenharmony_ci                    break;
18733d0407baSopenharmony_ci                }
18743d0407baSopenharmony_ci
18753d0407baSopenharmony_ci                sg_backup = sg;
18763d0407baSopenharmony_ci                continue;
18773d0407baSopenharmony_ci            }
18783d0407baSopenharmony_ci        }
18793d0407baSopenharmony_ci#endif
18803d0407baSopenharmony_ci        /*
18813d0407baSopenharmony_ci         * 1. add margin to support task migration
18823d0407baSopenharmony_ci         * 2. if task_util is higher than what any cpu can provide, make
18833d0407baSopenharmony_ci         *    sure the sg_backup with the most powerful cpus is selected
18843d0407baSopenharmony_ci         */
18853d0407baSopenharmony_ci        if (!rt_task_fits_capacity(task, cpu)) {
18863d0407baSopenharmony_ci            sg_backup = sg;
18873d0407baSopenharmony_ci            continue;
18883d0407baSopenharmony_ci        }
18893d0407baSopenharmony_ci
18903d0407baSopenharmony_ci        /* support task boost */
18913d0407baSopenharmony_ci        cpu_capacity = capacity_orig_of(cpu);
18923d0407baSopenharmony_ci        if (boosted_tutil > cpu_capacity) {
18933d0407baSopenharmony_ci            sg_backup = sg;
18943d0407baSopenharmony_ci            continue;
18953d0407baSopenharmony_ci        }
18963d0407baSopenharmony_ci
18973d0407baSopenharmony_ci        /* sg_target: select the sg with the smallest capacity */
18983d0407baSopenharmony_ci        if (cpu_capacity < target_capacity) {
18993d0407baSopenharmony_ci            target_capacity = cpu_capacity;
19003d0407baSopenharmony_ci            sg_target = sg;
19013d0407baSopenharmony_ci        }
19023d0407baSopenharmony_ci    } while (sg = sg->next, sg != sd->groups);
19033d0407baSopenharmony_ci
19043d0407baSopenharmony_ci    if (!sg_target) {
19053d0407baSopenharmony_ci        sg_target = sg_backup;
19063d0407baSopenharmony_ci    }
19073d0407baSopenharmony_ci
19083d0407baSopenharmony_ci    if (sg_target) {
19093d0407baSopenharmony_ci        cpumask_and(&search_cpu, lowest_mask, sched_group_span(sg_target));
19103d0407baSopenharmony_ci        cpumask_copy(&backup_search_cpu, lowest_mask);
19113d0407baSopenharmony_ci        cpumask_andnot(&backup_search_cpu, &backup_search_cpu, &search_cpu);
19123d0407baSopenharmony_ci    } else {
19133d0407baSopenharmony_ci        cpumask_copy(&search_cpu, lowest_mask);
19143d0407baSopenharmony_ci        cpumask_clear(&backup_search_cpu);
19153d0407baSopenharmony_ci    }
19163d0407baSopenharmony_ci
19173d0407baSopenharmony_ciretry:
19183d0407baSopenharmony_ci    cpu = cpumask_first(&search_cpu);
19193d0407baSopenharmony_ci    do {
19203d0407baSopenharmony_ci        trace_sched_find_cas_cpu_each(task, cpu, target_cpu, cpu_isolated(cpu), idle_cpu(cpu), boosted_tutil,
19213d0407baSopenharmony_ci                                      cpu_util(cpu), capacity_orig_of(cpu));
19223d0407baSopenharmony_ci
19233d0407baSopenharmony_ci        if (cpu_isolated(cpu)) {
19243d0407baSopenharmony_ci            continue;
19253d0407baSopenharmony_ci        }
19263d0407baSopenharmony_ci
19273d0407baSopenharmony_ci        if (!cpumask_test_cpu(cpu, task->cpus_ptr)) {
19283d0407baSopenharmony_ci            continue;
19293d0407baSopenharmony_ci        }
19303d0407baSopenharmony_ci
19313d0407baSopenharmony_ci        /* find best cpu with smallest max_capacity */
19323d0407baSopenharmony_ci        if (target_cpu != -1 && capacity_orig_of(cpu) > capacity_orig_of(target_cpu)) {
19333d0407baSopenharmony_ci            continue;
19343d0407baSopenharmony_ci        }
19353d0407baSopenharmony_ci
19363d0407baSopenharmony_ci        util = cpu_util(cpu);
19373d0407baSopenharmony_ci        /* Find the least loaded CPU */
19383d0407baSopenharmony_ci        if (util > target_cpu_util) {
19393d0407baSopenharmony_ci            continue;
19403d0407baSopenharmony_ci        }
19413d0407baSopenharmony_ci
19423d0407baSopenharmony_ci        /*
19433d0407baSopenharmony_ci         * If the previous CPU has the same load, keep it as
19443d0407baSopenharmony_ci         * target_cpu
19453d0407baSopenharmony_ci         */
19463d0407baSopenharmony_ci        if (target_cpu_util == util && target_cpu == prev_cpu) {
19473d0407baSopenharmony_ci            continue;
19483d0407baSopenharmony_ci        }
19493d0407baSopenharmony_ci
19503d0407baSopenharmony_ci        /*
19513d0407baSopenharmony_ci         * If the candidate CPU is the previous CPU, select it.
19523d0407baSopenharmony_ci         * If all of the above conditions are the same, select the
19533d0407baSopenharmony_ci         * CPU with the least cumulative window demand.
19543d0407baSopenharmony_ci         */
19553d0407baSopenharmony_ci        target_cpu_util = util;
19563d0407baSopenharmony_ci        target_cpu = cpu;
19573d0407baSopenharmony_ci    } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);
19583d0407baSopenharmony_ci
19593d0407baSopenharmony_ci    if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) {
19603d0407baSopenharmony_ci        goto done;
19613d0407baSopenharmony_ci    } else if (!cpumask_empty(&backup_search_cpu)) {
19623d0407baSopenharmony_ci        cpumask_copy(&search_cpu, &backup_search_cpu);
19633d0407baSopenharmony_ci        cpumask_clear(&backup_search_cpu);
19643d0407baSopenharmony_ci        goto retry;
19653d0407baSopenharmony_ci    }
19663d0407baSopenharmony_ci
19673d0407baSopenharmony_cidone:
19683d0407baSopenharmony_ci    trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu);
19693d0407baSopenharmony_ci    rcu_read_unlock();
19703d0407baSopenharmony_ci    return target_cpu;
19713d0407baSopenharmony_ci}
19723d0407baSopenharmony_ci#endif
19733d0407baSopenharmony_ci
19743d0407baSopenharmony_cistatic DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
19753d0407baSopenharmony_ci
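/*
 * Find a CPU that @task could be pushed to, i.e. one whose highest running
 * priority is lower than the task's. cpupri fills a per-CPU scratch mask
 * with candidates (filtered for capacity fitness on asymmetric systems, and
 * handed to the capacity-aware picker first when that is enabled); we then
 * prefer the task's previous CPU for cache locality, otherwise walk its
 * sched domains outwards for a topologically close candidate, falling back
 * to this CPU and finally to any candidate. Returns -1 if nothing suitable
 * is found.
 */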
19763d0407baSopenharmony_cistatic int find_lowest_rq(struct task_struct *task)
19773d0407baSopenharmony_ci{
19783d0407baSopenharmony_ci    struct sched_domain *sd = NULL;
19793d0407baSopenharmony_ci    struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
19803d0407baSopenharmony_ci    int this_cpu = smp_processor_id();
19813d0407baSopenharmony_ci    int cpu = task_cpu(task);
19823d0407baSopenharmony_ci    int ret;
19833d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
19843d0407baSopenharmony_ci    int cas_cpu;
19853d0407baSopenharmony_ci#endif
19863d0407baSopenharmony_ci
19873d0407baSopenharmony_ci    /* Make sure the mask is initialized first */
19883d0407baSopenharmony_ci    if (unlikely(!lowest_mask)) {
19893d0407baSopenharmony_ci        return -1;
19903d0407baSopenharmony_ci    }
19913d0407baSopenharmony_ci
19923d0407baSopenharmony_ci    if (task->nr_cpus_allowed == 1) {
19933d0407baSopenharmony_ci        return -1; /* No other targets possible */
19943d0407baSopenharmony_ci    }
19953d0407baSopenharmony_ci
19963d0407baSopenharmony_ci    /*
19973d0407baSopenharmony_ci     * If we're on an asymmetric-capacity system, ensure we consider the
19983d0407baSopenharmony_ci     * different capacities of the CPUs when searching for the lowest_mask.
19993d0407baSopenharmony_ci     */
20003d0407baSopenharmony_ci    if (static_branch_unlikely(&sched_asym_cpucapacity)) {
20013d0407baSopenharmony_ci        ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, task, lowest_mask, rt_task_fits_capacity);
20023d0407baSopenharmony_ci    } else {
20033d0407baSopenharmony_ci        ret = cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);
20043d0407baSopenharmony_ci    }
20053d0407baSopenharmony_ci
20063d0407baSopenharmony_ci    if (!ret) {
20073d0407baSopenharmony_ci        return -1; /* No targets found */
20083d0407baSopenharmony_ci    }
20093d0407baSopenharmony_ci
20103d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RT_CAS
20113d0407baSopenharmony_ci    cas_cpu = find_cas_cpu(sd, task, lowest_mask);
20123d0407baSopenharmony_ci    if (cas_cpu != -1) {
20133d0407baSopenharmony_ci        return cas_cpu;
20143d0407baSopenharmony_ci    }
20153d0407baSopenharmony_ci#endif
20163d0407baSopenharmony_ci
20173d0407baSopenharmony_ci    /*
20183d0407baSopenharmony_ci     * At this point we have built a mask of CPUs representing the
20193d0407baSopenharmony_ci     * lowest priority tasks in the system.  Now we want to elect
20203d0407baSopenharmony_ci     * the best one based on our affinity and topology.
20213d0407baSopenharmony_ci     *
20223d0407baSopenharmony_ci     * We prioritize the last CPU that the task executed on since
20233d0407baSopenharmony_ci     * it is most likely cache-hot in that location.
20243d0407baSopenharmony_ci     */
20253d0407baSopenharmony_ci    if (cpumask_test_cpu(cpu, lowest_mask)) {
20263d0407baSopenharmony_ci        return cpu;
20273d0407baSopenharmony_ci    }
20283d0407baSopenharmony_ci
20293d0407baSopenharmony_ci    /*
20303d0407baSopenharmony_ci     * Otherwise, we consult the sched_domains span maps to figure
20313d0407baSopenharmony_ci     * out which CPU is logically closest to our hot cache data.
20323d0407baSopenharmony_ci     */
20333d0407baSopenharmony_ci    if (!cpumask_test_cpu(this_cpu, lowest_mask)) {
20343d0407baSopenharmony_ci        this_cpu = -1; /* Skip this_cpu opt if not among lowest */
20353d0407baSopenharmony_ci    }
20363d0407baSopenharmony_ci
20373d0407baSopenharmony_ci    rcu_read_lock();
20383d0407baSopenharmony_ci    for_each_domain(cpu, sd)
20393d0407baSopenharmony_ci    {
20403d0407baSopenharmony_ci        if (sd->flags & SD_WAKE_AFFINE) {
20413d0407baSopenharmony_ci            int best_cpu;
20423d0407baSopenharmony_ci
20433d0407baSopenharmony_ci            /*
20443d0407baSopenharmony_ci             * "this_cpu" is cheaper to preempt than a
20453d0407baSopenharmony_ci             * remote processor.
20463d0407baSopenharmony_ci             */
20473d0407baSopenharmony_ci            if (this_cpu != -1 && cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
20483d0407baSopenharmony_ci                rcu_read_unlock();
20493d0407baSopenharmony_ci                return this_cpu;
20503d0407baSopenharmony_ci            }
20513d0407baSopenharmony_ci
20523d0407baSopenharmony_ci            best_cpu = cpumask_first_and(lowest_mask, sched_domain_span(sd));
20533d0407baSopenharmony_ci            if (best_cpu < nr_cpu_ids) {
20543d0407baSopenharmony_ci                rcu_read_unlock();
20553d0407baSopenharmony_ci                return best_cpu;
20563d0407baSopenharmony_ci            }
20573d0407baSopenharmony_ci        }
20583d0407baSopenharmony_ci    }
20593d0407baSopenharmony_ci    rcu_read_unlock();
20603d0407baSopenharmony_ci
20613d0407baSopenharmony_ci    /*
20623d0407baSopenharmony_ci     * And finally, if there were no matches within the domains
20633d0407baSopenharmony_ci     * just give the caller *something* to work with from the compatible
20643d0407baSopenharmony_ci     * locations.
20653d0407baSopenharmony_ci     */
20663d0407baSopenharmony_ci    if (this_cpu != -1) {
20673d0407baSopenharmony_ci        return this_cpu;
20683d0407baSopenharmony_ci    }
20693d0407baSopenharmony_ci
20703d0407baSopenharmony_ci    cpu = cpumask_any(lowest_mask);
20713d0407baSopenharmony_ci    if (cpu < nr_cpu_ids) {
20723d0407baSopenharmony_ci        return cpu;
20733d0407baSopenharmony_ci    }
20743d0407baSopenharmony_ci
20753d0407baSopenharmony_ci    return -1;
20763d0407baSopenharmony_ci}
20773d0407baSopenharmony_ci
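/*
 * Return the highest-priority task queued on this runqueue that is eligible
 * for pushing elsewhere: it must be queued but not running, allowed on more
 * than one CPU, and an RT task. Returns NULL when there is nothing to push.
 */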
20783d0407baSopenharmony_cistatic struct task_struct *pick_next_pushable_task(struct rq *rq)
20793d0407baSopenharmony_ci{
20803d0407baSopenharmony_ci    struct task_struct *p;
20813d0407baSopenharmony_ci
20823d0407baSopenharmony_ci    if (!has_pushable_tasks(rq)) {
20833d0407baSopenharmony_ci        return NULL;
20843d0407baSopenharmony_ci    }
20853d0407baSopenharmony_ci
20863d0407baSopenharmony_ci    p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
20873d0407baSopenharmony_ci
20883d0407baSopenharmony_ci    BUG_ON(rq->cpu != task_cpu(p));
20893d0407baSopenharmony_ci    BUG_ON(task_current(rq, p));
20903d0407baSopenharmony_ci    BUG_ON(p->nr_cpus_allowed <= 1);
20913d0407baSopenharmony_ci
20923d0407baSopenharmony_ci    BUG_ON(!task_on_rq_queued(p));
20933d0407baSopenharmony_ci    BUG_ON(!rt_task(p));
20943d0407baSopenharmony_ci
20953d0407baSopenharmony_ci    return p;
20963d0407baSopenharmony_ci}
20973d0407baSopenharmony_ci
20983d0407baSopenharmony_ci/* Will lock the rq it finds */
20993d0407baSopenharmony_cistatic struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
21003d0407baSopenharmony_ci{
21013d0407baSopenharmony_ci    struct rq *lowest_rq = NULL;
21023d0407baSopenharmony_ci    int tries;
21033d0407baSopenharmony_ci    int cpu;
21043d0407baSopenharmony_ci
21053d0407baSopenharmony_ci    for (tries = 0; tries < RT_MAX_TRIES; tries++) {
21063d0407baSopenharmony_ci        cpu = find_lowest_rq(task);
21073d0407baSopenharmony_ci        if ((cpu == -1) || (cpu == rq->cpu)) {
21083d0407baSopenharmony_ci            break;
21093d0407baSopenharmony_ci        }
21103d0407baSopenharmony_ci
21113d0407baSopenharmony_ci        lowest_rq = cpu_rq(cpu);
21123d0407baSopenharmony_ci        if (lowest_rq->rt.highest_prio.curr <= task->prio) {
21133d0407baSopenharmony_ci            /*
21143d0407baSopenharmony_ci             * Target rq has tasks of equal or higher priority,
21153d0407baSopenharmony_ci             * retrying does not release any lock and is unlikely
21163d0407baSopenharmony_ci             * to yield a different result.
21173d0407baSopenharmony_ci             */
21183d0407baSopenharmony_ci            lowest_rq = NULL;
21193d0407baSopenharmony_ci            break;
21203d0407baSopenharmony_ci        }
21213d0407baSopenharmony_ci
21223d0407baSopenharmony_ci        /* if the prio of this runqueue changed, try again */
21233d0407baSopenharmony_ci        if (double_lock_balance(rq, lowest_rq)) {
21243d0407baSopenharmony_ci            /*
21253d0407baSopenharmony_ci             * We had to unlock the run queue. In the
21263d0407baSopenharmony_ci             * meantime, the task could have migrated
21273d0407baSopenharmony_ci             * already or had its affinity changed.
21283d0407baSopenharmony_ci             */
21293d0407baSopenharmony_ci            struct task_struct *next_task = pick_next_pushable_task(rq);
21303d0407baSopenharmony_ci            if (unlikely(next_task != task || !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {
21313d0407baSopenharmony_ci                double_unlock_balance(rq, lowest_rq);
21323d0407baSopenharmony_ci                lowest_rq = NULL;
21333d0407baSopenharmony_ci                break;
21343d0407baSopenharmony_ci            }
21353d0407baSopenharmony_ci        }
21363d0407baSopenharmony_ci
21373d0407baSopenharmony_ci        /* If this rq is still suitable use it. */
21383d0407baSopenharmony_ci        if (lowest_rq->rt.highest_prio.curr > task->prio) {
21393d0407baSopenharmony_ci            break;
21403d0407baSopenharmony_ci        }
21413d0407baSopenharmony_ci
21423d0407baSopenharmony_ci        /* try again */
21433d0407baSopenharmony_ci        double_unlock_balance(rq, lowest_rq);
21443d0407baSopenharmony_ci        lowest_rq = NULL;
21453d0407baSopenharmony_ci    }
21463d0407baSopenharmony_ci
21473d0407baSopenharmony_ci    return lowest_rq;
21483d0407baSopenharmony_ci}
21493d0407baSopenharmony_ci
21503d0407baSopenharmony_ci/*
21513d0407baSopenharmony_ci * If the current CPU has more than one RT task, see if the non-running
21523d0407baSopenharmony_ci * task can migrate over to a CPU that is running a task
21533d0407baSopenharmony_ci * of lesser priority.
21543d0407baSopenharmony_ci */
21553d0407baSopenharmony_cistatic int push_rt_task(struct rq *rq)
21563d0407baSopenharmony_ci{
21573d0407baSopenharmony_ci    struct task_struct *next_task;
21583d0407baSopenharmony_ci    struct rq *lowest_rq;
21593d0407baSopenharmony_ci    int ret = 0;
21603d0407baSopenharmony_ci
21613d0407baSopenharmony_ci    if (!rq->rt.overloaded) {
21623d0407baSopenharmony_ci        return 0;
21633d0407baSopenharmony_ci    }
21643d0407baSopenharmony_ci
21653d0407baSopenharmony_ci    next_task = pick_next_pushable_task(rq);
21663d0407baSopenharmony_ci    if (!next_task) {
21673d0407baSopenharmony_ci        return 0;
21683d0407baSopenharmony_ci    }
21693d0407baSopenharmony_ci
21703d0407baSopenharmony_ciretry:
21713d0407baSopenharmony_ci    if (WARN_ON(next_task == rq->curr)) {
21723d0407baSopenharmony_ci        return 0;
21733d0407baSopenharmony_ci    }
21743d0407baSopenharmony_ci
21753d0407baSopenharmony_ci    /*
21763d0407baSopenharmony_ci     * It's possible that next_task slipped in with a
21773d0407baSopenharmony_ci     * higher priority than current. If that's the case,
21783d0407baSopenharmony_ci     * just reschedule current.
21793d0407baSopenharmony_ci     */
21803d0407baSopenharmony_ci    if (unlikely(next_task->prio < rq->curr->prio)) {
21813d0407baSopenharmony_ci        resched_curr(rq);
21823d0407baSopenharmony_ci        return 0;
21833d0407baSopenharmony_ci    }
21843d0407baSopenharmony_ci
21853d0407baSopenharmony_ci    /* We might release rq lock */
21863d0407baSopenharmony_ci    get_task_struct(next_task);
21873d0407baSopenharmony_ci
21883d0407baSopenharmony_ci    /* find_lock_lowest_rq locks the rq if found */
21893d0407baSopenharmony_ci    lowest_rq = find_lock_lowest_rq(next_task, rq);
21903d0407baSopenharmony_ci    if (!lowest_rq) {
21913d0407baSopenharmony_ci        struct task_struct *task;
21923d0407baSopenharmony_ci        /*
21933d0407baSopenharmony_ci         * find_lock_lowest_rq releases rq->lock
21943d0407baSopenharmony_ci         * so it is possible that next_task has migrated.
21953d0407baSopenharmony_ci         *
21963d0407baSopenharmony_ci         * We need to make sure that the task is still on the same
21973d0407baSopenharmony_ci         * run-queue and is also still the next task eligible for
21983d0407baSopenharmony_ci         * pushing.
21993d0407baSopenharmony_ci         */
22003d0407baSopenharmony_ci        task = pick_next_pushable_task(rq);
22013d0407baSopenharmony_ci        if (task == next_task) {
22023d0407baSopenharmony_ci            /*
22033d0407baSopenharmony_ci             * The task hasn't migrated, and is still the next
22043d0407baSopenharmony_ci             * eligible task, but we failed to find a run-queue
22053d0407baSopenharmony_ci             * to push it to.  Do not retry in this case, since
22063d0407baSopenharmony_ci             * other CPUs will pull from us when ready.
22073d0407baSopenharmony_ci             */
22083d0407baSopenharmony_ci            goto out;
22093d0407baSopenharmony_ci        }
22103d0407baSopenharmony_ci
22113d0407baSopenharmony_ci        if (!task) {
22123d0407baSopenharmony_ci            /* No more tasks, just exit */
22133d0407baSopenharmony_ci            goto out;
22143d0407baSopenharmony_ci        }
22153d0407baSopenharmony_ci
22163d0407baSopenharmony_ci        /*
22173d0407baSopenharmony_ci         * Something has shifted, try again.
22183d0407baSopenharmony_ci         */
22193d0407baSopenharmony_ci        put_task_struct(next_task);
22203d0407baSopenharmony_ci        next_task = task;
22213d0407baSopenharmony_ci        goto retry;
22223d0407baSopenharmony_ci    }
22233d0407baSopenharmony_ci
22243d0407baSopenharmony_ci    deactivate_task(rq, next_task, 0);
22253d0407baSopenharmony_ci    set_task_cpu(next_task, lowest_rq->cpu);
22263d0407baSopenharmony_ci    activate_task(lowest_rq, next_task, 0);
22273d0407baSopenharmony_ci    ret = 1;
22283d0407baSopenharmony_ci
22293d0407baSopenharmony_ci    resched_curr(lowest_rq);
22303d0407baSopenharmony_ci
22313d0407baSopenharmony_ci    double_unlock_balance(rq, lowest_rq);
22323d0407baSopenharmony_ci
22333d0407baSopenharmony_ciout:
22343d0407baSopenharmony_ci    put_task_struct(next_task);
22353d0407baSopenharmony_ci
22363d0407baSopenharmony_ci    return ret;
22373d0407baSopenharmony_ci}
22383d0407baSopenharmony_ci
22393d0407baSopenharmony_cistatic void push_rt_tasks(struct rq *rq)
22403d0407baSopenharmony_ci{
22413d0407baSopenharmony_ci    /* push_rt_task() will return true if it moved an RT task */
22423d0407baSopenharmony_ci    while (push_rt_task(rq)) {
22433d0407baSopenharmony_ci        ;
22443d0407baSopenharmony_ci    }
22453d0407baSopenharmony_ci}
22463d0407baSopenharmony_ci
22473d0407baSopenharmony_ci#ifdef HAVE_RT_PUSH_IPI
22483d0407baSopenharmony_ci
22493d0407baSopenharmony_ci/*
22503d0407baSopenharmony_ci * When a high priority task schedules out from a CPU and a lower priority
22513d0407baSopenharmony_ci * task is scheduled in, a check is made to see if there's any RT tasks
22523d0407baSopenharmony_ci * on other CPUs that are waiting to run because a higher priority RT task
22533d0407baSopenharmony_ci * is currently running on its CPU. In this case, the CPU with multiple RT
22543d0407baSopenharmony_ci * tasks queued on it (overloaded) needs to be notified that a CPU has opened
22553d0407baSopenharmony_ci * up that may be able to run one of its non-running queued RT tasks.
22563d0407baSopenharmony_ci *
22573d0407baSopenharmony_ci * All CPUs with overloaded RT tasks need to be notified as there is currently
22583d0407baSopenharmony_ci * no way to know which of these CPUs have the highest priority task waiting
22593d0407baSopenharmony_ci * to run. Instead of trying to take a spinlock on each of these CPUs,
22603d0407baSopenharmony_ci * which has been shown to cause large latency when done on machines with
22613d0407baSopenharmony_ci * many CPUs, an IPI is sent to those CPUs to have them push off the
22623d0407baSopenharmony_ci * overloaded RT tasks waiting to run.
22633d0407baSopenharmony_ci *
22643d0407baSopenharmony_ci * Just sending an IPI to each of the CPUs is also an issue, as on
22653d0407baSopenharmony_ci * machines with a large CPU count this can cause an IPI storm on a CPU,
22663d0407baSopenharmony_ci * especially if it's the only CPU with multiple RT tasks queued, and a
22673d0407baSopenharmony_ci * large number of CPUs are scheduling a lower priority task at the same time.
22683d0407baSopenharmony_ci *
22693d0407baSopenharmony_ci * Each root domain has its own irq work function that can iterate over
22703d0407baSopenharmony_ci * all CPUs with RT overloaded tasks. Since every RT overloaded CPU
22713d0407baSopenharmony_ci * must be checked whenever one or many CPUs are lowering their
22723d0407baSopenharmony_ci * priority, there's a single irq work iterator that will try to
22733d0407baSopenharmony_ci * push off RT tasks that are waiting to run.
22743d0407baSopenharmony_ci *
22753d0407baSopenharmony_ci * When a CPU schedules a lower priority task, it will kick off the
22763d0407baSopenharmony_ci * irq work iterator that will jump to each CPU with overloaded RT tasks.
22773d0407baSopenharmony_ci * As it only takes the first CPU that schedules a lower priority task
22783d0407baSopenharmony_ci * to start the process, the rto_start variable is incremented and if
22793d0407baSopenharmony_ci * the atomic result is one, then that CPU will try to take the rto_lock.
22803d0407baSopenharmony_ci * This prevents high contention on the lock as the process handles all
22813d0407baSopenharmony_ci * CPUs scheduling lower priority tasks.
22823d0407baSopenharmony_ci *
22833d0407baSopenharmony_ci * All CPUs that are scheduling a lower priority task will increment the
22843d0407baSopenharmony_ci * rt_loop_next variable. This will make sure that the irq work iterator
22853d0407baSopenharmony_ci * checks all RT overloaded CPUs whenever a CPU schedules a new lower
22863d0407baSopenharmony_ci * priority task, even if the iterator is in the middle of a scan. Incrementing
22873d0407baSopenharmony_ci * the rt_loop_next will cause the iterator to perform another scan.
22883d0407baSopenharmony_ci *
22893d0407baSopenharmony_ci */
22903d0407baSopenharmony_cistatic int rto_next_cpu(struct root_domain *rd)
22913d0407baSopenharmony_ci{
22923d0407baSopenharmony_ci    int next;
22933d0407baSopenharmony_ci    int cpu;
22943d0407baSopenharmony_ci
22953d0407baSopenharmony_ci    /*
22963d0407baSopenharmony_ci     * When starting the IPI RT pushing, the rto_cpu is set to -1,
22973d0407baSopenharmony_ci     * so rto_next_cpu() will simply return the first CPU found in
22983d0407baSopenharmony_ci     * the rto_mask.
22993d0407baSopenharmony_ci     *
23003d0407baSopenharmony_ci     * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
23013d0407baSopenharmony_ci     * will return the next CPU found in the rto_mask.
23023d0407baSopenharmony_ci     *
23033d0407baSopenharmony_ci     * If there are no more CPUs left in the rto_mask, then a check is made
23043d0407baSopenharmony_ci     * against rto_loop and rto_loop_next. rto_loop is only updated with
23053d0407baSopenharmony_ci     * the rto_lock held, but any CPU may increment the rto_loop_next
23063d0407baSopenharmony_ci     * without any locking.
23073d0407baSopenharmony_ci     */
23083d0407baSopenharmony_ci    for (;;) {
23093d0407baSopenharmony_ci        /* When rto_cpu is -1 this acts like cpumask_first() */
23103d0407baSopenharmony_ci        cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
23113d0407baSopenharmony_ci
23123d0407baSopenharmony_ci        rd->rto_cpu = cpu;
23133d0407baSopenharmony_ci
23143d0407baSopenharmony_ci        if (cpu < nr_cpu_ids) {
23153d0407baSopenharmony_ci            return cpu;
23163d0407baSopenharmony_ci        }
23173d0407baSopenharmony_ci
23183d0407baSopenharmony_ci        rd->rto_cpu = -1;
23193d0407baSopenharmony_ci
23203d0407baSopenharmony_ci        /*
23213d0407baSopenharmony_ci         * ACQUIRE ensures we see the @rto_mask changes
23223d0407baSopenharmony_ci         * made prior to the @next value observed.
23233d0407baSopenharmony_ci         *
23243d0407baSopenharmony_ci         * Matches WMB in rt_set_overload().
23253d0407baSopenharmony_ci         */
23263d0407baSopenharmony_ci        next = atomic_read_acquire(&rd->rto_loop_next);
23273d0407baSopenharmony_ci        if (rd->rto_loop == next) {
23283d0407baSopenharmony_ci            break;
23293d0407baSopenharmony_ci        }
23303d0407baSopenharmony_ci
23313d0407baSopenharmony_ci        rd->rto_loop = next;
23323d0407baSopenharmony_ci    }
23333d0407baSopenharmony_ci
23343d0407baSopenharmony_ci    return -1;
23353d0407baSopenharmony_ci}
23363d0407baSopenharmony_ci
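/*
 * rto_loop_start is used as a simple try-lock: the cmpxchg below only
 * succeeds for the first CPU that sees it at zero, so only one CPU at a
 * time can start the IPI push loop. rto_start_unlock() releases it again
 * with store-release semantics, pairing with the acquire on the cmpxchg.
 */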
23373d0407baSopenharmony_cistatic inline bool rto_start_trylock(atomic_t *v)
23383d0407baSopenharmony_ci{
23393d0407baSopenharmony_ci    return !atomic_cmpxchg_acquire(v, 0, 1);
23403d0407baSopenharmony_ci}
23413d0407baSopenharmony_ci
23423d0407baSopenharmony_cistatic inline void rto_start_unlock(atomic_t *v)
23433d0407baSopenharmony_ci{
23443d0407baSopenharmony_ci    atomic_set_release(v, 0);
23453d0407baSopenharmony_ci}
23463d0407baSopenharmony_ci
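/*
 * Kick off (or extend) the IPI push chain: bump rto_loop_next so that an
 * iterator already in flight rescans the rto_mask, and if no iterator is
 * currently running, queue the rto_push_work irq work on the first RT
 * overloaded CPU found.
 */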
23473d0407baSopenharmony_cistatic void tell_cpu_to_push(struct rq *rq)
23483d0407baSopenharmony_ci{
23493d0407baSopenharmony_ci    int cpu = -1;
23503d0407baSopenharmony_ci
23513d0407baSopenharmony_ci    /* Keep the loop going if the IPI is currently active */
23523d0407baSopenharmony_ci    atomic_inc(&rq->rd->rto_loop_next);
23533d0407baSopenharmony_ci
23543d0407baSopenharmony_ci    /* Only one CPU can initiate a loop at a time */
23553d0407baSopenharmony_ci    if (!rto_start_trylock(&rq->rd->rto_loop_start)) {
23563d0407baSopenharmony_ci        return;
23573d0407baSopenharmony_ci    }
23583d0407baSopenharmony_ci
23593d0407baSopenharmony_ci    raw_spin_lock(&rq->rd->rto_lock);
23603d0407baSopenharmony_ci
23613d0407baSopenharmony_ci    /*
23623d0407baSopenharmony_ci     * The rto_cpu is updated under the lock; if it has a valid CPU
23633d0407baSopenharmony_ci     * then the IPI is still running and will continue due to the
23643d0407baSopenharmony_ci     * update to loop_next, and nothing needs to be done here.
23653d0407baSopenharmony_ci     * Otherwise it is finishing up and an IPI needs to be sent.
23663d0407baSopenharmony_ci     */
23673d0407baSopenharmony_ci    if (rq->rd->rto_cpu < 0) {
23683d0407baSopenharmony_ci        cpu = rto_next_cpu(rq->rd);
23693d0407baSopenharmony_ci    }
23703d0407baSopenharmony_ci
23713d0407baSopenharmony_ci    raw_spin_unlock(&rq->rd->rto_lock);
23723d0407baSopenharmony_ci
23733d0407baSopenharmony_ci    rto_start_unlock(&rq->rd->rto_loop_start);
23743d0407baSopenharmony_ci
23753d0407baSopenharmony_ci    if (cpu >= 0) {
23763d0407baSopenharmony_ci        /* Make sure the rd does not get freed while pushing */
23773d0407baSopenharmony_ci        sched_get_rd(rq->rd);
23783d0407baSopenharmony_ci        irq_work_queue_on(&rq->rd->rto_push_work, cpu);
23793d0407baSopenharmony_ci    }
23803d0407baSopenharmony_ci}
23813d0407baSopenharmony_ci
23823d0407baSopenharmony_ci/* Called from hardirq context */
23833d0407baSopenharmony_civoid rto_push_irq_work_func(struct irq_work *work)
23843d0407baSopenharmony_ci{
23853d0407baSopenharmony_ci    struct root_domain *rd = container_of(work, struct root_domain, rto_push_work);
23863d0407baSopenharmony_ci    struct rq *rq;
23873d0407baSopenharmony_ci    int cpu;
23883d0407baSopenharmony_ci
23893d0407baSopenharmony_ci    rq = this_rq();
23903d0407baSopenharmony_ci    /*
23913d0407baSopenharmony_ci     * We do not need to grab the lock to check has_pushable_tasks().
23923d0407baSopenharmony_ci     * When it gets updated, a check is made to see if a push is possible.
23933d0407baSopenharmony_ci     */
23943d0407baSopenharmony_ci    if (has_pushable_tasks(rq)) {
23953d0407baSopenharmony_ci        raw_spin_lock(&rq->lock);
23963d0407baSopenharmony_ci        push_rt_tasks(rq);
23973d0407baSopenharmony_ci        raw_spin_unlock(&rq->lock);
23983d0407baSopenharmony_ci    }
23993d0407baSopenharmony_ci
24003d0407baSopenharmony_ci    raw_spin_lock(&rd->rto_lock);
24013d0407baSopenharmony_ci
24023d0407baSopenharmony_ci    /* Pass the IPI to the next rt overloaded queue */
24033d0407baSopenharmony_ci    cpu = rto_next_cpu(rd);
24043d0407baSopenharmony_ci
24053d0407baSopenharmony_ci    raw_spin_unlock(&rd->rto_lock);
24063d0407baSopenharmony_ci
24073d0407baSopenharmony_ci    if (cpu < 0) {
24083d0407baSopenharmony_ci        sched_put_rd(rd);
24093d0407baSopenharmony_ci        return;
24103d0407baSopenharmony_ci    }
24113d0407baSopenharmony_ci
24123d0407baSopenharmony_ci    /* Try the next RT overloaded CPU */
24133d0407baSopenharmony_ci    irq_work_queue_on(&rd->rto_push_work, cpu);
24143d0407baSopenharmony_ci}
24153d0407baSopenharmony_ci#endif /* HAVE_RT_PUSH_IPI */
24163d0407baSopenharmony_ci
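/*
 * Try to pull a higher-priority pushable task from another RT overloaded
 * CPU onto this runqueue. Without RT_PUSH_IPI this scans the rto_mask
 * directly; when that feature is enabled the work is instead delegated to
 * tell_cpu_to_push() above.
 */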
24173d0407baSopenharmony_cistatic void pull_rt_task(struct rq *this_rq)
24183d0407baSopenharmony_ci{
24193d0407baSopenharmony_ci    int this_cpu = this_rq->cpu, cpu;
24203d0407baSopenharmony_ci    bool resched = false;
24213d0407baSopenharmony_ci    struct task_struct *p;
24223d0407baSopenharmony_ci    struct rq *src_rq;
24233d0407baSopenharmony_ci    int rt_overload_count = rt_overloaded(this_rq);
24243d0407baSopenharmony_ci    if (likely(!rt_overload_count)) {
24253d0407baSopenharmony_ci        return;
24263d0407baSopenharmony_ci    }
24273d0407baSopenharmony_ci
24283d0407baSopenharmony_ci    /*
24293d0407baSopenharmony_ci     * Match the barrier from rt_set_overload(); this guarantees that if we
24303d0407baSopenharmony_ci     * see overloaded we must also see the rto_mask bit.
24313d0407baSopenharmony_ci     */
24323d0407baSopenharmony_ci    smp_rmb();
24333d0407baSopenharmony_ci
24343d0407baSopenharmony_ci    /* If we are the only overloaded CPU do nothing */
24353d0407baSopenharmony_ci    if (rt_overload_count == 1 && cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) {
24363d0407baSopenharmony_ci        return;
24373d0407baSopenharmony_ci    }
24383d0407baSopenharmony_ci
24393d0407baSopenharmony_ci#ifdef HAVE_RT_PUSH_IPI
24403d0407baSopenharmony_ci    if (sched_feat(RT_PUSH_IPI)) {
24413d0407baSopenharmony_ci        tell_cpu_to_push(this_rq);
24423d0407baSopenharmony_ci        return;
24433d0407baSopenharmony_ci    }
24443d0407baSopenharmony_ci#endif
24453d0407baSopenharmony_ci
24463d0407baSopenharmony_ci    for_each_cpu(cpu, this_rq->rd->rto_mask)
24473d0407baSopenharmony_ci    {
24483d0407baSopenharmony_ci        if (this_cpu == cpu) {
24493d0407baSopenharmony_ci            continue;
24503d0407baSopenharmony_ci        }
24513d0407baSopenharmony_ci
24523d0407baSopenharmony_ci        src_rq = cpu_rq(cpu);
24533d0407baSopenharmony_ci        /*
24543d0407baSopenharmony_ci         * Don't bother taking the src_rq->lock if the next highest
24553d0407baSopenharmony_ci         * task is known to be lower-priority than our current task.
24563d0407baSopenharmony_ci         * This may look racy, but if this value is about to go
24573d0407baSopenharmony_ci         * logically higher, the src_rq will push this task away.
24583d0407baSopenharmony_ci         * And if it's going logically lower, we do not care.
24593d0407baSopenharmony_ci         */
24603d0407baSopenharmony_ci        if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr) {
24613d0407baSopenharmony_ci            continue;
24623d0407baSopenharmony_ci        }
24633d0407baSopenharmony_ci
24643d0407baSopenharmony_ci        /*
24653d0407baSopenharmony_ci         * We can potentially drop this_rq's lock in
24663d0407baSopenharmony_ci         * double_lock_balance, and another CPU could
24673d0407baSopenharmony_ci         * alter this_rq
24683d0407baSopenharmony_ci         */
24693d0407baSopenharmony_ci        double_lock_balance(this_rq, src_rq);
24703d0407baSopenharmony_ci
24713d0407baSopenharmony_ci        /*
24723d0407baSopenharmony_ci         * We can only pull a task that is pushable
24733d0407baSopenharmony_ci         * on its rq, and no others.
24743d0407baSopenharmony_ci         */
24753d0407baSopenharmony_ci        p = pick_highest_pushable_task(src_rq, this_cpu);
24763d0407baSopenharmony_ci        /*
24773d0407baSopenharmony_ci         * Do we have an RT task that preempts
24783d0407baSopenharmony_ci         * the to-be-scheduled task?
24793d0407baSopenharmony_ci         */
24803d0407baSopenharmony_ci        if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
24813d0407baSopenharmony_ci            WARN_ON(p == src_rq->curr);
24823d0407baSopenharmony_ci            WARN_ON(!task_on_rq_queued(p));
24833d0407baSopenharmony_ci
24843d0407baSopenharmony_ci            /*
24853d0407baSopenharmony_ci             * There's a chance that p is higher in priority
24863d0407baSopenharmony_ci             * than what's currently running on its CPU.
24873d0407baSopenharmony_ci             * This is just because p is waking up and hasn't
24883d0407baSopenharmony_ci             * had a chance to schedule. We only pull
24893d0407baSopenharmony_ci             * p if it is lower in priority than the
24903d0407baSopenharmony_ci             * current task on the run queue.
24913d0407baSopenharmony_ci             */
24923d0407baSopenharmony_ci            if (p->prio < src_rq->curr->prio) {
24933d0407baSopenharmony_ci                goto skip;
24943d0407baSopenharmony_ci            }
24953d0407baSopenharmony_ci
24963d0407baSopenharmony_ci            resched = true;
24973d0407baSopenharmony_ci
24983d0407baSopenharmony_ci            deactivate_task(src_rq, p, 0);
24993d0407baSopenharmony_ci            set_task_cpu(p, this_cpu);
25003d0407baSopenharmony_ci            activate_task(this_rq, p, 0);
25013d0407baSopenharmony_ci            /*
25023d0407baSopenharmony_ci             * We continue with the search, just in
25033d0407baSopenharmony_ci             * case there's an even higher prio task
25043d0407baSopenharmony_ci             * in another runqueue. (low likelihood
25053d0407baSopenharmony_ci             * but possible)
25063d0407baSopenharmony_ci             */
25073d0407baSopenharmony_ci        }
25083d0407baSopenharmony_ci    skip:
25093d0407baSopenharmony_ci        double_unlock_balance(this_rq, src_rq);
25103d0407baSopenharmony_ci    }
25113d0407baSopenharmony_ci
25123d0407baSopenharmony_ci    if (resched) {
25133d0407baSopenharmony_ci        resched_curr(this_rq);
25143d0407baSopenharmony_ci    }
25153d0407baSopenharmony_ci}
25163d0407baSopenharmony_ci
25173d0407baSopenharmony_ci/*
25183d0407baSopenharmony_ci * If we are not running and we are not going to reschedule soon, we should
25193d0407baSopenharmony_ci * try to push tasks away now
25203d0407baSopenharmony_ci */
25213d0407baSopenharmony_cistatic void task_woken_rt(struct rq *rq, struct task_struct *p)
25223d0407baSopenharmony_ci{
25233d0407baSopenharmony_ci    bool need_to_push = !task_running(rq, p) && !test_tsk_need_resched(rq->curr) && p->nr_cpus_allowed > 1 &&
25243d0407baSopenharmony_ci                        (dl_task(rq->curr) || rt_task(rq->curr)) &&
25253d0407baSopenharmony_ci                        (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio);
25263d0407baSopenharmony_ci    if (need_to_push) {
25273d0407baSopenharmony_ci        push_rt_tasks(rq);
25283d0407baSopenharmony_ci    }
25293d0407baSopenharmony_ci}
25303d0407baSopenharmony_ci
25313d0407baSopenharmony_ci/* Assumes rq->lock is held */
25323d0407baSopenharmony_cistatic void rq_online_rt(struct rq *rq)
25333d0407baSopenharmony_ci{
25343d0407baSopenharmony_ci    if (rq->rt.overloaded) {
25353d0407baSopenharmony_ci        rt_set_overload(rq);
25363d0407baSopenharmony_ci    }
25373d0407baSopenharmony_ci
25383d0407baSopenharmony_ci    __enable_runtime(rq);
25393d0407baSopenharmony_ci
25403d0407baSopenharmony_ci    cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
25413d0407baSopenharmony_ci}
25423d0407baSopenharmony_ci
25433d0407baSopenharmony_ci/* Assumes rq->lock is held */
25443d0407baSopenharmony_cistatic void rq_offline_rt(struct rq *rq)
25453d0407baSopenharmony_ci{
25463d0407baSopenharmony_ci    if (rq->rt.overloaded) {
25473d0407baSopenharmony_ci        rt_clear_overload(rq);
25483d0407baSopenharmony_ci    }
25493d0407baSopenharmony_ci
25503d0407baSopenharmony_ci    __disable_runtime(rq);
25513d0407baSopenharmony_ci
25523d0407baSopenharmony_ci    cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
25533d0407baSopenharmony_ci}
25543d0407baSopenharmony_ci
25553d0407baSopenharmony_ci/*
25563d0407baSopenharmony_ci * When switching from the RT queue, we bring ourselves to a position
25573d0407baSopenharmony_ci * where we might want to pull RT tasks from other runqueues.
25583d0407baSopenharmony_ci */
25593d0407baSopenharmony_cistatic void switched_from_rt(struct rq *rq, struct task_struct *p)
25603d0407baSopenharmony_ci{
25613d0407baSopenharmony_ci    /*
25623d0407baSopenharmony_ci     * If there are other RT tasks then we will reschedule
25633d0407baSopenharmony_ci     * and the scheduling of the other RT tasks will handle
25643d0407baSopenharmony_ci     * the balancing. But if we are the last RT task
25653d0407baSopenharmony_ci     * we may need to handle the pulling of RT tasks
25663d0407baSopenharmony_ci     * now.
25673d0407baSopenharmony_ci     */
25683d0407baSopenharmony_ci    if (!task_on_rq_queued(p) || rq->rt.rt_nr_running || cpu_isolated(cpu_of(rq))) {
25693d0407baSopenharmony_ci        return;
25703d0407baSopenharmony_ci    }
25713d0407baSopenharmony_ci
25723d0407baSopenharmony_ci    rt_queue_pull_task(rq);
25733d0407baSopenharmony_ci}
25743d0407baSopenharmony_ci
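/*
 * Allocate the per-CPU cpumasks (local_cpu_mask) that find_lowest_rq()
 * uses to hold the candidate CPUs returned by the cpupri search.
 */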
25753d0407baSopenharmony_civoid __init init_sched_rt_class(void)
25763d0407baSopenharmony_ci{
25773d0407baSopenharmony_ci    unsigned int i;
25783d0407baSopenharmony_ci
25793d0407baSopenharmony_ci    for_each_possible_cpu(i)
25803d0407baSopenharmony_ci    {
25813d0407baSopenharmony_ci        zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i));
25823d0407baSopenharmony_ci    }
25833d0407baSopenharmony_ci}
25843d0407baSopenharmony_ci#endif /* CONFIG_SMP */
25853d0407baSopenharmony_ci
25863d0407baSopenharmony_ci/*
25873d0407baSopenharmony_ci * When switching a task to RT, we may overload the runqueue
25883d0407baSopenharmony_ci * with RT tasks. In this case we try to push them off to
25893d0407baSopenharmony_ci * other runqueues.
25903d0407baSopenharmony_ci */
25913d0407baSopenharmony_cistatic void switched_to_rt(struct rq *rq, struct task_struct *p)
25923d0407baSopenharmony_ci{
25933d0407baSopenharmony_ci    /*
25943d0407baSopenharmony_ci     * If we are running, update the avg_rt tracking, as the running time
25953d0407baSopenharmony_ci     * will from now on be accounted to it.
25963d0407baSopenharmony_ci     */
25973d0407baSopenharmony_ci    if (task_current(rq, p)) {
25983d0407baSopenharmony_ci        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
25993d0407baSopenharmony_ci        return;
26003d0407baSopenharmony_ci    }
26013d0407baSopenharmony_ci
26023d0407baSopenharmony_ci    /*
26033d0407baSopenharmony_ci     * If we are not running we may need to preempt the current
26043d0407baSopenharmony_ci     * running task. If that current running task is also an RT task
26053d0407baSopenharmony_ci     * then see if we can move to another run queue.
26063d0407baSopenharmony_ci     */
26073d0407baSopenharmony_ci    if (task_on_rq_queued(p)) {
26083d0407baSopenharmony_ci#ifdef CONFIG_SMP
26093d0407baSopenharmony_ci        if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) {
26103d0407baSopenharmony_ci            rt_queue_push_tasks(rq);
26113d0407baSopenharmony_ci        }
26123d0407baSopenharmony_ci#endif /* CONFIG_SMP */
26133d0407baSopenharmony_ci        if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) {
26143d0407baSopenharmony_ci            resched_curr(rq);
26153d0407baSopenharmony_ci        }
26163d0407baSopenharmony_ci    }
26173d0407baSopenharmony_ci}
26183d0407baSopenharmony_ci
26193d0407baSopenharmony_ci/*
26203d0407baSopenharmony_ci * Priority of the task has changed. This may cause
26213d0407baSopenharmony_ci * us to initiate a push or pull.
26223d0407baSopenharmony_ci */
26233d0407baSopenharmony_cistatic void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
26243d0407baSopenharmony_ci{
26253d0407baSopenharmony_ci    if (!task_on_rq_queued(p)) {
26263d0407baSopenharmony_ci        return;
26273d0407baSopenharmony_ci    }
26283d0407baSopenharmony_ci
26293d0407baSopenharmony_ci    if (rq->curr == p) {
26303d0407baSopenharmony_ci#ifdef CONFIG_SMP
26313d0407baSopenharmony_ci        /*
26323d0407baSopenharmony_ci         * If our priority decreases while running, we
26333d0407baSopenharmony_ci         * may need to pull tasks to this runqueue.
26343d0407baSopenharmony_ci         */
26353d0407baSopenharmony_ci        if (oldprio < p->prio) {
26363d0407baSopenharmony_ci            rt_queue_pull_task(rq);
26373d0407baSopenharmony_ci        }
26383d0407baSopenharmony_ci
26393d0407baSopenharmony_ci        /*
26403d0407baSopenharmony_ci         * If there's a higher priority task waiting to run
26413d0407baSopenharmony_ci         * then reschedule.
26423d0407baSopenharmony_ci         */
26433d0407baSopenharmony_ci        if (p->prio > rq->rt.highest_prio.curr) {
26443d0407baSopenharmony_ci            resched_curr(rq);
26453d0407baSopenharmony_ci        }
26463d0407baSopenharmony_ci#else
26473d0407baSopenharmony_ci        /* For UP simply resched on drop of prio */
26483d0407baSopenharmony_ci        if (oldprio < p->prio) {
26493d0407baSopenharmony_ci            resched_curr(rq);
26503d0407baSopenharmony_ci        }
26513d0407baSopenharmony_ci#endif /* CONFIG_SMP */
26523d0407baSopenharmony_ci    } else {
26533d0407baSopenharmony_ci        /*
26543d0407baSopenharmony_ci         * This task is not running, but if it is
26553d0407baSopenharmony_ci         * greater than the current running task
26563d0407baSopenharmony_ci         * then reschedule.
26573d0407baSopenharmony_ci         */
26583d0407baSopenharmony_ci        if (p->prio < rq->curr->prio) {
26593d0407baSopenharmony_ci            resched_curr(rq);
26603d0407baSopenharmony_ci        }
26613d0407baSopenharmony_ci    }
26623d0407baSopenharmony_ci}
26633d0407baSopenharmony_ci
26643d0407baSopenharmony_ci#ifdef CONFIG_POSIX_TIMERS
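/*
 * RLIMIT_RTTIME enforcement: count scheduler ticks in p->rt.timeout and,
 * once the soft limit (converted to ticks) is exceeded, hand the task to
 * the POSIX CPU-timers machinery via posix_cputimers_rt_watchdog().
 */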
26653d0407baSopenharmony_cistatic void watchdog(struct rq *rq, struct task_struct *p)
26663d0407baSopenharmony_ci{
26673d0407baSopenharmony_ci    unsigned long soft, hard;
26683d0407baSopenharmony_ci
26693d0407baSopenharmony_ci    /* max may change after cur was read, this will be fixed next tick */
26703d0407baSopenharmony_ci    soft = task_rlimit(p, RLIMIT_RTTIME);
26713d0407baSopenharmony_ci    hard = task_rlimit_max(p, RLIMIT_RTTIME);
26723d0407baSopenharmony_ci
26733d0407baSopenharmony_ci    if (soft != RLIM_INFINITY) {
26743d0407baSopenharmony_ci        unsigned long next;
26753d0407baSopenharmony_ci
26763d0407baSopenharmony_ci        if (p->rt.watchdog_stamp != jiffies) {
26773d0407baSopenharmony_ci            p->rt.timeout++;
26783d0407baSopenharmony_ci            p->rt.watchdog_stamp = jiffies;
26793d0407baSopenharmony_ci        }
26803d0407baSopenharmony_ci
26813d0407baSopenharmony_ci        next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC / HZ);
26823d0407baSopenharmony_ci        if (p->rt.timeout > next) {
26833d0407baSopenharmony_ci            posix_cputimers_rt_watchdog(&p->posix_cputimers, p->se.sum_exec_runtime);
26843d0407baSopenharmony_ci        }
26853d0407baSopenharmony_ci    }
26863d0407baSopenharmony_ci}
26873d0407baSopenharmony_ci#else
26883d0407baSopenharmony_cistatic inline void watchdog(struct rq *rq, struct task_struct *p)
26893d0407baSopenharmony_ci{
26903d0407baSopenharmony_ci}
26913d0407baSopenharmony_ci#endif
26923d0407baSopenharmony_ci
26933d0407baSopenharmony_ci/*
26943d0407baSopenharmony_ci * scheduler tick hitting a task of our scheduling class.
26953d0407baSopenharmony_ci *
26963d0407baSopenharmony_ci * NOTE: This function can be called remotely by the tick offload that
26973d0407baSopenharmony_ci * goes along full dynticks. Therefore no local assumption can be made
26983d0407baSopenharmony_ci * and everything must be accessed through the @rq and @curr passed in
26993d0407baSopenharmony_ci * parameters.
27003d0407baSopenharmony_ci */
27013d0407baSopenharmony_cistatic void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
27023d0407baSopenharmony_ci{
27033d0407baSopenharmony_ci    struct sched_rt_entity *rt_se = &p->rt;
27043d0407baSopenharmony_ci
27053d0407baSopenharmony_ci    update_curr_rt(rq);
27063d0407baSopenharmony_ci    update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
27073d0407baSopenharmony_ci
27083d0407baSopenharmony_ci    watchdog(rq, p);
27093d0407baSopenharmony_ci
27103d0407baSopenharmony_ci    /*
27113d0407baSopenharmony_ci     * RR tasks need a special form of timeslice management.
27123d0407baSopenharmony_ci     * FIFO tasks have no timeslices.
27133d0407baSopenharmony_ci     */
27143d0407baSopenharmony_ci    if (p->policy != SCHED_RR) {
27153d0407baSopenharmony_ci        return;
27163d0407baSopenharmony_ci    }
27173d0407baSopenharmony_ci
27183d0407baSopenharmony_ci    if (--p->rt.time_slice) {
27193d0407baSopenharmony_ci        return;
27203d0407baSopenharmony_ci    }
27213d0407baSopenharmony_ci
27223d0407baSopenharmony_ci    p->rt.time_slice = sched_rr_timeslice;
27233d0407baSopenharmony_ci
27243d0407baSopenharmony_ci    /*
27253d0407baSopenharmony_ci     * Requeue to the end of queue if we (and all of our ancestors) are not
27263d0407baSopenharmony_ci     * the only element on the queue
27273d0407baSopenharmony_ci     */
27283d0407baSopenharmony_ci    cycle_each_sched_rt_entity(rt_se) {
27293d0407baSopenharmony_ci        if (rt_se->run_list.prev != rt_se->run_list.next) {
27303d0407baSopenharmony_ci            requeue_task_rt(rq, p, 0);
27313d0407baSopenharmony_ci            resched_curr(rq);
27323d0407baSopenharmony_ci            return;
27333d0407baSopenharmony_ci        }
27343d0407baSopenharmony_ci    }
27353d0407baSopenharmony_ci}
27363d0407baSopenharmony_ci
27373d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RT_ACTIVE_LB
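/*
 * Stopper callback queued by check_for_migration_rt() below: with
 * busiest_rq locked, find a lower-priority runqueue via
 * find_lock_lowest_rq() and move rq->rt_push_task there, but only if the
 * destination CPU has a strictly higher original capacity than the
 * task's current CPU.
 */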
27383d0407baSopenharmony_cistatic int rt_active_load_balance_cpu_stop(void *data)
27393d0407baSopenharmony_ci{
27403d0407baSopenharmony_ci    struct rq *busiest_rq = data;
27413d0407baSopenharmony_ci    struct task_struct *next_task = busiest_rq->rt_push_task;
27423d0407baSopenharmony_ci    struct rq *lowest_rq = NULL;
27433d0407baSopenharmony_ci    unsigned long flags;
27443d0407baSopenharmony_ci
27453d0407baSopenharmony_ci    raw_spin_lock_irqsave(&busiest_rq->lock, flags);
27463d0407baSopenharmony_ci    busiest_rq->rt_active_balance = 0;
27473d0407baSopenharmony_ci
27483d0407baSopenharmony_ci    /* find_lock_lowest_rq locks the rq if found */
27493d0407baSopenharmony_ci    lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
27503d0407baSopenharmony_ci    if (!lowest_rq) {
27513d0407baSopenharmony_ci        goto out;
27523d0407baSopenharmony_ci    }
27533d0407baSopenharmony_ci
27543d0407baSopenharmony_ci    if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task))) {
27553d0407baSopenharmony_ci        goto unlock;
27563d0407baSopenharmony_ci    }
27573d0407baSopenharmony_ci
27583d0407baSopenharmony_ci    deactivate_task(busiest_rq, next_task, 0);
27593d0407baSopenharmony_ci    set_task_cpu(next_task, lowest_rq->cpu);
27603d0407baSopenharmony_ci    activate_task(lowest_rq, next_task, 0);
27613d0407baSopenharmony_ci
27623d0407baSopenharmony_ci    resched_curr(lowest_rq);
27633d0407baSopenharmony_ciunlock:
27643d0407baSopenharmony_ci    double_unlock_balance(busiest_rq, lowest_rq);
27653d0407baSopenharmony_ciout:
27663d0407baSopenharmony_ci    put_task_struct(next_task);
27673d0407baSopenharmony_ci    raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);
27683d0407baSopenharmony_ci
27693d0407baSopenharmony_ci    return 0;
27703d0407baSopenharmony_ci}
27713d0407baSopenharmony_ci
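/*
 * Detect a "misfit" RT task: one whose current CPU's original capacity
 * cannot satisfy it (or, with CONFIG_SCHED_RTG, whose RTG target cluster
 * is bigger). If so, and no active balance is already in flight, queue
 * the stopper above to migrate it to a higher-capacity CPU.
 */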
27723d0407baSopenharmony_cistatic void check_for_migration_rt(struct rq *rq, struct task_struct *p)
27733d0407baSopenharmony_ci{
27743d0407baSopenharmony_ci    bool need_active_lb = false;
27753d0407baSopenharmony_ci    bool misfit_task = false;
27763d0407baSopenharmony_ci    int cpu = task_cpu(p);
27773d0407baSopenharmony_ci    unsigned long cpu_orig_cap;
27783d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RTG
27793d0407baSopenharmony_ci    struct cpumask *rtg_target = NULL;
27803d0407baSopenharmony_ci#endif
27813d0407baSopenharmony_ci
27823d0407baSopenharmony_ci    if (!sysctl_sched_enable_rt_active_lb) {
27833d0407baSopenharmony_ci        return;
27843d0407baSopenharmony_ci    }
27853d0407baSopenharmony_ci
27863d0407baSopenharmony_ci    if (p->nr_cpus_allowed == 1) {
27873d0407baSopenharmony_ci        return;
27883d0407baSopenharmony_ci    }
27893d0407baSopenharmony_ci
27903d0407baSopenharmony_ci    cpu_orig_cap = capacity_orig_of(cpu);
27913d0407baSopenharmony_ci    /* cpu has max capacity, no need to do balance */
27923d0407baSopenharmony_ci    if (cpu_orig_cap == rq->rd->max_cpu_capacity) {
27933d0407baSopenharmony_ci        return;
27943d0407baSopenharmony_ci    }
27953d0407baSopenharmony_ci
27963d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RTG
27973d0407baSopenharmony_ci    rtg_target = find_rtg_target(p);
27983d0407baSopenharmony_ci    if (rtg_target) {
27993d0407baSopenharmony_ci        misfit_task = capacity_orig_of(cpumask_first(rtg_target)) > cpu_orig_cap;
28003d0407baSopenharmony_ci    } else {
28013d0407baSopenharmony_ci        misfit_task = !rt_task_fits_capacity(p, cpu);
28023d0407baSopenharmony_ci    }
28033d0407baSopenharmony_ci#else
28043d0407baSopenharmony_ci    misfit_task = !rt_task_fits_capacity(p, cpu);
28053d0407baSopenharmony_ci#endif
28063d0407baSopenharmony_ci    if (misfit_task) {
28073d0407baSopenharmony_ci        raw_spin_lock(&rq->lock);
28083d0407baSopenharmony_ci        if (!rq->active_balance && !rq->rt_active_balance) {
28093d0407baSopenharmony_ci            rq->rt_active_balance = 1;
28103d0407baSopenharmony_ci            rq->rt_push_task = p;
28113d0407baSopenharmony_ci            get_task_struct(p);
28123d0407baSopenharmony_ci            need_active_lb = true;
28133d0407baSopenharmony_ci        }
28143d0407baSopenharmony_ci        raw_spin_unlock(&rq->lock);
28153d0407baSopenharmony_ci
28163d0407baSopenharmony_ci        if (need_active_lb) {
28173d0407baSopenharmony_ci            stop_one_cpu_nowait(task_cpu(p), rt_active_load_balance_cpu_stop, rq, &rq->rt_active_balance_work);
28183d0407baSopenharmony_ci        }
28193d0407baSopenharmony_ci    }
28203d0407baSopenharmony_ci}
28213d0407baSopenharmony_ci#endif
28223d0407baSopenharmony_ci
28233d0407baSopenharmony_cistatic unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
28243d0407baSopenharmony_ci{
28253d0407baSopenharmony_ci    /*
28263d0407baSopenharmony_ci     * Time slice is 0 for SCHED_FIFO tasks
28273d0407baSopenharmony_ci     */
28283d0407baSopenharmony_ci    if (task->policy == SCHED_RR) {
28293d0407baSopenharmony_ci        return sched_rr_timeslice;
28303d0407baSopenharmony_ci    } else {
28313d0407baSopenharmony_ci        return 0;
28323d0407baSopenharmony_ci    }
28333d0407baSopenharmony_ci}
28343d0407baSopenharmony_ci
28353d0407baSopenharmony_ciconst struct sched_class rt_sched_class __section("__rt_sched_class") = {
28363d0407baSopenharmony_ci    .enqueue_task = enqueue_task_rt,
28373d0407baSopenharmony_ci    .dequeue_task = dequeue_task_rt,
28383d0407baSopenharmony_ci    .yield_task = yield_task_rt,
28393d0407baSopenharmony_ci
28403d0407baSopenharmony_ci    .check_preempt_curr = check_preempt_curr_rt,
28413d0407baSopenharmony_ci
28423d0407baSopenharmony_ci    .pick_next_task = pick_next_task_rt,
28433d0407baSopenharmony_ci    .put_prev_task = put_prev_task_rt,
28443d0407baSopenharmony_ci    .set_next_task = set_next_task_rt,
28453d0407baSopenharmony_ci
28463d0407baSopenharmony_ci#ifdef CONFIG_SMP
28473d0407baSopenharmony_ci    .balance = balance_rt,
28483d0407baSopenharmony_ci    .select_task_rq = select_task_rq_rt,
28493d0407baSopenharmony_ci    .set_cpus_allowed = set_cpus_allowed_common,
28503d0407baSopenharmony_ci    .rq_online = rq_online_rt,
28513d0407baSopenharmony_ci    .rq_offline = rq_offline_rt,
28523d0407baSopenharmony_ci    .task_woken = task_woken_rt,
28533d0407baSopenharmony_ci    .switched_from = switched_from_rt,
28543d0407baSopenharmony_ci#endif
28553d0407baSopenharmony_ci
28563d0407baSopenharmony_ci    .task_tick = task_tick_rt,
28573d0407baSopenharmony_ci
28583d0407baSopenharmony_ci    .get_rr_interval = get_rr_interval_rt,
28593d0407baSopenharmony_ci
28603d0407baSopenharmony_ci    .prio_changed = prio_changed_rt,
28613d0407baSopenharmony_ci    .switched_to = switched_to_rt,
28623d0407baSopenharmony_ci
28633d0407baSopenharmony_ci    .update_curr = update_curr_rt,
28643d0407baSopenharmony_ci
28653d0407baSopenharmony_ci#ifdef CONFIG_UCLAMP_TASK
28663d0407baSopenharmony_ci    .uclamp_enabled = 1,
28673d0407baSopenharmony_ci#endif
28683d0407baSopenharmony_ci#ifdef CONFIG_SCHED_WALT
28693d0407baSopenharmony_ci    .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
28703d0407baSopenharmony_ci#endif
28713d0407baSopenharmony_ci#ifdef CONFIG_SCHED_RT_ACTIVE_LB
28723d0407baSopenharmony_ci    .check_for_migration = check_for_migration_rt,
28733d0407baSopenharmony_ci#endif
28743d0407baSopenharmony_ci};
28753d0407baSopenharmony_ci
28763d0407baSopenharmony_ci#ifdef CONFIG_RT_GROUP_SCHED
28773d0407baSopenharmony_ci/*
28783d0407baSopenharmony_ci * Ensure that the real time constraints are schedulable.
28793d0407baSopenharmony_ci */
28803d0407baSopenharmony_cistatic DEFINE_MUTEX(rt_constraints_mutex);
28813d0407baSopenharmony_ci
28823d0407baSopenharmony_cistatic inline int tg_has_rt_tasks(struct task_group *tg)
28833d0407baSopenharmony_ci{
28843d0407baSopenharmony_ci    struct task_struct *task;
28853d0407baSopenharmony_ci    struct css_task_iter it;
28863d0407baSopenharmony_ci    int ret = 0;
28873d0407baSopenharmony_ci
28883d0407baSopenharmony_ci    /*
28893d0407baSopenharmony_ci     * Autogroups do not have RT tasks; see autogroup_create().
28903d0407baSopenharmony_ci     */
28913d0407baSopenharmony_ci    if (task_group_is_autogroup(tg)) {
28923d0407baSopenharmony_ci        return 0;
28933d0407baSopenharmony_ci    }
28943d0407baSopenharmony_ci
28953d0407baSopenharmony_ci    css_task_iter_start(&tg->css, 0, &it);
28963d0407baSopenharmony_ci    while (!ret && (task = css_task_iter_next(&it))) {
28973d0407baSopenharmony_ci        ret |= rt_task(task);
28983d0407baSopenharmony_ci    }
28993d0407baSopenharmony_ci    css_task_iter_end(&it);
29003d0407baSopenharmony_ci
29013d0407baSopenharmony_ci    return ret;
29023d0407baSopenharmony_ci}
29033d0407baSopenharmony_ci
29043d0407baSopenharmony_cistruct rt_schedulable_data {
29053d0407baSopenharmony_ci    struct task_group *tg;
29063d0407baSopenharmony_ci    u64 rt_period;
29073d0407baSopenharmony_ci    u64 rt_runtime;
29083d0407baSopenharmony_ci};
29093d0407baSopenharmony_ci
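/*
 * Per-group check run by walk_tg_tree(): with @data describing the
 * proposed period/runtime for d->tg, verify that the runtime fits the
 * period, that no group exceeds the global bandwidth ratio, and that the
 * children's combined bandwidth stays within their parent's.
 */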
29103d0407baSopenharmony_cistatic int tg_rt_schedulable(struct task_group *tg, void *data)
29113d0407baSopenharmony_ci{
29123d0407baSopenharmony_ci    struct rt_schedulable_data *d = data;
29133d0407baSopenharmony_ci    struct task_group *child;
29143d0407baSopenharmony_ci    unsigned long total, sum = 0;
29153d0407baSopenharmony_ci    u64 period, runtime;
29163d0407baSopenharmony_ci
29173d0407baSopenharmony_ci    period = ktime_to_ns(tg->rt_bandwidth.rt_period);
29183d0407baSopenharmony_ci    runtime = tg->rt_bandwidth.rt_runtime;
29193d0407baSopenharmony_ci
29203d0407baSopenharmony_ci    if (tg == d->tg) {
29213d0407baSopenharmony_ci        period = d->rt_period;
29223d0407baSopenharmony_ci        runtime = d->rt_runtime;
29233d0407baSopenharmony_ci    }
29243d0407baSopenharmony_ci
29253d0407baSopenharmony_ci    /*
29263d0407baSopenharmony_ci     * Cannot have more runtime than the period.
29273d0407baSopenharmony_ci     */
29283d0407baSopenharmony_ci    if (runtime > period && runtime != RUNTIME_INF) {
29293d0407baSopenharmony_ci        return -EINVAL;
29303d0407baSopenharmony_ci    }
29313d0407baSopenharmony_ci
29323d0407baSopenharmony_ci    /*
29333d0407baSopenharmony_ci     * Ensure we don't starve existing RT tasks if runtime turns zero.
29343d0407baSopenharmony_ci     */
29353d0407baSopenharmony_ci    if (rt_bandwidth_enabled() && !runtime && tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) {
29363d0407baSopenharmony_ci        return -EBUSY;
29373d0407baSopenharmony_ci    }
29383d0407baSopenharmony_ci
29393d0407baSopenharmony_ci    total = to_ratio(period, runtime);
29403d0407baSopenharmony_ci    /*
29413d0407baSopenharmony_ci     * Nobody can have more than the global setting allows.
29423d0407baSopenharmony_ci     */
29433d0407baSopenharmony_ci    if (total > to_ratio(global_rt_period(), global_rt_runtime())) {
29443d0407baSopenharmony_ci        return -EINVAL;
29453d0407baSopenharmony_ci    }
29463d0407baSopenharmony_ci
29473d0407baSopenharmony_ci    /*
29483d0407baSopenharmony_ci     * The sum of our children's runtime should not exceed our own.
29493d0407baSopenharmony_ci     */
29503d0407baSopenharmony_ci    list_for_each_entry_rcu(child, &tg->children, siblings)
29513d0407baSopenharmony_ci    {
29523d0407baSopenharmony_ci        period = ktime_to_ns(child->rt_bandwidth.rt_period);
29533d0407baSopenharmony_ci        runtime = child->rt_bandwidth.rt_runtime;
29543d0407baSopenharmony_ci
29553d0407baSopenharmony_ci        if (child == d->tg) {
29563d0407baSopenharmony_ci            period = d->rt_period;
29573d0407baSopenharmony_ci            runtime = d->rt_runtime;
29583d0407baSopenharmony_ci        }
29593d0407baSopenharmony_ci
29603d0407baSopenharmony_ci        sum += to_ratio(period, runtime);
29613d0407baSopenharmony_ci    }
29623d0407baSopenharmony_ci
29633d0407baSopenharmony_ci    if (sum > total) {
29643d0407baSopenharmony_ci        return -EINVAL;
29653d0407baSopenharmony_ci    }
29663d0407baSopenharmony_ci
29673d0407baSopenharmony_ci    return 0;
29683d0407baSopenharmony_ci}
29693d0407baSopenharmony_ci
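/*
 * Walk the whole task_group tree under RCU and apply tg_rt_schedulable()
 * to every group, so a change anywhere is validated against the full
 * hierarchy.
 */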
29703d0407baSopenharmony_cistatic int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
29713d0407baSopenharmony_ci{
29723d0407baSopenharmony_ci    int ret;
29733d0407baSopenharmony_ci
29743d0407baSopenharmony_ci    struct rt_schedulable_data data = {
29753d0407baSopenharmony_ci        .tg = tg,
29763d0407baSopenharmony_ci        .rt_period = period,
29773d0407baSopenharmony_ci        .rt_runtime = runtime,
29783d0407baSopenharmony_ci    };
29793d0407baSopenharmony_ci
29803d0407baSopenharmony_ci    rcu_read_lock();
29813d0407baSopenharmony_ci    ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
29823d0407baSopenharmony_ci    rcu_read_unlock();
29833d0407baSopenharmony_ci
29843d0407baSopenharmony_ci    return ret;
29853d0407baSopenharmony_ci}
29863d0407baSopenharmony_ci
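/*
 * Apply a new period/runtime pair to @tg: validate it against the whole
 * hierarchy under rt_constraints_mutex, then write the new values and
 * propagate the runtime to the group's per-CPU rt_rqs under the
 * bandwidth locks.
 */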
29873d0407baSopenharmony_cistatic int tg_set_rt_bandwidth(struct task_group *tg, u64 rt_period, u64 rt_runtime)
29883d0407baSopenharmony_ci{
29893d0407baSopenharmony_ci    int i, err = 0;
29903d0407baSopenharmony_ci
29913d0407baSopenharmony_ci    /*
29923d0407baSopenharmony_ci     * Disallowing the root group RT runtime is BAD; it would prevent the
29933d0407baSopenharmony_ci     * kernel from creating (and/or operating) RT threads.
29943d0407baSopenharmony_ci     */
29953d0407baSopenharmony_ci    if (tg == &root_task_group && rt_runtime == 0) {
29963d0407baSopenharmony_ci        return -EINVAL;
29973d0407baSopenharmony_ci    }
29983d0407baSopenharmony_ci
29993d0407baSopenharmony_ci    /* A zero period doesn't make any sense. */
30003d0407baSopenharmony_ci    if (rt_period == 0) {
30013d0407baSopenharmony_ci        return -EINVAL;
30023d0407baSopenharmony_ci    }
30033d0407baSopenharmony_ci
30043d0407baSopenharmony_ci    /*
30053d0407baSopenharmony_ci     * Bound quota to defend quota against overflow during bandwidth shift.
30063d0407baSopenharmony_ci     */
30073d0407baSopenharmony_ci    if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) {
30083d0407baSopenharmony_ci        return -EINVAL;
30093d0407baSopenharmony_ci    }
30103d0407baSopenharmony_ci
30113d0407baSopenharmony_ci    mutex_lock(&rt_constraints_mutex);
30123d0407baSopenharmony_ci    err = __rt_schedulable(tg, rt_period, rt_runtime);
30133d0407baSopenharmony_ci    if (err) {
30143d0407baSopenharmony_ci        goto unlock;
30153d0407baSopenharmony_ci    }
30163d0407baSopenharmony_ci
30173d0407baSopenharmony_ci    raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
30183d0407baSopenharmony_ci    tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
30193d0407baSopenharmony_ci    tg->rt_bandwidth.rt_runtime = rt_runtime;
30203d0407baSopenharmony_ci
30213d0407baSopenharmony_ci    for_each_possible_cpu(i)
30223d0407baSopenharmony_ci    {
30233d0407baSopenharmony_ci        struct rt_rq *rt_rq = tg->rt_rq[i];
30243d0407baSopenharmony_ci
30253d0407baSopenharmony_ci        raw_spin_lock(&rt_rq->rt_runtime_lock);
30263d0407baSopenharmony_ci        rt_rq->rt_runtime = rt_runtime;
30273d0407baSopenharmony_ci        raw_spin_unlock(&rt_rq->rt_runtime_lock);
30283d0407baSopenharmony_ci    }
30293d0407baSopenharmony_ci    raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
30303d0407baSopenharmony_ciunlock:
30313d0407baSopenharmony_ci    mutex_unlock(&rt_constraints_mutex);
30323d0407baSopenharmony_ci
30333d0407baSopenharmony_ci    return err;
30343d0407baSopenharmony_ci}
30353d0407baSopenharmony_ci
30363d0407baSopenharmony_ciint sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
30373d0407baSopenharmony_ci{
30383d0407baSopenharmony_ci    u64 rt_runtime, rt_period;
30393d0407baSopenharmony_ci
30403d0407baSopenharmony_ci    rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
30413d0407baSopenharmony_ci    rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
30423d0407baSopenharmony_ci    if (rt_runtime_us < 0) {
30433d0407baSopenharmony_ci        rt_runtime = RUNTIME_INF;
30443d0407baSopenharmony_ci    } else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC) {
30453d0407baSopenharmony_ci        return -EINVAL;
30463d0407baSopenharmony_ci    }
30473d0407baSopenharmony_ci
30483d0407baSopenharmony_ci    return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
30493d0407baSopenharmony_ci}
30503d0407baSopenharmony_ci
30513d0407baSopenharmony_cilong sched_group_rt_runtime(struct task_group *tg)
30523d0407baSopenharmony_ci{
30533d0407baSopenharmony_ci    u64 rt_runtime_us;
30543d0407baSopenharmony_ci
30553d0407baSopenharmony_ci    if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) {
30563d0407baSopenharmony_ci        return -1;
30573d0407baSopenharmony_ci    }
30583d0407baSopenharmony_ci
30593d0407baSopenharmony_ci    rt_runtime_us = tg->rt_bandwidth.rt_runtime;
30603d0407baSopenharmony_ci    do_div(rt_runtime_us, NSEC_PER_USEC);
30613d0407baSopenharmony_ci    return rt_runtime_us;
30623d0407baSopenharmony_ci}
30633d0407baSopenharmony_ci
30643d0407baSopenharmony_ciint sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
30653d0407baSopenharmony_ci{
30663d0407baSopenharmony_ci    u64 rt_runtime, rt_period;
30673d0407baSopenharmony_ci
30683d0407baSopenharmony_ci    if (rt_period_us > U64_MAX / NSEC_PER_USEC) {
30693d0407baSopenharmony_ci        return -EINVAL;
30703d0407baSopenharmony_ci    }
30713d0407baSopenharmony_ci
30723d0407baSopenharmony_ci    rt_period = rt_period_us * NSEC_PER_USEC;
30733d0407baSopenharmony_ci    rt_runtime = tg->rt_bandwidth.rt_runtime;
30743d0407baSopenharmony_ci
30753d0407baSopenharmony_ci    return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
30763d0407baSopenharmony_ci}
30773d0407baSopenharmony_ci
30783d0407baSopenharmony_cilong sched_group_rt_period(struct task_group *tg)
30793d0407baSopenharmony_ci{
30803d0407baSopenharmony_ci    u64 rt_period_us;
30813d0407baSopenharmony_ci
30823d0407baSopenharmony_ci    rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
30833d0407baSopenharmony_ci    do_div(rt_period_us, NSEC_PER_USEC);
30843d0407baSopenharmony_ci    return rt_period_us;
30853d0407baSopenharmony_ci}
30863d0407baSopenharmony_ci
30873d0407baSopenharmony_cistatic int sched_rt_global_constraints(void)
30883d0407baSopenharmony_ci{
30893d0407baSopenharmony_ci    int ret = 0;
30903d0407baSopenharmony_ci
30913d0407baSopenharmony_ci    mutex_lock(&rt_constraints_mutex);
30923d0407baSopenharmony_ci    ret = __rt_schedulable(NULL, 0, 0);
30933d0407baSopenharmony_ci    mutex_unlock(&rt_constraints_mutex);
30943d0407baSopenharmony_ci
30953d0407baSopenharmony_ci    return ret;
30963d0407baSopenharmony_ci}
30973d0407baSopenharmony_ci
30983d0407baSopenharmony_ciint sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
30993d0407baSopenharmony_ci{
31003d0407baSopenharmony_ci    /* Don't accept realtime tasks when there is no way for them to run */
31013d0407baSopenharmony_ci    if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) {
31023d0407baSopenharmony_ci        return 0;
31033d0407baSopenharmony_ci    }
31043d0407baSopenharmony_ci
31053d0407baSopenharmony_ci    return 1;
31063d0407baSopenharmony_ci}
31073d0407baSopenharmony_ci
31083d0407baSopenharmony_ci#else  /* !CONFIG_RT_GROUP_SCHED */
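/*
 * Without RT group scheduling there is no group hierarchy to validate;
 * simply propagate the global runtime to every CPU's rt_rq.
 */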
31093d0407baSopenharmony_cistatic int sched_rt_global_constraints(void)
31103d0407baSopenharmony_ci{
31113d0407baSopenharmony_ci    unsigned long flags;
31123d0407baSopenharmony_ci    int i;
31133d0407baSopenharmony_ci
31143d0407baSopenharmony_ci    raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
31153d0407baSopenharmony_ci    for_each_possible_cpu(i)
31163d0407baSopenharmony_ci    {
31173d0407baSopenharmony_ci        struct rt_rq *rt_rq = &cpu_rq(i)->rt;
31183d0407baSopenharmony_ci
31193d0407baSopenharmony_ci        raw_spin_lock(&rt_rq->rt_runtime_lock);
31203d0407baSopenharmony_ci        rt_rq->rt_runtime = global_rt_runtime();
31213d0407baSopenharmony_ci        raw_spin_unlock(&rt_rq->rt_runtime_lock);
31223d0407baSopenharmony_ci    }
31233d0407baSopenharmony_ci    raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
31243d0407baSopenharmony_ci
31253d0407baSopenharmony_ci    return 0;
31263d0407baSopenharmony_ci}
31273d0407baSopenharmony_ci#endif /* CONFIG_RT_GROUP_SCHED */
31283d0407baSopenharmony_ci
31293d0407baSopenharmony_cistatic int sched_rt_global_validate(void)
31303d0407baSopenharmony_ci{
31313d0407baSopenharmony_ci    if (sysctl_sched_rt_period <= 0) {
31323d0407baSopenharmony_ci        return -EINVAL;
31333d0407baSopenharmony_ci    }
31343d0407baSopenharmony_ci
31353d0407baSopenharmony_ci    if ((sysctl_sched_rt_runtime != RUNTIME_INF) && ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
31363d0407baSopenharmony_ci                                                     ((u64)sysctl_sched_rt_runtime * NSEC_PER_USEC > max_rt_runtime))) {
31373d0407baSopenharmony_ci        return -EINVAL;
31383d0407baSopenharmony_ci    }
31393d0407baSopenharmony_ci
31403d0407baSopenharmony_ci    return 0;
31413d0407baSopenharmony_ci}
31423d0407baSopenharmony_ci
31433d0407baSopenharmony_cistatic void sched_rt_do_global(void)
31443d0407baSopenharmony_ci{
31453d0407baSopenharmony_ci    raw_spin_lock(&def_rt_bandwidth.rt_runtime_lock);
31463d0407baSopenharmony_ci    def_rt_bandwidth.rt_runtime = global_rt_runtime();
31473d0407baSopenharmony_ci    def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
31483d0407baSopenharmony_ci    raw_spin_unlock(&def_rt_bandwidth.rt_runtime_lock);
31493d0407baSopenharmony_ci}
31503d0407baSopenharmony_ci
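/*
 * sysctl handler for sched_rt_period_us / sched_rt_runtime_us: on a
 * write, validate the new values against both the RT and deadline
 * classes before applying them globally; on any failure the old values
 * are restored.
 */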
31513d0407baSopenharmony_ciint sched_rt_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
31523d0407baSopenharmony_ci{
31533d0407baSopenharmony_ci    int old_period, old_runtime;
31543d0407baSopenharmony_ci    static DEFINE_MUTEX(mutex);
31553d0407baSopenharmony_ci    int ret;
31563d0407baSopenharmony_ci
31573d0407baSopenharmony_ci    mutex_lock(&mutex);
31583d0407baSopenharmony_ci    old_period = sysctl_sched_rt_period;
31593d0407baSopenharmony_ci    old_runtime = sysctl_sched_rt_runtime;
31603d0407baSopenharmony_ci
31613d0407baSopenharmony_ci    ret = proc_dointvec(table, write, buffer, lenp, ppos);
31623d0407baSopenharmony_ci    if (!ret && write) {
31633d0407baSopenharmony_ci        ret = sched_rt_global_validate();
31643d0407baSopenharmony_ci        if (ret) {
31653d0407baSopenharmony_ci            goto undo;
31663d0407baSopenharmony_ci        }
31673d0407baSopenharmony_ci
31683d0407baSopenharmony_ci        ret = sched_dl_global_validate();
31693d0407baSopenharmony_ci        if (ret) {
31703d0407baSopenharmony_ci            goto undo;
31713d0407baSopenharmony_ci        }
31723d0407baSopenharmony_ci
31733d0407baSopenharmony_ci        ret = sched_rt_global_constraints();
31743d0407baSopenharmony_ci        if (ret) {
31753d0407baSopenharmony_ci            goto undo;
31763d0407baSopenharmony_ci        }
31773d0407baSopenharmony_ci
31783d0407baSopenharmony_ci        sched_rt_do_global();
31793d0407baSopenharmony_ci        sched_dl_do_global();
31803d0407baSopenharmony_ci    }
31813d0407baSopenharmony_ci    if (0) {
31823d0407baSopenharmony_ci    undo:
31833d0407baSopenharmony_ci        sysctl_sched_rt_period = old_period;
31843d0407baSopenharmony_ci        sysctl_sched_rt_runtime = old_runtime;
31853d0407baSopenharmony_ci    }
31863d0407baSopenharmony_ci    mutex_unlock(&mutex);
31873d0407baSopenharmony_ci
31883d0407baSopenharmony_ci    return ret;
31893d0407baSopenharmony_ci}
31903d0407baSopenharmony_ci
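/*
 * sysctl handler for the RR timeslice (exposed as sched_rr_timeslice_ms).
 * The value is written in milliseconds and kept internally in jiffies,
 * e.g. writing 100 yields sched_rr_timeslice = msecs_to_jiffies(100);
 * writing zero (or a negative value) restores the RR_TIMESLICE default.
 */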
31913d0407baSopenharmony_ciint sched_rr_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
31923d0407baSopenharmony_ci{
31933d0407baSopenharmony_ci    int ret;
31943d0407baSopenharmony_ci    static DEFINE_MUTEX(mutex);
31953d0407baSopenharmony_ci
31963d0407baSopenharmony_ci    mutex_lock(&mutex);
31973d0407baSopenharmony_ci    ret = proc_dointvec(table, write, buffer, lenp, ppos);
31983d0407baSopenharmony_ci    /*
31993d0407baSopenharmony_ci     * Make sure that internally we keep jiffies.
32003d0407baSopenharmony_ci     * Also, writing zero resets the timeslice to default:
32013d0407baSopenharmony_ci     */
32023d0407baSopenharmony_ci    if (!ret && write) {
32033d0407baSopenharmony_ci        sched_rr_timeslice =
32043d0407baSopenharmony_ci            sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : msecs_to_jiffies(sysctl_sched_rr_timeslice);
32053d0407baSopenharmony_ci    }
32063d0407baSopenharmony_ci    mutex_unlock(&mutex);
32073d0407baSopenharmony_ci
32083d0407baSopenharmony_ci    return ret;
32093d0407baSopenharmony_ci}
32103d0407baSopenharmony_ci
32113d0407baSopenharmony_ci#ifdef CONFIG_SCHED_DEBUG
32123d0407baSopenharmony_civoid print_rt_stats(struct seq_file *m, int cpu)
32133d0407baSopenharmony_ci{
32143d0407baSopenharmony_ci    rt_rq_iter_t iter;
32153d0407baSopenharmony_ci    struct rt_rq *rt_rq;
32163d0407baSopenharmony_ci
32173d0407baSopenharmony_ci    rcu_read_lock();
32183d0407baSopenharmony_ci    cycle_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_rq(m, cpu, rt_rq);
32193d0407baSopenharmony_ci    rcu_read_unlock();
32203d0407baSopenharmony_ci}
32213d0407baSopenharmony_ci#endif /* CONFIG_SCHED_DEBUG */