13d0407baSopenharmony_ci/* CPU control.
23d0407baSopenharmony_ci * (C) 2001, 2002, 2003, 2004 Rusty Russell
33d0407baSopenharmony_ci *
43d0407baSopenharmony_ci * This code is licenced under the GPL.
53d0407baSopenharmony_ci */
63d0407baSopenharmony_ci#include <linux/sched/mm.h>
73d0407baSopenharmony_ci#include <linux/proc_fs.h>
83d0407baSopenharmony_ci#include <linux/smp.h>
93d0407baSopenharmony_ci#include <linux/init.h>
103d0407baSopenharmony_ci#include <linux/notifier.h>
113d0407baSopenharmony_ci#include <linux/sched/signal.h>
123d0407baSopenharmony_ci#include <linux/sched/hotplug.h>
133d0407baSopenharmony_ci#include <linux/sched/isolation.h>
143d0407baSopenharmony_ci#include <linux/sched/task.h>
153d0407baSopenharmony_ci#include <linux/sched/smt.h>
163d0407baSopenharmony_ci#include <linux/unistd.h>
173d0407baSopenharmony_ci#include <linux/cpu.h>
183d0407baSopenharmony_ci#include <linux/oom.h>
193d0407baSopenharmony_ci#include <linux/rcupdate.h>
203d0407baSopenharmony_ci#include <linux/export.h>
213d0407baSopenharmony_ci#include <linux/bug.h>
223d0407baSopenharmony_ci#include <linux/kthread.h>
233d0407baSopenharmony_ci#include <linux/stop_machine.h>
243d0407baSopenharmony_ci#include <linux/mutex.h>
253d0407baSopenharmony_ci#include <linux/gfp.h>
263d0407baSopenharmony_ci#include <linux/suspend.h>
273d0407baSopenharmony_ci#include <linux/lockdep.h>
283d0407baSopenharmony_ci#include <linux/tick.h>
293d0407baSopenharmony_ci#include <linux/irq.h>
303d0407baSopenharmony_ci#include <linux/nmi.h>
313d0407baSopenharmony_ci#include <linux/smpboot.h>
323d0407baSopenharmony_ci#include <linux/relay.h>
333d0407baSopenharmony_ci#include <linux/slab.h>
343d0407baSopenharmony_ci#include <linux/scs.h>
353d0407baSopenharmony_ci#include <linux/percpu-rwsem.h>
363d0407baSopenharmony_ci#include <linux/cpuset.h>
373d0407baSopenharmony_ci#include <linux/random.h>
383d0407baSopenharmony_ci
393d0407baSopenharmony_ci#include <trace/events/power.h>
403d0407baSopenharmony_ci#define CREATE_TRACE_POINTS
413d0407baSopenharmony_ci#include <trace/events/cpuhp.h>
423d0407baSopenharmony_ci
433d0407baSopenharmony_ci#undef CREATE_TRACE_POINTS
443d0407baSopenharmony_ci
453d0407baSopenharmony_ci#include "smpboot.h"
463d0407baSopenharmony_ci
473d0407baSopenharmony_ci#define CPU_PAGE_SIZE_OFF_TWO 2
483d0407baSopenharmony_ci
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	State at which a deliberate callback failure is injected,
 *		or CPUHP_INVALID when fault injection is disabled
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Instance node for a single multi-instance install/remove
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
    enum cpuhp_state state;
    enum cpuhp_state target;
    enum cpuhp_state fail;
#ifdef CONFIG_SMP
    struct task_struct *thread;
    bool should_run;
    bool rollback;
    bool single;
    bool bringup;
    struct hlist_node *node;
    struct hlist_node *last;
    enum cpuhp_state cb_state;
    int result;
    struct completion done_up;
    struct completion done_down;
#endif
};
813d0407baSopenharmony_ci
823d0407baSopenharmony_cistatic DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
833d0407baSopenharmony_ci    .fail = CPUHP_INVALID,
843d0407baSopenharmony_ci};
853d0407baSopenharmony_ci
863d0407baSopenharmony_ci#ifdef CONFIG_SMP
873d0407baSopenharmony_cicpumask_t cpus_booted_once_mask;
883d0407baSopenharmony_ci#endif
893d0407baSopenharmony_ci
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
/*
 * Separate lockdep maps for the up and down directions so that lock
 * dependency chains of a bringup cannot be confused with those of a
 * teardown.
 */
static struct lockdep_map cpuhp_state_up_map = STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map = STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

/* Annotate entry into the hotplug state machine for lockdep. */
static inline void cpuhp_lock_acquire(bool bringup)
{
    lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

/* Annotate exit from the hotplug state machine for lockdep. */
static inline void cpuhp_lock_release(bool bringup)
{
    lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

/* Without lockdep (or on UP) the annotations compile away. */
static inline void cpuhp_lock_acquire(bool bringup)
{
}
static inline void cpuhp_lock_release(bool bringup)
{
}

#endif
1133d0407baSopenharmony_ci
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step; exactly one union member is
 *		valid, selected by @multi_instance
 * @teardown:	Teardown function of the step; same union rule as @startup
 * @list:	List head of registered instances (multi-instance steps only)
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance:	State has multiple instances which get added afterwards
 */
struct cpuhp_step {
    const char *name;
    union {
        int (*single)(unsigned int cpu);
        int (*multi)(unsigned int cpu, struct hlist_node *node);
    } startup;
    union {
        int (*single)(unsigned int cpu);
        int (*multi)(unsigned int cpu, struct hlist_node *node);
    } teardown;
    struct hlist_head list;
    bool cant_stop;
    bool multi_instance;
};
1353d0407baSopenharmony_ci
1363d0407baSopenharmony_cistatic DEFINE_MUTEX(cpuhp_state_mutex);
1373d0407baSopenharmony_cistatic struct cpuhp_step cpuhp_hp_states[];
1383d0407baSopenharmony_ci
1393d0407baSopenharmony_cistatic struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
1403d0407baSopenharmony_ci{
1413d0407baSopenharmony_ci    return cpuhp_hp_states + state;
1423d0407baSopenharmony_ci}
1433d0407baSopenharmony_ci
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:    The cpu for which the callback should be invoked
 * @state:    The state to do callbacks for
 * @bringup:    True if the bringup callback should be invoked
 * @node:    For multi-instance, do a single entry callback for install/remove
 * @lastp:    For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 *
 * Return: 0 on success or if no callback is registered, a negative errno
 * from the callback (or -EAGAIN from fault injection) otherwise.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node,
                                 struct hlist_node **lastp)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    struct cpuhp_step *step = cpuhp_get_step(state);
    int (*cbm)(unsigned int cpu, struct hlist_node *node);
    int (*cb)(unsigned int cpu);
    int ret, cnt;

    /* Fault injection: fail once at the requested state, then disarm. */
    if (st->fail == state) {
        st->fail = CPUHP_INVALID;

        /* Nothing registered for this direction - nothing to fail. */
        if (!(bringup ? step->startup.single : step->teardown.single)) {
            return 0;
        }

        return -EAGAIN;
    }

    if (!step->multi_instance) {
        WARN_ON_ONCE(lastp && *lastp);
        cb = bringup ? step->startup.single : step->teardown.single;
        if (!cb) {
            return 0;
        }
        trace_cpuhp_enter(cpu, st->target, state, cb);
        ret = cb(cpu);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        return ret;
    }
    cbm = bringup ? step->startup.multi : step->teardown.multi;
    if (!cbm) {
        return 0;
    }

    /* Single invocation for instance add/remove */
    if (node) {
        WARN_ON_ONCE(lastp && *lastp);
        trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
        ret = cbm(cpu, node);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        return ret;
    }

    /* State transition. Invoke on all instances */
    cnt = 0;
    hlist_for_each(node, &step->list)
    {
        /* Resuming a partial rollback: stop where the failure happened. */
        if (lastp && node == *lastp) {
            break;
        }

        trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
        ret = cbm(cpu, node);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        if (ret) {
            if (!lastp) {
                goto err;
            }

            /* Record the failing instance so the caller can roll back. */
            *lastp = node;
            return ret;
        }
        cnt++;
    }
    if (lastp) {
        *lastp = NULL;
    }
    return 0;
err:
    /* Rollback the instances if one failed */
    cbm = !bringup ? step->startup.multi : step->teardown.multi;
    if (!cbm) {
        return ret;
    }

    hlist_for_each(node, &step->list)
    {
        /* Only undo the cnt instances that were invoked successfully. */
        if (!cnt--) {
            break;
        }

        trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
        ret = cbm(cpu, node);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        /*
         * Rollback must not fail.
         */
        WARN_ON_ONCE(ret);
    }
    return ret;
}
2463d0407baSopenharmony_ci
2473d0407baSopenharmony_ci#ifdef CONFIG_SMP
2483d0407baSopenharmony_cistatic bool cpuhp_is_ap_state(enum cpuhp_state state)
2493d0407baSopenharmony_ci{
2503d0407baSopenharmony_ci    /*
2513d0407baSopenharmony_ci     * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
2523d0407baSopenharmony_ci     * purposes as that state is handled explicitly in cpu_down.
2533d0407baSopenharmony_ci     */
2543d0407baSopenharmony_ci    return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
2553d0407baSopenharmony_ci}
2563d0407baSopenharmony_ci
2573d0407baSopenharmony_cistatic inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
2583d0407baSopenharmony_ci{
2593d0407baSopenharmony_ci    struct completion *done = bringup ? &st->done_up : &st->done_down;
2603d0407baSopenharmony_ci    wait_for_completion(done);
2613d0407baSopenharmony_ci}
2623d0407baSopenharmony_ci
2633d0407baSopenharmony_cistatic inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
2643d0407baSopenharmony_ci{
2653d0407baSopenharmony_ci    struct completion *done = bringup ? &st->done_up : &st->done_down;
2663d0407baSopenharmony_ci    complete(done);
2673d0407baSopenharmony_ci}
2683d0407baSopenharmony_ci
2693d0407baSopenharmony_ci/*
2703d0407baSopenharmony_ci * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
2713d0407baSopenharmony_ci */
2723d0407baSopenharmony_cistatic bool cpuhp_is_atomic_state(enum cpuhp_state state)
2733d0407baSopenharmony_ci{
2743d0407baSopenharmony_ci    return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
2753d0407baSopenharmony_ci}
2763d0407baSopenharmony_ci
2773d0407baSopenharmony_ci/* Serializes the updates to cpu_online_mask, cpu_present_mask */
2783d0407baSopenharmony_cistatic DEFINE_MUTEX(cpu_add_remove_lock);
2793d0407baSopenharmony_cibool cpuhp_tasks_frozen;
2803d0407baSopenharmony_ciEXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
2813d0407baSopenharmony_ci
2823d0407baSopenharmony_ci/*
2833d0407baSopenharmony_ci * The following two APIs (cpu_maps_update_begin/done) must be used when
2843d0407baSopenharmony_ci * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
2853d0407baSopenharmony_ci */
/* Begin a cpu_online_mask/cpu_present_mask update section. */
void cpu_maps_update_begin(void)
{
    mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL_GPL(cpu_maps_update_begin);

/* End a section opened by cpu_maps_update_begin(). */
void cpu_maps_update_done(void)
{
    mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL_GPL(cpu_maps_update_done);
2973d0407baSopenharmony_ci
2983d0407baSopenharmony_ci/*
2993d0407baSopenharmony_ci * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
3003d0407baSopenharmony_ci * Should always be manipulated under cpu_add_remove_lock
3013d0407baSopenharmony_ci */
3023d0407baSopenharmony_cistatic int cpu_hotplug_disabled;
3033d0407baSopenharmony_ci
3043d0407baSopenharmony_ci#ifdef CONFIG_HOTPLUG_CPU
3053d0407baSopenharmony_ci
3063d0407baSopenharmony_ciDEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
3073d0407baSopenharmony_ci
/* Take the hotplug lock shared: CPUs cannot go up/down while held. */
void cpus_read_lock(void)
{
    percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

/* Non-blocking variant of cpus_read_lock(); returns non-zero on success. */
int cpus_read_trylock(void)
{
    return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

/* Drop the shared hotplug lock. */
void cpus_read_unlock(void)
{
    percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

/* Take the hotplug lock exclusive, for the hotplug operation itself. */
void cpus_write_lock(void)
{
    percpu_down_write(&cpu_hotplug_lock);
}

/* Drop the exclusive hotplug lock. */
void cpus_write_unlock(void)
{
    percpu_up_write(&cpu_hotplug_lock);
}
3353d0407baSopenharmony_ci
3363d0407baSopenharmony_civoid lockdep_assert_cpus_held(void)
3373d0407baSopenharmony_ci{
3383d0407baSopenharmony_ci    /*
3393d0407baSopenharmony_ci     * We can't have hotplug operations before userspace starts running,
3403d0407baSopenharmony_ci     * and some init codepaths will knowingly not take the hotplug lock.
3413d0407baSopenharmony_ci     * This is all valid, so mute lockdep until it makes sense to report
3423d0407baSopenharmony_ci     * unheld locks.
3433d0407baSopenharmony_ci     */
3443d0407baSopenharmony_ci    if (system_state < SYSTEM_RUNNING) {
3453d0407baSopenharmony_ci        return;
3463d0407baSopenharmony_ci    }
3473d0407baSopenharmony_ci
3483d0407baSopenharmony_ci    percpu_rwsem_assert_held(&cpu_hotplug_lock);
3493d0407baSopenharmony_ci}
3503d0407baSopenharmony_ci
/* Tell lockdep we hold cpu_hotplug_lock without actually taking it
 * (used by paths that hold off hotplug by other means). */
static void lockdep_acquire_cpus_lock(void)
{
    rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

/* Release the lockdep-only annotation taken above. */
static void lockdep_release_cpus_lock(void)
{
    rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}
3603d0407baSopenharmony_ci
3613d0407baSopenharmony_ci/*
3623d0407baSopenharmony_ci * Wait for currently running CPU hotplug operations to complete (if any) and
3633d0407baSopenharmony_ci * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
3643d0407baSopenharmony_ci * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
3653d0407baSopenharmony_ci * hotplug path before performing hotplug operations. So acquiring that lock
3663d0407baSopenharmony_ci * guarantees mutual exclusion from any currently running hotplug operations.
3673d0407baSopenharmony_ci */
/* Disable CPU hotplug; nests, so each call needs a matching
 * cpu_hotplug_enable(). */
void cpu_hotplug_disable(void)
{
    cpu_maps_update_begin();
    cpu_hotplug_disabled++;
    cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
3753d0407baSopenharmony_ci
/* Decrement the disable count; warns (and refuses to underflow) on an
 * enable without a matching disable. Caller holds cpu_add_remove_lock. */
static void _cpu_hotplug_enable(void)
{
    if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n")) {
        return;
    }
    cpu_hotplug_disabled--;
}

/* Re-enable CPU hotplug after cpu_hotplug_disable(). */
void cpu_hotplug_enable(void)
{
    cpu_maps_update_begin();
    _cpu_hotplug_enable();
    cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
3913d0407baSopenharmony_ci
3923d0407baSopenharmony_ci#else
3933d0407baSopenharmony_ci
/* CONFIG_HOTPLUG_CPU=n: no hotplug lock exists, annotations are no-ops. */
static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}
4023d0407baSopenharmony_ci#endif /* CONFIG_HOTPLUG_CPU */
4033d0407baSopenharmony_ci
/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void)
{
}
4113d0407baSopenharmony_ci
4123d0407baSopenharmony_ci#ifdef CONFIG_HOTPLUG_SMT
4133d0407baSopenharmony_cienum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
4143d0407baSopenharmony_ci
/*
 * Disable SMT, either softly (@force == false, can be re-enabled) or
 * forcefully (@force == true, irreversible). A no-op when SMT is
 * unsupported or already force-disabled.
 */
void __init cpu_smt_disable(bool force)
{
    if (!cpu_smt_possible()) {
        return;
    }

    if (force) {
        pr_info("SMT: Force disabled\n");
        cpu_smt_control = CPU_SMT_FORCE_DISABLED;
    } else {
        pr_info("SMT: disabled\n");
        cpu_smt_control = CPU_SMT_DISABLED;
    }
}
4293d0407baSopenharmony_ci
/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
    if (!topology_smt_supported()) {
        cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
    }
}
4403d0407baSopenharmony_ci
/* "nosmt" / "nosmt=force" kernel command line handler. */
static int __init smt_cmdline_disable(char *str)
{
    cpu_smt_disable(str && !strcmp(str, "force"));
    return 0;
}
early_param("nosmt", smt_cmdline_disable);
4473d0407baSopenharmony_ci
/* May @cpu be brought online under the current SMT policy? */
static inline bool cpu_smt_allowed(unsigned int cpu)
{
    if (cpu_smt_control == CPU_SMT_ENABLED) {
        return true;
    }

    /* Primary threads are always allowed; only siblings are gated. */
    if (topology_is_primary_thread(cpu)) {
        return true;
    }

    /*
     * On x86 it's required to boot all logical CPUs at least once so
     * that the init code can get a chance to set CR4.MCE on each
     * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
     * core will shutdown the machine.
     */
    return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}
4663d0407baSopenharmony_ci
4673d0407baSopenharmony_ci/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
4683d0407baSopenharmony_cibool cpu_smt_possible(void)
4693d0407baSopenharmony_ci{
4703d0407baSopenharmony_ci    return cpu_smt_control != CPU_SMT_FORCE_DISABLED && cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
4713d0407baSopenharmony_ci}
4723d0407baSopenharmony_ciEXPORT_SYMBOL_GPL(cpu_smt_possible);
4733d0407baSopenharmony_ci#else
4743d0407baSopenharmony_cistatic inline bool cpu_smt_allowed(unsigned int cpu)
4753d0407baSopenharmony_ci{
4763d0407baSopenharmony_ci    return true;
4773d0407baSopenharmony_ci}
4783d0407baSopenharmony_ci#endif
4793d0407baSopenharmony_ci
/*
 * Prime @st for a transition towards @target: clear any stale rollback
 * bookkeeping and derive the direction from the current state.
 *
 * Returns the previous state so the caller can roll back on failure.
 */
static inline enum cpuhp_state cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
    enum cpuhp_state prev_state = st->state;

    st->rollback = false;
    st->last = NULL;

    st->target = target;
    st->single = false;
    /* Direction: bringup when we have to move up to reach the target. */
    st->bringup = st->state < target;

    return prev_state;
}
4933d0407baSopenharmony_ci
4943d0407baSopenharmony_cistatic inline void cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
4953d0407baSopenharmony_ci{
4963d0407baSopenharmony_ci    st->rollback = true;
4973d0407baSopenharmony_ci
4983d0407baSopenharmony_ci    /*
4993d0407baSopenharmony_ci     * If we have st->last we need to undo partial multi_instance of this
5003d0407baSopenharmony_ci     * state first. Otherwise start undo at the previous state.
5013d0407baSopenharmony_ci     */
5023d0407baSopenharmony_ci    if (!st->last) {
5033d0407baSopenharmony_ci        if (st->bringup) {
5043d0407baSopenharmony_ci            st->state--;
5053d0407baSopenharmony_ci        } else {
5063d0407baSopenharmony_ci            st->state++;
5073d0407baSopenharmony_ci        }
5083d0407baSopenharmony_ci    }
5093d0407baSopenharmony_ci
5103d0407baSopenharmony_ci    st->target = prev_state;
5113d0407baSopenharmony_ci    st->bringup = !st->bringup;
5123d0407baSopenharmony_ci}
5133d0407baSopenharmony_ci
/* Regular hotplug invocation of the AP hotplug thread */
static void _cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
    /* Already at the target and no single callback pending: nothing to do. */
    if (!st->single && st->state == st->target) {
        return;
    }

    st->result = 0;
    /*
     * Make sure the above stores are visible before should_run becomes
     * true. Paired with the mb() above in cpuhp_thread_fun()
     */
    smp_mb();
    st->should_run = true;
    wake_up_process(st->thread);
    /* Synchronous: block until the AP thread reports back. */
    wait_for_ap_thread(st, st->bringup);
}
5313d0407baSopenharmony_ci
5323d0407baSopenharmony_cistatic int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
5333d0407baSopenharmony_ci{
5343d0407baSopenharmony_ci    enum cpuhp_state prev_state;
5353d0407baSopenharmony_ci    int ret;
5363d0407baSopenharmony_ci
5373d0407baSopenharmony_ci    prev_state = cpuhp_set_state(st, target);
5383d0407baSopenharmony_ci    _cpuhp_kick_ap(st);
5393d0407baSopenharmony_ci    if ((ret = st->result)) {
5403d0407baSopenharmony_ci        cpuhp_reset_state(st, prev_state);
5413d0407baSopenharmony_ci        _cpuhp_kick_ap(st);
5423d0407baSopenharmony_ci    }
5433d0407baSopenharmony_ci
5443d0407baSopenharmony_ci    return ret;
5453d0407baSopenharmony_ci}
5463d0407baSopenharmony_ci
/*
 * Wait for the freshly started @cpu to come up, unpark its hotplug
 * thread and, if the target lies beyond CPUHP_AP_ONLINE_IDLE, hand the
 * remaining bringup over to that thread.
 *
 * Returns 0 on success, -ECANCELED if the CPU failed to come online or
 * is an SMT sibling that the current SMT policy forbids.
 */
static int bringup_wait_for_ap(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

    /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
    wait_for_ap_thread(st, true);
    if (WARN_ON_ONCE((!cpu_online(cpu)))) {
        return -ECANCELED;
    }

    /* Unpark the hotplug thread of the target cpu */
    kthread_unpark(st->thread);

    /*
     * SMT soft disabling on X86 requires to bring the CPU out of the
     * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
     * CPU marked itself as booted_once in notify_cpu_starting() so the
     * cpu_smt_allowed() check will now return false if this is not the
     * primary sibling.
     */
    if (!cpu_smt_allowed(cpu)) {
        return -ECANCELED;
    }

    if (st->target <= CPUHP_AP_ONLINE_IDLE) {
        return 0;
    }

    /* Let the AP hotplug thread run the remaining states. */
    return cpuhp_kick_ap(st, st->target);
}
5773d0407baSopenharmony_ci
/*
 * CPUHP_BRINGUP_CPU callback: start @cpu via the architecture code and
 * wait for it to come online. Returns 0 on success or a negative errno.
 */
static int bringup_cpu(unsigned int cpu)
{
    struct task_struct *idle = idle_thread_get(cpu);
    int ret;

    /*
     * Reset stale stack state from the last time this CPU was online.
     */
    scs_task_reset(idle);
    kasan_unpoison_task_stack(idle);

    /*
     * Some architectures have to walk the irq descriptors to
     * setup the vector space for the cpu which comes online.
     * Prevent irq alloc/free across the bringup.
     */
    irq_lock_sparse();

    /* Arch-specific enabling code. */
    ret = __cpu_up(cpu, idle);
    irq_unlock_sparse();
    if (ret) {
        return ret;
    }
    return bringup_wait_for_ap(cpu);
}
6043d0407baSopenharmony_ci
/*
 * Final teardown callback for a dead @cpu: drop the mm reference its
 * idle task was still holding. Always returns 0.
 */
static int finish_cpu(unsigned int cpu)
{
    struct task_struct *idle = idle_thread_get(cpu);
    struct mm_struct *mm = idle->active_mm;

    /*
     * idle_task_exit() will have switched to &init_mm, now
     * clean up any remaining active_mm state.
     */
    if (mm != &init_mm) {
        idle->active_mm = &init_mm;
    }
    /* Drop the reference; harmless for init_mm, which is never freed. */
    mmdrop(mm);
    return 0;
}
6203d0407baSopenharmony_ci
6213d0407baSopenharmony_ci/*
6223d0407baSopenharmony_ci * Hotplug state machine related functions
6233d0407baSopenharmony_ci */
6243d0407baSopenharmony_ci
6253d0407baSopenharmony_cistatic void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
6263d0407baSopenharmony_ci{
6273d0407baSopenharmony_ci    for (st->state--; st->state > st->target; st->state--) {
6283d0407baSopenharmony_ci        cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
6293d0407baSopenharmony_ci    }
6303d0407baSopenharmony_ci}
6313d0407baSopenharmony_ci
6323d0407baSopenharmony_cistatic inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
6333d0407baSopenharmony_ci{
6343d0407baSopenharmony_ci    if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
6353d0407baSopenharmony_ci        return true;
6363d0407baSopenharmony_ci    }
6373d0407baSopenharmony_ci    /*
6383d0407baSopenharmony_ci     * When CPU hotplug is disabled, then taking the CPU down is not
6393d0407baSopenharmony_ci     * possible because takedown_cpu() and the architecture and
6403d0407baSopenharmony_ci     * subsystem specific mechanisms are not available. So the CPU
6413d0407baSopenharmony_ci     * which would be completely unplugged again needs to stay around
6423d0407baSopenharmony_ci     * in the current state.
6433d0407baSopenharmony_ci     */
6443d0407baSopenharmony_ci    return st->state <= CPUHP_BRINGUP_CPU;
6453d0407baSopenharmony_ci}
6463d0407baSopenharmony_ci
6473d0407baSopenharmony_cistatic int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
6483d0407baSopenharmony_ci{
6493d0407baSopenharmony_ci    enum cpuhp_state prev_state = st->state;
6503d0407baSopenharmony_ci    int ret = 0;
6513d0407baSopenharmony_ci
6523d0407baSopenharmony_ci    while (st->state < target) {
6533d0407baSopenharmony_ci        st->state++;
6543d0407baSopenharmony_ci        ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
6553d0407baSopenharmony_ci        if (ret) {
6563d0407baSopenharmony_ci            if (can_rollback_cpu(st)) {
6573d0407baSopenharmony_ci                st->target = prev_state;
6583d0407baSopenharmony_ci                undo_cpu_up(cpu, st);
6593d0407baSopenharmony_ci            }
6603d0407baSopenharmony_ci            break;
6613d0407baSopenharmony_ci        }
6623d0407baSopenharmony_ci    }
6633d0407baSopenharmony_ci    return ret;
6643d0407baSopenharmony_ci}
6653d0407baSopenharmony_ci
6663d0407baSopenharmony_ci/*
6673d0407baSopenharmony_ci * The cpu hotplug threads manage the bringup and teardown of the cpus
6683d0407baSopenharmony_ci */
6693d0407baSopenharmony_cistatic void cpuhp_create(unsigned int cpu)
6703d0407baSopenharmony_ci{
6713d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
6723d0407baSopenharmony_ci
6733d0407baSopenharmony_ci    init_completion(&st->done_up);
6743d0407baSopenharmony_ci    init_completion(&st->done_down);
6753d0407baSopenharmony_ci}
6763d0407baSopenharmony_ci
6773d0407baSopenharmony_cistatic int cpuhp_should_run(unsigned int cpu)
6783d0407baSopenharmony_ci{
6793d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
6803d0407baSopenharmony_ci
6813d0407baSopenharmony_ci    return st->should_run;
6823d0407baSopenharmony_ci}
6833d0407baSopenharmony_ci
6843d0407baSopenharmony_ci/*
6853d0407baSopenharmony_ci * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
6863d0407baSopenharmony_ci * callbacks when a state gets [un]installed at runtime.
6873d0407baSopenharmony_ci *
6883d0407baSopenharmony_ci * Each invocation of this function by the smpboot thread does a single AP
6893d0407baSopenharmony_ci * state callback.
6903d0407baSopenharmony_ci *
6913d0407baSopenharmony_ci * It has 3 modes of operation:
6923d0407baSopenharmony_ci *  - single: runs st->cb_state
6933d0407baSopenharmony_ci *  - up:     runs ++st->state, while st->state < st->target
6943d0407baSopenharmony_ci *  - down:   runs st->state--, while st->state > st->target
6953d0407baSopenharmony_ci *
6963d0407baSopenharmony_ci * When complete or on error, should_run is cleared and the completion is fired.
6973d0407baSopenharmony_ci */
static void cpuhp_thread_fun(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
    bool bringup = st->bringup;
    enum cpuhp_state state;

    if (WARN_ON_ONCE(!st->should_run)) {
        return;
    }

    /*
     * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
     * that if we see ->should_run we also see the rest of the state.
     */
    smp_mb();

    /*
     * The BP holds the hotplug lock, but we're now running on the AP,
     * ensure that anybody asserting the lock is held, will actually find
     * it so.
     */
    lockdep_acquire_cpus_lock();
    cpuhp_lock_acquire(bringup);

    if (st->single) {
        /* Single-callback mode (see cpuhp_invoke_ap_callback()). */
        state = st->cb_state;
        st->should_run = false;
    } else {
        /* Multi-state mode: advance exactly one state per invocation. */
        if (bringup) {
            st->state++;
            state = st->state;
            st->should_run = (st->state < st->target);
            WARN_ON_ONCE(st->state > st->target);
        } else {
            state = st->state;
            st->state--;
            st->should_run = (st->state > st->target);
            WARN_ON_ONCE(st->state < st->target);
        }
    }

    WARN_ON_ONCE(!cpuhp_is_ap_state(state));

    if (cpuhp_is_atomic_state(state)) {
        /* Atomic (STARTING/DYING) states run with interrupts disabled. */
        local_irq_disable();
        st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
        local_irq_enable();

        /*
         * STARTING/DYING must not fail!
         */
        WARN_ON_ONCE(st->result);
    } else {
        st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
    }

    if (st->result) {
        /*
         * If we fail on a rollback, we're up a creek without no
         * paddle, no way forward, no way back. We loose, thanks for
         * playing.
         */
        WARN_ON_ONCE(st->rollback);
        st->should_run = false;
    }

    cpuhp_lock_release(bringup);
    lockdep_release_cpus_lock();

    if (!st->should_run) {
        /* Done (or failed): signal completion to the initiating side. */
        complete_ap_thread(st, bringup);
    }
}
7713d0407baSopenharmony_ci
/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    int ret;

    /* Nothing to do for a CPU that is not online. */
    if (!cpu_online(cpu)) {
        return 0;
    }

    /*
     * NOTE(review): the acquire/release pairs below appear to be pure
     * lockdep annotations recording the dependency on the hotplug
     * thread's locking in both directions - confirm; no serialization
     * seems to happen here.
     */
    cpuhp_lock_acquire(false);
    cpuhp_lock_release(false);

    cpuhp_lock_acquire(true);
    cpuhp_lock_release(true);

    /*
     * If we are up and running, use the hotplug thread. For early calls
     * we invoke the thread function directly.
     */
    if (!st->thread) {
        return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
    }

    st->rollback = false;
    st->last = NULL;

    /* Set up a single-callback request and hand it to the AP thread. */
    st->node = node;
    st->bringup = bringup;
    st->cb_state = state;
    st->single = true;

    _cpuhp_kick_ap(st);

    /*
     * If we failed and did a partial, do a rollback.
     */
    if ((ret = st->result) && st->last) {
        st->rollback = true;
        st->bringup = !bringup;

        _cpuhp_kick_ap(st);
    }

    /*
     * Clean up the leftovers so the next hotplug operation wont use stale
     * data.
     */
    st->node = st->last = NULL;
    return ret;
}
8233d0407baSopenharmony_ci
/*
 * Kick the hotplug thread of @cpu towards st->target and wait for the
 * result; returns the outcome of the AP-side state transitions.
 */
static int cpuhp_kick_ap_work(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    enum cpuhp_state prev_state = st->state;
    int ret;

    /*
     * NOTE(review): same lockdep-annotation dance as in
     * cpuhp_invoke_ap_callback() - presumably no serialization here.
     */
    cpuhp_lock_acquire(false);
    cpuhp_lock_release(false);

    cpuhp_lock_acquire(true);
    cpuhp_lock_release(true);

    trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
    ret = cpuhp_kick_ap(st, st->target);
    trace_cpuhp_exit(cpu, st->state, prev_state, ret);

    return ret;
}
8423d0407baSopenharmony_ci
/* Descriptor for the per-CPU "cpuhp/%u" hotplug threads. */
static struct smp_hotplug_thread cpuhp_threads = {
    .store = &cpuhp_state.thread,
    .create = &cpuhp_create,
    .thread_should_run = cpuhp_should_run,
    .thread_fn = cpuhp_thread_fun,
    .thread_comm = "cpuhp/%u",
    /* The threads park/unpark themselves as part of the state machine. */
    .selfparking = true,
};
8513d0407baSopenharmony_ci
/*
 * Register the per-CPU "cpuhp/%u" threads and unpark the boot CPU's
 * instance. Registration failure is fatal at boot.
 */
void __init cpuhp_threads_init(void)
{
    BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
    kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
8573d0407baSopenharmony_ci
8583d0407baSopenharmony_ci/*
8593d0407baSopenharmony_ci *
8603d0407baSopenharmony_ci * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
8613d0407baSopenharmony_ci * protected region.
8623d0407baSopenharmony_ci *
8633d0407baSopenharmony_ci * The operation is still serialized against concurrent CPU hotplug via
8643d0407baSopenharmony_ci * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
8653d0407baSopenharmony_ci * serialized against other hotplug related activity like adding or
8663d0407baSopenharmony_ci * removing of state callbacks and state instances, which invoke either the
8673d0407baSopenharmony_ci * startup or the teardown callback of the affected state.
8683d0407baSopenharmony_ci *
8693d0407baSopenharmony_ci * This is required for subsystems which are unfixable vs. CPU hotplug and
8703d0407baSopenharmony_ci * evade lock inversion problems by scheduling work which has to be
8713d0407baSopenharmony_ci * completed _before_ cpu_up()/_cpu_down() returns.
8723d0407baSopenharmony_ci *
8733d0407baSopenharmony_ci * Don't even think about adding anything to this for any new code or even
8743d0407baSopenharmony_ci * drivers. It's only purpose is to keep existing lock order trainwrecks
8753d0407baSopenharmony_ci * working.
8763d0407baSopenharmony_ci *
8773d0407baSopenharmony_ci * For cpu_down() there might be valid reasons to finish cleanups which are
8783d0407baSopenharmony_ci * not required to be done under cpu_hotplug_lock, but that's a different
8793d0407baSopenharmony_ci * story and would be not invoked via this.
8803d0407baSopenharmony_ci */
8813d0407baSopenharmony_cistatic void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
8823d0407baSopenharmony_ci{
8833d0407baSopenharmony_ci    /*
8843d0407baSopenharmony_ci     * cpusets delegate hotplug operations to a worker to "solve" the
8853d0407baSopenharmony_ci     * lock order problems. Wait for the worker, but only if tasks are
8863d0407baSopenharmony_ci     * _not_ frozen (suspend, hibernate) as that would wait forever.
8873d0407baSopenharmony_ci     *
8883d0407baSopenharmony_ci     * The wait is required because otherwise the hotplug operation
8893d0407baSopenharmony_ci     * returns with inconsistent state, which could even be observed in
8903d0407baSopenharmony_ci     * user space when a new CPU is brought up. The CPU plug uevent
8913d0407baSopenharmony_ci     * would be delivered and user space reacting on it would fail to
8923d0407baSopenharmony_ci     * move tasks to the newly plugged CPU up to the point where the
8933d0407baSopenharmony_ci     * work has finished because up to that point the newly plugged CPU
8943d0407baSopenharmony_ci     * is not assignable in cpusets/cgroups. On unplug that's not
8953d0407baSopenharmony_ci     * necessarily a visible issue, but it is still inconsistent state,
8963d0407baSopenharmony_ci     * which is the real problem which needs to be "fixed". This can't
8973d0407baSopenharmony_ci     * prevent the transient state between scheduling the work and
8983d0407baSopenharmony_ci     * returning from waiting for it.
8993d0407baSopenharmony_ci     */
9003d0407baSopenharmony_ci    if (!tasks_frozen) {
9013d0407baSopenharmony_ci        cpuset_wait_for_hotplug();
9023d0407baSopenharmony_ci    }
9033d0407baSopenharmony_ci}
9043d0407baSopenharmony_ci
9053d0407baSopenharmony_ci#ifdef CONFIG_HOTPLUG_CPU
9063d0407baSopenharmony_ci#ifndef arch_clear_mm_cpumask_cpu
9073d0407baSopenharmony_ci#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
9083d0407baSopenharmony_ci#endif
9093d0407baSopenharmony_ci
9103d0407baSopenharmony_ci/**
9113d0407baSopenharmony_ci * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
9123d0407baSopenharmony_ci * @cpu: a CPU id
9133d0407baSopenharmony_ci *
9143d0407baSopenharmony_ci * This function walks all processes, finds a valid mm struct for each one and
9153d0407baSopenharmony_ci * then clears a corresponding bit in mm's cpumask.  While this all sounds
9163d0407baSopenharmony_ci * trivial, there are various non-obvious corner cases, which this function
9173d0407baSopenharmony_ci * tries to solve in a safe manner.
9183d0407baSopenharmony_ci *
9193d0407baSopenharmony_ci * Also note that the function uses a somewhat relaxed locking scheme, so it may
9203d0407baSopenharmony_ci * be called only for an already offlined CPU.
9213d0407baSopenharmony_ci */
9223d0407baSopenharmony_civoid clear_tasks_mm_cpumask(int cpu)
9233d0407baSopenharmony_ci{
9243d0407baSopenharmony_ci    struct task_struct *p;
9253d0407baSopenharmony_ci
9263d0407baSopenharmony_ci    /*
9273d0407baSopenharmony_ci     * This function is called after the cpu is taken down and marked
9283d0407baSopenharmony_ci     * offline, so its not like new tasks will ever get this cpu set in
9293d0407baSopenharmony_ci     * their mm mask. -- Peter Zijlstra
9303d0407baSopenharmony_ci     * Thus, we may use rcu_read_lock() here, instead of grabbing
9313d0407baSopenharmony_ci     * full-fledged tasklist_lock.
9323d0407baSopenharmony_ci     */
9333d0407baSopenharmony_ci    WARN_ON(cpu_online(cpu));
9343d0407baSopenharmony_ci    rcu_read_lock();
9353d0407baSopenharmony_ci    for_each_process(p)
9363d0407baSopenharmony_ci    {
9373d0407baSopenharmony_ci        struct task_struct *t;
9383d0407baSopenharmony_ci
9393d0407baSopenharmony_ci        /*
9403d0407baSopenharmony_ci         * Main thread might exit, but other threads may still have
9413d0407baSopenharmony_ci         * a valid mm. Find one.
9423d0407baSopenharmony_ci         */
9433d0407baSopenharmony_ci        t = find_lock_task_mm(p);
9443d0407baSopenharmony_ci        if (!t) {
9453d0407baSopenharmony_ci            continue;
9463d0407baSopenharmony_ci        }
9473d0407baSopenharmony_ci        arch_clear_mm_cpumask_cpu(cpu, t->mm);
9483d0407baSopenharmony_ci        task_unlock(t);
9493d0407baSopenharmony_ci    }
9503d0407baSopenharmony_ci    rcu_read_unlock();
9513d0407baSopenharmony_ci}
9523d0407baSopenharmony_ci
/*
 * Take this CPU down. Invoked in stop_machine() context on the dying
 * CPU itself (see takedown_cpu()).
 */
static int take_cpu_down(void *_param)
{
    struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
    enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
    int err, cpu = smp_processor_id();
    int ret;

    /* Ensure this CPU doesn't handle any more interrupts. */
    err = __cpu_disable();
    if (err < 0) {
        return err;
    }

    /*
     * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
     * do this step again.
     */
    WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
    st->state--;
    /* Invoke the former CPU_DYING callbacks */
    for (; st->state > target; st->state--) {
        ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
        /*
         * DYING must not fail!
         */
        WARN_ON_ONCE(ret);
    }

    /* Give up timekeeping duties */
    tick_handover_do_timer();
    /* Remove CPU from timer broadcasting */
    tick_offline_cpu(cpu);
    /* Park the stopper thread */
    stop_machine_park(cpu);
    return 0;
}
9903d0407baSopenharmony_ci
/*
 * Bring @cpu from CPUHP_TEARDOWN_CPU down to CPUHP_AP_IDLE_DEAD and
 * finally kill it. Runs on a control CPU; the dying CPU executes
 * take_cpu_down() via stop_machine.
 */
static int takedown_cpu(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    int err;

    /* Park the smpboot threads */
    kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

    /*
     * Prevent irq alloc/free while the dying cpu reorganizes the
     * interrupt affinities.
     */
    irq_lock_sparse();

    /*
     * So now all preempt/rcu users must observe !cpu_active().
     */
    err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
    if (err) {
        /* CPU refused to die */
        irq_unlock_sparse();
        /* Unpark the hotplug thread so we can rollback there */
        kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
        return err;
    }
    BUG_ON(cpu_online(cpu));

    /*
     * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
     * all runnable tasks from the CPU, there's only the idle task left now
     * that the migration thread is done doing the stop_machine thing.
     *
     * Wait for the stop thread to go away.
     */
    wait_for_ap_thread(st, false);
    BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

    /* Interrupts are moved away from the dying cpu, reenable alloc/free */
    irq_unlock_sparse();

    hotplug_cpu__broadcast_tick_pull(cpu);
    /* This actually kills the CPU. */
    __cpu_die(cpu);

    tick_cleanup_dead_cpu(cpu);
    /* Requeue the dead CPU's pending RCU callbacks on a live CPU. */
    rcutree_migrate_callbacks(cpu);
    return 0;
}
10393d0407baSopenharmony_ci
10403d0407baSopenharmony_cistatic void cpuhp_complete_idle_dead(void *arg)
10413d0407baSopenharmony_ci{
10423d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = arg;
10433d0407baSopenharmony_ci
10443d0407baSopenharmony_ci    complete_ap_thread(st, false);
10453d0407baSopenharmony_ci}
10463d0407baSopenharmony_ci
/*
 * Invoked on the CPU going down, as witnessed by the use of
 * smp_processor_id() and the CPUHP_AP_OFFLINE state check.
 */
void cpuhp_report_idle_dead(void)
{
    struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

    BUG_ON(st->state != CPUHP_AP_OFFLINE);
    /* Tell RCU this CPU is gone before flipping the state. */
    rcu_report_dead(smp_processor_id());
    st->state = CPUHP_AP_IDLE_DEAD;
    /*
     * We cannot call complete after rcu_report_dead() so we delegate it
     * to an online cpu.
     */
    smp_call_function_single(cpumask_first(cpu_online_mask), cpuhp_complete_idle_dead, st, 0);
}
10603d0407baSopenharmony_ci
10613d0407baSopenharmony_cistatic void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
10623d0407baSopenharmony_ci{
10633d0407baSopenharmony_ci    for (st->state++; st->state < st->target; st->state++) {
10643d0407baSopenharmony_ci        cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
10653d0407baSopenharmony_ci    }
10663d0407baSopenharmony_ci}
10673d0407baSopenharmony_ci
10683d0407baSopenharmony_cistatic int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
10693d0407baSopenharmony_ci{
10703d0407baSopenharmony_ci    enum cpuhp_state prev_state = st->state;
10713d0407baSopenharmony_ci    int ret = 0;
10723d0407baSopenharmony_ci
10733d0407baSopenharmony_ci    for (; st->state > target; st->state--) {
10743d0407baSopenharmony_ci        ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
10753d0407baSopenharmony_ci        if (ret) {
10763d0407baSopenharmony_ci            st->target = prev_state;
10773d0407baSopenharmony_ci            if (st->state < prev_state) {
10783d0407baSopenharmony_ci                undo_cpu_down(cpu, st);
10793d0407baSopenharmony_ci            }
10803d0407baSopenharmony_ci            break;
10813d0407baSopenharmony_ci        }
10823d0407baSopenharmony_ci    }
10833d0407baSopenharmony_ci    return ret;
10843d0407baSopenharmony_ci}
10853d0407baSopenharmony_ci
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    int prev_state, ret = 0;

    /* Refuse to take the last active CPU offline. */
    if (num_active_cpus() == 1 && cpu_active(cpu)) {
        return -EBUSY;
    }

    if (!cpu_present(cpu)) {
        return -EINVAL;
    }

#ifdef CONFIG_CPU_ISOLATION_OPT
    /* Keep at least one online, un-isolated CPU unless tasks are frozen. */
    if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1) {
        return -EBUSY;
    }
#endif

    cpus_write_lock();

    cpuhp_tasks_frozen = tasks_frozen;

    prev_state = cpuhp_set_state(st, target);
    /*
     * If the current CPU state is in the range of the AP hotplug thread,
     * then we need to kick the thread.
     */
    if (st->state > CPUHP_TEARDOWN_CPU) {
        st->target = max((int)target, CPUHP_TEARDOWN_CPU);
        ret = cpuhp_kick_ap_work(cpu);
        /*
         * The AP side has done the error rollback already. Just
         * return the error code..
         */
        if (ret) {
            goto out;
        }

        /*
         * We might have stopped still in the range of the AP hotplug
         * thread. Nothing to do anymore.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
            goto out;
        }

        st->target = target;
    }
    /*
     * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
     * to do the further cleanups.
     */
    ret = cpuhp_down_callbacks(cpu, st, target);
    if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
        /* BP-side teardown failed: roll the state machine back up. */
        cpuhp_reset_state(st, prev_state);
        _cpuhp_kick_ap(st);
    }

out:
    cpus_write_unlock();
    /*
     * Do post unplug cleanup. This is still protected against
     * concurrent CPU hotplug via cpu_add_remove_lock.
     */
    lockup_detector_cleanup();
    arch_smt_update();
    cpu_up_down_serialize_trainwrecks(tasks_frozen);
    return ret;
}
11573d0407baSopenharmony_ci
11583d0407baSopenharmony_cistatic int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
11593d0407baSopenharmony_ci{
11603d0407baSopenharmony_ci    if (cpu_hotplug_disabled) {
11613d0407baSopenharmony_ci        return -EBUSY;
11623d0407baSopenharmony_ci    }
11633d0407baSopenharmony_ci    return _cpu_down(cpu, 0, target);
11643d0407baSopenharmony_ci}
11653d0407baSopenharmony_ci
11663d0407baSopenharmony_cistatic int cpu_down(unsigned int cpu, enum cpuhp_state target)
11673d0407baSopenharmony_ci{
11683d0407baSopenharmony_ci    int err;
11693d0407baSopenharmony_ci
11703d0407baSopenharmony_ci    cpu_maps_update_begin();
11713d0407baSopenharmony_ci    err = cpu_down_maps_locked(cpu, target);
11723d0407baSopenharmony_ci    cpu_maps_update_done();
11733d0407baSopenharmony_ci    return err;
11743d0407baSopenharmony_ci}
11753d0407baSopenharmony_ci
/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cpu_device_down(struct device *dev)
{
    return cpu_down(dev->id, CPUHP_OFFLINE);
}
11883d0407baSopenharmony_ci
/**
 * remove_cpu - Offline a CPU, serialized against device hotplug
 * @cpu: CPU id to take offline
 *
 * Goes through the device core so device-level offline state stays
 * consistent.
 *
 * Return: 0 on success, negative errno on failure.
 */
int remove_cpu(unsigned int cpu)
{
    int ret;

    lock_device_hotplug();
    ret = device_offline(get_cpu_device(cpu));
    unlock_device_hotplug();

    return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);
12003d0407baSopenharmony_ci
12013d0407baSopenharmony_civoid smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
12023d0407baSopenharmony_ci{
12033d0407baSopenharmony_ci    unsigned int cpu;
12043d0407baSopenharmony_ci    int error;
12053d0407baSopenharmony_ci
12063d0407baSopenharmony_ci    cpu_maps_update_begin();
12073d0407baSopenharmony_ci
12083d0407baSopenharmony_ci    /*
12093d0407baSopenharmony_ci     * Make certain the cpu I'm about to reboot on is online.
12103d0407baSopenharmony_ci     *
12113d0407baSopenharmony_ci     * This is inline to what migrate_to_reboot_cpu() already do.
12123d0407baSopenharmony_ci     */
12133d0407baSopenharmony_ci    if (!cpu_online(primary_cpu)) {
12143d0407baSopenharmony_ci        primary_cpu = cpumask_first(cpu_online_mask);
12153d0407baSopenharmony_ci    }
12163d0407baSopenharmony_ci
12173d0407baSopenharmony_ci    for_each_online_cpu(cpu)
12183d0407baSopenharmony_ci    {
12193d0407baSopenharmony_ci        if (cpu == primary_cpu) {
12203d0407baSopenharmony_ci            continue;
12213d0407baSopenharmony_ci        }
12223d0407baSopenharmony_ci
12233d0407baSopenharmony_ci        error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
12243d0407baSopenharmony_ci        if (error) {
12253d0407baSopenharmony_ci            pr_err("Failed to offline CPU%d - error=%d", cpu, error);
12263d0407baSopenharmony_ci            break;
12273d0407baSopenharmony_ci        }
12283d0407baSopenharmony_ci    }
12293d0407baSopenharmony_ci
12303d0407baSopenharmony_ci    /*
12313d0407baSopenharmony_ci     * Ensure all but the reboot CPU are offline.
12323d0407baSopenharmony_ci     */
12333d0407baSopenharmony_ci    BUG_ON(num_online_cpus() > 1);
12343d0407baSopenharmony_ci
12353d0407baSopenharmony_ci    /*
12363d0407baSopenharmony_ci     * Make sure the CPUs won't be enabled by someone else after this
12373d0407baSopenharmony_ci     * point. Kexec will reboot to a new kernel shortly resetting
12383d0407baSopenharmony_ci     * everything along the way.
12393d0407baSopenharmony_ci     */
12403d0407baSopenharmony_ci    cpu_hotplug_disabled++;
12413d0407baSopenharmony_ci
12423d0407baSopenharmony_ci    cpu_maps_update_done();
12433d0407baSopenharmony_ci}
12443d0407baSopenharmony_ci
12453d0407baSopenharmony_ci#else
12463d0407baSopenharmony_ci#define takedown_cpu NULL
12473d0407baSopenharmony_ci#endif /* CONFIG_HOTPLUG_CPU */
12483d0407baSopenharmony_ci
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    /* Cap at CPUHP_AP_ONLINE: later states run from the AP hotplug thread */
    enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
    int ret;

    rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
    cpumask_set_cpu(cpu, &cpus_booted_once_mask);
    /* Step through every STARTING state up to the capped target, in order */
    while (st->state < target) {
        st->state++;
        ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
        /*
         * STARTING must not fail!
         */
        WARN_ON_ONCE(ret);
    }
}
12733d0407baSopenharmony_ci
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
    struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

    /* Happens for the boot cpu */
    if (state != CPUHP_AP_ONLINE_IDLE) {
        return;
    }

    /*
     * Unpark the stopper thread before we start the idle loop (and start
     * scheduling); this ensures the stopper task is always available.
     */
    stop_machine_unpark(smp_processor_id());

    st->state = CPUHP_AP_ONLINE_IDLE;
    /* Notify the controlling task that this CPU reached AP_ONLINE_IDLE */
    complete_ap_thread(st, true);
}
12973d0407baSopenharmony_ci
/*
 * _cpu_up - Drive a present CPU towards the @target hotplug state.
 * @cpu:          CPU to online; -EINVAL when not present
 * @tasks_frozen: non-zero on the suspend/resume path (user space frozen)
 * @target:       hotplug state to reach
 *
 * Requires cpu_add_remove_lock to be held.
 * Returns 0 on success or a negative error code.
 */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    struct task_struct *idle;
    int ret = 0;

    cpus_write_lock();

    if (!cpu_present(cpu)) {
        ret = -EINVAL;
        goto out;
    }

    /*
     * The caller of cpu_up() might have raced with another
     * caller. Nothing to do.
     */
    if (st->state >= target) {
        goto out;
    }

    if (st->state == CPUHP_OFFLINE) {
        /* Let it fail before we try to bring the cpu up */
        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
            ret = PTR_ERR(idle);
            goto out;
        }
    }

    cpuhp_tasks_frozen = tasks_frozen;

    cpuhp_set_state(st, target);
    /*
     * If the current CPU state is in the range of the AP hotplug thread,
     * then we need to kick the thread once more.
     */
    if (st->state > CPUHP_BRINGUP_CPU) {
        ret = cpuhp_kick_ap_work(cpu);
        /*
         * The AP side has done the error rollback already. Just
         * return the error code..
         */
        if (ret) {
            goto out;
        }
    }

    /*
     * Try to reach the target state. We max out on the BP at
     * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
     * responsible for bringing it up to the target state.
     */
    target = min((int)target, CPUHP_BRINGUP_CPU);
    ret = cpuhp_up_callbacks(cpu, st, target);
out:
    cpus_write_unlock();
    arch_smt_update();
    cpu_up_down_serialize_trainwrecks(tasks_frozen);
    return ret;
}
13603d0407baSopenharmony_ci
/*
 * cpu_up - Validate @cpu, then bring it to @target under the hotplug locks.
 *
 * Fails with -EINVAL for a non-possible CPU, -EBUSY while hotplug is
 * disabled and -EPERM when SMT policy forbids onlining this CPU.
 */
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
    int err = 0;

    if (!cpu_possible(cpu)) {
        pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
        pr_err("please check additional_cpus= boot parameter\n");
#endif
        return -EINVAL;
    }

    /* Make sure the CPU's memory node is online before onlining the CPU */
    err = try_online_node(cpu_to_node(cpu));
    if (err) {
        return err;
    }

    cpu_maps_update_begin();

    if (cpu_hotplug_disabled) {
        err = -EBUSY;
        goto out;
    }
    if (!cpu_smt_allowed(cpu)) {
        err = -EPERM;
        goto out;
    }

    err = _cpu_up(cpu, 0, target);
out:
    cpu_maps_update_done();
    return err;
}
13943d0407baSopenharmony_ci
13953d0407baSopenharmony_ci/**
13963d0407baSopenharmony_ci * cpu_device_up - Bring up a cpu device
13973d0407baSopenharmony_ci * @dev: Pointer to the cpu device to online
13983d0407baSopenharmony_ci *
13993d0407baSopenharmony_ci * This function is meant to be used by device core cpu subsystem only.
14003d0407baSopenharmony_ci *
14013d0407baSopenharmony_ci * Other subsystems should use add_cpu() instead.
14023d0407baSopenharmony_ci */
14033d0407baSopenharmony_ciint cpu_device_up(struct device *dev)
14043d0407baSopenharmony_ci{
14053d0407baSopenharmony_ci    return cpu_up(dev->id, CPUHP_ONLINE);
14063d0407baSopenharmony_ci}
14073d0407baSopenharmony_ci
/**
 * add_cpu - Online a CPU via the device core
 * @cpu: Number of the CPU to bring up
 *
 * Grabs the device hotplug lock and onlines the corresponding cpu device.
 *
 * Return: 0 on success, negative errno on failure.
 */
int add_cpu(unsigned int cpu)
{
    struct device *cpu_dev = get_cpu_device(cpu);
    int ret;

    lock_device_hotplug();
    ret = device_online(cpu_dev);
    unlock_device_hotplug();

    return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);
14193d0407baSopenharmony_ci
14203d0407baSopenharmony_ci/**
14213d0407baSopenharmony_ci * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
14223d0407baSopenharmony_ci * @sleep_cpu: The cpu we hibernated on and should be brought up.
14233d0407baSopenharmony_ci *
14243d0407baSopenharmony_ci * On some architectures like arm64, we can hibernate on any CPU, but on
14253d0407baSopenharmony_ci * wake up the CPU we hibernated on might be offline as a side effect of
14263d0407baSopenharmony_ci * using maxcpus= for example.
14273d0407baSopenharmony_ci */
14283d0407baSopenharmony_ciint bringup_hibernate_cpu(unsigned int sleep_cpu)
14293d0407baSopenharmony_ci{
14303d0407baSopenharmony_ci    int ret;
14313d0407baSopenharmony_ci
14323d0407baSopenharmony_ci    if (!cpu_online(sleep_cpu)) {
14333d0407baSopenharmony_ci        pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
14343d0407baSopenharmony_ci        ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
14353d0407baSopenharmony_ci        if (ret) {
14363d0407baSopenharmony_ci            pr_err("Failed to bring hibernate-CPU up!\n");
14373d0407baSopenharmony_ci            return ret;
14383d0407baSopenharmony_ci        }
14393d0407baSopenharmony_ci    }
14403d0407baSopenharmony_ci    return 0;
14413d0407baSopenharmony_ci}
14423d0407baSopenharmony_ci
14433d0407baSopenharmony_civoid bringup_nonboot_cpus(unsigned int setup_max_cpus)
14443d0407baSopenharmony_ci{
14453d0407baSopenharmony_ci    unsigned int cpu;
14463d0407baSopenharmony_ci
14473d0407baSopenharmony_ci    for_each_present_cpu(cpu)
14483d0407baSopenharmony_ci    {
14493d0407baSopenharmony_ci        if (num_online_cpus() >= setup_max_cpus) {
14503d0407baSopenharmony_ci            break;
14513d0407baSopenharmony_ci        }
14523d0407baSopenharmony_ci        if (!cpu_online(cpu)) {
14533d0407baSopenharmony_ci            cpu_up(cpu, CPUHP_ONLINE);
14543d0407baSopenharmony_ci        }
14553d0407baSopenharmony_ci    }
14563d0407baSopenharmony_ci}
14573d0407baSopenharmony_ci
14583d0407baSopenharmony_ci#ifdef CONFIG_PM_SLEEP_SMP
14593d0407baSopenharmony_cistatic cpumask_var_t frozen_cpus;
14603d0407baSopenharmony_ci
/*
 * freeze_secondary_cpus - Offline all CPUs except one for suspend/hibernate.
 * @primary: CPU to keep online; -1 picks the first online CPU, preferring a
 *           timer housekeeping CPU.
 *
 * Successfully offlined CPUs are recorded in frozen_cpus so that
 * thaw_secondary_cpus() brings exactly those back up. CPU hotplug is
 * disabled afterwards even on failure; callers are expected to run
 * thaw_secondary_cpus() on their failure path too.
 *
 * Returns 0 on success, -EBUSY on a pending wakeup event, or the error
 * from _cpu_down().
 */
int freeze_secondary_cpus(int primary)
{
    int cpu, error = 0;

    cpu_maps_update_begin();
    if (primary == -1) {
        primary = cpumask_first(cpu_online_mask);
        if (!housekeeping_cpu(primary, HK_FLAG_TIMER)) {
            primary = housekeeping_any_cpu(HK_FLAG_TIMER);
        }
    } else {
        if (!cpu_online(primary)) {
            primary = cpumask_first(cpu_online_mask);
        }
    }

    /*
     * We take down all of the non-boot CPUs in one shot to avoid races
     * with the userspace trying to use the CPU hotplug at the same time
     */
    cpumask_clear(frozen_cpus);

    pr_info("Disabling non-boot CPUs ...\n");
    for_each_online_cpu(cpu)
    {
        if (cpu == primary) {
            continue;
        }

        /* Abort the freeze when a wakeup event arrived meanwhile */
        if (pm_wakeup_pending()) {
            pr_info("Wakeup pending. Abort CPU freeze\n");
            error = -EBUSY;
            break;
        }

        trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
        error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
        trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
        if (!error) {
            cpumask_set_cpu(cpu, frozen_cpus);
        } else {
            pr_err("Error taking CPU%d down: %d\n", cpu, error);
            break;
        }
    }

    if (!error) {
        BUG_ON(num_online_cpus() > 1);
    } else {
        pr_err("Non-boot CPUs are not disabled\n");
    }

    /*
     * Make sure the CPUs won't be enabled by someone else. We need to do
     * this even in case of failure as all freeze_secondary_cpus() users are
     * supposed to do thaw_secondary_cpus() on the failure path.
     */
    cpu_hotplug_disabled++;

    cpu_maps_update_done();
    return error;
}
15233d0407baSopenharmony_ci
/* Arch hook invoked before thawing secondary CPUs; default is a no-op. */
void __weak arch_thaw_secondary_cpus_begin(void)
{
}
15273d0407baSopenharmony_ci
/* Arch hook invoked after thawing secondary CPUs; default is a no-op. */
void __weak arch_thaw_secondary_cpus_end(void)
{
}
15313d0407baSopenharmony_ci
/*
 * thaw_secondary_cpus - Re-enable CPU hotplug and online every CPU that
 * freeze_secondary_cpus() recorded in frozen_cpus. Per-CPU bringup
 * failures are logged but do not stop the loop.
 */
void thaw_secondary_cpus(void)
{
    int cpu, error;

    /* Allow everyone to use the CPU hotplug again */
    cpu_maps_update_begin();
    _cpu_hotplug_enable();
    /* Nothing was frozen (freeze failed early or never ran) */
    if (cpumask_empty(frozen_cpus)) {
        goto out;
    }

    pr_info("Enabling non-boot CPUs ...\n");

    arch_thaw_secondary_cpus_begin();

    for_each_cpu(cpu, frozen_cpus)
    {
        trace_suspend_resume(TPS("CPU_ON"), cpu, true);
        error = _cpu_up(cpu, 1, CPUHP_ONLINE);
        trace_suspend_resume(TPS("CPU_ON"), cpu, false);
        if (!error) {
            pr_info("CPU%d is up\n", cpu);
            continue;
        }
        pr_warn("Error taking CPU%d up: %d\n", cpu, error);
    }

    arch_thaw_secondary_cpus_end();

    cpumask_clear(frozen_cpus);
out:
    cpu_maps_update_done();
}
15653d0407baSopenharmony_ci
15663d0407baSopenharmony_cistatic int __init alloc_frozen_cpus(void)
15673d0407baSopenharmony_ci{
15683d0407baSopenharmony_ci    if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL | __GFP_ZERO)) {
15693d0407baSopenharmony_ci        return -ENOMEM;
15703d0407baSopenharmony_ci    }
15713d0407baSopenharmony_ci    return 0;
15723d0407baSopenharmony_ci}
15733d0407baSopenharmony_cicore_initcall(alloc_frozen_cpus);
15743d0407baSopenharmony_ci
15753d0407baSopenharmony_ci/*
15763d0407baSopenharmony_ci * When callbacks for CPU hotplug notifications are being executed, we must
15773d0407baSopenharmony_ci * ensure that the state of the system with respect to the tasks being frozen
15783d0407baSopenharmony_ci * or not, as reported by the notification, remains unchanged *throughout the
15793d0407baSopenharmony_ci * duration* of the execution of the callbacks.
15803d0407baSopenharmony_ci * Hence we need to prevent the freezer from racing with regular CPU hotplug.
15813d0407baSopenharmony_ci *
15823d0407baSopenharmony_ci * This synchronization is implemented by mutually excluding regular CPU
15833d0407baSopenharmony_ci * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
15843d0407baSopenharmony_ci * Hibernate notifications.
15853d0407baSopenharmony_ci */
15863d0407baSopenharmony_cistatic int cpu_hotplug_pm_callback(struct notifier_block *nb, unsigned long action, void *ptr)
15873d0407baSopenharmony_ci{
15883d0407baSopenharmony_ci    switch (action) {
15893d0407baSopenharmony_ci        case PM_SUSPEND_PREPARE:
15903d0407baSopenharmony_ci        case PM_HIBERNATION_PREPARE:
15913d0407baSopenharmony_ci            cpu_hotplug_disable();
15923d0407baSopenharmony_ci            break;
15933d0407baSopenharmony_ci
15943d0407baSopenharmony_ci        case PM_POST_SUSPEND:
15953d0407baSopenharmony_ci        case PM_POST_HIBERNATION:
15963d0407baSopenharmony_ci            cpu_hotplug_enable();
15973d0407baSopenharmony_ci            break;
15983d0407baSopenharmony_ci
15993d0407baSopenharmony_ci        default:
16003d0407baSopenharmony_ci            return NOTIFY_DONE;
16013d0407baSopenharmony_ci    }
16023d0407baSopenharmony_ci
16033d0407baSopenharmony_ci    return NOTIFY_OK;
16043d0407baSopenharmony_ci}
16053d0407baSopenharmony_ci
/* Register the PM notifier that fences CPU hotplug around suspend/hibernate. */
static int __init cpu_hotplug_pm_sync_init(void)
{
    /*
     * cpu_hotplug_pm_callback has higher priority than x86
     * bsp_pm_callback which depends on cpu_hotplug_pm_callback
     * to disable cpu hotplug to avoid cpu hotplug race.
     */
    pm_notifier(cpu_hotplug_pm_callback, 0);
    return 0;
}
16163d0407baSopenharmony_cicore_initcall(cpu_hotplug_pm_sync_init);
16173d0407baSopenharmony_ci
16183d0407baSopenharmony_ci#endif /* CONFIG_PM_SLEEP_SMP */
16193d0407baSopenharmony_ci
/* ID of the boot CPU — presumably assigned by early boot code elsewhere; not written in this file. */
int __boot_cpu_id;
16213d0407baSopenharmony_ci
16223d0407baSopenharmony_ci#endif /* CONFIG_SMP */
16233d0407baSopenharmony_ci
16243d0407baSopenharmony_ci/* Boot processor state steps */
16253d0407baSopenharmony_cistatic struct cpuhp_step cpuhp_hp_states[] = {
16263d0407baSopenharmony_ci    [CPUHP_OFFLINE] =
16273d0407baSopenharmony_ci        {
16283d0407baSopenharmony_ci            .name = "offline",
16293d0407baSopenharmony_ci            .startup.single = NULL,
16303d0407baSopenharmony_ci            .teardown.single = NULL,
16313d0407baSopenharmony_ci        },
16323d0407baSopenharmony_ci#ifdef CONFIG_SMP
16333d0407baSopenharmony_ci    [CPUHP_CREATE_THREADS] =
16343d0407baSopenharmony_ci        {
16353d0407baSopenharmony_ci            .name = "threads:prepare",
16363d0407baSopenharmony_ci            .startup.single = smpboot_create_threads,
16373d0407baSopenharmony_ci            .teardown.single = NULL,
16383d0407baSopenharmony_ci            .cant_stop = true,
16393d0407baSopenharmony_ci        },
16403d0407baSopenharmony_ci    [CPUHP_PERF_PREPARE] =
16413d0407baSopenharmony_ci        {
16423d0407baSopenharmony_ci            .name = "perf:prepare",
16433d0407baSopenharmony_ci            .startup.single = perf_event_init_cpu,
16443d0407baSopenharmony_ci            .teardown.single = perf_event_exit_cpu,
16453d0407baSopenharmony_ci        },
16463d0407baSopenharmony_ci    [CPUHP_RANDOM_PREPARE] = {
16473d0407baSopenharmony_ci        .name			= "random:prepare",
16483d0407baSopenharmony_ci        .startup.single		= random_prepare_cpu,
16493d0407baSopenharmony_ci        .teardown.single	= NULL,
16503d0407baSopenharmony_ci    },
16513d0407baSopenharmony_ci
16523d0407baSopenharmony_ci    [CPUHP_WORKQUEUE_PREP] =
16533d0407baSopenharmony_ci        {
16543d0407baSopenharmony_ci            .name = "workqueue:prepare",
16553d0407baSopenharmony_ci            .startup.single = workqueue_prepare_cpu,
16563d0407baSopenharmony_ci            .teardown.single = NULL,
16573d0407baSopenharmony_ci        },
16583d0407baSopenharmony_ci    [CPUHP_HRTIMERS_PREPARE] =
16593d0407baSopenharmony_ci        {
16603d0407baSopenharmony_ci            .name = "hrtimers:prepare",
16613d0407baSopenharmony_ci            .startup.single = hrtimers_prepare_cpu,
16623d0407baSopenharmony_ci            .teardown.single = hrtimers_dead_cpu,
16633d0407baSopenharmony_ci        },
16643d0407baSopenharmony_ci    [CPUHP_SMPCFD_PREPARE] =
16653d0407baSopenharmony_ci        {
16663d0407baSopenharmony_ci            .name = "smpcfd:prepare",
16673d0407baSopenharmony_ci            .startup.single = smpcfd_prepare_cpu,
16683d0407baSopenharmony_ci            .teardown.single = smpcfd_dead_cpu,
16693d0407baSopenharmony_ci        },
16703d0407baSopenharmony_ci    [CPUHP_RELAY_PREPARE] =
16713d0407baSopenharmony_ci        {
16723d0407baSopenharmony_ci            .name = "relay:prepare",
16733d0407baSopenharmony_ci            .startup.single = relay_prepare_cpu,
16743d0407baSopenharmony_ci            .teardown.single = NULL,
16753d0407baSopenharmony_ci        },
16763d0407baSopenharmony_ci    [CPUHP_SLAB_PREPARE] =
16773d0407baSopenharmony_ci        {
16783d0407baSopenharmony_ci            .name = "slab:prepare",
16793d0407baSopenharmony_ci            .startup.single = slab_prepare_cpu,
16803d0407baSopenharmony_ci            .teardown.single = slab_dead_cpu,
16813d0407baSopenharmony_ci        },
16823d0407baSopenharmony_ci    [CPUHP_RCUTREE_PREP] =
16833d0407baSopenharmony_ci        {
16843d0407baSopenharmony_ci            .name = "RCU/tree:prepare",
16853d0407baSopenharmony_ci            .startup.single = rcutree_prepare_cpu,
16863d0407baSopenharmony_ci            .teardown.single = rcutree_dead_cpu,
16873d0407baSopenharmony_ci        },
16883d0407baSopenharmony_ci    /*
16893d0407baSopenharmony_ci     * On the tear-down path, timers_dead_cpu() must be invoked
16903d0407baSopenharmony_ci     * before blk_mq_queue_reinit_notify() from notify_dead(),
16913d0407baSopenharmony_ci     * otherwise a RCU stall occurs.
16923d0407baSopenharmony_ci     */
16933d0407baSopenharmony_ci    [CPUHP_TIMERS_PREPARE] =
16943d0407baSopenharmony_ci        {
16953d0407baSopenharmony_ci            .name = "timers:prepare",
16963d0407baSopenharmony_ci            .startup.single = timers_prepare_cpu,
16973d0407baSopenharmony_ci            .teardown.single = timers_dead_cpu,
16983d0407baSopenharmony_ci        },
16993d0407baSopenharmony_ci    /* Kicks the plugged cpu into life */
17003d0407baSopenharmony_ci    [CPUHP_BRINGUP_CPU] =
17013d0407baSopenharmony_ci        {
17023d0407baSopenharmony_ci            .name = "cpu:bringup",
17033d0407baSopenharmony_ci            .startup.single = bringup_cpu,
17043d0407baSopenharmony_ci            .teardown.single = finish_cpu,
17053d0407baSopenharmony_ci            .cant_stop = true,
17063d0407baSopenharmony_ci        },
17073d0407baSopenharmony_ci    /* Final state before CPU kills itself */
17083d0407baSopenharmony_ci    [CPUHP_AP_IDLE_DEAD] =
17093d0407baSopenharmony_ci        {
17103d0407baSopenharmony_ci            .name = "idle:dead",
17113d0407baSopenharmony_ci        },
17123d0407baSopenharmony_ci    /*
17133d0407baSopenharmony_ci     * Last state before CPU enters the idle loop to die. Transient state
17143d0407baSopenharmony_ci     * for synchronization.
17153d0407baSopenharmony_ci     */
17163d0407baSopenharmony_ci    [CPUHP_AP_OFFLINE] =
17173d0407baSopenharmony_ci        {
17183d0407baSopenharmony_ci            .name = "ap:offline",
17193d0407baSopenharmony_ci            .cant_stop = true,
17203d0407baSopenharmony_ci        },
17213d0407baSopenharmony_ci    /* First state is scheduler control. Interrupts are disabled */
17223d0407baSopenharmony_ci    [CPUHP_AP_SCHED_STARTING] =
17233d0407baSopenharmony_ci        {
17243d0407baSopenharmony_ci            .name = "sched:starting",
17253d0407baSopenharmony_ci            .startup.single = sched_cpu_starting,
17263d0407baSopenharmony_ci            .teardown.single = sched_cpu_dying,
17273d0407baSopenharmony_ci        },
17283d0407baSopenharmony_ci    [CPUHP_AP_RCUTREE_DYING] =
17293d0407baSopenharmony_ci        {
17303d0407baSopenharmony_ci            .name = "RCU/tree:dying",
17313d0407baSopenharmony_ci            .startup.single = NULL,
17323d0407baSopenharmony_ci            .teardown.single = rcutree_dying_cpu,
17333d0407baSopenharmony_ci        },
17343d0407baSopenharmony_ci    [CPUHP_AP_SMPCFD_DYING] =
17353d0407baSopenharmony_ci        {
17363d0407baSopenharmony_ci            .name = "smpcfd:dying",
17373d0407baSopenharmony_ci            .startup.single = NULL,
17383d0407baSopenharmony_ci            .teardown.single = smpcfd_dying_cpu,
17393d0407baSopenharmony_ci        },
17403d0407baSopenharmony_ci    /* Entry state on starting. Interrupts enabled from here on. Transient
17413d0407baSopenharmony_ci     * state for synchronsization */
17423d0407baSopenharmony_ci    [CPUHP_AP_ONLINE] =
17433d0407baSopenharmony_ci        {
17443d0407baSopenharmony_ci            .name = "ap:online",
17453d0407baSopenharmony_ci        },
17463d0407baSopenharmony_ci    /*
17473d0407baSopenharmony_ci     * Handled on controll processor until the plugged processor manages
17483d0407baSopenharmony_ci     * this itself.
17493d0407baSopenharmony_ci     */
17503d0407baSopenharmony_ci    [CPUHP_TEARDOWN_CPU] =
17513d0407baSopenharmony_ci        {
17523d0407baSopenharmony_ci            .name = "cpu:teardown",
17533d0407baSopenharmony_ci            .startup.single = NULL,
17543d0407baSopenharmony_ci            .teardown.single = takedown_cpu,
17553d0407baSopenharmony_ci            .cant_stop = true,
17563d0407baSopenharmony_ci        },
17573d0407baSopenharmony_ci    /* Handle smpboot threads park/unpark */
17583d0407baSopenharmony_ci    [CPUHP_AP_SMPBOOT_THREADS] =
17593d0407baSopenharmony_ci        {
17603d0407baSopenharmony_ci            .name = "smpboot/threads:online",
17613d0407baSopenharmony_ci            .startup.single = smpboot_unpark_threads,
17623d0407baSopenharmony_ci            .teardown.single = smpboot_park_threads,
17633d0407baSopenharmony_ci        },
17643d0407baSopenharmony_ci    [CPUHP_AP_IRQ_AFFINITY_ONLINE] =
17653d0407baSopenharmony_ci        {
17663d0407baSopenharmony_ci            .name = "irq/affinity:online",
17673d0407baSopenharmony_ci            .startup.single = irq_affinity_online_cpu,
17683d0407baSopenharmony_ci            .teardown.single = NULL,
17693d0407baSopenharmony_ci        },
17703d0407baSopenharmony_ci    [CPUHP_AP_PERF_ONLINE] =
17713d0407baSopenharmony_ci        {
17723d0407baSopenharmony_ci            .name = "perf:online",
17733d0407baSopenharmony_ci            .startup.single = perf_event_init_cpu,
17743d0407baSopenharmony_ci            .teardown.single = perf_event_exit_cpu,
17753d0407baSopenharmony_ci        },
17763d0407baSopenharmony_ci    [CPUHP_AP_WATCHDOG_ONLINE] =
17773d0407baSopenharmony_ci        {
17783d0407baSopenharmony_ci            .name = "lockup_detector:online",
17793d0407baSopenharmony_ci            .startup.single = lockup_detector_online_cpu,
17803d0407baSopenharmony_ci            .teardown.single = lockup_detector_offline_cpu,
17813d0407baSopenharmony_ci        },
17823d0407baSopenharmony_ci    [CPUHP_AP_WORKQUEUE_ONLINE] =
17833d0407baSopenharmony_ci        {
17843d0407baSopenharmony_ci            .name = "workqueue:online",
17853d0407baSopenharmony_ci            .startup.single = workqueue_online_cpu,
17863d0407baSopenharmony_ci            .teardown.single = workqueue_offline_cpu,
17873d0407baSopenharmony_ci        },
17883d0407baSopenharmony_ci    [CPUHP_AP_RANDOM_ONLINE] = {
17893d0407baSopenharmony_ci        .name			= "random:online",
17903d0407baSopenharmony_ci        .startup.single		= random_online_cpu,
17913d0407baSopenharmony_ci        .teardown.single	= NULL,
17923d0407baSopenharmony_ci    },
17933d0407baSopenharmony_ci    [CPUHP_AP_RCUTREE_ONLINE] =
17943d0407baSopenharmony_ci        {
17953d0407baSopenharmony_ci            .name = "RCU/tree:online",
17963d0407baSopenharmony_ci            .startup.single = rcutree_online_cpu,
17973d0407baSopenharmony_ci            .teardown.single = rcutree_offline_cpu,
17983d0407baSopenharmony_ci        },
17993d0407baSopenharmony_ci#endif
18003d0407baSopenharmony_ci/*
18013d0407baSopenharmony_ci * The dynamically registered state space is here
18023d0407baSopenharmony_ci */
18033d0407baSopenharmony_ci
18043d0407baSopenharmony_ci#ifdef CONFIG_SMP
18053d0407baSopenharmony_ci    /* Last state is scheduler control setting the cpu active */
18063d0407baSopenharmony_ci    [CPUHP_AP_ACTIVE] =
18073d0407baSopenharmony_ci        {
18083d0407baSopenharmony_ci            .name = "sched:active",
18093d0407baSopenharmony_ci            .startup.single = sched_cpu_activate,
18103d0407baSopenharmony_ci            .teardown.single = sched_cpu_deactivate,
18113d0407baSopenharmony_ci        },
18123d0407baSopenharmony_ci#endif
18133d0407baSopenharmony_ci
18143d0407baSopenharmony_ci    /* CPU is fully up and running. */
18153d0407baSopenharmony_ci    [CPUHP_ONLINE] =
18163d0407baSopenharmony_ci        {
18173d0407baSopenharmony_ci            .name = "online",
18183d0407baSopenharmony_ci            .startup.single = NULL,
18193d0407baSopenharmony_ci            .teardown.single = NULL,
18203d0407baSopenharmony_ci        },
18213d0407baSopenharmony_ci};
18223d0407baSopenharmony_ci
18233d0407baSopenharmony_ci/* Sanity check for callbacks */
18243d0407baSopenharmony_cistatic int cpuhp_cb_check(enum cpuhp_state state)
18253d0407baSopenharmony_ci{
18263d0407baSopenharmony_ci    if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE) {
18273d0407baSopenharmony_ci        return -EINVAL;
18283d0407baSopenharmony_ci    }
18293d0407baSopenharmony_ci    return 0;
18303d0407baSopenharmony_ci}
18313d0407baSopenharmony_ci
18323d0407baSopenharmony_ci/*
18333d0407baSopenharmony_ci * Returns a free for dynamic slot assignment of the Online state. The states
18343d0407baSopenharmony_ci * are protected by the cpuhp_slot_states mutex and an empty slot is identified
18353d0407baSopenharmony_ci * by having no name assigned.
18363d0407baSopenharmony_ci */
18373d0407baSopenharmony_cistatic int cpuhp_reserve_state(enum cpuhp_state state)
18383d0407baSopenharmony_ci{
18393d0407baSopenharmony_ci    enum cpuhp_state i, end;
18403d0407baSopenharmony_ci    struct cpuhp_step *step;
18413d0407baSopenharmony_ci
18423d0407baSopenharmony_ci    switch (state) {
18433d0407baSopenharmony_ci        case CPUHP_AP_ONLINE_DYN:
18443d0407baSopenharmony_ci            step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
18453d0407baSopenharmony_ci            end = CPUHP_AP_ONLINE_DYN_END;
18463d0407baSopenharmony_ci            break;
18473d0407baSopenharmony_ci        case CPUHP_BP_PREPARE_DYN:
18483d0407baSopenharmony_ci            step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
18493d0407baSopenharmony_ci            end = CPUHP_BP_PREPARE_DYN_END;
18503d0407baSopenharmony_ci            break;
18513d0407baSopenharmony_ci        default:
18523d0407baSopenharmony_ci            return -EINVAL;
18533d0407baSopenharmony_ci    }
18543d0407baSopenharmony_ci
18553d0407baSopenharmony_ci    for (i = state; i <= end; i++, step++) {
18563d0407baSopenharmony_ci        if (!step->name) {
18573d0407baSopenharmony_ci            return i;
18583d0407baSopenharmony_ci        }
18593d0407baSopenharmony_ci    }
18603d0407baSopenharmony_ci    WARN(1, "No more dynamic states available for CPU hotplug\n");
18613d0407baSopenharmony_ci    return -ENOSPC;
18623d0407baSopenharmony_ci}
18633d0407baSopenharmony_ci
18643d0407baSopenharmony_cistatic int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu),
18653d0407baSopenharmony_ci                                 int (*teardown)(unsigned int cpu), bool multi_instance)
18663d0407baSopenharmony_ci{
18673d0407baSopenharmony_ci    /* (Un)Install the callbacks for further cpu hotplug operations */
18683d0407baSopenharmony_ci    struct cpuhp_step *sp;
18693d0407baSopenharmony_ci    int ret = 0;
18703d0407baSopenharmony_ci
18713d0407baSopenharmony_ci    /*
18723d0407baSopenharmony_ci     * If name is NULL, then the state gets removed.
18733d0407baSopenharmony_ci     *
18743d0407baSopenharmony_ci     * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
18753d0407baSopenharmony_ci     * the first allocation from these dynamic ranges, so the removal
18763d0407baSopenharmony_ci     * would trigger a new allocation and clear the wrong (already
18773d0407baSopenharmony_ci     * empty) state, leaving the callbacks of the to be cleared state
18783d0407baSopenharmony_ci     * dangling, which causes wreckage on the next hotplug operation.
18793d0407baSopenharmony_ci     */
18803d0407baSopenharmony_ci    if (name && (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN)) {
18813d0407baSopenharmony_ci        ret = cpuhp_reserve_state(state);
18823d0407baSopenharmony_ci        if (ret < 0) {
18833d0407baSopenharmony_ci            return ret;
18843d0407baSopenharmony_ci        }
18853d0407baSopenharmony_ci        state = ret;
18863d0407baSopenharmony_ci    }
18873d0407baSopenharmony_ci    sp = cpuhp_get_step(state);
18883d0407baSopenharmony_ci    if (name && sp->name) {
18893d0407baSopenharmony_ci        return -EBUSY;
18903d0407baSopenharmony_ci    }
18913d0407baSopenharmony_ci
18923d0407baSopenharmony_ci    sp->startup.single = startup;
18933d0407baSopenharmony_ci    sp->teardown.single = teardown;
18943d0407baSopenharmony_ci    sp->name = name;
18953d0407baSopenharmony_ci    sp->multi_instance = multi_instance;
18963d0407baSopenharmony_ci    INIT_HLIST_HEAD(&sp->list);
18973d0407baSopenharmony_ci    return ret;
18983d0407baSopenharmony_ci}
18993d0407baSopenharmony_ci
19003d0407baSopenharmony_cistatic void *cpuhp_get_teardown_cb(enum cpuhp_state state)
19013d0407baSopenharmony_ci{
19023d0407baSopenharmony_ci    return cpuhp_get_step(state)->teardown.single;
19033d0407baSopenharmony_ci}
19043d0407baSopenharmony_ci
19053d0407baSopenharmony_ci/*
19063d0407baSopenharmony_ci * Call the startup/teardown function for a step either on the AP or
19073d0407baSopenharmony_ci * on the current CPU.
19083d0407baSopenharmony_ci */
19093d0407baSopenharmony_cistatic int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node)
19103d0407baSopenharmony_ci{
19113d0407baSopenharmony_ci    struct cpuhp_step *sp = cpuhp_get_step(state);
19123d0407baSopenharmony_ci    int ret;
19133d0407baSopenharmony_ci
19143d0407baSopenharmony_ci    /*
19153d0407baSopenharmony_ci     * If there's nothing to do, we done.
19163d0407baSopenharmony_ci     * Relies on the union for multi_instance.
19173d0407baSopenharmony_ci     */
19183d0407baSopenharmony_ci    if ((bringup && !sp->startup.single) || (!bringup && !sp->teardown.single)) {
19193d0407baSopenharmony_ci        return 0;
19203d0407baSopenharmony_ci    }
19213d0407baSopenharmony_ci    /*
19223d0407baSopenharmony_ci     * The non AP bound callbacks can fail on bringup. On teardown
19233d0407baSopenharmony_ci     * e.g. module removal we crash for now.
19243d0407baSopenharmony_ci     */
19253d0407baSopenharmony_ci#ifdef CONFIG_SMP
19263d0407baSopenharmony_ci    if (cpuhp_is_ap_state(state)) {
19273d0407baSopenharmony_ci        ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
19283d0407baSopenharmony_ci    } else {
19293d0407baSopenharmony_ci        ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
19303d0407baSopenharmony_ci    }
19313d0407baSopenharmony_ci#else
19323d0407baSopenharmony_ci    ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
19333d0407baSopenharmony_ci#endif
19343d0407baSopenharmony_ci    BUG_ON(ret && !bringup);
19353d0407baSopenharmony_ci    return ret;
19363d0407baSopenharmony_ci}
19373d0407baSopenharmony_ci
19383d0407baSopenharmony_ci/*
19393d0407baSopenharmony_ci * Called from __cpuhp_setup_state on a recoverable failure.
19403d0407baSopenharmony_ci *
19413d0407baSopenharmony_ci * Note: The teardown callbacks for rollback are not allowed to fail!
19423d0407baSopenharmony_ci */
19433d0407baSopenharmony_cistatic void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, struct hlist_node *node)
19443d0407baSopenharmony_ci{
19453d0407baSopenharmony_ci    int cpu;
19463d0407baSopenharmony_ci
19473d0407baSopenharmony_ci    /* Roll back the already executed steps on the other cpus */
19483d0407baSopenharmony_ci    for_each_present_cpu(cpu)
19493d0407baSopenharmony_ci    {
19503d0407baSopenharmony_ci        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
19513d0407baSopenharmony_ci        int cpustate = st->state;
19523d0407baSopenharmony_ci
19533d0407baSopenharmony_ci        if (cpu >= failedcpu) {
19543d0407baSopenharmony_ci            break;
19553d0407baSopenharmony_ci        }
19563d0407baSopenharmony_ci
19573d0407baSopenharmony_ci        /* Did we invoke the startup call on that cpu ? */
19583d0407baSopenharmony_ci        if (cpustate >= state) {
19593d0407baSopenharmony_ci            cpuhp_issue_call(cpu, state, false, node);
19603d0407baSopenharmony_ci        }
19613d0407baSopenharmony_ci    }
19623d0407baSopenharmony_ci}
19633d0407baSopenharmony_ci
19643d0407baSopenharmony_ciint __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, struct hlist_node *node, bool invoke)
19653d0407baSopenharmony_ci{
19663d0407baSopenharmony_ci    struct cpuhp_step *sp;
19673d0407baSopenharmony_ci    int cpu;
19683d0407baSopenharmony_ci    int ret;
19693d0407baSopenharmony_ci
19703d0407baSopenharmony_ci    lockdep_assert_cpus_held();
19713d0407baSopenharmony_ci
19723d0407baSopenharmony_ci    sp = cpuhp_get_step(state);
19733d0407baSopenharmony_ci    if (sp->multi_instance == false) {
19743d0407baSopenharmony_ci        return -EINVAL;
19753d0407baSopenharmony_ci    }
19763d0407baSopenharmony_ci
19773d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
19783d0407baSopenharmony_ci
19793d0407baSopenharmony_ci    if (!invoke || !sp->startup.multi) {
19803d0407baSopenharmony_ci        goto add_node;
19813d0407baSopenharmony_ci    }
19823d0407baSopenharmony_ci
19833d0407baSopenharmony_ci    /*
19843d0407baSopenharmony_ci     * Try to call the startup callback for each present cpu
19853d0407baSopenharmony_ci     * depending on the hotplug state of the cpu.
19863d0407baSopenharmony_ci     */
19873d0407baSopenharmony_ci    for_each_present_cpu(cpu)
19883d0407baSopenharmony_ci    {
19893d0407baSopenharmony_ci        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
19903d0407baSopenharmony_ci        int cpustate = st->state;
19913d0407baSopenharmony_ci
19923d0407baSopenharmony_ci        if (cpustate < state) {
19933d0407baSopenharmony_ci            continue;
19943d0407baSopenharmony_ci        }
19953d0407baSopenharmony_ci
19963d0407baSopenharmony_ci        ret = cpuhp_issue_call(cpu, state, true, node);
19973d0407baSopenharmony_ci        if (ret) {
19983d0407baSopenharmony_ci            if (sp->teardown.multi) {
19993d0407baSopenharmony_ci                cpuhp_rollback_install(cpu, state, node);
20003d0407baSopenharmony_ci            }
20013d0407baSopenharmony_ci            goto unlock;
20023d0407baSopenharmony_ci        }
20033d0407baSopenharmony_ci    }
20043d0407baSopenharmony_ciadd_node:
20053d0407baSopenharmony_ci    ret = 0;
20063d0407baSopenharmony_ci    hlist_add_head(node, &sp->list);
20073d0407baSopenharmony_ciunlock:
20083d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
20093d0407baSopenharmony_ci    return ret;
20103d0407baSopenharmony_ci}
20113d0407baSopenharmony_ci
20123d0407baSopenharmony_ciint __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke)
20133d0407baSopenharmony_ci{
20143d0407baSopenharmony_ci    int ret;
20153d0407baSopenharmony_ci
20163d0407baSopenharmony_ci    cpus_read_lock();
20173d0407baSopenharmony_ci    ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
20183d0407baSopenharmony_ci    cpus_read_unlock();
20193d0407baSopenharmony_ci    return ret;
20203d0407baSopenharmony_ci}
20213d0407baSopenharmony_ciEXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
20223d0407baSopenharmony_ci
20233d0407baSopenharmony_ci/**
20243d0407baSopenharmony_ci * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
20253d0407baSopenharmony_ci * @state:        The state to setup
20263d0407baSopenharmony_ci * @invoke:        If true, the startup function is invoked for cpus where
20273d0407baSopenharmony_ci *            cpu state >= @state
20283d0407baSopenharmony_ci * @startup:        startup callback function
20293d0407baSopenharmony_ci * @teardown:        teardown callback function
20303d0407baSopenharmony_ci * @multi_instance:    State is set up for multiple instances which get
20313d0407baSopenharmony_ci *            added afterwards.
20323d0407baSopenharmony_ci *
20333d0407baSopenharmony_ci * The caller needs to hold cpus read locked while calling this function.
20343d0407baSopenharmony_ci * Returns:
20353d0407baSopenharmony_ci *   On success:
20363d0407baSopenharmony_ci *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
20373d0407baSopenharmony_ci *      0 for all other states
20383d0407baSopenharmony_ci *   On failure: proper (negative) error code
20393d0407baSopenharmony_ci */
20403d0407baSopenharmony_ciint __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, bool invoke,
20413d0407baSopenharmony_ci                                   int (*startup)(unsigned int cpu), int (*teardown)(unsigned int cpu),
20423d0407baSopenharmony_ci                                   bool multi_instance)
20433d0407baSopenharmony_ci{
20443d0407baSopenharmony_ci    int cpu, ret = 0;
20453d0407baSopenharmony_ci    bool dynstate;
20463d0407baSopenharmony_ci
20473d0407baSopenharmony_ci    lockdep_assert_cpus_held();
20483d0407baSopenharmony_ci
20493d0407baSopenharmony_ci    if (cpuhp_cb_check(state) || !name) {
20503d0407baSopenharmony_ci        return -EINVAL;
20513d0407baSopenharmony_ci    }
20523d0407baSopenharmony_ci
20533d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
20543d0407baSopenharmony_ci
20553d0407baSopenharmony_ci    ret = cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
20563d0407baSopenharmony_ci
20573d0407baSopenharmony_ci    dynstate = state == CPUHP_AP_ONLINE_DYN;
20583d0407baSopenharmony_ci    if (ret > 0 && dynstate) {
20593d0407baSopenharmony_ci        state = ret;
20603d0407baSopenharmony_ci        ret = 0;
20613d0407baSopenharmony_ci    }
20623d0407baSopenharmony_ci
20633d0407baSopenharmony_ci    if (ret || !invoke || !startup) {
20643d0407baSopenharmony_ci        goto out;
20653d0407baSopenharmony_ci    }
20663d0407baSopenharmony_ci
20673d0407baSopenharmony_ci    /*
20683d0407baSopenharmony_ci     * Try to call the startup callback for each present cpu
20693d0407baSopenharmony_ci     * depending on the hotplug state of the cpu.
20703d0407baSopenharmony_ci     */
20713d0407baSopenharmony_ci    for_each_present_cpu(cpu)
20723d0407baSopenharmony_ci    {
20733d0407baSopenharmony_ci        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
20743d0407baSopenharmony_ci        int cpustate = st->state;
20753d0407baSopenharmony_ci
20763d0407baSopenharmony_ci        if (cpustate < state) {
20773d0407baSopenharmony_ci            continue;
20783d0407baSopenharmony_ci        }
20793d0407baSopenharmony_ci
20803d0407baSopenharmony_ci        ret = cpuhp_issue_call(cpu, state, true, NULL);
20813d0407baSopenharmony_ci        if (ret) {
20823d0407baSopenharmony_ci            if (teardown) {
20833d0407baSopenharmony_ci                cpuhp_rollback_install(cpu, state, NULL);
20843d0407baSopenharmony_ci            }
20853d0407baSopenharmony_ci            cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
20863d0407baSopenharmony_ci            goto out;
20873d0407baSopenharmony_ci        }
20883d0407baSopenharmony_ci    }
20893d0407baSopenharmony_ciout:
20903d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
20913d0407baSopenharmony_ci    /*
20923d0407baSopenharmony_ci     * If the requested state is CPUHP_AP_ONLINE_DYN, return the
20933d0407baSopenharmony_ci     * dynamically allocated state in case of success.
20943d0407baSopenharmony_ci     */
20953d0407baSopenharmony_ci    if (!ret && dynstate) {
20963d0407baSopenharmony_ci        return state;
20973d0407baSopenharmony_ci    }
20983d0407baSopenharmony_ci    return ret;
20993d0407baSopenharmony_ci}
21003d0407baSopenharmony_ciEXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
21013d0407baSopenharmony_ci
21023d0407baSopenharmony_ciint __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, int (*startup)(unsigned int cpu),
21033d0407baSopenharmony_ci                        int (*teardown)(unsigned int cpu), bool multi_instance)
21043d0407baSopenharmony_ci{
21053d0407baSopenharmony_ci    int ret;
21063d0407baSopenharmony_ci
21073d0407baSopenharmony_ci    cpus_read_lock();
21083d0407baSopenharmony_ci    ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup, teardown, multi_instance);
21093d0407baSopenharmony_ci    cpus_read_unlock();
21103d0407baSopenharmony_ci    return ret;
21113d0407baSopenharmony_ci}
21123d0407baSopenharmony_ciEXPORT_SYMBOL(__cpuhp_setup_state);
21133d0407baSopenharmony_ci
21143d0407baSopenharmony_ciint __cpuhp_state_remove_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke)
21153d0407baSopenharmony_ci{
21163d0407baSopenharmony_ci    struct cpuhp_step *sp = cpuhp_get_step(state);
21173d0407baSopenharmony_ci    int cpu;
21183d0407baSopenharmony_ci
21193d0407baSopenharmony_ci    BUG_ON(cpuhp_cb_check(state));
21203d0407baSopenharmony_ci
21213d0407baSopenharmony_ci    if (!sp->multi_instance) {
21223d0407baSopenharmony_ci        return -EINVAL;
21233d0407baSopenharmony_ci    }
21243d0407baSopenharmony_ci
21253d0407baSopenharmony_ci    cpus_read_lock();
21263d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
21273d0407baSopenharmony_ci
21283d0407baSopenharmony_ci    if (!invoke || !cpuhp_get_teardown_cb(state)) {
21293d0407baSopenharmony_ci        goto remove;
21303d0407baSopenharmony_ci    }
21313d0407baSopenharmony_ci    /*
21323d0407baSopenharmony_ci     * Call the teardown callback for each present cpu depending
21333d0407baSopenharmony_ci     * on the hotplug state of the cpu. This function is not
21343d0407baSopenharmony_ci     * allowed to fail currently!
21353d0407baSopenharmony_ci     */
21363d0407baSopenharmony_ci    for_each_present_cpu(cpu)
21373d0407baSopenharmony_ci    {
21383d0407baSopenharmony_ci        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
21393d0407baSopenharmony_ci        int cpustate = st->state;
21403d0407baSopenharmony_ci
21413d0407baSopenharmony_ci        if (cpustate >= state) {
21423d0407baSopenharmony_ci            cpuhp_issue_call(cpu, state, false, node);
21433d0407baSopenharmony_ci        }
21443d0407baSopenharmony_ci    }
21453d0407baSopenharmony_ci
21463d0407baSopenharmony_ciremove:
21473d0407baSopenharmony_ci    hlist_del(node);
21483d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
21493d0407baSopenharmony_ci    cpus_read_unlock();
21503d0407baSopenharmony_ci
21513d0407baSopenharmony_ci    return 0;
21523d0407baSopenharmony_ci}
21533d0407baSopenharmony_ciEXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
21543d0407baSopenharmony_ci
21553d0407baSopenharmony_ci/**
21563d0407baSopenharmony_ci * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
21573d0407baSopenharmony_ci * @state:    The state to remove
21583d0407baSopenharmony_ci * @invoke:    If true, the teardown function is invoked for cpus where
21593d0407baSopenharmony_ci *        cpu state >= @state
21603d0407baSopenharmony_ci *
21613d0407baSopenharmony_ci * The caller needs to hold cpus read locked while calling this function.
21623d0407baSopenharmony_ci * The teardown callback is currently not allowed to fail. Think
21633d0407baSopenharmony_ci * about module removal!
21643d0407baSopenharmony_ci */
21653d0407baSopenharmony_civoid __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
21663d0407baSopenharmony_ci{
21673d0407baSopenharmony_ci    struct cpuhp_step *sp = cpuhp_get_step(state);
21683d0407baSopenharmony_ci    int cpu;
21693d0407baSopenharmony_ci
21703d0407baSopenharmony_ci    BUG_ON(cpuhp_cb_check(state));
21713d0407baSopenharmony_ci
21723d0407baSopenharmony_ci    lockdep_assert_cpus_held();
21733d0407baSopenharmony_ci
21743d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
21753d0407baSopenharmony_ci    if (sp->multi_instance) {
21763d0407baSopenharmony_ci        WARN(!hlist_empty(&sp->list), "Error: Removing state %d which has instances left.\n", state);
21773d0407baSopenharmony_ci        goto remove;
21783d0407baSopenharmony_ci    }
21793d0407baSopenharmony_ci
21803d0407baSopenharmony_ci    if (!invoke || !cpuhp_get_teardown_cb(state)) {
21813d0407baSopenharmony_ci        goto remove;
21823d0407baSopenharmony_ci    }
21833d0407baSopenharmony_ci
21843d0407baSopenharmony_ci    /*
21853d0407baSopenharmony_ci     * Call the teardown callback for each present cpu depending
21863d0407baSopenharmony_ci     * on the hotplug state of the cpu. This function is not
21873d0407baSopenharmony_ci     * allowed to fail currently!
21883d0407baSopenharmony_ci     */
21893d0407baSopenharmony_ci    for_each_present_cpu(cpu)
21903d0407baSopenharmony_ci    {
21913d0407baSopenharmony_ci        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
21923d0407baSopenharmony_ci        int cpustate = st->state;
21933d0407baSopenharmony_ci
21943d0407baSopenharmony_ci        if (cpustate >= state) {
21953d0407baSopenharmony_ci            cpuhp_issue_call(cpu, state, false, NULL);
21963d0407baSopenharmony_ci        }
21973d0407baSopenharmony_ci    }
21983d0407baSopenharmony_ciremove:
21993d0407baSopenharmony_ci    cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
22003d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
22013d0407baSopenharmony_ci}
22023d0407baSopenharmony_ciEXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
22033d0407baSopenharmony_ci
22043d0407baSopenharmony_civoid __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
22053d0407baSopenharmony_ci{
22063d0407baSopenharmony_ci    cpus_read_lock();
22073d0407baSopenharmony_ci    __cpuhp_remove_state_cpuslocked(state, invoke);
22083d0407baSopenharmony_ci    cpus_read_unlock();
22093d0407baSopenharmony_ci}
22103d0407baSopenharmony_ciEXPORT_SYMBOL(__cpuhp_remove_state);
22113d0407baSopenharmony_ci
22123d0407baSopenharmony_ci#ifdef CONFIG_HOTPLUG_SMT
22133d0407baSopenharmony_cistatic void cpuhp_offline_cpu_device(unsigned int cpu)
22143d0407baSopenharmony_ci{
22153d0407baSopenharmony_ci    struct device *dev = get_cpu_device(cpu);
22163d0407baSopenharmony_ci
22173d0407baSopenharmony_ci    dev->offline = true;
22183d0407baSopenharmony_ci    /* Tell user space about the state change */
22193d0407baSopenharmony_ci    kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
22203d0407baSopenharmony_ci}
22213d0407baSopenharmony_ci
22223d0407baSopenharmony_cistatic void cpuhp_online_cpu_device(unsigned int cpu)
22233d0407baSopenharmony_ci{
22243d0407baSopenharmony_ci    struct device *dev = get_cpu_device(cpu);
22253d0407baSopenharmony_ci
22263d0407baSopenharmony_ci    dev->offline = false;
22273d0407baSopenharmony_ci    /* Tell user space about the state change */
22283d0407baSopenharmony_ci    kobject_uevent(&dev->kobj, KOBJ_ONLINE);
22293d0407baSopenharmony_ci}
22303d0407baSopenharmony_ci
22313d0407baSopenharmony_ciint cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
22323d0407baSopenharmony_ci{
22333d0407baSopenharmony_ci    int cpu, ret = 0;
22343d0407baSopenharmony_ci
22353d0407baSopenharmony_ci    cpu_maps_update_begin();
22363d0407baSopenharmony_ci    for_each_online_cpu(cpu)
22373d0407baSopenharmony_ci    {
22383d0407baSopenharmony_ci        if (topology_is_primary_thread(cpu)) {
22393d0407baSopenharmony_ci            continue;
22403d0407baSopenharmony_ci        }
22413d0407baSopenharmony_ci        ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
22423d0407baSopenharmony_ci        if (ret) {
22433d0407baSopenharmony_ci            break;
22443d0407baSopenharmony_ci        }
22453d0407baSopenharmony_ci        /*
22463d0407baSopenharmony_ci         * As this needs to hold the cpu maps lock it's impossible
22473d0407baSopenharmony_ci         * to call device_offline() because that ends up calling
22483d0407baSopenharmony_ci         * cpu_down() which takes cpu maps lock. cpu maps lock
22493d0407baSopenharmony_ci         * needs to be held as this might race against in kernel
22503d0407baSopenharmony_ci         * abusers of the hotplug machinery (thermal management).
22513d0407baSopenharmony_ci         *
22523d0407baSopenharmony_ci         * So nothing would update device:offline state. That would
22533d0407baSopenharmony_ci         * leave the sysfs entry stale and prevent onlining after
22543d0407baSopenharmony_ci         * smt control has been changed to 'off' again. This is
22553d0407baSopenharmony_ci         * called under the sysfs hotplug lock, so it is properly
22563d0407baSopenharmony_ci         * serialized against the regular offline usage.
22573d0407baSopenharmony_ci         */
22583d0407baSopenharmony_ci        cpuhp_offline_cpu_device(cpu);
22593d0407baSopenharmony_ci    }
22603d0407baSopenharmony_ci    if (!ret) {
22613d0407baSopenharmony_ci        cpu_smt_control = ctrlval;
22623d0407baSopenharmony_ci    }
22633d0407baSopenharmony_ci    cpu_maps_update_done();
22643d0407baSopenharmony_ci    return ret;
22653d0407baSopenharmony_ci}
22663d0407baSopenharmony_ci
22673d0407baSopenharmony_ciint cpuhp_smt_enable(void)
22683d0407baSopenharmony_ci{
22693d0407baSopenharmony_ci    int cpu, ret = 0;
22703d0407baSopenharmony_ci
22713d0407baSopenharmony_ci    cpu_maps_update_begin();
22723d0407baSopenharmony_ci    cpu_smt_control = CPU_SMT_ENABLED;
22733d0407baSopenharmony_ci    for_each_present_cpu(cpu)
22743d0407baSopenharmony_ci    {
22753d0407baSopenharmony_ci        /* Skip online CPUs and CPUs on offline nodes */
22763d0407baSopenharmony_ci        if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) {
22773d0407baSopenharmony_ci            continue;
22783d0407baSopenharmony_ci        }
22793d0407baSopenharmony_ci        ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
22803d0407baSopenharmony_ci        if (ret) {
22813d0407baSopenharmony_ci            break;
22823d0407baSopenharmony_ci        }
22833d0407baSopenharmony_ci        /* See comment in cpuhp_smt_disable() */
22843d0407baSopenharmony_ci        cpuhp_online_cpu_device(cpu);
22853d0407baSopenharmony_ci    }
22863d0407baSopenharmony_ci    cpu_maps_update_done();
22873d0407baSopenharmony_ci    return ret;
22883d0407baSopenharmony_ci}
22893d0407baSopenharmony_ci#endif
22903d0407baSopenharmony_ci
22913d0407baSopenharmony_ci#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
22923d0407baSopenharmony_cistatic ssize_t show_cpuhp_state(struct device *dev, struct device_attribute *attr, char *buf)
22933d0407baSopenharmony_ci{
22943d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
22953d0407baSopenharmony_ci
22963d0407baSopenharmony_ci    return sprintf(buf, "%d\n", st->state);
22973d0407baSopenharmony_ci}
22983d0407baSopenharmony_cistatic DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
22993d0407baSopenharmony_ci
23003d0407baSopenharmony_cistatic ssize_t write_cpuhp_target(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
23013d0407baSopenharmony_ci{
23023d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
23033d0407baSopenharmony_ci    struct cpuhp_step *sp;
23043d0407baSopenharmony_ci    int target, ret;
23053d0407baSopenharmony_ci
23063d0407baSopenharmony_ci    ret = kstrtoint(buf, 10, &target);
23073d0407baSopenharmony_ci    if (ret) {
23083d0407baSopenharmony_ci        return ret;
23093d0407baSopenharmony_ci    }
23103d0407baSopenharmony_ci
23113d0407baSopenharmony_ci#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
23123d0407baSopenharmony_ci    if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE) {
23133d0407baSopenharmony_ci        return -EINVAL;
23143d0407baSopenharmony_ci    }
23153d0407baSopenharmony_ci#else
23163d0407baSopenharmony_ci    if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE) {
23173d0407baSopenharmony_ci        return -EINVAL;
23183d0407baSopenharmony_ci    }
23193d0407baSopenharmony_ci#endif
23203d0407baSopenharmony_ci
23213d0407baSopenharmony_ci    ret = lock_device_hotplug_sysfs();
23223d0407baSopenharmony_ci    if (ret) {
23233d0407baSopenharmony_ci        return ret;
23243d0407baSopenharmony_ci    }
23253d0407baSopenharmony_ci
23263d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
23273d0407baSopenharmony_ci    sp = cpuhp_get_step(target);
23283d0407baSopenharmony_ci    ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
23293d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
23303d0407baSopenharmony_ci    if (ret) {
23313d0407baSopenharmony_ci        goto out;
23323d0407baSopenharmony_ci    }
23333d0407baSopenharmony_ci
23343d0407baSopenharmony_ci    if (st->state < target) {
23353d0407baSopenharmony_ci        ret = cpu_up(dev->id, target);
23363d0407baSopenharmony_ci    } else {
23373d0407baSopenharmony_ci        ret = cpu_down(dev->id, target);
23383d0407baSopenharmony_ci    }
23393d0407baSopenharmony_ciout:
23403d0407baSopenharmony_ci    unlock_device_hotplug();
23413d0407baSopenharmony_ci    return ret ? ret : count;
23423d0407baSopenharmony_ci}
23433d0407baSopenharmony_ci
23443d0407baSopenharmony_cistatic ssize_t show_cpuhp_target(struct device *dev, struct device_attribute *attr, char *buf)
23453d0407baSopenharmony_ci{
23463d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
23473d0407baSopenharmony_ci
23483d0407baSopenharmony_ci    return sprintf(buf, "%d\n", st->target);
23493d0407baSopenharmony_ci}
23503d0407baSopenharmony_cistatic DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
23513d0407baSopenharmony_ci
23523d0407baSopenharmony_cistatic ssize_t write_cpuhp_fail(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
23533d0407baSopenharmony_ci{
23543d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
23553d0407baSopenharmony_ci    struct cpuhp_step *sp;
23563d0407baSopenharmony_ci    int fail, ret;
23573d0407baSopenharmony_ci
23583d0407baSopenharmony_ci    ret = kstrtoint(buf, 10, &fail);
23593d0407baSopenharmony_ci    if (ret) {
23603d0407baSopenharmony_ci        return ret;
23613d0407baSopenharmony_ci    }
23623d0407baSopenharmony_ci
23633d0407baSopenharmony_ci    if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE) {
23643d0407baSopenharmony_ci        return -EINVAL;
23653d0407baSopenharmony_ci    }
23663d0407baSopenharmony_ci
23673d0407baSopenharmony_ci    /*
23683d0407baSopenharmony_ci     * Cannot fail STARTING/DYING callbacks.
23693d0407baSopenharmony_ci     */
23703d0407baSopenharmony_ci    if (cpuhp_is_atomic_state(fail)) {
23713d0407baSopenharmony_ci        return -EINVAL;
23723d0407baSopenharmony_ci    }
23733d0407baSopenharmony_ci
23743d0407baSopenharmony_ci    /*
23753d0407baSopenharmony_ci     * Cannot fail anything that doesn't have callbacks.
23763d0407baSopenharmony_ci     */
23773d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
23783d0407baSopenharmony_ci    sp = cpuhp_get_step(fail);
23793d0407baSopenharmony_ci    if (!sp->startup.single && !sp->teardown.single) {
23803d0407baSopenharmony_ci        ret = -EINVAL;
23813d0407baSopenharmony_ci    }
23823d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
23833d0407baSopenharmony_ci    if (ret) {
23843d0407baSopenharmony_ci        return ret;
23853d0407baSopenharmony_ci    }
23863d0407baSopenharmony_ci
23873d0407baSopenharmony_ci    st->fail = fail;
23883d0407baSopenharmony_ci
23893d0407baSopenharmony_ci    return count;
23903d0407baSopenharmony_ci}
23913d0407baSopenharmony_ci
23923d0407baSopenharmony_cistatic ssize_t show_cpuhp_fail(struct device *dev, struct device_attribute *attr, char *buf)
23933d0407baSopenharmony_ci{
23943d0407baSopenharmony_ci    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
23953d0407baSopenharmony_ci
23963d0407baSopenharmony_ci    return sprintf(buf, "%d\n", st->fail);
23973d0407baSopenharmony_ci}
23983d0407baSopenharmony_ci
23993d0407baSopenharmony_cistatic DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
24003d0407baSopenharmony_ci
24013d0407baSopenharmony_cistatic struct attribute *cpuhp_cpu_attrs[] = {&dev_attr_state.attr, &dev_attr_target.attr, &dev_attr_fail.attr, NULL};
24023d0407baSopenharmony_ci
24033d0407baSopenharmony_cistatic const struct attribute_group cpuhp_cpu_attr_group = {.attrs = cpuhp_cpu_attrs, .name = "hotplug", NULL};
24043d0407baSopenharmony_ci
24053d0407baSopenharmony_cistatic ssize_t show_cpuhp_states(struct device *dev, struct device_attribute *attr, char *buf)
24063d0407baSopenharmony_ci{
24073d0407baSopenharmony_ci    ssize_t cur, res = 0;
24083d0407baSopenharmony_ci    int i;
24093d0407baSopenharmony_ci
24103d0407baSopenharmony_ci    mutex_lock(&cpuhp_state_mutex);
24113d0407baSopenharmony_ci    for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
24123d0407baSopenharmony_ci        struct cpuhp_step *sp = cpuhp_get_step(i);
24133d0407baSopenharmony_ci
24143d0407baSopenharmony_ci        if (sp->name) {
24153d0407baSopenharmony_ci            cur = sprintf(buf, "%3d: %s\n", i, sp->name);
24163d0407baSopenharmony_ci            buf += cur;
24173d0407baSopenharmony_ci            res += cur;
24183d0407baSopenharmony_ci        }
24193d0407baSopenharmony_ci    }
24203d0407baSopenharmony_ci    mutex_unlock(&cpuhp_state_mutex);
24213d0407baSopenharmony_ci    return res;
24223d0407baSopenharmony_ci}
/* /sys/devices/system/cpu/hotplug/states: read-only list of all named hotplug states. */
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

/* Attributes for the subsystem-wide "hotplug" group declared below. */
static struct attribute *cpuhp_cpu_root_attrs[] = {&dev_attr_states.attr, NULL};
24263d0407baSopenharmony_ci
24273d0407baSopenharmony_cistatic const struct attribute_group cpuhp_cpu_root_attr_group = {
24283d0407baSopenharmony_ci    .attrs = cpuhp_cpu_root_attrs, .name = "hotplug", NULL};
24293d0407baSopenharmony_ci
24303d0407baSopenharmony_ci#ifdef CONFIG_HOTPLUG_SMT
24313d0407baSopenharmony_ci
24323d0407baSopenharmony_cistatic ssize_t _store_smt_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
24333d0407baSopenharmony_ci{
24343d0407baSopenharmony_ci    int ctrlval, ret;
24353d0407baSopenharmony_ci
24363d0407baSopenharmony_ci    if (sysfs_streq(buf, "on")) {
24373d0407baSopenharmony_ci        ctrlval = CPU_SMT_ENABLED;
24383d0407baSopenharmony_ci    } else if (sysfs_streq(buf, "off")) {
24393d0407baSopenharmony_ci        ctrlval = CPU_SMT_DISABLED;
24403d0407baSopenharmony_ci    } else if (sysfs_streq(buf, "forceoff")) {
24413d0407baSopenharmony_ci        ctrlval = CPU_SMT_FORCE_DISABLED;
24423d0407baSopenharmony_ci    } else {
24433d0407baSopenharmony_ci        return -EINVAL;
24443d0407baSopenharmony_ci    }
24453d0407baSopenharmony_ci
24463d0407baSopenharmony_ci    if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
24473d0407baSopenharmony_ci        return -EPERM;
24483d0407baSopenharmony_ci    }
24493d0407baSopenharmony_ci
24503d0407baSopenharmony_ci    if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) {
24513d0407baSopenharmony_ci        return -ENODEV;
24523d0407baSopenharmony_ci    }
24533d0407baSopenharmony_ci
24543d0407baSopenharmony_ci    ret = lock_device_hotplug_sysfs();
24553d0407baSopenharmony_ci    if (ret) {
24563d0407baSopenharmony_ci        return ret;
24573d0407baSopenharmony_ci    }
24583d0407baSopenharmony_ci
24593d0407baSopenharmony_ci    if (ctrlval != cpu_smt_control) {
24603d0407baSopenharmony_ci        switch (ctrlval) {
24613d0407baSopenharmony_ci            case CPU_SMT_ENABLED:
24623d0407baSopenharmony_ci                ret = cpuhp_smt_enable();
24633d0407baSopenharmony_ci                break;
24643d0407baSopenharmony_ci            case CPU_SMT_DISABLED:
24653d0407baSopenharmony_ci            case CPU_SMT_FORCE_DISABLED:
24663d0407baSopenharmony_ci                ret = cpuhp_smt_disable(ctrlval);
24673d0407baSopenharmony_ci                break;
24683d0407baSopenharmony_ci        }
24693d0407baSopenharmony_ci    }
24703d0407baSopenharmony_ci
24713d0407baSopenharmony_ci    unlock_device_hotplug();
24723d0407baSopenharmony_ci    return ret ? ret : count;
24733d0407baSopenharmony_ci}
24743d0407baSopenharmony_ci
24753d0407baSopenharmony_ci#else  /* !CONFIG_HOTPLUG_SMT */
static ssize_t _store_smt_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
    /* SMT hotplug control is compiled out; reject every write attempt. */
    return -ENODEV;
}
24803d0407baSopenharmony_ci#endif /* CONFIG_HOTPLUG_SMT */
24813d0407baSopenharmony_ci
/* Human-readable names for the cpu_smt_control states, indexed by enum value. */
static const char *smt_states[] = {
    [CPU_SMT_ENABLED] = "on",
    [CPU_SMT_DISABLED] = "off",
    [CPU_SMT_FORCE_DISABLED] = "forceoff",
    [CPU_SMT_NOT_SUPPORTED] = "notsupported",
    [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
};
24893d0407baSopenharmony_ci
24903d0407baSopenharmony_cistatic ssize_t show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
24913d0407baSopenharmony_ci{
24923d0407baSopenharmony_ci    const char *state = smt_states[cpu_smt_control];
24933d0407baSopenharmony_ci
24943d0407baSopenharmony_ci    return snprintf(buf, PAGE_SIZE - CPU_PAGE_SIZE_OFF_TWO, "%s\n", state);
24953d0407baSopenharmony_ci}
24963d0407baSopenharmony_ci
24973d0407baSopenharmony_cistatic ssize_t store_smt_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
24983d0407baSopenharmony_ci{
24993d0407baSopenharmony_ci    return _store_smt_control(dev, attr, buf, count);
25003d0407baSopenharmony_ci}
25013d0407baSopenharmony_cistatic DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
25023d0407baSopenharmony_ci
25033d0407baSopenharmony_cistatic ssize_t show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
25043d0407baSopenharmony_ci{
25053d0407baSopenharmony_ci    return snprintf(buf, PAGE_SIZE - CPU_PAGE_SIZE_OFF_TWO, "%d\n", sched_smt_active());
25063d0407baSopenharmony_ci}
/* /sys/devices/system/cpu/smt/active: read-only scheduler-side SMT activity flag. */
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

/* Attributes grouped under the "smt" sysfs directory declared below. */
static struct attribute *cpuhp_smt_attrs[] = {&dev_attr_control.attr, &dev_attr_active.attr, NULL};
25103d0407baSopenharmony_ci
25113d0407baSopenharmony_cistatic const struct attribute_group cpuhp_smt_attr_group = {.attrs = cpuhp_smt_attrs, .name = "smt", NULL};
25123d0407baSopenharmony_ci
25133d0407baSopenharmony_cistatic int __init cpu_smt_sysfs_init(void)
25143d0407baSopenharmony_ci{
25153d0407baSopenharmony_ci    return sysfs_create_group(&cpu_subsys.dev_root->kobj, &cpuhp_smt_attr_group);
25163d0407baSopenharmony_ci}
25173d0407baSopenharmony_ci
25183d0407baSopenharmony_cistatic int __init cpuhp_sysfs_init(void)
25193d0407baSopenharmony_ci{
25203d0407baSopenharmony_ci    int cpu, ret;
25213d0407baSopenharmony_ci
25223d0407baSopenharmony_ci    ret = cpu_smt_sysfs_init();
25233d0407baSopenharmony_ci    if (ret) {
25243d0407baSopenharmony_ci        return ret;
25253d0407baSopenharmony_ci    }
25263d0407baSopenharmony_ci
25273d0407baSopenharmony_ci    ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, &cpuhp_cpu_root_attr_group);
25283d0407baSopenharmony_ci    if (ret) {
25293d0407baSopenharmony_ci        return ret;
25303d0407baSopenharmony_ci    }
25313d0407baSopenharmony_ci
25323d0407baSopenharmony_ci    for_each_possible_cpu(cpu)
25333d0407baSopenharmony_ci    {
25343d0407baSopenharmony_ci        struct device *dev = get_cpu_device(cpu);
25353d0407baSopenharmony_ci
25363d0407baSopenharmony_ci        if (!dev) {
25373d0407baSopenharmony_ci            continue;
25383d0407baSopenharmony_ci        }
25393d0407baSopenharmony_ci        ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
25403d0407baSopenharmony_ci        if (ret) {
25413d0407baSopenharmony_ci            return ret;
25423d0407baSopenharmony_ci        }
25433d0407baSopenharmony_ci    }
25443d0407baSopenharmony_ci    return 0;
25453d0407baSopenharmony_ci}
25463d0407baSopenharmony_cidevice_initcall(cpuhp_sysfs_init);
25473d0407baSopenharmony_ci#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
25483d0407baSopenharmony_ci
25493d0407baSopenharmony_ci/*
25503d0407baSopenharmony_ci * cpu_bit_bitmap[] is a special, "compressed" data structure that
25513d0407baSopenharmony_ci * represents all NR_CPUS bits binary values of 1<<nr.
25523d0407baSopenharmony_ci *
25533d0407baSopenharmony_ci * It is used by cpumask_of() to get a constant address to a CPU
25543d0407baSopenharmony_ci * mask value that has a single bit set only.
25553d0407baSopenharmony_ci */
25563d0407baSopenharmony_ci
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x) [(x) + 1][0] = (1UL << (x))
/* Each MASK_DECLARE_N(x) expands to N single-bit row initializers starting at bit x. */
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1((x) + 1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2((x) + 2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4((x) + 4)
25623d0407baSopenharmony_ci
/* Row 0 stays all-zero; row i+1 has exactly bit i set (see the comment above). */
const unsigned long cpu_bit_bitmap[BITS_PER_LONG + 1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0),  MASK_DECLARE_8(8),  MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    /* Rows for bits 32..63 exist only on 64-bit longs. */
    MASK_DECLARE_8(32), MASK_DECLARE_8(40), MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
25713d0407baSopenharmony_ci
/* Bitmap with a bit set for every CPU index in [0, NR_CPUS). */
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

/* CPUs that could ever exist in this boot; optionally pre-filled with all bits. */
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

/* CPUs currently online; kept in sync with __num_online_cpus by set_cpu_online(). */
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

/* CPUs physically present (populated). */
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

/* CPUs available to the scheduler. */
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

#ifdef CONFIG_CPU_ISOLATION_OPT
/* CPUs isolated by the optional isolation feature. */
struct cpumask __cpu_isolated_mask __read_mostly;
EXPORT_SYMBOL(__cpu_isolated_mask);
#endif

/* Online-CPU count; atomic because set_cpu_online() may run from IPI/NMI context. */
atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);
25983d0407baSopenharmony_ci
/* Overwrite the present-CPU mask wholesale with @src. */
void init_cpu_present(const struct cpumask *src)
{
    cpumask_copy(&__cpu_present_mask, src);
}

/* Overwrite the possible-CPU mask wholesale with @src. */
void init_cpu_possible(const struct cpumask *src)
{
    cpumask_copy(&__cpu_possible_mask, src);
}

/* Overwrite the online-CPU mask wholesale with @src.
 * NOTE(review): does not touch __num_online_cpus — presumably callers run
 * before the counter matters; confirm against call sites. */
void init_cpu_online(const struct cpumask *src)
{
    cpumask_copy(&__cpu_online_mask, src);
}

#ifdef CONFIG_CPU_ISOLATION_OPT
/* Overwrite the isolated-CPU mask wholesale with @src. */
void init_cpu_isolated(const struct cpumask *src)
{
    cpumask_copy(&__cpu_isolated_mask, src);
}
#endif
26203d0407baSopenharmony_ci
26213d0407baSopenharmony_civoid set_cpu_online(unsigned int cpu, bool online)
26223d0407baSopenharmony_ci{
26233d0407baSopenharmony_ci    /*
26243d0407baSopenharmony_ci     * atomic_inc/dec() is required to handle the horrid abuse of this
26253d0407baSopenharmony_ci     * function by the reboot and kexec code which invoke it from
26263d0407baSopenharmony_ci     * IPI/NMI broadcasts when shutting down CPUs. Invocation from
26273d0407baSopenharmony_ci     * regular CPU hotplug is properly serialized.
26283d0407baSopenharmony_ci     *
26293d0407baSopenharmony_ci     * Note, that the fact that __num_online_cpus is of type atomic_t
26303d0407baSopenharmony_ci     * does not protect readers which are not serialized against
26313d0407baSopenharmony_ci     * concurrent hotplug operations.
26323d0407baSopenharmony_ci     */
26333d0407baSopenharmony_ci    if (online) {
26343d0407baSopenharmony_ci        if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask)) {
26353d0407baSopenharmony_ci            atomic_inc(&__num_online_cpus);
26363d0407baSopenharmony_ci        }
26373d0407baSopenharmony_ci    } else {
26383d0407baSopenharmony_ci        if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask)) {
26393d0407baSopenharmony_ci            atomic_dec(&__num_online_cpus);
26403d0407baSopenharmony_ci        }
26413d0407baSopenharmony_ci    }
26423d0407baSopenharmony_ci}
26433d0407baSopenharmony_ci
26443d0407baSopenharmony_ci/*
26453d0407baSopenharmony_ci * Activate the first processor.
26463d0407baSopenharmony_ci */
26473d0407baSopenharmony_civoid __init boot_cpu_init(void)
26483d0407baSopenharmony_ci{
26493d0407baSopenharmony_ci    int cpu = smp_processor_id();
26503d0407baSopenharmony_ci
26513d0407baSopenharmony_ci    /* Mark the boot cpu "present", "online" etc for SMP and UP case */
26523d0407baSopenharmony_ci    set_cpu_online(cpu, true);
26533d0407baSopenharmony_ci    set_cpu_active(cpu, true);
26543d0407baSopenharmony_ci    set_cpu_present(cpu, true);
26553d0407baSopenharmony_ci    set_cpu_possible(cpu, true);
26563d0407baSopenharmony_ci
26573d0407baSopenharmony_ci#ifdef CONFIG_SMP
26583d0407baSopenharmony_ci    __boot_cpu_id = cpu;
26593d0407baSopenharmony_ci#endif
26603d0407baSopenharmony_ci}
26613d0407baSopenharmony_ci
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
    /* Record this CPU in cpus_booted_once_mask. */
    cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
#endif
    /* The boot CPU starts out fully online as far as the hotplug state machine goes. */
    this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
26723d0407baSopenharmony_ci
/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
    CPU_MITIGATIONS_OFF,        /* "mitigations=off": disable all optional mitigations */
    CPU_MITIGATIONS_AUTO,       /* "mitigations=auto": default mitigation selection */
    CPU_MITIGATIONS_AUTO_NOSMT, /* "mitigations=auto,nosmt": auto, additionally without SMT */
};

/* Current global setting; written only during early boot parsing, hence __ro_after_init. */
static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
26843d0407baSopenharmony_ci
26853d0407baSopenharmony_cistatic int __init mitigations_parse_cmdline(char *arg)
26863d0407baSopenharmony_ci{
26873d0407baSopenharmony_ci    if (!strcmp(arg, "off")) {
26883d0407baSopenharmony_ci        cpu_mitigations = CPU_MITIGATIONS_OFF;
26893d0407baSopenharmony_ci    } else if (!strcmp(arg, "auto")) {
26903d0407baSopenharmony_ci        cpu_mitigations = CPU_MITIGATIONS_AUTO;
26913d0407baSopenharmony_ci    } else if (!strcmp(arg, "auto,nosmt")) {
26923d0407baSopenharmony_ci        cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
26933d0407baSopenharmony_ci    } else {
26943d0407baSopenharmony_ci        pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", arg);
26953d0407baSopenharmony_ci    }
26963d0407baSopenharmony_ci
26973d0407baSopenharmony_ci    return 0;
26983d0407baSopenharmony_ci}
26993d0407baSopenharmony_ciearly_param("mitigations", mitigations_parse_cmdline);
27003d0407baSopenharmony_ci
/* mitigations=off */
/* True when the "mitigations=off" boot option disabled all optional mitigations. */
bool cpu_mitigations_off(void)
{
    return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);
27073d0407baSopenharmony_ci
/* mitigations=auto,nosmt */
/* True when "mitigations=auto,nosmt" was requested on the command line. */
bool cpu_mitigations_auto_nosmt(void)
{
    return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
2714