Lines Matching refs:state

50  * cpuhp_cpu_state - Per cpu hotplug state storage
51 * @state: The current cpu state
52 * @target: The target state
58 * @cb_state: The state for a single callback (install/uninstall)
64 enum cpuhp_state state;
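For orientation, a condensed sketch of the per-cpu bookkeeping structure, reconstructed only from the fields referenced in the matches below (the real struct carries additional members such as the thread pointer and completions; this is not the authoritative layout):

struct cpuhp_cpu_state {
	enum cpuhp_state	state;		/* current state of this CPU */
	enum cpuhp_state	target;		/* state the CPU should reach */
	enum cpuhp_state	fail;		/* state whose callback is forced to fail (testing) */
	bool			should_run;	/* signals the AP thread that there is work */
	bool			single;		/* run a single callback instead of a range */
	bool			bringup;	/* direction of the current run: up or down */
	struct hlist_node	*node;		/* multi-instance node being processed */
	struct hlist_node	*last;		/* rollback bookmark for multi-instance callbacks */
	enum cpuhp_state	cb_state;	/* state for a single callback (install/uninstall) */
	int			result;		/* result handed back to the requesting side */
};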
115 * cpuhp_step - Hotplug state machine step
139 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
141 return cpuhp_hp_states + state;
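cpuhp_get_step() is plain array indexing into the static cpuhp_hp_states[] table; each entry is a struct cpuhp_step describing one state. A rough sketch of that step descriptor, as implied by the fields used further down (the startup/teardown callbacks, the instance list, the multi_instance flag); the exact layout may vary between kernel versions:

struct cpuhp_step {
	const char	*name;			/* shown in sysfs and traces */
	union {
		int	(*single)(unsigned int cpu);
		int	(*multi)(unsigned int cpu, struct hlist_node *node);
	} startup;				/* bringup callback */
	union {
		int	(*single)(unsigned int cpu);
		int	(*multi)(unsigned int cpu, struct hlist_node *node);
	} teardown;				/* teardown callback */
	struct hlist_head list;			/* registered instances (multi-instance states) */
	bool		cant_stop;		/* state must not be stopped mid-transition */
	bool		multi_instance;		/* callbacks take a per-instance node */
};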
145 * cpuhp_invoke_callback - Invoke the callbacks for a given state
147 * @state: The state to do callbacks for
152 * Called from cpu hotplug and from the state register machinery.
154 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node,
158 struct cpuhp_step *step = cpuhp_get_step(state);
163 if (st->fail == state) {
179 trace_cpuhp_enter(cpu, st->target, state, cb);
181 trace_cpuhp_exit(cpu, st->state, state, ret);
192 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
194 trace_cpuhp_exit(cpu, st->state, state, ret);
206 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
208 trace_cpuhp_exit(cpu, st->state, state, ret);
236 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
238 trace_cpuhp_exit(cpu, st->state, state, ret);
248 static bool cpuhp_is_ap_state(enum cpuhp_state state)
252 * purposes as that state is handled explicitly in cpu_down.
254 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
272 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
274 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
482 enum cpuhp_state prev_state = st->state;
489 st->bringup = st->state < target;
500 * state first. Otherwise start undo at the previous state.
504 st->state--;
506 st->state++;
517 if (!st->single && st->state == st->target) {
562 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
584 * Reset stale stack state from the last time this CPU was online.
612 * clean up any remaining active_mm state.
622 * Hotplug state machine related functions
627 for (st->state--; st->state > st->target; st->state--) {
628 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
642 * in the current state.
644 return st->state <= CPUHP_BRINGUP_CPU;
649 enum cpuhp_state prev_state = st->state;
652 while (st->state < target) {
653 st->state++;
654 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
686 * callbacks when a state gets [un]installed at runtime.
689 * state callback.
693 * - up: runs ++st->state, while st->state < st->target
694 * - down: runs st->state--, while st->state > st->target
702 enum cpuhp_state state;
710 * that if we see ->should_run we also see the rest of the state.
723 state = st->cb_state;
727 st->state++;
728 state = st->state;
729 st->should_run = (st->state < st->target);
730 WARN_ON_ONCE(st->state > st->target);
732 state = st->state;
733 st->state--;
734 st->should_run = (st->state > st->target);
735 WARN_ON_ONCE(st->state < st->target);
739 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
741 if (cpuhp_is_atomic_state(state)) {
743 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
751 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
773 static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node)
793 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
801 st->cb_state = state;
827 enum cpuhp_state prev_state = st->state;
838 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
866 * removing of state callbacks and state instances, which invoke either the
867 * startup or the teardown callback of the affected state.
889 * returns with inconsistent state, which could even be observed in
895 * necessarily a visible issue, but it is still inconsistent state,
897 * prevent the transient state between scheduling the work and
968 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
971 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
972 st->state--;
974 for (; st->state > target; st->state--) {
975 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
1026 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1051 BUG_ON(st->state != CPUHP_AP_OFFLINE);
1053 st->state = CPUHP_AP_IDLE_DEAD;
1063 for (st->state++; st->state < st->target; st->state++) {
1064 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1070 enum cpuhp_state prev_state = st->state;
1073 for (; st->state > target; st->state--) {
1074 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
1077 if (st->state < prev_state) {
1112 * If the current CPU state is in the range of the AP hotplug thread,
1115 if (st->state > CPUHP_TEARDOWN_CPU) {
1130 if (st->state > CPUHP_TEARDOWN_CPU) {
1141 if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1264 while (st->state < target) {
1265 st->state++;
1266 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1279 void cpuhp_online_idle(enum cpuhp_state state)
1284 if (state != CPUHP_AP_ONLINE_IDLE) {
1294 st->state = CPUHP_AP_ONLINE_IDLE;
1316 if (st->state >= target) {
1320 if (st->state == CPUHP_OFFLINE) {
1333 * If the current CPU state is in the range of the AP hotplug thread,
1336 if (st->state > CPUHP_BRINGUP_CPU) {
1348 * Try to reach the target state. We max out on the BP at
1350 * responsible for bringing it up to the target state.
1577 * ensure that the state of the system with respect to the tasks being frozen
1624 /* Boot processor state steps */
1707 /* Final state before CPU kills itself */
1713 * Last state before CPU enters the idle loop to die. Transient state
1721 /* First state is scheduler control. Interrupts are disabled */
1740 /* Entry state on starting. Interrupts enabled from here on. Transient
1741 * state for synchronization */
1801 * The dynamically registered state space is here
1805 /* Last state is scheduler control setting the cpu active */
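For illustration, a static entry in that table pairs a state with its callbacks like this (the state constant and callback names below are made up; only the initializer layout follows the real table):

	[CPUHP_EXAMPLE_PREPARE] = {				/* hypothetical state */
		.name			= "example:prepare",
		.startup.single		= example_prepare_cpu,	/* hypothetical callback */
		.teardown.single	= example_dead_cpu,	/* hypothetical callback */
	},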
1824 static int cpuhp_cb_check(enum cpuhp_state state)
1826 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE) {
1833 * Returns a free slot for dynamic assignment of the Online state. The states
1837 static int cpuhp_reserve_state(enum cpuhp_state state)
1842 switch (state) {
1855 for (i = state; i <= end; i++, step++) {
1864 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu),
1872 * If name is NULL, then the state gets removed.
1877 * empty) state, leaving the callbacks of the to be cleared state
1880 if (name && (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN)) {
1881 ret = cpuhp_reserve_state(state);
1885 state = ret;
1887 sp = cpuhp_get_step(state);
1900 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1902 return cpuhp_get_step(state)->teardown.single;
1909 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node)
1911 struct cpuhp_step *sp = cpuhp_get_step(state);
1926 if (cpuhp_is_ap_state(state)) {
1927 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1929 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1932 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1943 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, struct hlist_node *node)
1951 int cpustate = st->state;
1958 if (cpustate >= state) {
1959 cpuhp_issue_call(cpu, state, false, node);
1964 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, struct hlist_node *node, bool invoke)
1972 sp = cpuhp_get_step(state);
1985 * depending on the hotplug state of the cpu.
1990 int cpustate = st->state;
1992 if (cpustate < state) {
1996 ret = cpuhp_issue_call(cpu, state, true, node);
1999 cpuhp_rollback_install(cpu, state, node);
2012 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke)
2017 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
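Drivers that manage several identical instances typically reach this code through the multi-instance wrappers declared in linux/cpuhotplug.h: the state is set up once at driver init, then one instance is added per device. A minimal sketch, assuming a hypothetical mydrv driver that embeds an hlist_node per device (cpuhp_setup_state_multi() and cpuhp_state_add_instance() are the public wrappers; everything prefixed mydrv_ is invented for the example):

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

struct mydrv_device {			/* hypothetical per-device structure */
	struct hlist_node node;		/* links the instance into the hotplug state */
};

static enum cpuhp_state mydrv_state;	/* dynamically allocated state number */

static int mydrv_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct mydrv_device *dev = hlist_entry(node, struct mydrv_device, node);

	pr_info("mydrv: bringup work for instance %p on CPU %u\n", dev, cpu);
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct mydrv_device *dev = hlist_entry(node, struct mydrv_device, node);

	pr_info("mydrv: teardown work for instance %p on CPU %u\n", dev, cpu);
	return 0;
}

static int mydrv_register(struct mydrv_device *dev)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				      mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	mydrv_state = ret;	/* CPUHP_AP_ONLINE_DYN returns the reserved slot */

	/* runs mydrv_cpu_online() for this node on every currently online CPU */
	return cpuhp_state_add_instance(mydrv_state, &dev->node);
}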
2024 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
2025 * @state: The state to setup
2027 * cpu state >= @state
2036 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
2040 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, bool invoke,
2049 if (cpuhp_cb_check(state) || !name) {
2055 ret = cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
2057 dynstate = state == CPUHP_AP_ONLINE_DYN;
2059 state = ret;
2069 * depending on the hotplug state of the cpu.
2074 int cpustate = st->state;
2076 if (cpustate < state) {
2080 ret = cpuhp_issue_call(cpu, state, true, NULL);
2083 cpuhp_rollback_install(cpu, state, NULL);
2085 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2092 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2093 * dynamically allocated state in case of success.
2096 return state;
2102 int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, int (*startup)(unsigned int cpu),
2108 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup, teardown, multi_instance);
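The usual consumer entry point is the cpuhp_setup_state() wrapper from linux/cpuhotplug.h, which ends up here. A minimal sketch of registering a dynamic online state (names prefixed mydrv_ are hypothetical):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state mydrv_online_state;	/* holds the dynamically assigned slot */

static int mydrv_cpu_up(unsigned int cpu)
{
	/* invoked for every online CPU at registration and for CPUs coming up later */
	return 0;
}

static int mydrv_cpu_down(unsigned int cpu)
{
	/* invoked when a CPU goes down past the state, and on state removal */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN: a free slot is reserved and returned as a positive number */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_up, mydrv_cpu_down);
	if (ret < 0)
		return ret;
	mydrv_online_state = ret;
	return 0;
}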
2114 int __cpuhp_state_remove_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke)
2116 struct cpuhp_step *sp = cpuhp_get_step(state);
2119 BUG_ON(cpuhp_cb_check(state));
2128 if (!invoke || !cpuhp_get_teardown_cb(state)) {
2133 * on the hotplug state of the cpu. This function is not
2139 int cpustate = st->state;
2141 if (cpustate >= state) {
2142 cpuhp_issue_call(cpu, state, false, node);
2156 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2157 * @state: The state to remove
2159 * cpu state >= @state
2165 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2167 struct cpuhp_step *sp = cpuhp_get_step(state);
2170 BUG_ON(cpuhp_cb_check(state));
2176 WARN(!hlist_empty(&sp->list), "Error: Removing state %d which has instances left.\n", state);
2180 if (!invoke || !cpuhp_get_teardown_cb(state)) {
2186 * on the hotplug state of the cpu. This function is not
2192 int cpustate = st->state;
2194 if (cpustate >= state) {
2195 cpuhp_issue_call(cpu, state, false, NULL);
2199 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2204 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2207 __cpuhp_remove_state_cpuslocked(state, invoke);
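Continuing the hypothetical mydrv sketch above, the symmetric teardown is a single call; cpuhp_remove_state() invokes the registered teardown callback on every online CPU before the slot is released:

static void __exit mydrv_exit(void)
{
	/* runs mydrv_cpu_down() on each online CPU, then frees the dynamic slot */
	cpuhp_remove_state(mydrv_online_state);
}

For the multi-instance variant, each registered node would first be dropped with cpuhp_state_remove_instance() before the state itself is removed.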
2218 /* Tell user space about the state change */
2227 /* Tell user space about the state change */
2252 * So nothing would update device:offline state. That would
2296 return sprintf(buf, "%d\n", st->state);
2298 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2334 if (st->state < target) {
2492 const char *state = smt_states[cpu_smt_control];
2494 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2670 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);