18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-or-later
28c2ecf20Sopenharmony_ci/*
38c2ecf20Sopenharmony_ci * coupled.c - helper functions to enter the same idle state on multiple cpus
48c2ecf20Sopenharmony_ci *
58c2ecf20Sopenharmony_ci * Copyright (c) 2011 Google, Inc.
68c2ecf20Sopenharmony_ci *
78c2ecf20Sopenharmony_ci * Author: Colin Cross <ccross@android.com>
88c2ecf20Sopenharmony_ci */
98c2ecf20Sopenharmony_ci
108c2ecf20Sopenharmony_ci#include <linux/kernel.h>
118c2ecf20Sopenharmony_ci#include <linux/cpu.h>
128c2ecf20Sopenharmony_ci#include <linux/cpuidle.h>
138c2ecf20Sopenharmony_ci#include <linux/mutex.h>
148c2ecf20Sopenharmony_ci#include <linux/sched.h>
158c2ecf20Sopenharmony_ci#include <linux/slab.h>
168c2ecf20Sopenharmony_ci#include <linux/spinlock.h>
178c2ecf20Sopenharmony_ci
188c2ecf20Sopenharmony_ci#include "cpuidle.h"
198c2ecf20Sopenharmony_ci
208c2ecf20Sopenharmony_ci/**
218c2ecf20Sopenharmony_ci * DOC: Coupled cpuidle states
228c2ecf20Sopenharmony_ci *
238c2ecf20Sopenharmony_ci * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
248c2ecf20Sopenharmony_ci * cpus cannot be independently powered down, either due to
258c2ecf20Sopenharmony_ci * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
268c2ecf20Sopenharmony_ci * power down), or due to HW bugs (on OMAP4460, a cpu powering up
278c2ecf20Sopenharmony_ci * will corrupt the gic state unless the other cpu runs a work
288c2ecf20Sopenharmony_ci * around).  Each cpu has a power state that it can enter without
298c2ecf20Sopenharmony_ci * coordinating with the other cpu (usually Wait For Interrupt, or
308c2ecf20Sopenharmony_ci * WFI), and one or more "coupled" power states that affect blocks
318c2ecf20Sopenharmony_ci * shared between the cpus (L2 cache, interrupt controller, and
328c2ecf20Sopenharmony_ci * sometimes the whole SoC).  Entering a coupled power state must
338c2ecf20Sopenharmony_ci * be tightly controlled on both cpus.
348c2ecf20Sopenharmony_ci *
358c2ecf20Sopenharmony_ci * This file implements a solution, where each cpu will wait in the
368c2ecf20Sopenharmony_ci * WFI state until all cpus are ready to enter a coupled state, at
378c2ecf20Sopenharmony_ci * which point the coupled state function will be called on all
388c2ecf20Sopenharmony_ci * cpus at approximately the same time.
398c2ecf20Sopenharmony_ci *
408c2ecf20Sopenharmony_ci * Once all cpus are ready to enter idle, they are woken by an smp
418c2ecf20Sopenharmony_ci * cross call.  At this point, there is a chance that one of the
428c2ecf20Sopenharmony_ci * cpus will find work to do, and choose not to enter idle.  A
438c2ecf20Sopenharmony_ci * final pass is needed to guarantee that all cpus will call the
448c2ecf20Sopenharmony_ci * power state enter function at the same time.  During this pass,
458c2ecf20Sopenharmony_ci * each cpu will increment the ready counter, and continue once the
468c2ecf20Sopenharmony_ci * ready counter matches the number of online coupled cpus.  If any
478c2ecf20Sopenharmony_ci * cpu exits idle, the other cpus will decrement their counter and
488c2ecf20Sopenharmony_ci * retry.
498c2ecf20Sopenharmony_ci *
508c2ecf20Sopenharmony_ci * requested_state stores the deepest coupled idle state each cpu
518c2ecf20Sopenharmony_ci * is ready for.  It is assumed that the states are indexed from
528c2ecf20Sopenharmony_ci * shallowest (highest power, lowest exit latency) to deepest
538c2ecf20Sopenharmony_ci * (lowest power, highest exit latency).  The requested_state
548c2ecf20Sopenharmony_ci * variable is not locked.  It is only written from the cpu that
558c2ecf20Sopenharmony_ci * it stores (or by the on/offlining cpu if that cpu is offline),
568c2ecf20Sopenharmony_ci * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
588c2ecf20Sopenharmony_ci *
598c2ecf20Sopenharmony_ci * Three atomic counters are used.  alive_count tracks the number
608c2ecf20Sopenharmony_ci * of cpus in the coupled set that are currently or soon will be
618c2ecf20Sopenharmony_ci * online.  waiting_count tracks the number of cpus that are in
628c2ecf20Sopenharmony_ci * the waiting loop, in the ready loop, or in the coupled idle state.
638c2ecf20Sopenharmony_ci * ready_count tracks the number of cpus that are in the ready loop
648c2ecf20Sopenharmony_ci * or in the coupled idle state.
658c2ecf20Sopenharmony_ci *
668c2ecf20Sopenharmony_ci * To use coupled cpuidle states, a cpuidle driver must:
678c2ecf20Sopenharmony_ci *
688c2ecf20Sopenharmony_ci *    Set struct cpuidle_device.coupled_cpus to the mask of all
698c2ecf20Sopenharmony_ci *    coupled cpus, usually the same as cpu_possible_mask if all cpus
708c2ecf20Sopenharmony_ci *    are part of the same cluster.  The coupled_cpus mask must be
718c2ecf20Sopenharmony_ci *    set in the struct cpuidle_device for each cpu.
728c2ecf20Sopenharmony_ci *
738c2ecf20Sopenharmony_ci *    Set struct cpuidle_device.safe_state to a state that is not a
748c2ecf20Sopenharmony_ci *    coupled state.  This is usually WFI.
758c2ecf20Sopenharmony_ci *
768c2ecf20Sopenharmony_ci *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
778c2ecf20Sopenharmony_ci *    state that affects multiple cpus.
788c2ecf20Sopenharmony_ci *
798c2ecf20Sopenharmony_ci *    Provide a struct cpuidle_state.enter function for each state
808c2ecf20Sopenharmony_ci *    that affects multiple cpus.  This function is guaranteed to be
818c2ecf20Sopenharmony_ci *    called on all cpus at approximately the same time.  The driver
828c2ecf20Sopenharmony_ci *    should ensure that the cpus all abort together if any cpu tries
838c2ecf20Sopenharmony_ci *    to abort once the function is called.  The function should return
848c2ecf20Sopenharmony_ci *    with interrupts still disabled.
858c2ecf20Sopenharmony_ci */
868c2ecf20Sopenharmony_ci
/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set;
 *                   CPUIDLE_COUPLED_NOT_IDLE when a cpu is not requesting idle
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops:
 *                        waiting count in the low WAITING_BITS bits, ready
 *                        count in the bits above them
 * @abort_barrier: synchronisation point for abort cases
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};
1068c2ecf20Sopenharmony_ci
/*
 * ready_waiting_counts packs two counters into a single atomic_t: the low
 * WAITING_BITS bits hold the waiting count, the bits above them hold the
 * ready count.  MAX_WAITING_CPUS is both the capacity of the waiting field
 * and the value added/subtracted to change the ready count by one.
 */
#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)

/* Stored in requested_state[] when a cpu has no coupled idle request */
#define CPUIDLE_COUPLED_NOT_IDLE	(-1)
1138c2ecf20Sopenharmony_ci
/* Per-cpu call_single_data_t used to IPI ("poke") a coupled cpu */
static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * __smp_call_function_single with the per cpu call_single_data_t struct already
 * in use.  This prevents a deadlock where two cpus are waiting for each others
 * call_single_data_t struct to be available
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;
1308c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a:   atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, will be reset to 0 before any cpu returns from this function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	/* Order this cpu's prior memory accesses before announcing arrival */
	smp_mb__before_atomic();
	atomic_inc(a);

	/* Phase 1: spin until all n cpus have arrived (counter reaches n) */
	while (atomic_read(a) < n)
		cpu_relax();

	/*
	 * Phase 2: each cpu increments again.  The last one to do so sees
	 * n * 2, resets the barrier to 0 for reuse, and leaves immediately.
	 */
	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	/* All other cpus spin until the last cpu has reset the counter */
	while (atomic_read(a) > n)
		cpu_relax();
}
1678c2ecf20Sopenharmony_ci
1688c2ecf20Sopenharmony_ci/**
1698c2ecf20Sopenharmony_ci * cpuidle_state_is_coupled - check if a state is part of a coupled set
1708c2ecf20Sopenharmony_ci * @drv: struct cpuidle_driver for the platform
1718c2ecf20Sopenharmony_ci * @state: index of the target state in drv->states
1728c2ecf20Sopenharmony_ci *
1738c2ecf20Sopenharmony_ci * Returns true if the target state is coupled with cpus besides this one
1748c2ecf20Sopenharmony_ci */
1758c2ecf20Sopenharmony_cibool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
1768c2ecf20Sopenharmony_ci{
1778c2ecf20Sopenharmony_ci	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
1788c2ecf20Sopenharmony_ci}
1798c2ecf20Sopenharmony_ci
1808c2ecf20Sopenharmony_ci/**
1818c2ecf20Sopenharmony_ci * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
1828c2ecf20Sopenharmony_ci * @drv: struct cpuidle_driver for the platform
1838c2ecf20Sopenharmony_ci *
1848c2ecf20Sopenharmony_ci * Returns 0 for valid state values, a negative error code otherwise:
1858c2ecf20Sopenharmony_ci *  * -EINVAL if any coupled state(safe_state_index) is wrongly set.
1868c2ecf20Sopenharmony_ci */
1878c2ecf20Sopenharmony_ciint cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
1888c2ecf20Sopenharmony_ci{
1898c2ecf20Sopenharmony_ci	int i;
1908c2ecf20Sopenharmony_ci
1918c2ecf20Sopenharmony_ci	for (i = drv->state_count - 1; i >= 0; i--) {
1928c2ecf20Sopenharmony_ci		if (cpuidle_state_is_coupled(drv, i) &&
1938c2ecf20Sopenharmony_ci		    (drv->safe_state_index == i ||
1948c2ecf20Sopenharmony_ci		     drv->safe_state_index < 0 ||
1958c2ecf20Sopenharmony_ci		     drv->safe_state_index >= drv->state_count))
1968c2ecf20Sopenharmony_ci			return -EINVAL;
1978c2ecf20Sopenharmony_ci	}
1988c2ecf20Sopenharmony_ci
1998c2ecf20Sopenharmony_ci	return 0;
2008c2ecf20Sopenharmony_ci}
2018c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Increments the ready count, which lives in the bits above the
 * WAITING_BITS-wide waiting count, by atomically adding MAX_WAITING_CPUS.
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}
2108c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	/*
	 * "all" is the packed counter value when every online cpu is both
	 * ready (upper bits) and waiting (lower bits) at once.
	 */
	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	/* Drop the ready count by one unless the counter equals "all" */
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}
2378c2ecf20Sopenharmony_ci
2388c2ecf20Sopenharmony_ci/**
2398c2ecf20Sopenharmony_ci * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
2408c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the current cpu
2418c2ecf20Sopenharmony_ci *
2428c2ecf20Sopenharmony_ci * Returns true if all of the cpus in a coupled set are out of the ready loop.
2438c2ecf20Sopenharmony_ci */
2448c2ecf20Sopenharmony_cistatic inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
2458c2ecf20Sopenharmony_ci{
2468c2ecf20Sopenharmony_ci	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
2478c2ecf20Sopenharmony_ci	return r == 0;
2488c2ecf20Sopenharmony_ci}
2498c2ecf20Sopenharmony_ci
2508c2ecf20Sopenharmony_ci/**
2518c2ecf20Sopenharmony_ci * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
2528c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the current cpu
2538c2ecf20Sopenharmony_ci *
2548c2ecf20Sopenharmony_ci * Returns true if all cpus coupled to this target state are in the ready loop
2558c2ecf20Sopenharmony_ci */
2568c2ecf20Sopenharmony_cistatic inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
2578c2ecf20Sopenharmony_ci{
2588c2ecf20Sopenharmony_ci	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
2598c2ecf20Sopenharmony_ci	return r == coupled->online_count;
2608c2ecf20Sopenharmony_ci}
2618c2ecf20Sopenharmony_ci
2628c2ecf20Sopenharmony_ci/**
2638c2ecf20Sopenharmony_ci * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
2648c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the current cpu
2658c2ecf20Sopenharmony_ci *
2668c2ecf20Sopenharmony_ci * Returns true if all cpus coupled to this target state are in the wait loop
2678c2ecf20Sopenharmony_ci */
2688c2ecf20Sopenharmony_cistatic inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
2698c2ecf20Sopenharmony_ci{
2708c2ecf20Sopenharmony_ci	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
2718c2ecf20Sopenharmony_ci	return w == coupled->online_count;
2728c2ecf20Sopenharmony_ci}
2738c2ecf20Sopenharmony_ci
2748c2ecf20Sopenharmony_ci/**
2758c2ecf20Sopenharmony_ci * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
2768c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the current cpu
2778c2ecf20Sopenharmony_ci *
2788c2ecf20Sopenharmony_ci * Returns true if all of the cpus in a coupled set are out of the waiting loop.
2798c2ecf20Sopenharmony_ci */
2808c2ecf20Sopenharmony_cistatic inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
2818c2ecf20Sopenharmony_ci{
2828c2ecf20Sopenharmony_ci	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
2838c2ecf20Sopenharmony_ci	return w == 0;
2848c2ecf20Sopenharmony_ci}
2858c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barriers
	 * cpuidle_set_state_waiting.
	 */
	smp_rmb();

	/*
	 * States are indexed shallowest to deepest, and requested_state[i]
	 * is the deepest state cpu i is ready for, so the minimum over all
	 * online coupled cpus is the deepest state acceptable to everyone.
	 */
	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}
3128c2ecf20Sopenharmony_ci
/*
 * IPI callback for a poke: record that @info's cpu has been poked, then
 * clear the pending bit so that cpu's per-cpu call_single_data_t can be
 * reused by a later cpuidle_coupled_poke().
 */
static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}
3198c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits it's waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters it's waiting idle
 * state.
 *
 * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	/*
	 * Only send the IPI when no poke is already pending for this cpu:
	 * the atomic test_and_set guarantees the per-cpu csd is never
	 * submitted while still in flight (see comment above
	 * cpuidle_coupled_poke_pending).
	 */
	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}
3398c2ecf20Sopenharmony_ci
3408c2ecf20Sopenharmony_ci/**
3418c2ecf20Sopenharmony_ci * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
3428c2ecf20Sopenharmony_ci * @this_cpu: target cpu
3438c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the current cpu
3448c2ecf20Sopenharmony_ci *
3458c2ecf20Sopenharmony_ci * Calls cpuidle_coupled_poke on all other online cpus.
3468c2ecf20Sopenharmony_ci */
3478c2ecf20Sopenharmony_cistatic void cpuidle_coupled_poke_others(int this_cpu,
3488c2ecf20Sopenharmony_ci		struct cpuidle_coupled *coupled)
3498c2ecf20Sopenharmony_ci{
3508c2ecf20Sopenharmony_ci	int cpu;
3518c2ecf20Sopenharmony_ci
3528c2ecf20Sopenharmony_ci	for_each_cpu(cpu, &coupled->coupled_cpus)
3538c2ecf20Sopenharmony_ci		if (cpu != this_cpu && cpu_online(cpu))
3548c2ecf20Sopenharmony_ci			cpuidle_coupled_poke(cpu);
3558c2ecf20Sopenharmony_ci}
3568c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the new number of waiting cpus, including this one.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}
3778c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device by
 * decrementing the waiting count and then clearing requested_state.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared it's requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	/* Cleared last so spinning cpus observe the withdrawal (see above) */
	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}
3988c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	/* Leave the waiting loop first (order matters, see above) ... */
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	/* ... then drop the ready count, held in the upper bits */
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}
4138c2ecf20Sopenharmony_ci
/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	/* Fast path: nothing pending, leave interrupt state untouched */
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	/*
	 * Enable interrupts so the pending poke IPI can be delivered;
	 * cpuidle_coupled_handle_poke clears the pending bit we spin on.
	 */
	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}
4398c2ecf20Sopenharmony_ci
4408c2ecf20Sopenharmony_cistatic bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
4418c2ecf20Sopenharmony_ci{
4428c2ecf20Sopenharmony_ci	cpumask_t cpus;
4438c2ecf20Sopenharmony_ci	int ret;
4448c2ecf20Sopenharmony_ci
4458c2ecf20Sopenharmony_ci	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
4468c2ecf20Sopenharmony_ci	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
4478c2ecf20Sopenharmony_ci
4488c2ecf20Sopenharmony_ci	return ret;
4498c2ecf20Sopenharmony_ci}
4508c2ecf20Sopenharmony_ci
4518c2ecf20Sopenharmony_ci/**
4528c2ecf20Sopenharmony_ci * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
4538c2ecf20Sopenharmony_ci * @dev: struct cpuidle_device for the current cpu
4548c2ecf20Sopenharmony_ci * @drv: struct cpuidle_driver for the platform
4558c2ecf20Sopenharmony_ci * @next_state: index of the requested state in drv->states
4568c2ecf20Sopenharmony_ci *
4578c2ecf20Sopenharmony_ci * Coordinate with coupled cpus to enter the target state.  This is a two
4588c2ecf20Sopenharmony_ci * stage process.  In the first stage, the cpus are operating independently,
4598c2ecf20Sopenharmony_ci * and may call into cpuidle_enter_state_coupled at completely different times.
4608c2ecf20Sopenharmony_ci * To save as much power as possible, the first cpus to call this function will
4618c2ecf20Sopenharmony_ci * go to an intermediate state (the cpuidle_device's safe state), and wait for
4628c2ecf20Sopenharmony_ci * all the other cpus to call this function.  Once all coupled cpus are idle,
4638c2ecf20Sopenharmony_ci * the second stage will start.  Each coupled cpu will spin until all cpus have
4648c2ecf20Sopenharmony_ci * guaranteed that they will call the target_state.
4658c2ecf20Sopenharmony_ci *
4668c2ecf20Sopenharmony_ci * This function must be called with interrupts disabled.  It may enable
4678c2ecf20Sopenharmony_ci * interrupts while preparing for idle, and it will always return with
4688c2ecf20Sopenharmony_ci * interrupts enabled.
4698c2ecf20Sopenharmony_ci */
4708c2ecf20Sopenharmony_ciint cpuidle_enter_state_coupled(struct cpuidle_device *dev,
4718c2ecf20Sopenharmony_ci		struct cpuidle_driver *drv, int next_state)
4728c2ecf20Sopenharmony_ci{
4738c2ecf20Sopenharmony_ci	int entered_state = -1;
4748c2ecf20Sopenharmony_ci	struct cpuidle_coupled *coupled = dev->coupled;
4758c2ecf20Sopenharmony_ci	int w;
4768c2ecf20Sopenharmony_ci
4778c2ecf20Sopenharmony_ci	if (!coupled)
4788c2ecf20Sopenharmony_ci		return -EINVAL;
4798c2ecf20Sopenharmony_ci
4808c2ecf20Sopenharmony_ci	while (coupled->prevent) {
4818c2ecf20Sopenharmony_ci		cpuidle_coupled_clear_pokes(dev->cpu);
4828c2ecf20Sopenharmony_ci		if (need_resched()) {
4838c2ecf20Sopenharmony_ci			local_irq_enable();
4848c2ecf20Sopenharmony_ci			return entered_state;
4858c2ecf20Sopenharmony_ci		}
4868c2ecf20Sopenharmony_ci		entered_state = cpuidle_enter_state(dev, drv,
4878c2ecf20Sopenharmony_ci			drv->safe_state_index);
4888c2ecf20Sopenharmony_ci		local_irq_disable();
4898c2ecf20Sopenharmony_ci	}
4908c2ecf20Sopenharmony_ci
4918c2ecf20Sopenharmony_ci	/* Read barrier ensures online_count is read after prevent is cleared */
4928c2ecf20Sopenharmony_ci	smp_rmb();
4938c2ecf20Sopenharmony_ci
4948c2ecf20Sopenharmony_cireset:
4958c2ecf20Sopenharmony_ci	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
4968c2ecf20Sopenharmony_ci
4978c2ecf20Sopenharmony_ci	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
4988c2ecf20Sopenharmony_ci	/*
4998c2ecf20Sopenharmony_ci	 * If this is the last cpu to enter the waiting state, poke
5008c2ecf20Sopenharmony_ci	 * all the other cpus out of their waiting state so they can
5018c2ecf20Sopenharmony_ci	 * enter a deeper state.  This can race with one of the cpus
5028c2ecf20Sopenharmony_ci	 * exiting the waiting state due to an interrupt and
5038c2ecf20Sopenharmony_ci	 * decrementing waiting_count, see comment below.
5048c2ecf20Sopenharmony_ci	 */
5058c2ecf20Sopenharmony_ci	if (w == coupled->online_count) {
5068c2ecf20Sopenharmony_ci		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
5078c2ecf20Sopenharmony_ci		cpuidle_coupled_poke_others(dev->cpu, coupled);
5088c2ecf20Sopenharmony_ci	}
5098c2ecf20Sopenharmony_ci
5108c2ecf20Sopenharmony_ciretry:
5118c2ecf20Sopenharmony_ci	/*
5128c2ecf20Sopenharmony_ci	 * Wait for all coupled cpus to be idle, using the deepest state
5138c2ecf20Sopenharmony_ci	 * allowed for a single cpu.  If this was not the poking cpu, wait
5148c2ecf20Sopenharmony_ci	 * for at least one poke before leaving to avoid a race where
5158c2ecf20Sopenharmony_ci	 * two cpus could arrive at the waiting loop at the same time,
5168c2ecf20Sopenharmony_ci	 * but the first of the two to arrive could skip the loop without
5178c2ecf20Sopenharmony_ci	 * processing the pokes from the last to arrive.
5188c2ecf20Sopenharmony_ci	 */
5198c2ecf20Sopenharmony_ci	while (!cpuidle_coupled_cpus_waiting(coupled) ||
5208c2ecf20Sopenharmony_ci			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
5218c2ecf20Sopenharmony_ci		if (cpuidle_coupled_clear_pokes(dev->cpu))
5228c2ecf20Sopenharmony_ci			continue;
5238c2ecf20Sopenharmony_ci
5248c2ecf20Sopenharmony_ci		if (need_resched()) {
5258c2ecf20Sopenharmony_ci			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
5268c2ecf20Sopenharmony_ci			goto out;
5278c2ecf20Sopenharmony_ci		}
5288c2ecf20Sopenharmony_ci
5298c2ecf20Sopenharmony_ci		if (coupled->prevent) {
5308c2ecf20Sopenharmony_ci			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
5318c2ecf20Sopenharmony_ci			goto out;
5328c2ecf20Sopenharmony_ci		}
5338c2ecf20Sopenharmony_ci
5348c2ecf20Sopenharmony_ci		entered_state = cpuidle_enter_state(dev, drv,
5358c2ecf20Sopenharmony_ci			drv->safe_state_index);
5368c2ecf20Sopenharmony_ci		local_irq_disable();
5378c2ecf20Sopenharmony_ci	}
5388c2ecf20Sopenharmony_ci
5398c2ecf20Sopenharmony_ci	cpuidle_coupled_clear_pokes(dev->cpu);
5408c2ecf20Sopenharmony_ci	if (need_resched()) {
5418c2ecf20Sopenharmony_ci		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
5428c2ecf20Sopenharmony_ci		goto out;
5438c2ecf20Sopenharmony_ci	}
5448c2ecf20Sopenharmony_ci
5458c2ecf20Sopenharmony_ci	/*
5468c2ecf20Sopenharmony_ci	 * Make sure final poke status for this cpu is visible before setting
5478c2ecf20Sopenharmony_ci	 * cpu as ready.
5488c2ecf20Sopenharmony_ci	 */
5498c2ecf20Sopenharmony_ci	smp_wmb();
5508c2ecf20Sopenharmony_ci
5518c2ecf20Sopenharmony_ci	/*
5528c2ecf20Sopenharmony_ci	 * All coupled cpus are probably idle.  There is a small chance that
5538c2ecf20Sopenharmony_ci	 * one of the other cpus just became active.  Increment the ready count,
5548c2ecf20Sopenharmony_ci	 * and spin until all coupled cpus have incremented the counter. Once a
5558c2ecf20Sopenharmony_ci	 * cpu has incremented the ready counter, it cannot abort idle and must
5568c2ecf20Sopenharmony_ci	 * spin until either all cpus have incremented the ready counter, or
5578c2ecf20Sopenharmony_ci	 * another cpu leaves idle and decrements the waiting counter.
5588c2ecf20Sopenharmony_ci	 */
5598c2ecf20Sopenharmony_ci
5608c2ecf20Sopenharmony_ci	cpuidle_coupled_set_ready(coupled);
5618c2ecf20Sopenharmony_ci	while (!cpuidle_coupled_cpus_ready(coupled)) {
5628c2ecf20Sopenharmony_ci		/* Check if any other cpus bailed out of idle. */
5638c2ecf20Sopenharmony_ci		if (!cpuidle_coupled_cpus_waiting(coupled))
5648c2ecf20Sopenharmony_ci			if (!cpuidle_coupled_set_not_ready(coupled))
5658c2ecf20Sopenharmony_ci				goto retry;
5668c2ecf20Sopenharmony_ci
5678c2ecf20Sopenharmony_ci		cpu_relax();
5688c2ecf20Sopenharmony_ci	}
5698c2ecf20Sopenharmony_ci
5708c2ecf20Sopenharmony_ci	/*
5718c2ecf20Sopenharmony_ci	 * Make sure read of all cpus ready is done before reading pending pokes
5728c2ecf20Sopenharmony_ci	 */
5738c2ecf20Sopenharmony_ci	smp_rmb();
5748c2ecf20Sopenharmony_ci
5758c2ecf20Sopenharmony_ci	/*
5768c2ecf20Sopenharmony_ci	 * There is a small chance that a cpu left and reentered idle after this
5778c2ecf20Sopenharmony_ci	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
5788c2ecf20Sopenharmony_ci	 * have sent this cpu a poke, which will still be pending after the
5798c2ecf20Sopenharmony_ci	 * ready loop.  The pending interrupt may be lost by the interrupt
5808c2ecf20Sopenharmony_ci	 * controller when entering the deep idle state.  It's not possible to
5818c2ecf20Sopenharmony_ci	 * clear a pending interrupt without turning interrupts on and handling
5828c2ecf20Sopenharmony_ci	 * it, and it's too late to turn on interrupts here, so reset the
5838c2ecf20Sopenharmony_ci	 * coupled idle state of all cpus and retry.
5848c2ecf20Sopenharmony_ci	 */
5858c2ecf20Sopenharmony_ci	if (cpuidle_coupled_any_pokes_pending(coupled)) {
5868c2ecf20Sopenharmony_ci		cpuidle_coupled_set_done(dev->cpu, coupled);
5878c2ecf20Sopenharmony_ci		/* Wait for all cpus to see the pending pokes */
5888c2ecf20Sopenharmony_ci		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
5898c2ecf20Sopenharmony_ci		goto reset;
5908c2ecf20Sopenharmony_ci	}
5918c2ecf20Sopenharmony_ci
5928c2ecf20Sopenharmony_ci	/* all cpus have acked the coupled state */
5938c2ecf20Sopenharmony_ci	next_state = cpuidle_coupled_get_state(dev, coupled);
5948c2ecf20Sopenharmony_ci
5958c2ecf20Sopenharmony_ci	entered_state = cpuidle_enter_state(dev, drv, next_state);
5968c2ecf20Sopenharmony_ci
5978c2ecf20Sopenharmony_ci	cpuidle_coupled_set_done(dev->cpu, coupled);
5988c2ecf20Sopenharmony_ci
5998c2ecf20Sopenharmony_ciout:
6008c2ecf20Sopenharmony_ci	/*
6018c2ecf20Sopenharmony_ci	 * Normal cpuidle states are expected to return with irqs enabled.
6028c2ecf20Sopenharmony_ci	 * That leads to an inefficiency where a cpu receiving an interrupt
6038c2ecf20Sopenharmony_ci	 * that brings it out of idle will process that interrupt before
6048c2ecf20Sopenharmony_ci	 * exiting the idle enter function and decrementing ready_count.  All
6058c2ecf20Sopenharmony_ci	 * other cpus will need to spin waiting for the cpu that is processing
6068c2ecf20Sopenharmony_ci	 * the interrupt.  If the driver returns with interrupts disabled,
6078c2ecf20Sopenharmony_ci	 * all other cpus will loop back into the safe idle state instead of
6088c2ecf20Sopenharmony_ci	 * spinning, saving power.
6098c2ecf20Sopenharmony_ci	 *
6108c2ecf20Sopenharmony_ci	 * Calling local_irq_enable here allows coupled states to return with
6118c2ecf20Sopenharmony_ci	 * interrupts disabled, but won't cause problems for drivers that
6128c2ecf20Sopenharmony_ci	 * exit with interrupts enabled.
6138c2ecf20Sopenharmony_ci	 */
6148c2ecf20Sopenharmony_ci	local_irq_enable();
6158c2ecf20Sopenharmony_ci
6168c2ecf20Sopenharmony_ci	/*
6178c2ecf20Sopenharmony_ci	 * Wait until all coupled cpus have exited idle.  There is no risk that
6188c2ecf20Sopenharmony_ci	 * a cpu exits and re-enters the ready state because this cpu has
6198c2ecf20Sopenharmony_ci	 * already decremented its waiting_count.
6208c2ecf20Sopenharmony_ci	 */
6218c2ecf20Sopenharmony_ci	while (!cpuidle_coupled_no_cpus_ready(coupled))
6228c2ecf20Sopenharmony_ci		cpu_relax();
6238c2ecf20Sopenharmony_ci
6248c2ecf20Sopenharmony_ci	return entered_state;
6258c2ecf20Sopenharmony_ci}
6268c2ecf20Sopenharmony_ci
6278c2ecf20Sopenharmony_cistatic void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
6288c2ecf20Sopenharmony_ci{
6298c2ecf20Sopenharmony_ci	cpumask_t cpus;
6308c2ecf20Sopenharmony_ci	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
6318c2ecf20Sopenharmony_ci	coupled->online_count = cpumask_weight(&cpus);
6328c2ecf20Sopenharmony_ci}
6338c2ecf20Sopenharmony_ci
6348c2ecf20Sopenharmony_ci/**
6358c2ecf20Sopenharmony_ci * cpuidle_coupled_register_device - register a coupled cpuidle device
6368c2ecf20Sopenharmony_ci * @dev: struct cpuidle_device for the current cpu
6378c2ecf20Sopenharmony_ci *
6388c2ecf20Sopenharmony_ci * Called from cpuidle_register_device to handle coupled idle init.  Finds the
6398c2ecf20Sopenharmony_ci * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
6408c2ecf20Sopenharmony_ci * exists yet.
6418c2ecf20Sopenharmony_ci */
6428c2ecf20Sopenharmony_ciint cpuidle_coupled_register_device(struct cpuidle_device *dev)
6438c2ecf20Sopenharmony_ci{
6448c2ecf20Sopenharmony_ci	int cpu;
6458c2ecf20Sopenharmony_ci	struct cpuidle_device *other_dev;
6468c2ecf20Sopenharmony_ci	call_single_data_t *csd;
6478c2ecf20Sopenharmony_ci	struct cpuidle_coupled *coupled;
6488c2ecf20Sopenharmony_ci
6498c2ecf20Sopenharmony_ci	if (cpumask_empty(&dev->coupled_cpus))
6508c2ecf20Sopenharmony_ci		return 0;
6518c2ecf20Sopenharmony_ci
6528c2ecf20Sopenharmony_ci	for_each_cpu(cpu, &dev->coupled_cpus) {
6538c2ecf20Sopenharmony_ci		other_dev = per_cpu(cpuidle_devices, cpu);
6548c2ecf20Sopenharmony_ci		if (other_dev && other_dev->coupled) {
6558c2ecf20Sopenharmony_ci			coupled = other_dev->coupled;
6568c2ecf20Sopenharmony_ci			goto have_coupled;
6578c2ecf20Sopenharmony_ci		}
6588c2ecf20Sopenharmony_ci	}
6598c2ecf20Sopenharmony_ci
6608c2ecf20Sopenharmony_ci	/* No existing coupled info found, create a new one */
6618c2ecf20Sopenharmony_ci	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
6628c2ecf20Sopenharmony_ci	if (!coupled)
6638c2ecf20Sopenharmony_ci		return -ENOMEM;
6648c2ecf20Sopenharmony_ci
6658c2ecf20Sopenharmony_ci	coupled->coupled_cpus = dev->coupled_cpus;
6668c2ecf20Sopenharmony_ci
6678c2ecf20Sopenharmony_cihave_coupled:
6688c2ecf20Sopenharmony_ci	dev->coupled = coupled;
6698c2ecf20Sopenharmony_ci	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
6708c2ecf20Sopenharmony_ci		coupled->prevent++;
6718c2ecf20Sopenharmony_ci
6728c2ecf20Sopenharmony_ci	cpuidle_coupled_update_online_cpus(coupled);
6738c2ecf20Sopenharmony_ci
6748c2ecf20Sopenharmony_ci	coupled->refcnt++;
6758c2ecf20Sopenharmony_ci
6768c2ecf20Sopenharmony_ci	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
6778c2ecf20Sopenharmony_ci	csd->func = cpuidle_coupled_handle_poke;
6788c2ecf20Sopenharmony_ci	csd->info = (void *)(unsigned long)dev->cpu;
6798c2ecf20Sopenharmony_ci
6808c2ecf20Sopenharmony_ci	return 0;
6818c2ecf20Sopenharmony_ci}
6828c2ecf20Sopenharmony_ci
6838c2ecf20Sopenharmony_ci/**
6848c2ecf20Sopenharmony_ci * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
6858c2ecf20Sopenharmony_ci * @dev: struct cpuidle_device for the current cpu
6868c2ecf20Sopenharmony_ci *
6878c2ecf20Sopenharmony_ci * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
6888c2ecf20Sopenharmony_ci * cpu from the coupled idle set, and frees the cpuidle_coupled_info struct if
6898c2ecf20Sopenharmony_ci * this was the last cpu in the set.
6908c2ecf20Sopenharmony_ci */
6918c2ecf20Sopenharmony_civoid cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
6928c2ecf20Sopenharmony_ci{
6938c2ecf20Sopenharmony_ci	struct cpuidle_coupled *coupled = dev->coupled;
6948c2ecf20Sopenharmony_ci
6958c2ecf20Sopenharmony_ci	if (cpumask_empty(&dev->coupled_cpus))
6968c2ecf20Sopenharmony_ci		return;
6978c2ecf20Sopenharmony_ci
6988c2ecf20Sopenharmony_ci	if (--coupled->refcnt)
6998c2ecf20Sopenharmony_ci		kfree(coupled);
7008c2ecf20Sopenharmony_ci	dev->coupled = NULL;
7018c2ecf20Sopenharmony_ci}
7028c2ecf20Sopenharmony_ci
7038c2ecf20Sopenharmony_ci/**
7048c2ecf20Sopenharmony_ci * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
7058c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the cpu that is changing state
7068c2ecf20Sopenharmony_ci *
7078c2ecf20Sopenharmony_ci * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
7088c2ecf20Sopenharmony_ci * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
7098c2ecf20Sopenharmony_ci */
7108c2ecf20Sopenharmony_cistatic void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
7118c2ecf20Sopenharmony_ci{
7128c2ecf20Sopenharmony_ci	int cpu = get_cpu();
7138c2ecf20Sopenharmony_ci
7148c2ecf20Sopenharmony_ci	/* Force all cpus out of the waiting loop. */
7158c2ecf20Sopenharmony_ci	coupled->prevent++;
7168c2ecf20Sopenharmony_ci	cpuidle_coupled_poke_others(cpu, coupled);
7178c2ecf20Sopenharmony_ci	put_cpu();
7188c2ecf20Sopenharmony_ci	while (!cpuidle_coupled_no_cpus_waiting(coupled))
7198c2ecf20Sopenharmony_ci		cpu_relax();
7208c2ecf20Sopenharmony_ci}
7218c2ecf20Sopenharmony_ci
7228c2ecf20Sopenharmony_ci/**
7238c2ecf20Sopenharmony_ci * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
7248c2ecf20Sopenharmony_ci * @coupled: the struct coupled that contains the cpu that is changing state
7258c2ecf20Sopenharmony_ci *
7268c2ecf20Sopenharmony_ci * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
7278c2ecf20Sopenharmony_ci * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
7288c2ecf20Sopenharmony_ci */
7298c2ecf20Sopenharmony_cistatic void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
7308c2ecf20Sopenharmony_ci{
7318c2ecf20Sopenharmony_ci	int cpu = get_cpu();
7328c2ecf20Sopenharmony_ci
7338c2ecf20Sopenharmony_ci	/*
7348c2ecf20Sopenharmony_ci	 * Write barrier ensures readers see the new online_count when they
7358c2ecf20Sopenharmony_ci	 * see prevent == 0.
7368c2ecf20Sopenharmony_ci	 */
7378c2ecf20Sopenharmony_ci	smp_wmb();
7388c2ecf20Sopenharmony_ci	coupled->prevent--;
7398c2ecf20Sopenharmony_ci	/* Force cpus out of the prevent loop. */
7408c2ecf20Sopenharmony_ci	cpuidle_coupled_poke_others(cpu, coupled);
7418c2ecf20Sopenharmony_ci	put_cpu();
7428c2ecf20Sopenharmony_ci}
7438c2ecf20Sopenharmony_ci
7448c2ecf20Sopenharmony_cistatic int coupled_cpu_online(unsigned int cpu)
7458c2ecf20Sopenharmony_ci{
7468c2ecf20Sopenharmony_ci	struct cpuidle_device *dev;
7478c2ecf20Sopenharmony_ci
7488c2ecf20Sopenharmony_ci	mutex_lock(&cpuidle_lock);
7498c2ecf20Sopenharmony_ci
7508c2ecf20Sopenharmony_ci	dev = per_cpu(cpuidle_devices, cpu);
7518c2ecf20Sopenharmony_ci	if (dev && dev->coupled) {
7528c2ecf20Sopenharmony_ci		cpuidle_coupled_update_online_cpus(dev->coupled);
7538c2ecf20Sopenharmony_ci		cpuidle_coupled_allow_idle(dev->coupled);
7548c2ecf20Sopenharmony_ci	}
7558c2ecf20Sopenharmony_ci
7568c2ecf20Sopenharmony_ci	mutex_unlock(&cpuidle_lock);
7578c2ecf20Sopenharmony_ci	return 0;
7588c2ecf20Sopenharmony_ci}
7598c2ecf20Sopenharmony_ci
7608c2ecf20Sopenharmony_cistatic int coupled_cpu_up_prepare(unsigned int cpu)
7618c2ecf20Sopenharmony_ci{
7628c2ecf20Sopenharmony_ci	struct cpuidle_device *dev;
7638c2ecf20Sopenharmony_ci
7648c2ecf20Sopenharmony_ci	mutex_lock(&cpuidle_lock);
7658c2ecf20Sopenharmony_ci
7668c2ecf20Sopenharmony_ci	dev = per_cpu(cpuidle_devices, cpu);
7678c2ecf20Sopenharmony_ci	if (dev && dev->coupled)
7688c2ecf20Sopenharmony_ci		cpuidle_coupled_prevent_idle(dev->coupled);
7698c2ecf20Sopenharmony_ci
7708c2ecf20Sopenharmony_ci	mutex_unlock(&cpuidle_lock);
7718c2ecf20Sopenharmony_ci	return 0;
7728c2ecf20Sopenharmony_ci}
7738c2ecf20Sopenharmony_ci
/*
 * Register the cpu hotplug callbacks that keep coupled idle state consistent
 * across cpu online/offline transitions.
 */
static int __init cpuidle_coupled_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
					"cpuidle/coupled:prepare",
					coupled_cpu_up_prepare,
					coupled_cpu_online);
	if (ret)
		return ret;
	/*
	 * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state and returns
	 * its (positive) state id on success, so unlike the static state
	 * above, only a negative value indicates failure here.
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"cpuidle/coupled:online",
					coupled_cpu_online,
					coupled_cpu_up_prepare);
	if (ret < 0)
		cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
	return ret;
}
core_initcall(cpuidle_coupled_init);
793