// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters, and we
 * don't try to detect the ref hitting 0 - which means that get/put can
 * just increment or decrement the local counter. Note that the counter
 * on a particular cpu can (and will) wrap - this is fine; when we go to
 * shutdown, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of
 * all the percpu_count vars will be equal to what it would have been if
 * all the gets and puts were done to a single integer, even if some of
 * the percpu integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't
 * detect the ref hitting 0 on every put - that would require global
 * synchronization and defeat the whole purpose of using percpu refs.
 *
 * What we do instead is require the user to keep track of the initial
 * refcount; we know the ref can't hit 0 before the user drops the
 * initial ref, so as long as we convert to non-percpu mode before the
 * initial ref is dropped, everything works.
 *
 * Converting to non-percpu mode is done with some RCU-ish machinery in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
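
/*
 * A worked example of the modular-arithmetic argument above, using
 * hypothetical 4-bit counters (mod 16) for brevity: 10 gets on cpu0
 * leave its counter at 10, and 14 puts on cpu1 leave its counter at
 * -14 mod 16 = 2.  The sum 10 + 2 = 12 is exactly the true net count
 * 10 - 14 = -4 taken mod 16, so folding the percpu counters into the
 * atomic counter at shutdown recovers the correct value.  The bias,
 * PERCPU_COUNT_BIAS = 2^(BITS_PER_LONG - 1), is what keeps the atomic
 * counter positive while that folding is still in flight.
 */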

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  @ref starts out in percpu mode with a refcount of 1
 * unless @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD.
 * Either flag changes the start state to atomic mode, and
 * PERCPU_REF_INIT_DEAD additionally sets the initial refcount to 0.  See
 * the definitions of the PERCPU_REF_INIT_* flags for their behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		ref->percpu_count_ptr = 0;
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
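
/*
 * Illustrative lifecycle sketch, not part of this file: my_obj,
 * my_obj_release and my_obj_new are hypothetical names.  A typical
 * embedding object allocates itself, initializes the ref with a
 * release callback, and frees itself from that callback:
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		kfree(container_of(ref, struct my_obj, ref));
 *	}
 *
 *	static struct my_obj *my_obj_new(void)
 *	{
 *		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *		if (!obj)
 *			return NULL;
 *		if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
 *			kfree(obj);
 *			return NULL;
 *		}
 *		return obj;
 *	}
 *
 * Thereafter percpu_ref_get()/percpu_ref_put() take and drop extra
 * references, and percpu_ref_kill() drops the initial one; once the
 * count reaches 0, my_obj_release() runs (and must not sleep).
 */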

static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init-failure path,
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
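
/*
 * Sketch of the init-failure path mentioned above (hypothetical names;
 * my_obj_setup_rest stands in for whatever follows percpu_ref_init()):
 *
 *	if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL))
 *		goto err_free;
 *	if (my_obj_setup_rest(obj))
 *		goto err_exit_ref;
 *	return 0;
 *
 * err_exit_ref:
 *	percpu_ref_exit(&obj->ref);
 * err_free:
 *	kfree(obj);
 *	return -ENOMEM;
 */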

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the
	 * sum to &data->count; since gets could be happening on one cpu
	 * while puts happen on another, adding a single cpu's count could
	 * cause @data->count to hit 0 before we've got a consistent value -
	 * but the sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count
	 * to &data->count; we need the bias value to prevent &data->count
	 * from reaching 0 before we add the percpu counts. But doing it at
	 * the same time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	WARN_ONCE(atomic_long_read(&data->count) <= 0,
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  data->release, atomic_long_read(&data->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * A non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress.  Use a noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation.  smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion.  If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete.  Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
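
/*
 * Illustrative use of the sync variant (hypothetical; a freeze-style
 * pattern where the caller needs the count to be exact for a while):
 *
 *	percpu_ref_switch_to_atomic_sync(&obj->ref);
 *	(gets/puts now hit the single atomic counter, so e.g.
 *	 percpu_ref_is_zero() reflects the exact count)
 *	percpu_ref_switch_to_percpu(&obj->ref);
 */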

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
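
/*
 * Illustrative sketch (hypothetical names): using @confirm_kill to learn
 * when all CPUs see @ref as dead, i.e. when percpu_ref_tryget_live() can
 * no longer succeed.  complete() is safe here since it doesn't sleep:
 *
 *	static void my_obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		complete(&obj->kill_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 *	wait_for_completion(&obj->kill_done);
 */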

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called.  @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
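
/*
 * Illustrative kill/resurrect cycle (hypothetical; assumes reinit is
 * permitted, i.e. PERCPU_REF_ALLOW_REINIT was passed to
 * percpu_ref_init(), and that the release callback doesn't free @obj):
 *
 *	percpu_ref_kill(&obj->ref);
 *	(wait until percpu_ref_is_zero(&obj->ref) is true)
 *	percpu_ref_reinit(&obj->ref);
 *
 * After the reinit, @obj->ref is live again and, unless it was forced
 * atomic, back in percpu mode.
 */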