// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the ID's of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios
 * the caller needs to perform time-consuming or sleepable operations while
 * holding the hardware lock, in which case a sleepable lock (such as a
 * mutex) is needed to protect those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if so,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return -EINVAL;

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
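
/*
 * Usage sketch (illustrative, not part of this file): clients normally go
 * through the static inline wrappers in <linux/hwspinlock.h>, which select
 * @mode for them. Assuming a driver already holds a valid @hwlock obtained
 * from one of the request APIs below, a trylock that also saves the local
 * interrupt state looks roughly like:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 *
 * A failed attempt returns -EBUSY without spinning, so callers that can
 * wait typically use the timeout variant below instead.
 */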

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios
 * the caller needs to perform time-consuming or sleepable operations while
 * holding the hardware lock, in which case a sleepable lock (such as a
 * mutex) is needed to protect those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, and hence should not exceed a few
 * msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if so,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
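
/*
 * Usage sketch (illustrative, not part of this file): the timeout variant
 * is the common way to share a resource such as a register bank or SRAM
 * region with a remote core. Assuming @hwlock was obtained from one of the
 * request APIs below and a 10 msec bound is acceptable:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *	if (ret)
 *		return ret;
 *
 *	... access the shared resource, briefly and without sleeping ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 *
 * -ETIMEDOUT here usually means the remote owner is stuck or holding the
 * lock for far too long.
 */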

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if so, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return;

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
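
/*
 * Usage sketch for HWLOCK_RAW (illustrative; my_drv_mutex is a hypothetical
 * driver-private mutex, not something this core provides): as the "Caution"
 * notes above explain, raw mode leaves preemption and interrupts untouched,
 * so the caller serializes local access itself, e.g. when it must sleep
 * while holding the hardware lock:
 *
 *	mutex_lock(&my_drv_mutex);
 *	ret = hwspin_lock_timeout_raw(hwlock, 50);
 *	if (ret) {
 *		mutex_unlock(&my_drv_mutex);
 *		return ret;
 *	}
 *
 *	... possibly sleeping work under the hardware lock ...
 *
 *	hwspin_unlock_raw(hwlock);
 *	mutex_unlock(&my_drv_mutex);
 */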

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
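
/*
 * Usage sketch (illustrative): a client node references a lock through the
 * "hwlocks" phandle list, e.g. (node names and the cell value are made up):
 *
 *	hwlock1: hwspinlock {
 *		#hwlock-cells = <1>;
 *	};
 *
 *	client {
 *		hwlocks = <&hwlock1 2>;
 *	};
 *
 * The client driver then resolves the global id and claims the lock:
 *
 *	int id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;
 *	hwlock = hwspin_lock_request_specific(id);
 */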

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
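
/*
 * Usage sketch (illustrative): when the client node also carries a
 * "hwlock-names" property, e.g.
 *
 *	hwlocks = <&hwlock1 2>, <&hwlock1 3>;
 *	hwlock-names = "tx", "rx";
 *
 * a lock can be resolved by name instead of by index:
 *
 *	int id = of_hwspin_lock_get_id_byname(dev->of_node, "rx");
 */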

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
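
/*
 * Registration sketch (illustrative, all my_* names are hypothetical): a
 * platform driver allocates a bank with enough trailing struct hwspinlock
 * entries, fills in its ops and registers from probe. Assume the hardware
 * exposes one lock register per lock, its mapped address stored in
 * hwlock->priv by the driver, and that reading 0 means the lock was granted:
 *
 *	static int my_hwlock_trylock(struct hwspinlock *lock)
 *	{
 *		return readl(lock->priv) == 0;
 *	}
 *
 *	static void my_hwlock_unlock(struct hwspinlock *lock)
 *	{
 *		writel(0, lock->priv);
 *	}
 *
 *	static const struct hwspinlock_ops my_hwlock_ops = {
 *		.trylock = my_hwlock_trylock,
 *		.unlock  = my_hwlock_unlock,
 *	};
 *
 *	bank = devm_kzalloc(dev, struct_size(bank, lock, num_locks),
 *			    GFP_KERNEL);
 *	ret = hwspin_lock_register(bank, dev, &my_hwlock_ops,
 *				   base_id, num_locks);
 *
 * The matching hwspin_lock_unregister() belongs in the driver's remove
 * path (or use the devm_ variant below and skip it).
 */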

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
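
/*
 * Probe-time sketch (illustrative, my_* names are hypothetical and carried
 * over from the registration sketch above): with the managed variant the
 * bank is unregistered automatically when the backing device goes away, so
 * the driver needs no explicit remove-path call:
 *
 *	static int my_hwlock_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock_device *bank;
 *		int num_locks = 32;
 *
 *		bank = devm_kzalloc(&pdev->dev,
 *				    struct_size(bank, lock, num_locks),
 *				    GFP_KERNEL);
 *		if (!bank)
 *			return -ENOMEM;
 *
 *		return devm_hwspin_lock_register(&pdev->dev, bank,
 *						 &my_hwlock_ops, 0,
 *						 num_locks);
 *	}
 */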

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock instance
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	ret = 0;

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
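
/*
 * Usage sketch (illustrative): a dynamically assigned lock is typically
 * published to the remote core by id, e.g. over a mailbox or shared memory
 * (the transport is the caller's business, not this core's):
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	id = hwspin_lock_get_id(hwlock);
 *	... send 'id' to the remote processor ...
 *
 * When the lock is no longer needed, hand it back with hwspin_lock_free().
 */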

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
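
/*
 * Usage sketch (illustrative): when both sides agree on a fixed lock id,
 * for example one obtained from the device tree as shown earlier, the lock
 * is claimed explicitly and released on teardown:
 *
 *	hwlock = hwspin_lock_request_specific(id);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	... use the hwspin_lock_timeout_*()/hwspin_unlock_*() wrappers ...
 *
 *	hwspin_lock_free(hwlock);
 */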

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	pm_runtime_put(dev);

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
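
/*
 * Consumer probe sketch (illustrative, my_client_probe is hypothetical):
 * the usual DT flow combines the id lookup with the managed request, so no
 * explicit hwspin_lock_free() is needed on driver removal:
 *
 *	static int my_client_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock *hwlock;
 *		int id;
 *
 *		id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
 *		if (id < 0)
 *			return id;
 *
 *		hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
 *		if (!hwlock)
 *			return -EBUSY;
 *
 *		return 0;
 *	}
 */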

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");