/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#else /* !CONFIG_ARC_HAS_LLSC */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})
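
/*
 * Illustrative sketch (not part of the original interface): the typical
 * read / compute / cmpxchg() retry loop a caller builds on top of this
 * primitive.  The function name and the "capped add" policy are
 * hypothetical, chosen only to show the usage pattern.
 */
static inline unsigned long __example_add_capped(unsigned long *p,
						 unsigned long inc,
						 unsigned long cap)
{
	unsigned long old, new;

	do {
		old = *p;				/* snapshot current value */
		new = (old + inc > cap) ? cap : (old + inc);
	} while (cmpxchg(p, old, new) != old);		/* lost a race: retry */

	return new;
}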

/*
 * atomic_cmpxchg is same as cmpxchg
 *   LLSC: differs only in data-type; semantics are exactly the same
 *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 *         semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
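
/*
 * Hypothetical usage sketch: a one-shot "claim" built on atomic_cmpxchg().
 * Only the caller that observes the 0 -> 1 transition wins; later callers
 * see a non-zero old value.  The name is illustrative, not a kernel API.
 */
static inline int __example_claim_once(atomic_t *claimed)
{
	return atomic_cmpxchg(claimed, 0, 1) == 0;
}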

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to the ARC EX instruction which guarantees atomicity.
 * However in the !LLSC config, it also needs to use the @atomic_ops_lock
 * spinlock, for a subtle reason:
 *  - For !LLSC, cmpxchg() needs that lock (see above), and a lot of kernel
 *    code calls xchg()/cmpxchg() on the same data (see llist.h), hence
 *    xchg() needs to follow the same locking rules.  An illustrative sketch
 *    of this pattern follows the xchg() definitions below.
 *
 * Technically the lock is also needed for UP (it boils down to irq
 * save/restore), but we can cheat a bit: cmpxchg()'s atomic_ops_lock()
 * disables irqs, so it can't possibly be interrupted/preempted/clobbered
 * by xchg().  The other way around, xchg() is a single instruction anyway,
 * so it can't be interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif
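
/*
 * Illustrative sketch of the rationale above (hypothetical, modeled on the
 * llist.h usage pattern; not part of this header's interface): one path
 * pushes nodes with cmpxchg() while another detaches the whole list with
 * xchg() on the same head pointer.  Because both paths hit the same
 * location, they must follow the same serialization rules; in the
 * !LLSC + SMP case a bare EX-based xchg() could otherwise slip in between
 * cmpxchg()'s locked read and write, and a pushed node would be lost.
 */
struct __example_node {
	struct __example_node *next;
};

static inline void __example_push(struct __example_node **head,
				  struct __example_node *node)
{
	struct __example_node *first;

	do {
		first = *head;
		node->next = first;
	} while (cmpxchg(head, first, node) != first);
}

static inline struct __example_node *__example_take_all(struct __example_node **head)
{
	return xchg(head, NULL);	/* atomically detach the whole list */
}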

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         is a single insn, so it can't be clobbered by others. Thus no
 *         serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
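
/*
 * Hypothetical usage sketch: atomic_xchg() as a "test and clear" on a
 * pending-work counter.  The returned old value tells the caller whether
 * anything was pending.  The name is illustrative only.
 */
static inline int __example_test_and_clear_pending(atomic_t *pending)
{
	return atomic_xchg(pending, 0);
}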

#endif /* __ASM_ARC_CMPXCHG_H */