/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

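/*
 * Each ATOMIC_OP() instance expands to a void helper that maps directly to
 * an ARMv8.1 LSE store-form atomic (STCLR, STSET, STEOR, STADD); since no
 * return value is needed, the ST* encodings are sufficient. __LSE_PREAMBLE
 * makes the LSE instructions available to the assembler.
 */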
#define ATOMIC_OP(op, asm_op)						\
static inline void __lse_atomic_##op(int i, atomic_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%w[i], %[v]\n"				\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

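/*
 * The fetch variants use the load-form LSE atomics (LDCLR, LDSET, LDEOR,
 * LDADD), which write the previous value of the memory location back into
 * the source register, so the old value can simply be returned in 'i'.
 */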
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

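/*
 * Generate the _relaxed/_acquire/_release/fully-ordered variants by picking
 * the instruction's ordering suffix: none, 'a' (acquire), 'l' (release) or
 * 'al' (both). The ordered forms also clobber "memory" so the compiler does
 * not move memory accesses across them.
 */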
#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

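/*
 * add_return: LDADD leaves the old value in 'tmp', so the new value is
 * reconstructed with an explicit ADD of the increment and the old value.
 */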
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

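/*
 * LSE has no atomic AND, only an atomic bit-clear. AND with a mask is
 * therefore implemented by inverting the mask with MVN and clearing those
 * bits with STCLR/LDCLR.
 */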
static inline void __lse_atomic_and(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

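/*
 * Likewise there is no atomic subtract: negate the operand and reuse
 * STADD/LDADD. sub_return additionally adds the old value back in to
 * compute the result, as for add_return above.
 */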
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB

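/*
 * The atomic64_t helpers below mirror the 32-bit versions above, operating
 * on the full 64-bit X registers (no %w operand modifier) and taking and
 * returning s64 values.
 */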
#define ATOMIC64_OP(op, asm_op)						\
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%[i], %[v]\n"				\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%[i], %[i], %[v]"			\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

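/*
 * atomic64_dec_if_positive is a CAS loop rather than a single LSE op: load
 * the counter, compute value - 1 and bail out (b.lt) if the result would be
 * negative, otherwise CASAL the decremented value in. The two SUBs and CBNZ
 * check whether the value observed by CASAL matches the one loaded earlier,
 * retrying if another CPU changed the counter in between. The pointer 'v' is
 * reused as the [ret] register to save a temporary; the result is recovered
 * by casting it back to an integer.
 */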
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

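/*
 * cmpxchg maps onto a single CAS instruction: 'old' is copied into a scratch
 * register, CAS compares it against memory and, if they match, stores 'new';
 * either way the scratch register ends up holding the value that was in
 * memory, which is returned to the caller. The 'w'/'x' and size-suffix
 * parameters select the 8/16/32/64-bit encodings.
 */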
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

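/*
 * cmpxchg_double uses CASP, which atomically compares and swaps a pair of
 * adjacent 64-bit words. The old and new operand pairs must live in
 * consecutive even/odd register pairs, hence the explicit register
 * variables. The EOR/ORR sequence afterwards folds the comparison into a
 * single result: zero if both observed words matched the expected values
 * (i.e. the swap happened), non-zero otherwise.
 */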
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(__uint128_t *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */