1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI_ASM_IA64_CMPXCHG_H
3#define _UAPI_ASM_IA64_CMPXCHG_H
4
5/*
6 * Compare/Exchange, forked from asm/intrinsics.h
7 * which was:
8 *
9 *	Copyright (C) 2002-2003 Hewlett-Packard Co
10 *	David Mosberger-Tang <davidm@hpl.hp.com>
11 */
12
13#ifndef __ASSEMBLY__
14
15#include <linux/types.h>
16/* include compiler specific intrinsics */
17#include <asm/ia64regs.h>
18#include <asm/gcc_intrin.h>
19
20/*
21 * This function doesn't exist, so you'll get a linker error if
22 * something tries to do an invalid xchg().
23 */
24extern void ia64_xchg_called_with_bad_pointer(void);
25
26#define __arch_xchg(x, ptr, size)					\
27({									\
28	unsigned long __xchg_result;					\
29									\
30	switch (size) {							\
31	case 1:								\
32		__xchg_result = ia64_xchg1((__u8 __force *)ptr, x);	\
33		break;							\
34									\
35	case 2:								\
36		__xchg_result = ia64_xchg2((__u16 __force *)ptr, x);	\
37		break;							\
38									\
39	case 4:								\
40		__xchg_result = ia64_xchg4((__u32 __force *)ptr, x);	\
41		break;							\
42									\
43	case 8:								\
44		__xchg_result = ia64_xchg8((__u64 __force *)ptr, x);	\
45		break;							\
46	default:							\
47		ia64_xchg_called_with_bad_pointer();			\
48	}								\
49	(__typeof__ (*(ptr)) __force) __xchg_result;			\
50})
51
52#ifndef __KERNEL__
53#define xchg(ptr, x)							\
54({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
55#endif
56
57/*
58 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
59 * store NEW in MEM.  Return the initial value in MEM.  Success is
60 * indicated by comparing RETURN with OLD.
61 */
62
63/*
64 * This function doesn't exist, so you'll get a linker error
65 * if something tries to do an invalid cmpxchg().
66 */
67extern long ia64_cmpxchg_called_with_bad_pointer(void);
68
69#define ia64_cmpxchg(sem, ptr, old, new, size)				\
70({									\
71	__u64 _o_, _r_;							\
72									\
73	switch (size) {							\
74	case 1:								\
75		_o_ = (__u8) (long __force) (old);			\
76		break;							\
77	case 2:								\
78		_o_ = (__u16) (long __force) (old);			\
79		break;							\
80	case 4:								\
81		_o_ = (__u32) (long __force) (old);			\
82		break;							\
83	case 8:								\
84		_o_ = (__u64) (long __force) (old);			\
85		break;							\
86	default:							\
87		break;							\
88	}								\
89	switch (size) {							\
90	case 1:								\
91		_r_ = ia64_cmpxchg1_##sem((__u8 __force *) ptr, new, _o_);	\
92		break;							\
93									\
94	case 2:								\
95		_r_ = ia64_cmpxchg2_##sem((__u16 __force *) ptr, new, _o_);	\
96		break;							\
97									\
98	case 4:								\
99		_r_ = ia64_cmpxchg4_##sem((__u32 __force *) ptr, new, _o_);	\
100		break;							\
101									\
102	case 8:								\
103		_r_ = ia64_cmpxchg8_##sem((__u64 __force *) ptr, new, _o_);	\
104		break;							\
105									\
106	default:							\
107		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
108		break;							\
109	}								\
110	(__typeof__(old) __force) _r_;					\
111})
112
113#define cmpxchg_acq(ptr, o, n)	\
114	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
115#define cmpxchg_rel(ptr, o, n)	\
116	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
117
118/*
119 * Worse still - early processor implementations actually just ignored
120 * the acquire/release and did a full fence all the time.  Unfortunately
121 * this meant a lot of badly written code that used .acq when they really
122 * wanted .rel became legacy out in the wild - so when we made a cpu
123 * that strictly did the .acq or .rel ... all that code started breaking - so
124 * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
125 */
126
127#ifndef __KERNEL__
128/* for compatibility with other platforms: */
129#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
130#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
131
132#define cmpxchg_local		cmpxchg
133#define cmpxchg64_local		cmpxchg64
134#endif
135
136#endif /* !__ASSEMBLY__ */
137
138#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
139