/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <linux/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

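/*
 * Usage sketch (illustrative, not part of this header): a local_t is
 * normally used as a per-CPU counter, with each CPU updating only its
 * own copy and readers summing all copies. The names pkt_count and
 * pkt_total below are hypothetical.
 *
 *	static DEFINE_PER_CPU(local_t, pkt_count) = LOCAL_INIT(0);
 *
 *	static long pkt_total(void)
 *	{
 *		long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += local_read(per_cpu_ptr(&pkt_count, cpu));
 *		return sum;
 *	}
 */
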
static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

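/*
 * Usage sketch (illustrative): these read-modify-write instructions
 * lack a LOCK prefix, so the updater must stay on one CPU for the
 * duration of the update, e.g. via get_cpu_ptr()/put_cpu_ptr() or from
 * interrupt context. The names byte_count and account_bytes are
 * hypothetical.
 *
 *	static DEFINE_PER_CPU(local_t, byte_count) = LOCAL_INIT(0);
 *
 *	static void account_bytes(long nbytes)
 *	{
 *		local_add(nbytes, get_cpu_ptr(&byte_count));
 *		put_cpu_ptr(&byte_count);
 *	}
 */
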
/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_sub_and_test(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool local_dec_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
}

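/*
 * Usage sketch (illustrative, hypothetical names): let the CPU that
 * retires the last pending item of a per-CPU batch run the completion
 * step.
 *
 *	if (local_dec_and_test(this_cpu_ptr(&pending)))
 *		finish_batch();
 */
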
/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_inc_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static inline bool local_add_negative(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i);
}

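/*
 * Usage sketch (illustrative, hypothetical names): charge a cost
 * against a per-CPU budget and react once it drops below zero.
 *
 *	if (local_add_negative(-cost, this_cpu_ptr(&budget)))
 *		throttle();
 */
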
/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	return i + __i;
}

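/*
 * Note on the implementation above: XADD exchanges the register operand
 * with the memory operand and then stores their sum, so the asm leaves
 * the counter's old value in @i; adding back the saved addend (__i)
 * yields the new value. A minimal use (hypothetical name):
 *
 *	long seq = local_add_return(1, this_cpu_ptr(&event_seq));
 */
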
static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)  (local_add_return(1, l))
#define local_dec_return(l)  (local_sub_return(1, l))

static inline long local_cmpxchg(local_t *l, long old, long new)
{
	return cmpxchg_local(&l->a.counter, old, new);
}

static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
{
	return try_cmpxchg_local(&l->a.counter,
				 (typeof(l->a.counter) *) old, new);
}

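/*
 * Usage sketch (illustrative): on failure local_try_cmpxchg() updates
 * *old with the value it found, so retry loops need no extra reload.
 * compute_next() is a hypothetical transformation.
 *
 *	long old = local_read(l), new;
 *
 *	do {
 *		new = compute_next(old);
 *	} while (!local_try_cmpxchg(l, &old, new));
 */
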
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read((l));					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

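/*
 * Usage sketch (illustrative, hypothetical names): take a reference
 * only if the counter has not already dropped to zero, e.g. when
 * racing against teardown.
 *
 *	if (!local_inc_not_zero(this_cpu_ptr(&users)))
 *		return -ENODEV;
 */
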
/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

#endif /* _ASM_X86_LOCAL_H */