Searched refs:LOCK_PREFIX (Results 1 - 25 of 28) sorted by relevance

/kernel/linux/linux-5.10/arch/x86/include/asm/
atomic.h
53 asm volatile(LOCK_PREFIX "addl %1,%0" in arch_atomic_add()
67 asm volatile(LOCK_PREFIX "subl %1,%0" in arch_atomic_sub()
83 return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i); in arch_atomic_sub_and_test()
95 asm volatile(LOCK_PREFIX "incl %0" in arch_atomic_inc()
108 asm volatile(LOCK_PREFIX "decl %0" in arch_atomic_dec()
123 return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e); in arch_atomic_dec_and_test()
137 return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e); in arch_atomic_inc_and_test()
152 return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i); in arch_atomic_add_negative()
214 asm volatile(LOCK_PREFIX "andl %1,%0" in arch_atomic_and()
232 asm volatile(LOCK_PREFIX "or in arch_atomic_or()
[all...]
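Note: several of the atomic.h hits above funnel through GEN_UNARY_RMWcc/GEN_BINARY_RMWcc instead of an open-coded asm statement. As a rough, userspace-only sketch of what such a lock-prefixed read-modify-write with a condition-code result boils down to (my_dec_and_test and the GCC flag-output operand are illustration choices, not the kernel's actual macro expansion):

    /* Illustrative only: a lock-prefixed decrement that reports whether the
     * result reached zero, via a GCC flag-output operand ("=@ccz" = ZF set). */
    static inline int my_dec_and_test(volatile int *counter)
    {
            unsigned char is_zero;

            asm volatile("lock; decl %0"
                         : "+m" (*counter), "=@ccz" (is_zero)
                         : : "memory");
            return is_zero;
    }

The kernel's GEN_*_RMWcc macros wrap the same idea behind one definition so the many call sites in this listing stay uniform.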
atomic64_64.h
46 asm volatile(LOCK_PREFIX "addq %1,%0" in arch_atomic64_add()
60 asm volatile(LOCK_PREFIX "subq %1,%0" in arch_atomic64_sub()
76 return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i); in arch_atomic64_sub_and_test()
88 asm volatile(LOCK_PREFIX "incq %0" in arch_atomic64_inc()
102 asm volatile(LOCK_PREFIX "decq %0" in arch_atomic64_dec()
118 return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e); in arch_atomic64_dec_and_test()
132 return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e); in arch_atomic64_inc_and_test()
147 return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i); in arch_atomic64_add_negative()
202 asm volatile(LOCK_PREFIX "andq %1,%0" in arch_atomic64_and()
220 asm volatile(LOCK_PREFIX "or in arch_atomic64_or()
[all...]
bitops.h
55 asm volatile(LOCK_PREFIX "orb %b1,%0" in arch_set_bit()
60 asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" in arch_set_bit()
75 asm volatile(LOCK_PREFIX "andb %b1,%0" in arch_clear_bit()
79 asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" in arch_clear_bit()
101 asm volatile(LOCK_PREFIX "andb %2,%1" in arch_clear_bit_unlock_is_negative_byte()
126 asm volatile(LOCK_PREFIX "xorb %b1,%0" in arch_change_bit()
130 asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" in arch_change_bit()
138 return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr); in arch_test_and_set_bit()
162 return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr); in arch_test_and_clear_bit()
201 return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZ in arch_test_and_change_bit()
[all...]
futex.h
39 "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
67 unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, in arch_futex_atomic_op_inuser()
98 "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" in futex_atomic_cmpxchg_inatomic()
cmpxchg.h
7 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
134 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
222 __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
234 #define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
256 __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
cmpxchg_32.h
31 LOCK_PREFIX "cmpxchg8b %0\n\t" in set_64bit()
50 asm volatile(LOCK_PREFIX "cmpxchg8b %1" in __cmpxchg64()
alternative.h
19 * The LOCK_PREFIX macro defined here replaces the LOCK and
20 * LOCK_PREFIX macros used everywhere in the source tree.
45 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " macro
49 #define LOCK_PREFIX "" macro
285 .macro LOCK_PREFIX
293 .macro LOCK_PREFIX
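Note: the alternative.h hits above are the definition site: on SMP builds LOCK_PREFIX expands to a real "lock; " prefix (plus bookkeeping so the prefix can be patched out at runtime on uniprocessor machines), and on non-SMP builds it expands to nothing. A minimal userspace sketch of how the atomic.h call sites combine that macro with an asm template (the MY_* names are invented for the example, and the .smp_locks bookkeeping is omitted):

    #ifdef MY_CONFIG_SMP                    /* stand-in for CONFIG_SMP */
    #define MY_LOCK_PREFIX "\n\tlock; "     /* locked RMW on SMP */
    #else
    #define MY_LOCK_PREFIX ""               /* plain RMW is enough on UP */
    #endif

    static inline void my_atomic_add(int i, volatile int *counter)
    {
            asm volatile(MY_LOCK_PREFIX "addl %1,%0"
                         : "+m" (*counter)
                         : "ir" (i));
    }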
qspinlock_paravirt.h
46 LOCK_PREFIX "cmpxchg %dl,(%rdi);"
qspinlock.h
23 val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c, in queued_fetch_set_pending_acquire()
/kernel/linux/linux-6.6/arch/x86/include/asm/
atomic.h
33 asm volatile(LOCK_PREFIX "addl %1,%0" in arch_atomic_add()
40 asm volatile(LOCK_PREFIX "subl %1,%0" in arch_atomic_sub()
47 return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i); in arch_atomic_sub_and_test()
53 asm volatile(LOCK_PREFIX "incl %0" in arch_atomic_inc()
60 asm volatile(LOCK_PREFIX "decl %0" in arch_atomic_dec()
67 return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e); in arch_atomic_dec_and_test()
73 return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e); in arch_atomic_inc_and_test()
79 return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i); in arch_atomic_add_negative()
127 asm volatile(LOCK_PREFIX "andl %1,%0" in arch_atomic_and()
145 asm volatile(LOCK_PREFIX "or in arch_atomic_or()
[all...]
atomic64_64.h
25 asm volatile(LOCK_PREFIX "addq %1,%0" in arch_atomic64_add()
32 asm volatile(LOCK_PREFIX "subq %1,%0" in arch_atomic64_sub()
39 return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i); in arch_atomic64_sub_and_test()
45 asm volatile(LOCK_PREFIX "incq %0" in arch_atomic64_inc()
53 asm volatile(LOCK_PREFIX "decq %0" in arch_atomic64_dec()
61 return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e); in arch_atomic64_dec_and_test()
67 return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e); in arch_atomic64_inc_and_test()
73 return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i); in arch_atomic64_add_negative()
121 asm volatile(LOCK_PREFIX "andq %1,%0" in arch_atomic64_and()
139 asm volatile(LOCK_PREFIX "or in arch_atomic64_or()
[all...]
bitops.h
55 asm volatile(LOCK_PREFIX "orb %b1,%0" in arch_set_bit()
60 asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" in arch_set_bit()
75 asm volatile(LOCK_PREFIX "andb %b1,%0" in arch_clear_bit()
79 asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" in arch_clear_bit()
101 asm volatile(LOCK_PREFIX "andb %2,%1" in arch_clear_bit_unlock_is_negative_byte()
126 asm volatile(LOCK_PREFIX "xorb %b1,%0" in arch_change_bit()
130 asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" in arch_change_bit()
138 return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr); in arch_test_and_set_bit()
162 return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr); in arch_test_and_clear_bit()
201 return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZ in arch_test_and_change_bit()
[all...]
futex.h
35 "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
59 unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, in arch_futex_atomic_op_inuser()
90 "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" in futex_atomic_cmpxchg_inatomic()
cmpxchg_64.h
46 return __arch_cmpxchg128(ptr, old, new, LOCK_PREFIX); in arch_cmpxchg128()
78 return __arch_try_cmpxchg128(ptr, oldp, new, LOCK_PREFIX); in arch_try_cmpxchg128()
cmpxchg_32.h
25 asm volatile(LOCK_PREFIX "cmpxchg8b %1" in __cmpxchg64()
52 asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]" in __try_cmpxchg64()
alternative.h
21 * The LOCK_PREFIX macro defined here replaces the LOCK and
22 * LOCK_PREFIX macros used everywhere in the source tree.
47 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " macro
51 #define LOCK_PREFIX "" macro
336 .macro LOCK_PREFIX
344 .macro LOCK_PREFIX
cmpxchg.h
7 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
134 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
222 __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
240 #define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
qspinlock_paravirt.h
45 LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" \
uaccess.h
379 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
398 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
419 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
450 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
qspinlock.h
23 val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c, in queued_fetch_set_pending_acquire()
/kernel/linux/linux-6.6/tools/arch/x86/include/asm/
atomic.h
9 #define LOCK_PREFIX "\n\tlock; " macro
52 asm volatile(LOCK_PREFIX "incl %0" in atomic_inc()
66 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); in atomic_dec_and_test()
76 GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, "Ir", nr, "%0", "c"); in test_and_set_bit()
81 GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, "Ir", nr, "%0", "c"); in test_and_clear_bit()
/kernel/linux/linux-5.10/arch/x86/lib/
atomic64_cx8_32.S
14 /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
15 LOCK_PREFIX
26 /* we don't need LOCK_PREFIX since aligned 64-bit writes
36 LOCK_PREFIX
60 LOCK_PREFIX
88 LOCK_PREFIX
113 LOCK_PREFIX
143 LOCK_PREFIX
172 LOCK_PREFIX
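Note: the two comments at the top of atomic64_cx8_32.S, quoted above, explain the trick this file uses on 32-bit x86: a plain 64-bit load can tear, so the read side uses a lock cmpxchg8b whose "new" value equals the expected one, which leaves memory unchanged while returning the current contents atomically, whereas an aligned 64-bit store does not need the prefix. A rough userspace analogue of the read side (read64_sketch is an invented name; the kernel does this in hand-written assembly, not with compiler builtins):

    /* Compare-and-swap with old == new: never changes *p, but returns its
     * current 64-bit value in a single atomic access.  On 32-bit x86 (with
     * cmpxchg8b available, i.e. -march=i586 or newer) this compiles to a
     * lock cmpxchg8b, mirroring the read64 idiom quoted above. */
    static long long read64_sketch(volatile long long *p)
    {
            return __sync_val_compare_and_swap(p, 0LL, 0LL);
    }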
/kernel/linux/linux-6.6/arch/x86/lib/
atomic64_cx8_32.S
14 /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
15 LOCK_PREFIX
26 /* we don't need LOCK_PREFIX since aligned 64-bit writes
36 LOCK_PREFIX
60 LOCK_PREFIX
88 LOCK_PREFIX
113 LOCK_PREFIX
143 LOCK_PREFIX
172 LOCK_PREFIX
/kernel/linux/linux-5.10/tools/arch/x86/include/asm/
atomic.h
9 #define LOCK_PREFIX "\n\tlock; " macro
51 asm volatile(LOCK_PREFIX "incl %0" in atomic_inc()
65 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); in atomic_dec_and_test()
cmpxchg.h
84 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
