/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

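/*
 * The fence instructions are not available on every 32-bit CPU, so
 * ALTERNATIVE() patches them in at boot when X86_FEATURE_XMM2 (SSE2)
 * is set and otherwise falls back to a LOCK-prefixed read-modify-write
 * of the stack, which is architecturally a full memory barrier.
 * 64-bit CPUs always have the fences, hence the unconditional #else
 * branch below.
 */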
#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 * 	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
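
/*
 * A sketch of how the mask is typically consumed (the real helper is
 * array_index_nospec() in <linux/nospec.h>; "idx", "size" and "array"
 * below are illustrative names):
 *
 *	if (idx < size) {
 *		idx = array_index_nospec(idx, size);
 *		val = array[idx];
 *	}
 *
 * array_index_nospec() ANDs the index with the mask generated above,
 * so if the CPU speculates past a mispredicted bounds check the index
 * collapses to 0 instead of an attacker-controlled out-of-bounds value.
 */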

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
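
/*
 * A minimal sketch of where a speculation barrier goes: between a check
 * on an untrusted value and the dependent access, for cases where index
 * masking is impractical ("idx", "valid" and "table" are illustrative):
 *
 *	if (!valid(idx))
 *		return -EINVAL;
 *	barrier_nospec();
 *	val = table[idx];
 *
 * The LFENCE keeps later loads from executing speculatively before the
 * check has actually completed.
 */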

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()
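
/*
 * dma_rmb()/dma_wmb() only need to stop the compiler: x86 does not
 * reorder loads with loads or stores with stores to normal cacheable
 * memory, which is what coherent DMA buffers use.  A sketch of the
 * usual descriptor hand-off (field and flag names are illustrative):
 *
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	dma_wmb();
 *	desc->flags = DESC_OWNED_BY_DEVICE;
 *
 * The dma_wmb() ensures the device cannot observe the ownership flag
 * before the other descriptor fields.
 */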

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
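
/*
 * __smp_mb() uses a LOCK-prefixed ADD to the stack rather than MFENCE
 * because a locked RMW is a full barrier for normal cacheable memory
 * and is cheaper than MFENCE on most CPUs; mb() keeps MFENCE because it
 * must also order non-temporal and write-combining accesses, which a
 * plain locked operation does not reliably order.  __smp_rmb() and
 * __smp_wmb() are compiler-only barriers since x86 never reorders loads
 * with loads or stores with stores, and __smp_store_mb() relies on XCHG
 * with a memory operand being implicitly LOCKed.
 */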

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
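
/*
 * On x86's TSO memory model a plain store already has release semantics
 * and a plain load already has acquire semantics, so only a compiler
 * barrier is needed.  A sketch of the usual pairing through the generic
 * smp_store_release()/smp_load_acquire() wrappers ("data" and "ready"
 * are illustrative):
 *
 *	writer:				reader:
 *	data = 42;			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		do_something(data);
 */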

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)
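
/*
 * Every LOCK-prefixed read-modify-write is already a full barrier on
 * x86, so these helpers expand to nothing; e.g. the sequence
 *
 *	atomic_inc(&v);
 *	smp_mb__after_atomic();
 *
 * emits only the locked increment.
 */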

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
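
/*
 * A sketch of a caller, modelled loosely on the local APIC deadline
 * timer code ("delta" is illustrative):
 *
 *	weak_wrmsr_fence();
 *	wrmsrl(MSR_IA32_TSC_DEADLINE, rdtsc() + delta);
 *
 * The fence makes stores issued before arming the timer globally
 * visible before the non-serializing WRMSR takes effect.
 */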

#endif /* _ASM_X86_BARRIER_H */