/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

1462306a36Sopenharmony_ci#ifdef CONFIG_X86_32
1562306a36Sopenharmony_ci#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
1662306a36Sopenharmony_ci				      X86_FEATURE_XMM2) ::: "memory", "cc")
1762306a36Sopenharmony_ci#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
1862306a36Sopenharmony_ci				       X86_FEATURE_XMM2) ::: "memory", "cc")
1962306a36Sopenharmony_ci#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
2062306a36Sopenharmony_ci				       X86_FEATURE_XMM2) ::: "memory", "cc")
2162306a36Sopenharmony_ci#else
2262306a36Sopenharmony_ci#define __mb()	asm volatile("mfence":::"memory")
2362306a36Sopenharmony_ci#define __rmb()	asm volatile("lfence":::"memory")
2462306a36Sopenharmony_ci#define __wmb()	asm volatile("sfence" ::: "memory")
2562306a36Sopenharmony_ci#endif
2662306a36Sopenharmony_ci
2762306a36Sopenharmony_ci/**
2862306a36Sopenharmony_ci * array_index_mask_nospec() - generate a mask that is ~0UL when the
2962306a36Sopenharmony_ci * 	bounds check succeeds and 0 otherwise
3062306a36Sopenharmony_ci * @index: array element index
3162306a36Sopenharmony_ci * @size: number of elements in array
3262306a36Sopenharmony_ci *
3362306a36Sopenharmony_ci * Returns:
3462306a36Sopenharmony_ci *     0 - (index < size)
3562306a36Sopenharmony_ci */
3662306a36Sopenharmony_cistatic inline unsigned long array_index_mask_nospec(unsigned long index,
3762306a36Sopenharmony_ci		unsigned long size)
3862306a36Sopenharmony_ci{
3962306a36Sopenharmony_ci	unsigned long mask;
4062306a36Sopenharmony_ci
4162306a36Sopenharmony_ci	asm volatile ("cmp %1,%2; sbb %0,%0;"
4262306a36Sopenharmony_ci			:"=r" (mask)
4362306a36Sopenharmony_ci			:"g"(size),"r" (index)
4462306a36Sopenharmony_ci			:"cc");
4562306a36Sopenharmony_ci	return mask;
4662306a36Sopenharmony_ci}
4762306a36Sopenharmony_ci
4862306a36Sopenharmony_ci/* Override the default implementation from linux/nospec.h. */
4962306a36Sopenharmony_ci#define array_index_mask_nospec array_index_mask_nospec
5062306a36Sopenharmony_ci
/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)

/*
 * DMA barriers need only a compiler barrier here; no fence instruction
 * is emitted (relies on x86's ordering of coherent memory accesses).
 */
#define __dma_rmb()	barrier()
#define __dma_wmb()	barrier()

/*
 * Full SMP barrier: a locked RMW on a dummy stack slot below the stack
 * pointer.  NOTE(review): presumably preferred over mfence for speed —
 * the locked op is fully serializing either way.
 */
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")

#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
/* Store + full barrier in one: xchg carries an implicit lock prefix. */
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

/*
 * Release/acquire: only compiler barriers around a plain ONCE access
 * are used here — the hardware ordering this relies on is x86's strong
 * (TSO-like) memory model.  compiletime_assert_atomic_type() rejects
 * types whose access would not be a single naturally-aligned load/store.
 */
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */