/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *	0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	/* CMP sets CF when index < size; SBB then yields ~0UL or 0. */
	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
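
/*
 * Illustrative usage sketch (not part of this header): callers normally
 * go through array_index_nospec() from <linux/nospec.h>, which applies
 * the mask above so that a mispredicted bounds check speculates with
 * index 0 instead of an attacker-controlled value.  A hypothetical
 * lookup might read:
 *
 *	if (idx >= nr_entries)
 *		return -EINVAL;
 *	idx = array_index_nospec(idx, nr_entries);
 *	val = table[idx];
 *
 * "idx", "nr_entries", "table" and "val" are illustrative names only.
 */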

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()

/*
 * A locked operation on the stack is a full memory barrier and is
 * cheaper than MFENCE on most CPUs.
 */
#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are fully serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}

#endif /* _ASM_X86_BARRIER_H */
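
/*
 * Illustrative usage sketch (not part of this header): weak_wrmsr_fence()
 * is expected to precede writes to the few non-serializing MSRs named
 * above.  A hypothetical x2APIC ICR write might look like:
 *
 *	weak_wrmsr_fence();
 *	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), icr_val);
 *
 * "icr_val" is an illustrative name; see the x2APIC code under
 * arch/x86/kernel/apic/ for the real callers.
 */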