#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */
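
/*
 * Note on the 32-bit definitions below: CPUs without SSE2 have no
 * {m,l,s}fence instructions, so a locked "addl $0" to the stack serves as
 * the barrier; ALTERNATIVE() patches in the real fence instruction at boot
 * on CPUs that report X86_FEATURE_XMM2.
 */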

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
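
/*
 * Illustrative sketch (not part of this header): a hypothetical driver that
 * rings a doorbell with the relaxed MMIO accessors must order its prior
 * normal-memory stores itself:
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();
 *	writel_relaxed(1, dev->doorbell);
 *
 * (The non-relaxed writel() already provides that ordering.)
 */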

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
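
/*
 * dma_rmb()/dma_wmb() only order accesses to coherent DMA memory.  On x86,
 * stores to cacheable memory are not reordered with other stores and loads
 * are not reordered with other loads, so a compiler barrier suffices; only
 * CONFIG_X86_PPRO_FENCE needs a real rmb().  Illustrative sketch (not part
 * of this header), assuming a hypothetical descriptor ring shared with a
 * device:
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();
 *		read_data = desc->data;
 *		desc->data = write_data;
 *		dma_wmb();
 *		desc->status = DEVICE_OWN;
 *	}
 */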

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
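
/*
 * __smp_store_mb() builds on xchg(): an xchg with a memory operand carries
 * an implicit LOCK prefix on x86, so the store and the full barrier come
 * from a single instruction.
 */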

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
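
/*
 * Illustrative sketch (not part of this header): generic code reaches the
 * macros above through smp_store_release()/smp_load_acquire(), which
 * <asm-generic/barrier.h> defines on top of the __smp_* variants.  A
 * hypothetical producer/consumer pairing:
 *
 *	producer:
 *		data = compute();
 *		smp_store_release(&ready, 1);
 *
 *	consumer:
 *		if (smp_load_acquire(&ready))
 *			consume(data);
 *
 * On regular (TSO) x86 both sides compile to plain loads/stores plus a
 * compiler barrier; the CPU preserves the required ordering by itself.
 */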

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
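
/*
 * "Serializing" here means that LOCK-prefixed read-modify-write
 * instructions already act as full memory barriers, so only the compiler
 * needs to be constrained around them.
 */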

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */