#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
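
/*
 * Illustrative sketch, not part of this header: one way a driver might pair
 * the dma barriers around a descriptor shared with a device (the public
 * dma_wmb()/dma_rmb() wrappers come from asm-generic/barrier.h; the struct
 * and flag names below are made up for the example).
 *
 *	// producer: fill the descriptor, then hand it to the device
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	dma_wmb();			// payload visible before the own bit
 *	desc->flags = DESC_HW_OWNED;
 *
 *	// consumer: check ownership, then read the payload back
 *	if (!(desc->flags & DESC_HW_OWNED)) {
 *		dma_rmb();		// flags read ordered before payload reads
 *		process(desc->addr, desc->len);
 *	}
 */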
#define __smp_mb() mb()
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
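
/*
 * Illustrative sketch, not part of this header: smp_store_mb() (the public
 * wrapper from asm-generic/barrier.h) is a store followed by a full memory
 * barrier, implemented here with xchg.  A classic use is the Dekker-style
 * store-buffering pattern; the flag names are made up for the example.
 *
 *	// CPU 0				// CPU 1
 *	smp_store_mb(flag0, 1);			smp_store_mb(flag1, 1);
 *	r0 = READ_ONCE(flag1);			r1 = READ_ONCE(flag0);
 *
 * The full barrier after each store guarantees that r0 and r1 cannot both
 * be zero: at least one CPU observes the other's store.
 */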
#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
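
/*
 * Illustrative sketch, not part of this header: message passing with the
 * public smp_store_release()/smp_load_acquire() wrappers from
 * asm-generic/barrier.h.  The structure and field names are made up.
 *
 *	// producer
 *	msg->data = compute_data();
 *	smp_store_release(&msg->ready, 1);	// publish only after the data
 *
 *	// consumer
 *	if (smp_load_acquire(&msg->ready))	// pairs with the release above
 *		consume(msg->data);		// guaranteed to see msg->data
 *
 * On regular TSO x86 both macros compile down to a plain MOV plus a
 * compiler barrier(); only with CONFIG_X86_PPRO_FENCE do they fall back to
 * a full __smp_mb().
 */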
/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
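
/*
 * Illustrative sketch, not part of this header: x86's lock-prefixed
 * read-modify-write atomics are already fully ordered, so the public
 * smp_mb__before_atomic()/smp_mb__after_atomic() wrappers only need to be
 * compiler barriers here.  A typical pairing (field names made up):
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	// order the store before the RMW
 *	atomic_dec(&obj->refcount);
 *
 * On architectures with weaker atomics the same calls expand to real
 * memory barriers.
 */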
#include <asm-generic/barrier.h>
#endif /* _ASM_X86_BARRIER_H */