#ifndef __BARRIER_H
#define __BARRIER_H

#include <asm/compiler.h>
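
/*
 * Alpha provides two memory-barrier instructions: the full barrier "mb"
 * and the write barrier "wmb". There is no read-only variant, so rmb()
 * falls back to a full "mb".
 */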
#define mb()	__asm__ __volatile__("mb": : :"memory")
#define rmb()	__asm__ __volatile__("mb": : :"memory")
#define wmb()	__asm__ __volatile__("wmb": : :"memory")
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 */
#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
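
/*
 * Illustrative sketch of the pointer-publication pattern described
 * above; init_data(), use_data(), obj and global_ptr are hypothetical
 * names used only for this example:
 *
 *	Writer:				Reader:
 *
 *	init_data(&obj);		q = global_ptr;
 *	wmb();				read_barrier_depends();
 *	global_ptr = &obj;		use_data(q);
 *
 * wmb() orders the initialisation of "obj" before the pointer is
 * published; read_barrier_depends() ensures the dereference through
 * "q" observes the initialised data, even on Alpha.
 */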

#ifdef CONFIG_SMP
#define __ASM_SMP_MB	"\tmb\n"
#else
#define __ASM_SMP_MB
#endif
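
/*
 * __ASM_SMP_MB can be spliced into inline-assembly sequences that need
 * a memory barrier only on SMP builds; on uniprocessor kernels it
 * expands to nothing.
 */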

#include <asm-generic/barrier.h>

#endif /* __BARRIER_H */