/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence".
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
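/*
 * Illustrative sketch, not part of the original header: one way the
 * wmb()/rmb() pairing described above is commonly used.  "msg",
 * "msg_ready", prepare_message() and consume() are hypothetical;
 * cpu_relax() is the usual busy-wait helper.
 *
 *	producer:			consumer:
 *	msg = prepare_message();	while (!msg_ready)
 *	wmb();					cpu_relax();
 *	msg_ready = 1;			rmb();
 *					consume(msg);
 *
 * The wmb() keeps the store to "msg" visible before the store to
 * "msg_ready"; the rmb() keeps the load of "msg_ready" ordered before
 * the later loads from "msg".
 */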
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define dma_rmb()	mb()
#define dma_wmb()	mb()
# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq: no need for asm trickery!
 */
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
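/*
 * Illustrative sketch, not part of the original header: how the
 * release/acquire pair above is typically used once
 * <asm-generic/barrier.h> (included below) maps it onto
 * smp_store_release()/smp_load_acquire().  "ptr", "obj" and "val" are
 * hypothetical.
 *
 *	CPU 0					CPU 1
 *	obj->val = 42;				p = smp_load_acquire(&ptr);
 *	smp_store_release(&ptr, obj);		if (p)
 *							r = p->val;
 *
 * If CPU 1 observes the newly published "ptr", it is also guaranteed
 * to observe the earlier store to obj->val.  Because ia64 GCC emits
 * st.rel for the volatile store in WRITE_ONCE() and ld.acq for the
 * volatile load in READ_ONCE(), a compiler barrier() is all the
 * definitions above need.
 */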
/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */