/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */
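/*
 * Illustrative sketch (hypothetical architecture, not part of this header):
 * to get the KCSAN-instrumented wrappers below, an arch defines only the
 * __ prefixed primitive in its asm/barrier.h, e.g.:
 *
 *	#define __mb()	asm volatile ("fence" ::: "memory")
 *
 * mb() then expands to do { kcsan_mb(); __mb(); } while (0), so every
 * caller gets both the hardware barrier and the KCSAN ordering annotation.
 */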
#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_mb
#define dma_mb()	do { kcsan_mb(); __dma_mb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif
/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_mb
#define dma_mb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
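/*
 * Illustrative use of the dma_*() barriers (a sketch; desc, buf_dma and the
 * flag layout are hypothetical): order descriptor stores in coherent memory
 * against the ownership-flag store that tells the device to consume it:
 *
 *	desc->addr  = cpu_to_le64(buf_dma);
 *	desc->len   = cpu_to_le32(len);
 *	dma_wmb();			// descriptor visible before handoff
 *	desc->flags = cpu_to_le32(DESC_OWNED_BY_DEVICE);
 */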
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */
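/*
 * Illustrative sketch of why smp_mb() degrades to barrier() on !CONFIG_SMP:
 * it only orders accesses as seen by other CPUs. The classic store-buffering
 * pattern (X and Y are hypothetical shared variables, initially zero):
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(Y);		r1 = READ_ONCE(X);
 *
 * The pair of smp_mb()s forbids the outcome r0 == 0 && r1 == 0. With a
 * single CPU that outcome is impossible anyway, so a compiler barrier
 * suffices.
 */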
#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif
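/*
 * Illustrative sketch: the __smp_mb__before_atomic()/__smp_mb__after_atomic()
 * hooks let architectures whose RMW atomics are already fully ordered
 * override these with something cheaper. A typical (hypothetical) caller
 * turns a non-ordering atomic into a release-like operation:
 *
 *	WRITE_ONCE(obj->data, val);
 *	smp_mb__before_atomic();	// order the store before the RMW
 *	atomic_inc(&obj->refs);		// atomic_inc() implies no ordering
 */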
#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif
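/*
 * Illustrative producer/consumer sketch for the acquire/release pair
 * (data and ready are hypothetical shared variables, both initially zero):
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	WRITE_ONCE(data, 42);		while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					r = READ_ONCE(data);
 *
 * The release orders the data store before the flag store; the acquire
 * orders the flag load before the data load, so r == 42 is guaranteed once
 * the loop exits.
 */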
#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
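/*
 * Illustrative sketch of why the virt_*() variants exist (ring, idx and
 * shared are hypothetical): a guest publishing requests to a paravirtual
 * device races with host CPUs even when the guest kernel is !CONFIG_SMP:
 *
 *	ring[idx] = req;				// publish the request
 *	virt_store_release(&shared->idx, idx + 1);	// then expose the index
 *
 * Plain smp_store_release() would collapse to a compiler barrier in a
 * !CONFIG_SMP build and lose the ordering the host relies on; virt_*()
 * always uses the __smp_*() primitives.
 */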
/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
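/*
 * Illustrative sketch of the pattern this strengthens (flag and data are
 * hypothetical): the load of flag plus the branch form a control dependency,
 * which already orders the load against later stores;
 * smp_acquire__after_ctrl_dep() extends that to later loads as well:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// loads of data now ordered too
 *	val = data;
 */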
/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
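/*
 * Illustrative use (a sketch; flags and MY_READY_BIT are hypothetical):
 * spin until a bit appears, with no ordering implied. VAL names each
 * freshly loaded value inside the condition expression:
 *
 *	val = smp_cond_load_relaxed(&flags, VAL & MY_READY_BIT);
 */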
/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
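/*
 * Illustrative use (a sketch modeled on MCS-style lock handoff; node is
 * hypothetical): wait until the predecessor sets ->locked, with the final
 * load acting as an ACQUIRE so the critical section cannot move above it:
 *
 *	smp_cond_load_acquire(&node->locked, VAL);
 */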
/*
 * pmem_wmb() ensures that all stores whose modifications were written out
 * to persistent storage by preceding instructions have updated persistent
 * storage before any data access or data transfer caused by subsequent
 * instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif
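/*
 * Illustrative sketch (memcpy_flushcache() is a real kernel helper; the
 * commit record is hypothetical): push data toward the persistence domain,
 * then fence before writing the marker that declares it durable:
 *
 *	memcpy_flushcache(pmem_dst, buf, len);
 *	pmem_wmb();			// data durable before the marker
 *	memcpy_flushcache(pmem_rec, &rec, sizeof(rec));
 */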
/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
 * this kind of memory access, the CPU may wait for prior accesses to be
 * merged with subsequent ones. In some situations this waiting hurts
 * performance. io_stop_wc() can be used to prevent the merging of
 * write-combining memory accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif
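/*
 * Illustrative sketch (wc_base, REG0 and REG1 are hypothetical; the mapping
 * comes from ioremap_wc()): keep two batches of write-combining stores from
 * being merged into one burst when the device needs the first batch promptly:
 *
 *	writel_relaxed(v0, wc_base + REG0);
 *	io_stop_wc();			// stop WC merging at this point
 *	writel_relaxed(v1, wc_base + REG1);
 */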
/*
 * Architectures that guarantee an implicit smp_mb() in switch_mm()
 * can override smp_mb__after_switch_mm.
 */
#ifndef smp_mb__after_switch_mm
# define smp_mb__after_switch_mm()	smp_mb()
#endif
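/*
 * Illustrative sketch (hypothetical architecture): if switch_mm() already
 * implies a full barrier, e.g. via a serializing page-table-base write, the
 * arch can make this a no-op in its asm/barrier.h:
 *
 *	#define smp_mb__after_switch_mm()	barrier()
 */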
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */