#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <linux/bug.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif
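
/*
 * Illustrative example (editor's note, not part of the original header):
 * how BITOFF_CAL locates a sub-word value inside its containing aligned
 * 32-bit word.  For a u16 stored at byte offset 2 within a u32:
 *
 *	little-endian: BITOFF_CAL(2, 2) = 2 * 8           = 16
 *	big-endian:    BITOFF_CAL(2, 2) = (4 - 2 - 2) * 8 = 0
 *
 * i.e. the halfword occupies bits 16..31 of the loaded word on LE and
 * bits 0..15 on BE; this is the shift applied to val/old/new and to
 * prev_mask in the generators below.
 */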
#define XCHG_GEN(type, sfx, cl) \
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
{ \
	unsigned int prev, prev_mask, tmp, bitoff, off; \
	off = (unsigned long)p % sizeof(u32); \
	bitoff = BITOFF_CAL(sizeof(type), off); \
	p -= off; \
	val <<= bitoff; \
	prev_mask = (u32)(type)-1 << bitoff; \
	__asm__ __volatile__( \
"1:	lwarx	%0,0,%3\n" \
"	andc	%1,%0,%5\n" \
"	or	%1,%1,%4\n" \
	PPC405_ERR77(0,%3) \
"	stwcx.	%1,0,%3\n" \
"	bne-	1b\n" \
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
	: "r" (p), "r" (val), "r" (prev_mask) \
	: "cc", cl); \
	return prev >> bitoff; \
}
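
/*
 * Illustrative note (editor's sketch, not from the original source): each
 * XCHG_GEN(type, sfx, cl) instantiation further down emits a helper such as
 *
 *	static inline u32 __xchg_u8_local(volatile void *p, u32 val);
 *
 * which performs the 1- or 2-byte exchange as an lwarx/stwcx.
 * read-modify-write of the aligned 32-bit word containing *p, masking in
 * only the bytes that belong to the operand.
 */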

#define CMPXCHG_GEN(type, sfx, br, br2, cl) \
static inline \
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
{ \
	unsigned int prev, prev_mask, tmp, bitoff, off; \
	off = (unsigned long)p % sizeof(u32); \
	bitoff = BITOFF_CAL(sizeof(type), off); \
	p -= off; \
	old <<= bitoff; \
	new <<= bitoff; \
	prev_mask = (u32)(type)-1 << bitoff; \
	__asm__ __volatile__( \
	br \
"1:	lwarx	%0,0,%3\n" \
"	and	%1,%0,%6\n" \
"	cmpw	0,%1,%4\n" \
"	bne-	2f\n" \
"	andc	%1,%0,%6\n" \
"	or	%1,%1,%5\n" \
	PPC405_ERR77(0,%3) \
"	stwcx.	%1,0,%3\n" \
"	bne-	1b\n" \
	br2 \
	"\n" \
"2:" \
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask) \
	: "cc", cl); \
	return prev >> bitoff; \
}
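
/*
 * Illustrative note (editor's sketch, not from the original source): the
 * br/br2 arguments of CMPXCHG_GEN are the entry and exit barriers pasted
 * around the ll/sc loop, and cl is the extra clobber.  For example,
 *
 *	CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
 *
 * below emits __cmpxchg_u16_acquire(): an unordered loop followed by an
 * acquire barrier that is only reached when the store has succeeded,
 * since a failed compare branches straight to the "2:" label.
 */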

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_local(ptr, x);
	case 2:
		return __xchg_u16_local(ptr, x);
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_relaxed(ptr, x);
	case 2:
		return __xchg_u16_relaxed(ptr, x);
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}

#define xchg_local(ptr, x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_local((ptr), \
			(unsigned long)_x_, sizeof(*(ptr))); \
})

#define xchg_relaxed(ptr, x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
			(unsigned long)_x_, sizeof(*(ptr))); \
})
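
/*
 * Usage sketch (editor's illustration, not part of the original header;
 * the variable names are hypothetical).  Both macros swap in a new value
 * and return the previous one; neither emits a hardware memory barrier.
 * xchg_local() only adds a "memory" clobber, i.e. a compiler barrier,
 * while xchg_relaxed() clobbers nothing but condition codes:
 *
 *	static u16 state;
 *	u16 old_state;
 *
 *	old_state = xchg_relaxed(&state, 1);
 *	old_state = xchg_local(&state, 2);
 */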

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */

CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * The cmpxchg family provides no ordering guarantee if the compare step
 * fails, so we can avoid superfluous barriers by implementing cmpxchg()
 * and cmpxchg_acquire() in assembly.  We don't do the same for
 * cmpxchg_release(), because that would place a barrier in the middle of
 * the ll/sc loop, which is probably a bad idea: for example, it might make
 * the conditional store more likely to fail.
 */
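
/*
 * Editor's illustration (not from the original source): in the acquire
 * variants below, PPC_ACQUIRE_BARRIER sits after the "bne- 1b" retry
 * branch and before the "2:" failure label, so a failed compare skips the
 * barrier entirely; only a successful exchange pays for the acquire
 * ordering.  A typical caller pattern (hypothetical names):
 *
 *	if (cmpxchg_acquire(&lock->val, 0, 1) == 0)
 *		return;
 *
 * On success the acquire barrier orders the critical section after the
 * exchange; on failure no barrier is executed at all.
 */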

static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 2:
		return __cmpxchg_u16(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_local(ptr, old, new);
	case 2:
		return __cmpxchg_u16_local(ptr, old, new);
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_relaxed(ptr, old, new);
	case 2:
		return __cmpxchg_u16_relaxed(ptr, old, new);
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_acquire(ptr, old, new);
	case 2:
		return __cmpxchg_u16_acquire(ptr, old, new);
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr))); \
})

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr))); \
})

#define cmpxchg_relaxed(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
			(unsigned long)_o_, (unsigned long)_n_, \
			sizeof(*(ptr))); \
})

#define cmpxchg_acquire(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
			(unsigned long)_o_, (unsigned long)_n_, \
			sizeof(*(ptr))); \
})
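
/*
 * Usage sketch (editor's illustration; the counter variable is
 * hypothetical).  cmpxchg() stores the new value only when the current
 * value still matches the expected one and always returns what it found,
 * so a lock-free update is typically written as a retry loop:
 *
 *	static unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */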

#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg64_local(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_local((ptr), (o), (n)); \
})
#define cmpxchg64_relaxed(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_relaxed((ptr), (o), (n)); \
})
#define cmpxchg64_acquire(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */