/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif

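/*
 * Illustrative note, not part of the original file: for a u16 that sits at
 * byte offset 2 within its aligned 32-bit word, BITOFF_CAL(2, 2) evaluates
 * to (4 - 2 - 2) * 8 = 0 on big-endian and to 2 * 8 = 16 on little-endian,
 * i.e. the left shift needed to line the halfword up with its lane inside
 * the containing word.
 */
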
#define XCHG_GEN(type, sfx, cl)						\
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
{									\
	unsigned int prev, prev_mask, tmp, bitoff, off;			\
									\
	off = (unsigned long)p % sizeof(u32);				\
	bitoff = BITOFF_CAL(sizeof(type), off);				\
	p -= off;							\
	val <<= bitoff;							\
	prev_mask = (u32)(type)-1 << bitoff;				\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3\n"						\
"	andc	%1,%0,%5\n"						\
"	or	%1,%1,%4\n"						\
"	stwcx.	%1,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)			\
	: "r" (p), "r" (val), "r" (prev_mask)				\
	: "cc", cl);							\
									\
	return prev >> bitoff;						\
}

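/*
 * Sketch of an instantiation (illustrative comment, not part of the
 * original file): XCHG_GEN(u8, _local, "memory") emits __xchg_u8_local(),
 * which lwarx-loads the aligned 32-bit word containing the byte, clears
 * that byte's lane with andc and prev_mask, merges in the shifted new
 * value with or, retries the stwcx. until the reservation succeeds, and
 * returns the old byte shifted back down by bitoff.
 */
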
#define CMPXCHG_GEN(type, sfx, br, br2, cl)				\
static inline								\
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)		\
{									\
	unsigned int prev, prev_mask, tmp, bitoff, off;			\
									\
	off = (unsigned long)p % sizeof(u32);				\
	bitoff = BITOFF_CAL(sizeof(type), off);				\
	p -= off;							\
	old <<= bitoff;							\
	new <<= bitoff;							\
	prev_mask = (u32)(type)-1 << bitoff;				\
									\
	__asm__ __volatile__(						\
	br								\
"1:	lwarx	%0,0,%3\n"						\
"	and	%1,%0,%6\n"						\
"	cmpw	0,%1,%4\n"						\
"	bne-	2f\n"							\
"	andc	%1,%0,%6\n"						\
"	or	%1,%1,%5\n"						\
"	stwcx.	%1,0,%3\n"						\
"	bne-	1b\n"							\
	br2								\
	"\n"								\
"2:"									\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)			\
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)		\
	: "cc", cl);							\
									\
	return prev >> bitoff;						\
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

#ifndef CONFIG_PPC_HAS_LBARX_LHARX
XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");
#else
static __always_inline unsigned long
__xchg_u8_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lbarx	%0,0,%2		# __xchg_u8_local\n"
"	stbcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned char *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u8_relaxed(u8 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lbarx	%0,0,%2		# __xchg_u8_relaxed\n"
"	stbcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__xchg_u16_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lharx	%0,0,%2		# __xchg_u16_local\n"
"	sthcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned short *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u16_relaxed(u16 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lharx	%0,0,%2		# __xchg_u16_relaxed\n"
"	sthcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_local(ptr, x);
	case 2:
		return __xchg_u16_local(ptr, x);
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_relaxed(ptr, x);
	case 2:
		return __xchg_u16_relaxed(ptr, x);
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}

#define arch_xchg_local(ptr,x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

#define arch_xchg_relaxed(ptr, x)					\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

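/*
 * Usage sketch (illustrative only, not part of the original file; the
 * variable names are hypothetical):
 *
 *	static unsigned int owner;
 *
 *	unsigned int prev = arch_xchg_relaxed(&owner, new_owner);
 *	// 'prev' is the value 'owner' held before the swap; no ordering
 *	// is implied beyond the atomicity of the exchange itself.
 */
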
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#ifndef CONFIG_PPC_HAS_LBARX_LHARX
CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");
#else
static __always_inline unsigned long
__cmpxchg_u8(volatile unsigned char *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lbarx	%0,0,%2		# __cmpxchg_u8\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u8_local(volatile unsigned char *p, unsigned long old,
		   unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lbarx	%0,0,%2		# __cmpxchg_u8_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u8_relaxed(u8 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lbarx	%0,0,%2		# __cmpxchg_u8_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u8_acquire(u8 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lbarx	%0,0,%2		# __cmpxchg_u8_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stbcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lharx	%0,0,%2		# __cmpxchg_u16\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16_local(volatile unsigned short *p, unsigned long old,
		    unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lharx	%0,0,%2		# __cmpxchg_u16_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16_relaxed(u16 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lharx	%0,0,%2		# __cmpxchg_u16_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u16_acquire(u16 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lharx	%0,0,%2		# __cmpxchg_u16_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	sthcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
		    unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * The cmpxchg family doesn't provide an ordering guarantee if the cmp part
 * fails, therefore we can avoid superfluous barriers if we use assembly
 * code to implement cmpxchg() and cmpxchg_acquire(). However, we don't do
 * the same for cmpxchg_release(), because that would result in putting a
 * barrier in the middle of a ll/sc loop, which is probably a bad idea. For
 * example, this might make the conditional store more likely to fail.
 */

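/*
 * Illustrative note, not part of the original file: because the acquire
 * barrier sits after the ll/sc loop, a successful
 * arch_cmpxchg_acquire(&lock, 0, 1) orders the acquisition before all
 * later loads and stores on that CPU, while a failed attempt gives no
 * ordering guarantee, matching the behaviour described above.
 */
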
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
		    unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 2:
		return __cmpxchg_u16(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_local(ptr, old, new);
	case 2:
		return __cmpxchg_u16_local(ptr, old, new);
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_relaxed(ptr, old, new);
	case 2:
		return __cmpxchg_u16_relaxed(ptr, old, new);
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_acquire(ptr, old, new);
	case 2:
		return __cmpxchg_u16_acquire(ptr, old, new);
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}

#define arch_cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define arch_cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define arch_cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#define arch_cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

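/*
 * Usage sketch (illustrative only, not part of the original file; the
 * variable names are hypothetical):
 *
 *	static int state;
 *
 *	int old = arch_cmpxchg_acquire(&state, 0, 1);
 *	if (old == 0) {
 *		// The 0 -> 1 transition succeeded and later accesses on
 *		// this CPU are ordered after it.
 *	} else {
 *		// Another CPU won the race; 'old' holds the current value.
 *	}
 */
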
#ifdef CONFIG_PPC64
#define arch_cmpxchg64(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
})
#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
})
#define arch_cmpxchg64_relaxed(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})
#define arch_cmpxchg64_acquire(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_acquire((ptr), (o), (n));				\
})
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */