/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SYNCH_H
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__

#include <asm/cputable.h>
#include <asm/feature-fixups.h>
#include <asm/ppc-opcode.h>

#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
			     void *fixup_end);
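
/*
 * Usage sketch (not part of the original header): at boot,
 * apply_feature_fixups() in arch/powerpc/lib/feature-fixups.c walks the
 * __lwsync_fixup section roughly as
 *
 *	do_lwsync_fixups(cur_cpu_spec->cpu_features,
 *			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
 *
 * patching each recorded barrier site to a real lwsync when the CPU
 * advertises CPU_FTR_LWSYNC.
 */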

static inline void eieio(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		__asm__ __volatile__ ("mbar" : : : "memory");
	else
		__asm__ __volatile__ ("eieio" : : : "memory");
}

static inline void isync(void)
{
	__asm__ __volatile__ ("isync" : : : "memory");
}

static inline void ppc_after_tlbiel_barrier(void)
{
	asm volatile("ptesync": : :"memory");
	/*
	 * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
	 * invalidated correctly. If this is not done, the paste can take data
	 * from the physical address that was translated at copy time.
	 *
	 * POWER9 in practice does not need this, because address spaces with
	 * accelerators mapped will use tlbie (which does invalidate the copy)
	 * to invalidate translations. It's not possible to limit POWER10 this
	 * way due to local copy-paste.
	 */
	asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
}
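
/*
 * Usage note (a sketch, not from this header): the radix TLB flush
 * helpers in arch/powerpc/mm/book3s64/radix_tlb.c issue a batch of
 * tlbiel instructions and then call this once, roughly
 *
 *	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
 *		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
 *	ppc_after_tlbiel_barrier();
 */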
#endif /* __ASSEMBLY__ */

#if defined(__powerpc64__)
#    define LWSYNC	lwsync
#elif defined(CONFIG_PPC_E500)
#    define LWSYNC					\
	START_LWSYNC_SECTION(96);			\
	sync;						\
	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
#else
#    define LWSYNC	sync
#endif
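
/*
 * Mechanism note (an assumption, based on asm/feature-fixups.h): each use
 * of the E500 LWSYNC above emits a plain sync plus an entry in the
 * __lwsync_fixup section recording the site, so do_lwsync_fixups() can
 * rewrite the sync to lwsync at boot on parts that set CPU_FTR_LWSYNC.
 */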

#ifdef CONFIG_SMP
#define __PPC_ACQUIRE_BARRIER				\
	START_LWSYNC_SECTION(97);			\
	isync;						\
	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
#define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
#define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
#define PPC_ATOMIC_EXIT_BARRIER	"\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
#define PPC_RELEASE_BARRIER
#define PPC_ATOMIC_ENTRY_BARRIER
#define PPC_ATOMIC_EXIT_BARRIER
#endif
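
/*
 * Usage sketch (an assumption, mirroring arch_atomic_add_return() in
 * arch/powerpc/include/asm/atomic.h): the stringified barriers are pasted
 * around larx/stcx. loops in inline asm, e.g.
 *
 *	__asm__ __volatile__(
 *	PPC_ATOMIC_ENTRY_BARRIER
 *	"1:	lwarx	%0,0,%2\n"
 *	"	add	%0,%1,%0\n"
 *	"	stwcx.	%0,0,%2\n"
 *	"	bne-	1b\n"
 *	PPC_ATOMIC_EXIT_BARRIER
 *	: "=&r" (t) : "r" (a), "r" (&v->counter) : "cc", "memory");
 */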

#endif /* __KERNEL__ */
#endif	/* _ASM_POWERPC_SYNCH_H */