/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef CPU_X86_CACHE
#define CPU_X86_CACHE

#include <cpu/x86/cr.h>

#define CR0_CacheDisable	(CR0_CD)
#define CR0_NoWriteThrough	(CR0_NW)

#define CPUID_FEATURE_CLFLUSH_BIT 19
#define CPUID_FEATURE_SELF_SNOOP_BIT 27
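
/*
 * These constants are bit positions in the CPUID leaf 1 EDX feature word.
 * An open-coded CLFLUSH check might look like the following sketch
 * (illustrative only; cpuid_edx() comes from <arch/cpuid.h>, and
 * clflush_supported() below is the real interface):
 *
 *	if ((cpuid_edx(1) >> CPUID_FEATURE_CLFLUSH_BIT) & 1)
 *		... CLFLUSH is available ...
 */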

#if !defined(__ASSEMBLER__)

#include <arch/cpuid.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Write back all modified cache lines, then invalidate the caches. */
static inline void wbinvd(void)
{
	asm volatile ("wbinvd" ::: "memory");
}

/* Invalidate the caches without writing back modified lines. */
static inline void invd(void)
{
	asm volatile("invd" ::: "memory");
}

/* Write back and invalidate the single cache line containing addr. */
static inline void clflush(void *addr)
{
	asm volatile ("clflush (%0)" :: "r" (addr));
}

bool clflush_supported(void);
void clflush_region(const uintptr_t start, const size_t size);
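
/*
 * Illustrative sketch of typical use (the buffer name and length are
 * hypothetical): flush a region before handing it to a non-coherent agent,
 * falling back to a full write-back when CLFLUSH is unavailable.
 *
 *	if (clflush_supported())
 *		clflush_region((uintptr_t)buf, len);
 *	else
 *		wbinvd();
 */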

/*
 * The following functions require __always_inline because of the AMD
 * function STOP_CAR_AND_CPU, which disables cache-as-RAM: once cache-as-RAM
 * is torn down, its stack can no longer be used, so called functions must
 * be inlined to avoid any stack usage. The compiler must also keep local
 * variables in registers rather than allocating them on the stack. With
 * gcc 4.5.0, some functions declared inline were not being inlined, so the
 * __always_inline qualifier is added to force inlining.
 */
static __always_inline void enable_cache(void)
{
	/* Clear CR0.CD and CR0.NW to restore normal write-back caching. */
	write_cr0(read_cr0() & ~(CR0_CD | CR0_NW));
}

/*
 * Cache flushing is the most time-consuming step when programming the MTRRs.
 * However, if the processor supports cache self-snooping (ss), we can skip
 * this step and save time.
 */
static __always_inline bool self_snooping_supported(void)
{
	/* CPUID leaf 1, EDX bit 27 is the self-snoop (SS) feature flag. */
	return (cpuid_edx(1) >> CPUID_FEATURE_SELF_SNOOP_BIT) & 1;
}

static __always_inline void disable_cache(void)
{
	/* Disable and write back the cache */
	write_cr0(read_cr0() | CR0_CD);
	if (!self_snooping_supported())
		wbinvd();
}
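
/*
 * A minimal sketch of the usual sequence around MTRR reprogramming,
 * assuming the caller has already masked interrupts:
 *
 *	disable_cache();
 *	... update the MTRR MSRs ...
 *	enable_cache();
 */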

#endif /* !__ASSEMBLER__ */
#endif /* CPU_X86_CACHE */