/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef CPU_X86_MSR_ACCESS_H
#define CPU_X86_MSR_ACCESS_H

#ifndef __ASSEMBLER__
#include <types.h>

typedef union msr_union {
	struct {
		uint32_t lo;
		uint32_t hi;
	};
	uint64_t raw;
} msr_t;

_Static_assert(sizeof(msr_t) == sizeof(uint64_t), "Incorrect size for msr_t");
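
/*
 * Illustrative sketch (not part of this header's API): because msr_t is
 * a union, the lo/hi pair and raw alias the same 64 bits of storage, so
 * either view can be used to assemble or inspect a value:
 *
 *	msr_t m;
 *	m.lo = 0x89abcdef;	// bits 31:0
 *	m.hi = 0x01234567;	// bits 63:32
 *	// On little-endian x86, m.raw now reads 0x0123456789abcdef
 */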
#if CONFIG(SOC_SETS_MSRS)
msr_t soc_msr_read(unsigned int index);
void soc_msr_write(unsigned int index, msr_t msr);
/* Handle MSR references from the rest of the source code */
static __always_inline msr_t rdmsr(unsigned int index)
{
	return soc_msr_read(index);
}

static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
	soc_msr_write(index, msr);
}
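
/*
 * An SoC selecting SOC_SETS_MSRS is expected to define the two hooks
 * declared above, so it can intercept or emulate individual MSRs. A
 * hypothetical sketch (the MSR number and helper names below are
 * illustrative only, not part of this header):
 *
 *	msr_t soc_msr_read(unsigned int index)
 *	{
 *		if (index == SOC_EMULATED_MSR)		// hypothetical MSR
 *			return soc_emulated_value();	// hypothetical helper
 *		return soc_native_rdmsr(index);		// hypothetical accessor
 *	}
 */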
#else /* CONFIG_SOC_SETS_MSRS */
/* The following functions require __always_inline because of the AMD
 * STOP_CAR_AND_CPU function: once it disables cache-as-RAM, the
 * cache-as-RAM stack can no longer be used, so called functions must
 * be inlined to avoid stack usage. The compiler must also keep local
 * variables in registers rather than allocating them on the stack.
 * With gcc 4.5.0, some functions declared as inline were not being
 * inlined; adding the __always_inline qualifier to their declaration
 * forces them to always be inlined.
 */
static __always_inline msr_t rdmsr(unsigned int index)
{
	msr_t result;
	__asm__ __volatile__ (
		"rdmsr"
		: "=a" (result.lo), "=d" (result.hi)
		: "c" (index)
		);
	return result;
}
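
/*
 * Usage sketch (illustrative; 0x10 is the architectural
 * IA32_TIME_STAMP_COUNTER MSR):
 *
 *	msr_t tsc = rdmsr(0x10);
 *	uint64_t ticks = tsc.raw;	// or ((uint64_t)tsc.hi << 32) | tsc.lo
 */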
static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
	__asm__ __volatile__ (
		"wrmsr"
		: /* No outputs */
		: "c" (index), "a" (msr.lo), "d" (msr.hi)
		);
}
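
/*
 * Typical read-modify-write sketch (the bit set here is purely
 * illustrative, not a real MSR field):
 *
 *	msr_t msr = rdmsr(index);
 *	msr.lo |= (1 << 0);	// set a bit in the low 32 bits
 *	wrmsr(index, msr);
 */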
#endif /* CONFIG_SOC_SETS_MSRS */
#endif /* __ASSEMBLER__ */
#endif /* CPU_X86_MSR_ACCESS_H */