/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
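
/*
 * Illustrative sketch (not part of the API): with the macros above, a
 * 64-bit MSR read expands roughly as follows on each architecture.
 *
 * On x86_64:
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return (low) | (high) << 32;
 *
 * On i386:
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *	return val;
 */
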
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static inline unsigned long long notrace __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))
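
/*
 * Usage sketch (illustrative only; MSR_EXAMPLE stands in for a real
 * index from msr-index.h). Note that native_rdmsr() writes straight
 * into the lvalues, with no pointer indirection:
 *
 *	u32 lo, hi;
 *
 *	native_rdmsr(MSR_EXAMPLE, lo, hi);
 *	native_wrmsrl(MSR_EXAMPLE, ((u64)hi << 32) | lo);
 */
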
static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err]\n\t"
		     "xorl %%eax, %%eax\n\t"
		     "xorl %%edx, %%edx\n\t"
		     "jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}
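
/*
 * Error-handling sketch (illustrative): on a fault the fixup path above
 * returns 0 as the value and -EIO through *err, so callers check err
 * before trusting the value:
 *
 *	int err;
 *	u64 val = native_read_msr_safe(msr, &err);
 *
 *	if (err)
 *		return err;	(the read faulted; val is 0)
 */
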
/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
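
/*
 * Usage sketch (illustrative): because the CPU may execute RDTSC
 * speculatively, plain rdtsc() is only suitable for same-CPU deltas
 * where exact instruction ordering does not matter:
 *
 *	u64 start = rdtsc();
 *	do_work();
 *	cycles = rdtsc() - start;	(do_work is a stand-in)
 */
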
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
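
/*
 * Sketch of the patching above (illustrative): ALTERNATIVE_2() emits
 * plain "rdtsc" as the default and lets the boot-time alternatives
 * machinery rewrite it, with later-listed features taking precedence:
 *
 *	X86_FEATURE_RDTSCP set        -> "rdtscp"
 *	else X86_FEATURE_LFENCE_RDTSC -> "lfence; rdtsc"
 *	else                          -> plain "rdtsc"
 */
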
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
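
/*
 * Usage sketch (illustrative; MSR_EXAMPLE stands in for a real index
 * from msr-index.h): a typical read-modify-write of a 64-bit MSR:
 *
 *	u64 val;
 *
 *	rdmsrl(MSR_EXAMPLE, val);
 *	val |= BIT(0);
 *	wrmsrl(MSR_EXAMPLE, val);
 */
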
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
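
/*
 * Sketch of the _safe pattern (illustrative): use the _safe variants
 * when the MSR may not exist on the running CPU, e.g. while probing:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_EXAMPLE, &val))
 *		return false;	(the read faulted; no such MSR)
 */
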
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT_XXL */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
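
/*
 * Illustrative sketch, assuming the convention documented for these
 * helpers in arch/x86/lib/msr.c (negative on error, 0 if the bit was
 * already in the requested state, >0 if it had to be flipped):
 *
 *	if (msr_set_bit(MSR_EXAMPLE, 2) > 0)
 *		pr_info("bit 2 was newly set\n");
 */
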
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
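
/*
 * Usage sketch (illustrative): the *_on_cpu() variants perform the
 * access on a specific CPU via a cross-CPU call (see
 * arch/x86/lib/msr-smp.c), so they are not for atomic context:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, MSR_EXAMPLE, &val))
 *		pr_warn("CPU %u: MSR read failed\n", cpu);
 */
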
#else  /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif	/* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */