#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/msr.h>
struct msr_info {
	u32		msr_no;
	struct msr	reg;
	struct msr	*msrs;
	int		off;
	int		err;
};

static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	/* Per-CPU result slot when called for a whole mask, otherwise the
	 * single result embedded in the request. */
	if (rv->msrs)
		reg = &rv->msrs[this_cpu - rv->off];
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}
static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = &rv->msrs[this_cpu - rv->off];
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);
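/*
 * Example (illustrative sketch; the MSR and CPU number are arbitrary
 * choices, not mandated by this API):
 *
 *	u32 l, h;
 *
 *	if (!rdmsr_on_cpu(1, MSR_IA32_APERF, &l, &h))
 *		pr_debug("CPU1 APERF: %#x%08x\n", h, l);
 */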
/* rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.off    = cpumask_first(mask);
	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	/* Read locally if the current CPU is in the mask, then fan out. */
	if (cpumask_test_cpu(this_cpu, mask))
		__rdmsr_on_cpu(&rv);

	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
	put_cpu();
}
EXPORT_SYMBOL(rdmsr_on_cpus);
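/*
 * Example (illustrative sketch): reading one MSR across all online CPUs.
 * The @msrs array is indexed by (cpu - cpumask_first(mask)), so the caller
 * provides one slot per CPU starting at the first CPU in the mask; the
 * allocation below is just one way to size it.
 *
 *	struct msr *msrs;
 *
 *	msrs = kzalloc(num_possible_cpus() * sizeof(*msrs), GFP_KERNEL);
 *	if (msrs) {
 *		rdmsr_on_cpus(cpu_online_mask, MSR_IA32_APERF, msrs);
 *		/" use msrs[cpu - cpumask_first(cpu_online_mask)] per CPU "/
 *		kfree(msrs);
 *	}
 */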
/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.off    = cpumask_first(mask);
	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask))
		__wrmsr_on_cpu(&rv);

	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
	put_cpu();
}
EXPORT_SYMBOL(wrmsr_on_cpus);
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}
static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	/* Report the IPI failure first, otherwise the rdmsr_safe() result. */
	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
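/*
 * Example (illustrative sketch; cpu and msr are assumed caller-provided):
 * probing an MSR that may not exist on the target CPU.  A faulting access
 * is reported through the return value rather than crashing the caller.
 *
 *	u32 l, h;
 *	int err = rdmsr_safe_on_cpu(cpu, msr, &l, &h);
 *
 *	if (err)
 *		pr_debug("MSR %#x not readable on CPU%u (%d)\n", msr, cpu, err);
 */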
/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
struct msr_regs_info {
	u32 *regs;
	int err;
};

static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}
static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -1;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
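/*
 * Example (illustrative sketch; cpu and msr are assumed caller-provided):
 * reading an MSR with full GPR control, for vendor-specific accesses that
 * need extra registers loaded.  The layout assumed here is one u32 per GPR,
 * with regs[1] (ecx) holding the MSR number and the result coming back in
 * regs[0]/regs[2] (eax/edx).
 *
 *	u32 regs[8] = { 0 };
 *	u64 val;
 *
 *	regs[1] = msr;
 *	if (!rdmsr_safe_regs_on_cpu(cpu, regs))
 *		val = (u64)regs[2] << 32 | regs[0];
 */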
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -1;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);