/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
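
/*
 * A vCPU's CPUID table is supplied by userspace, but a few bits have to
 * track live guest state. The helper below re-derives those bits: for
 * example, CPUID.01H:ECX.OSXSAVE[bit 27] must mirror the guest's current
 * CR4.OSXSAVE value rather than whatever userspace originally stored.
 */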
void kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return;

        /* Update OSXSAVE bit */
        if (cpu_has_xsave && best->function == 0x1) {
                best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }

        if (apic) {
                if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }

        kvm_pmu_cpuid_update(vcpu);
}
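
/*
 * Probe the host's EFER.NX. rdmsrl_safe() is used so that a CPU without
 * an EFER MSR simply fails the read instead of faulting; efer stays at
 * its preinitialized 0 in that case and NX reads as absent.
 */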
static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}
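
/*
 * If the host does not have NX enabled, the guest must not advertise it
 * either: clear bit 20 (NX) in the guest's CPUID.80000001H:EDX so the
 * guest does not try to set EFER.NX on a host that would reject it.
 */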
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        /* Convert the legacy kvm_cpuid_entry format to kvm_cpuid_entry2. */
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        kvm_update_cpuid(vcpu);

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        kvm_update_cpuid(vcpu);
        return 0;

out:
        return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}
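
/*
 * Clear any feature bits the host kernel itself does not expose. wordnum
 * indexes the host's capability words: word 0 is CPUID.01H:EDX, word 4 is
 * CPUID.01H:ECX, words 1 and 6 are the 0x80000001 EDX/ECX pair, word 5 is
 * the Centaur 0xC0000001 EDX word, and word 9 is CPUID.07H:EBX, matching
 * the cpuid_mask() calls in do_cpuid_ent() below.
 */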
static void cpuid_mask(u32 *word, int wordnum)
{
        *word &= boot_cpu_data.x86_capability[wordnum];
}
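
/*
 * Fill one table entry directly from the hardware CPUID instruction for
 * (function, index); callers then mask the result down to the feature set
 * KVM can actually virtualize.
 */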
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}
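
/*
 * Only the x87 (bit 0), SSE (bit 1) and AVX/YMM (bit 2) xsave states are
 * exposed to guests, and each one only if the host's XCR0 has it set.
 */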
static bool supported_xcr0_bit(unsigned bit)
{
        u64 mask = ((u64)1 << bit);

        return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
}
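
/*
 * F(NX), for example, expands to bit(X86_FEATURE_NX): the feature's bit
 * position within its 32-bit capability word, which is the same word
 * numbering cpuid_mask() uses.
 */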
#define F(x) bit(X86_FEATURE_##x)
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        u32 index, int *nent, int maxnent)
{
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
                                ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
#else
        unsigned f_gbpages = 0;
        unsigned f_lm = 0;
#endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_supported_word1_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_supported_word4_x86_features =
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

        /* cpuid 0xC0000001.edx */
        const u32 kvm_supported_word5_x86_features =
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN);

        /* cpuid 7.0.ebx */
        const u32 kvm_supported_word9_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM);
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        if (*nent >= maxnent)
                goto out;

        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xd);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                cpuid_mask(&entry->edx, 0);
                entry->ecx &= kvm_supported_word4_x86_features;
                cpuid_mask(&entry->ecx, 4);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times; ++t) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 has additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 7: {
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* Mask ebx against host capability word 9 */
                if (index == 0) {
                        entry->ebx &= kvm_supported_word9_x86_features;
                        cpuid_mask(&entry->ebx, 9);
                } else
                        entry->ebx = 0;
                entry->eax = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        }
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = cap.num_counters_fixed;
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                edx.split.reserved = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
        /* function 0xb has additional index. */
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xd: {
                int idx, i;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                for (idx = 1, i = 1; idx < 64; ++idx) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
                                continue;
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                        ++i;
                }
                break;
        }
        case KVM_CPUID_SIGNATURE: {
                char signature[12] = "KVMKVMKVM\0\0";
                u32 *sigptr = (u32 *)signature;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                cpuid_mask(&entry->edx, 1);
                entry->ecx &= kvm_supported_word6_x86_features;
                cpuid_mask(&entry->ecx, 6);
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                /* fall back to the host physical address width if the
                 * guest field is unset */
                if (!g_phys_as)
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->ebx = entry->edx = 0;
                break;
        }
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                break;
        /* Add support for Centaur's CPUID instruction */
        case 0xC0000000:
                /* Just support up to 0xC0000004 now */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                entry->edx &= kvm_supported_word5_x86_features;
                cpuid_mask(&entry->edx, 5);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 6: /* Thermal management */
        case 0x80000007: /* Advanced power management */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        kvm_x86_ops->set_supported_cpuid(function, entry);

        r = 0;

out:
        put_cpu();

        return r;
}

#undef F
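
/*
 * Descriptor for one base leaf enumerated by
 * kvm_dev_ioctl_get_supported_cpuid(): when has_leaf_count is set, EAX of
 * the base leaf bounds the follow-on leaves to walk; a qualifier, if any,
 * gates the whole range (e.g. the Centaur leaves below).
 */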
struct kvm_cpuid_param {
        u32 func;
        u32 idx;
        bool has_leaf_count;
        bool (*qualifier)(struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(struct kvm_cpuid_param *param)
{
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                      struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
        u32 func;
        static struct kvm_cpuid_param param[] = {
                { .func = 0, .has_leaf_count = true },
                { .func = 0x80000000, .has_leaf_count = true },
                { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
                { .func = KVM_CPUID_SIGNATURE },
                { .func = KVM_CPUID_FEATURES },
        };

        if (cpuid->nent < 1)
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        r = 0;
        for (i = 0; i < ARRAY_SIZE(param); i++) {
                struct kvm_cpuid_param *ent = &param[i];

                if (ent->qualifier && !ent->qualifier(ent))
                        continue;

                r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
                                &nent, cpuid->nent);

                if (r)
                        goto out_free;

                if (!ent->has_leaf_count)
                        continue;

                /* enumerate the follow-on leaves bounded by the base
                 * leaf's EAX */
                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
                                     &nent, cpuid->nent);

                if (r)
                        goto out_free;
        }

        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
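
/*
 * Stateful-leaf bookkeeping. Several table entries may share one function
 * number (leaf 2 is the only STATEFUL_FUNC user here), and
 * KVM_CPUID_FLAG_STATE_READ_NEXT marks which of them the next guest CPUID
 * should see. With three entries for the leaf, the mark rotates
 * 0 -> 1 -> 2 -> 0 across successive reads.
 */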
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        int j, nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
        for (j = i + 1; ; j = (j + 1) % nent) {
                struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
                if (ej->function == e->function) {
                        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                        return j;
                }
        }
        return 0; /* silence gcc, even though control never reaches here */
}
/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
        u32 function, u32 index)
{
        if (e->function != function)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
            !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
                return 0;
        return 1;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        int i;
        struct kvm_cpuid_entry2 *best = NULL;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
                                                  u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *maxlevel;

        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        if (!maxlevel || maxlevel->eax >= function)
                return NULL;
        if (function & 0x80000000) {
                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (!maxlevel)
                        return NULL;
        }
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
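
/*
 * Worked example: if the guest's highest standard leaf is 0xd and it
 * executes CPUID with EAX = 0x1f, check_cpuid_limit() hands back the
 * leaf-0xd entry; out-of-range extended leaves likewise fall back to the
 * highest standard leaf, as real hardware does.
 */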
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, function, index);

        if (!best)
                best = check_cpuid_limit(vcpu, function, index);

        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
                *ecx = best->ecx;
                *edx = best->edx;
        } else
                *eax = *ebx = *ecx = *edx = 0;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
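
/*
 * Entry point for a trapped guest CPUID instruction: the leaf comes from
 * RAX and the sub-leaf from RCX, the looked-up values go back into
 * RAX/RBX/RCX/RDX, and RIP is advanced past the instruction.
 */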
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 function, eax, ebx, ecx, edx;

        function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
        trace_kvm_cpuid(function, eax, ebx, ecx, edx);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);