/*
 * x86 misc helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"
#include "hw/i386/apic.h"
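
/*
 * Port I/O helpers.  INx/OUTx are routed through the global I/O
 * address space, carrying the CPU's current memory transaction
 * attributes so device dispatch sees the correct context.
 */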
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}
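
/*
 * CR8 aliases the local APIC's task priority register (TPR).  Under
 * SVM virtual interrupt masking (V_INTR_MASKING), reads are served
 * from the guest's virtual TPR in int_ctl instead.
 */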
target_ulong helper_read_cr8(CPUX86State *env)
{
    if (!(env->hflags2 & HF2_VINTR_MASK)) {
        return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
    } else {
        return env->int_ctl & V_TPR_MASK;
    }
}
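
/*
 * Control register writes land here when they need side effects:
 * CR0/CR3/CR4 updates recompute MMU and hflags state and may trigger
 * SVM vmexits; CR8 writes update the (possibly virtualized) TPR.
 */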
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the selective
         * intercept for bits other than TS and MP
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        if ((env->efer & MSR_EFER_LMA) &&
            (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (!(env->efer & MSR_EFER_LMA)) {
            t0 &= 0xffffffffUL;
        }
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);

        CPUState *cs = env_cpu(env);
        if (ctl_has_irq(env)) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
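
/*
 * WRMSR: the MSR index is taken from ECX and the 64-bit value from
 * EDX:EAX.  Architecturally invalid writes raise #GP(0); writes to
 * unimplemented MSRs are silently ignored.
 */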
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE: {
        int ret;

        if (val & MSR_IA32_APICBASE_RESERVED) {
            goto error;
        }

        ret = cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        if (ret < 0) {
            goto error;
        }
        break;
    }
    case MSR_EFER:
        {
            /* Only bits backed by a CPUID feature may be changed.  */
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        if (val & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
            goto error;
        }
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    case MSR_APIC_START ... MSR_APIC_END: {
        int ret;
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;

        qemu_mutex_lock_iothread();
        ret = apic_msr_write(index, val);
        qemu_mutex_unlock_iothread();
        if (ret < 0) {
            goto error;
        }
        break;
    }
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
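
/*
 * RDMSR: the MSR index is taken from ECX and the 64-bit result is
 * returned split across EDX:EAX.  Unimplemented MSRs read as 0.
 */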
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    case MSR_CORE_THREAD_COUNT: {
        CPUState *cs = CPU(x86_cpu);
        val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16);
        break;
    }
    case MSR_APIC_START ... MSR_APIC_END: {
        int ret;
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;

        qemu_mutex_lock_iothread();
        ret = apic_msr_read(index, &val);
        qemu_mutex_unlock_iothread();
        if (ret < 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        break;
    }
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}
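
/*
 * HLT: retire the current instruction, mark the vCPU halted and
 * leave the execution loop; it will be woken by the next interrupt.
 */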
G_NORETURN void helper_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    do_end_instruction(env);
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}
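
/*
 * MWAIT: with a single vCPU this behaves like HLT; with multiple
 * vCPUs it degrades to a PAUSE-style yield, which is imprecise but
 * architecturally harmless.
 */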
G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(env);
    } else {
        helper_hlt(env);
    }
}