target/i386/tcg/sysemu/misc_helper.c

/*
 * x86 misc helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"

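/*
 * Port I/O helpers. Guest IN/OUT instructions are forwarded to the
 * dedicated I/O address space; each access carries the vCPU's current
 * memory transaction attributes (cpu_get_mem_attrs()), so state such as
 * SMM mode is visible to the device model handling the access.
 */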
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

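/*
 * Control-register access. CR8 maps to the local APIC's task-priority
 * register (TPR); when the guest runs with SVM virtual interrupt masking
 * enabled (HF2_VINTR_MASK), CR8 accesses are redirected to the V_TPR
 * field of the virtual interrupt control state instead of the real APIC.
 */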
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->int_ctl & V_TPR_MASK;
        }
        break;
    }
    return val;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the
         * selective intercept for bits other than TS and MP.
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        if ((env->efer & MSR_EFER_LMA) &&
            (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (!(env->efer & MSR_EFER_LMA)) {
            t0 &= 0xffffffffUL;
        }
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);

        CPUState *cs = env_cpu(env);
        if (ctl_has_irq(env)) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

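/*
 * WRMSR: ECX selects the MSR, EDX:EAX supplies the 64-bit value.
 * Writes to unrecognized MSRs are silently ignored here (see the XXX
 * in the default case below); real hardware would raise #GP.
 */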
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
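    /*
     * Variable-range MTRRs come as (base, mask) MSR pairs at consecutive
     * addresses, so (ECX - MSR_MTRRphysBase(0)) / 2 selects the register
     * pair and the address parity selects base vs. mask.
     */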
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set. */
        /* FIXME: Extend highest implemented bit of linear address. */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}

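/*
 * RDMSR: ECX selects the MSR; the 64-bit result is returned in EDX:EAX.
 * Reads of unrecognized MSRs return 0 rather than raising #GP (see the
 * XXX in the default case below).
 */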
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
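    /*
     * MSR_CORE_THREAD_COUNT layout, as emulated here: bits 15:0 hold the
     * total logical processor count, bits 31:16 the core count.
     */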
    case MSR_CORE_THREAD_COUNT: {
        CPUState *cs = CPU(x86_cpu);
        val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16);
        break;
    }
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}

void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}

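/*
 * HLT: clear the interrupt-inhibit flag (a preceding STI or MOV SS
 * suppresses interrupts for exactly one instruction) and park the vCPU
 * until the next event wakes it.
 */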
static G_NORETURN
void do_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

G_NORETURN void helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(env);
}

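/*
 * MONITOR/MWAIT. ECX carries extension flags in both instructions; since
 * no extensions are implemented here, any nonzero ECX raises #GP.
 */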
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

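/*
 * Approximation: on an SMP guest, treat MWAIT like PAUSE so other vCPUs
 * can make progress; on a uniprocessor guest, treat it like HLT.
 */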
G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(env);
    } else {
        do_hlt(env);
    }
}