/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
}
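
/* Note: cpu_loop_exit() unwinds directly back to the setjmp() on
   env->jmp_env in cpu_exec(), so code after a cpu_loop_exit() call is
   never reached; any state that must survive the exit has to live in
   env already. */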
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
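
/* Note: because we longjmp() out of the signal handler instead of
   returning through sigreturn, the kernel never gets a chance to
   restore the signal mask saved in the ucontext; that is why it is
   re-applied by hand with sigprocmask() above. */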
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
}
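
/* Note: max_cycles ends up in the one-shot TB's cflags instruction
   count (hence the clamp to CF_COUNT_MASK above), so the TB generated
   here should execute at most that many guest instructions; this is
   how the icount code in cpu_exec() runs exactly the instructions
   remaining before a pending timer deadline. */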
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
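
/* Note: the hash chain above is keyed on the *physical* PC, so a TB
   can be found again regardless of which virtual mapping the guest
   executes it through; the tb_jmp_cache entry filled in at 'found:'
   is only a per-virtual-address shortcut consulted by tb_find_fast()
   below. */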
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
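
/* Note: the fast path compares only pc, cs_base and flags against the
   cached TB, so any CPU state that changes how code at a given address
   must be translated has to be folded into the flags word by
   cpu_get_tb_cpu_state(); otherwise a stale translation could be
   reused from the jump cache. */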
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
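
/* Note: when the debug exception was not caused by a watchpoint
   (env->watchpoint_hit is NULL), any stale BP_WATCHPOINT_HIT flags are
   cleared above so a later hit is not misattributed; the registered
   handler, if any, then gets the first look at the stopped CPU. */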
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
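    /* Note: from here until the exit path below, the i386 arithmetic
       flags live in the lazy CC_SRC/CC_OP representation used by the
       generated code; helper_cc_compute_all(CC_OP) is what folds them
       back into eflags whenever the architectural value is needed. */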
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
#elif defined(TARGET_IA64)
#endif
#endif
                }
                env->exception_index = -1;
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
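                /* next_tb holds the last executed TB with two low-order
                   bits of state: (next_tb & ~3) is the TranslationBlock
                   pointer used for direct chaining via tb_add_jump()
                   below, and (next_tb & 3) is the jump slot taken, the
                   value 2 meaning "instruction counter expired". */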
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
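                /* Note: once tb_add_jump() has patched the previous TB's
                   jump slot, chained TBs branch directly to each other's
                   generated code, and execution stays out of this loop
                   until an unchained exit or an interrupt occurs. */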
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
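                /* Note: on an icount expiry the remaining instructions
                   are either run via the one-shot cpu_exec_nocache() TB
                   above or, when none are left, we exit with
                   EXCP_INTERRUPT so the pending timer event is handled
                   before execution resumes. */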
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
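
/* The per-host-architecture handlers below all follow the same pattern:
   dig the faulting host PC (and, where the host signal context exposes
   it, a write flag) out of the signal frame, then funnel into
   handle_cpu_signal() above. */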
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context)  /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#else /* __APPLE__ */
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */