/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
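/* Note: every piece of code that raises a guest exception follows the same
   protocol: record the exception in env, then unwind to the setjmp() point
   in cpu_exec() via cpu_loop_exit(). A hypothetical target helper (not part
   of this file) would do roughly:

       env->exception_index = EXCP_SOMETHING;   // target-specific number
       env->error_code = ...;                   // if the target uses one
       cpu_loop_exit();                         // longjmp(); never returns
*/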
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
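/* Note on the next_tb encoding used above and in the main loop: the value
   returned by tcg_qemu_tb_exec() is the address of the TB that was running
   when execution left generated code, with the low two bits used as a tag.
   Tags 0 and 1 name the jump slot of that TB which caused the exit (so it
   can later be patched by tb_add_jump()); tag 2 marks a forced exit such as
   instruction counter expiry. Hence the tests on (next_tb & 3) and the
   pointer recovery with next_tb & ~3. */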
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
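/* TB lookup is two-level: tb_find_fast() below consults a small per-CPU
   virtual-PC cache (env->tb_jmp_cache), and only on a miss does
   tb_find_slow() walk the physical hash chain above. Keying the slow path
   on *physical* addresses lets translations survive guest page-table
   changes: if the guest remaps a virtual page, a stale virtual hit is
   rejected by the pc/cs_base/flags/page_addr checks and the block is
   re-resolved by physical address. */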
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
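/* The (pc, cs_base, flags) triple identifies a translation: "flags" packs
   the mode bits that affect code generation (for example, on x86, the CPL
   and the 16/32/64-bit code size), so one guest PC can legitimately map to
   several distinct TBs, one per mode it has executed in. */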
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
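/* Illustrative use (hypothetical caller, not part of this file): a debugger
   front end such as a gdb stub can hook debug exceptions and chain to the
   previously installed handler:

       static CPUDebugExcpHandler *prev_handler;

       static void my_debug_hook(CPUState *env)
       {
           // ... inspect env->watchpoint_hit, breakpoints, etc. ...
           if (prev_handler)
               prev_handler(env);
       }

       prev_handler = cpu_set_debug_excp_handler(my_debug_hook);
*/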
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
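    /* Note on the lazy-flags scheme above: while TBs execute, the x86
       condition codes are not kept in env->eflags; they are recomputed on
       demand from CC_SRC/CC_DST according to CC_OP. Setting CC_OP_EFLAGS
       says "CC_SRC already holds the flag bits". The direction flag is kept
       as DF = +1 or -1 so string instructions can step pointers by
       DF * element size: eflags bit 10 (DF) set gives 1 - 2*1 = -1, clear
       gives 1 - 2*0 = +1. */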
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 &&
                env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_HPPA) || defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
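                    /* Background for the block above: HF2_GIF_MASK mirrors
                       AMD SVM's Global Interrupt Flag; while GIF is clear
                       (e.g. between VMRUN and STGI), all interrupt sources,
                       including SMI and NMI, are held pending, which is why
                       the whole dispatch is nested under the hflags2 test.
                       The SVM virtual-interrupt path reads the pending
                       vector directly from the VMCB's control.int_vector
                       field. */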
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_HPPA)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
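                /* Chaining rationale: patching the previous TB's jump slot
                   lets chained blocks run back-to-back without returning to
                   this loop. A TB whose code crosses a page boundary
                   (page_addr[1] != -1) is excluded because either of its
                   pages may be invalidated independently; forcing it back
                   through the lookup re-validates both pages on every
                   entry. */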
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
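                /* icount bookkeeping: the decrementer actually consulted by
                   generated code is the 16-bit icount_decr.u16.low, so any
                   budget larger than 0xffff is parked in icount_extra and
                   fed in slices. E.g. with a budget of 200000 instructions,
                   the first refill loads 0xffff (65535) into u16.low and
                   leaves the remainder in icount_extra; on each underflow
                   the next slice is loaded, until icount_extra runs dry and
                   the deadline is delivered via EXCP_INTERRUPT above. */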
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
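/* In real mode (CR0.PE clear) and in virtual-8086 mode (EFLAGS.VM set)
   there are no descriptor tables: a segment's base is simply selector * 16,
   which is the (selector << 4) above. E.g. loading DS with 0x1234 yields
   base 0x12340 and limit 0xffff. Protected-mode loads must instead go
   through the full descriptor checks in helper_load_seg(). */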
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
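/* Return protocol shared by all handle_cpu_signal() variants below: 0 tells
   the host signal handler the fault was not for the guest (let normal host
   crash handling proceed); 1 means it was handled, either by re-protecting
   the page or by servicing the soft-MMU fault. When a guest exception must
   be raised, the function does not return at all: it longjmp()s back into
   cpu_exec() (raise_exception_err(), cpu_loop_exit()) or re-enters via
   cpu_resume_from_signal(). */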
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
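/* These accessor macros exist because each host OS lays out the signal
   context differently: Linux exposes gregs[] inside a nested uc_mcontext,
   Darwin goes through a pointer (uc_mcontext->...), and the BSDs use a flat
   struct sigcontext. The handler below only needs three facts: the faulting
   EIP, the trap number (0xe = page fault), and the hardware error code,
   whose bit 1 distinguishes writes from reads. */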
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)              (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x15: /* stba */
        case 0x06: /* sth */
        case 0x16: /* stha */
        case 0x04: /* st */
        case 0x14: /* sta */
        case 0x07: /* std */
        case 0x17: /* stda */
        case 0x0e: /* stx */
        case 0x1e: /* stxa */
        case 0x24: /* stf */
        case 0x34: /* stfa */
        case 0x27: /* stdf */
        case 0x37: /* stdfa */
        case 0x26: /* stqf */
        case 0x36: /* stqfa */
        case 0x25: /* stfsr */
        case 0x3c: /* casa */
        case 0x3e: /* casxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
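/* The SPARC decode above keys off the instruction format: bits 31:30 == 3
   selects the load/store format, and bits 24:19 (op3) give the opcode, so
   shifting right by 19 and masking with 0x3f isolates op3; the listed values
   are the store variants. The Alpha handler earlier uses the same "decode
   the faulting instruction" trick; hosts whose signal context provides a
   hardware write flag (e.g. the x86 error code) don't need it. */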
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */