/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;
    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
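/* Illustrative sketch, not part of the original source: the value
   returned by tcg_qemu_tb_exec() packs a TranslationBlock pointer in
   its upper bits and a tag in its two low bits (tag 2 is checked above
   to mean "an event fired before the TB body ran").  Decoding it looks
   like this: */
static inline TranslationBlock *example_next_tb_ptr(unsigned long next_tb)
{
    return (TranslationBlock *)(next_tb & ~3ul);   /* strip the tag bits */
}

static inline int example_next_tb_tag(unsigned long next_tb)
{
    return (int)(next_tb & 3);                     /* 0-3: why we stopped */
}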
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
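/* Added commentary (not in the original): TB lookup is two-level.
   tb_find_fast() probes env->tb_jmp_cache, a direct-mapped table hashed
   on the virtual PC; on a miss or tag mismatch it falls back to
   tb_find_slow(), which walks tb_phys_hash keyed on the *physical* PC,
   so cached blocks survive changes of virtual-to-physical mappings. */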
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
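/* Usage sketch (illustrative only; the example_* names below are
   hypothetical, not part of QEMU): a debugger front end can chain onto
   whatever handler was installed before it. */
static CPUDebugExcpHandler *example_prev_handler;

static void example_debug_handler(CPUState *env)
{
    /* ... examine breakpoint/watchpoint state in env here ... */
    if (example_prev_handler)
        example_prev_handler(env);
}

static inline void example_install_debug_handler(void)
{
    example_prev_handler = cpu_set_debug_excp_handler(example_debug_handler);
}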
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
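    /* Added commentary (not in the original): bit 10 of EFLAGS is the
       direction flag.  The DF line above maps it to a stride of +1 or
       -1 for string operations:
           bit10 = 0  ->  DF = 1 - 2*0 = +1  (addresses increment)
           bit10 = 1  ->  DF = 1 - 2*1 = -1  (addresses decrement) */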
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_Z80)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_Z80)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_Z80)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        /* TODO: Add support for NMIs */
                        do_interrupt(env);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_Z80)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
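                /* Added commentary (not in the original): when next_tb
                   is non-zero, its low two bits select which of the
                   previous TB's two outgoing jump slots tb_add_jump()
                   patches, so later iterations can enter tb directly
                   without coming back through this lookup path. */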
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
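                /* Added commentary (not in the original): the icount
                   decrementer visible to translated code is only 16 bits
                   wide (icount_decr.u16.low), so a large instruction
                   budget is parked in env->icount_extra and fed to the
                   decrementer in chunks of at most 0xffff instructions. */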
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_switch_to_kqemu(env);
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_Z80)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
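/* Illustrative sketch (not part of this file): handle_cpu_signal() is
   reached from a host signal handler installed elsewhere in QEMU with
   the POSIX sa_sigaction shape.  A minimal installation looks roughly
   like this; the wrapper name is hypothetical. */
static inline void example_install_segv_handler(
    void (*act_fn)(int, siginfo_t *, void *))
{
    struct sigaction act;

    sigfillset(&act.sa_mask);      /* block other signals while handling */
    act.sa_flags = SA_SIGINFO;     /* deliver siginfo_t and ucontext */
    act.sa_sigaction = act_fn;     /* e.g. a wrapper that calls
                                      cpu_signal_handler() */
    sigaction(SIGSEGV, &act, NULL);
}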
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
1329 #elif defined(_ARCH_PPC)
1331 /***********************************************************************
1332 * signal context platform-specific definitions
1336 /* All Registers access - only for local access */
1337 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1338 /* Gpr Registers access */
1339 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1340 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1341 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1342 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1343 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1344 # define LR_sig(context) REG_sig(link, context) /* Link register */
1345 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1346 /* Float Registers access */
1347 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1348 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1349 /* Exception Registers access */
1350 # define DAR_sig(context) REG_sig(dar, context)
1351 # define DSISR_sig(context) REG_sig(dsisr, context)
1352 # define TRAP_sig(context) REG_sig(trap, context)
1356 # include <sys/ucontext.h>
1357 typedef struct ucontext SIGCONTEXT
;
1358 /* All Registers access - only for local access */
1359 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1360 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1361 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1362 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1363 /* Gpr Registers access */
1364 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1365 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1366 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1367 # define CTR_sig(context) REG_sig(ctr, context)
1368 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1369 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1370 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1371 /* Float Registers access */
1372 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1373 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1374 /* Exception Registers access */
1375 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1376 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1377 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1378 #endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33: */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */