/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
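
/* tcg_qemu_tb_exec() returns the address of the last executed TB with
   status information encoded in its low two bits: 0 or 1 identify the jump
   slot through which the block exited (used for direct block chaining in
   the main loop), while 2 means the instruction counter expired before the
   block started executing, which is why the PC is restored above. */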

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
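
/* TB lookup is two-level: tb_find_fast() below probes the per-CPU
   tb_jmp_cache, a direct-mapped cache indexed by a hash of the virtual PC,
   and falls back to tb_find_slow(), which searches tb_phys_hash keyed by
   the physical PC so that cached translations stay valid across changes of
   the virtual address mapping. */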

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
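
    /* hostregs_helper.h expands differently depending on which macro is
       defined: with DECLARE_HOST_REGS it declares register variables bound
       to the host registers that QEMU reserves for fixed purposes (such as
       the global env pointer) together with save slots, with SAVE_HOST_REGS
       it saves the current values, and the final inclusion at the end of
       this function restores them. */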
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
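
    /* x86 condition codes are evaluated lazily: CC_OP records which
       operation last set the flags and CC_SRC/CC_DST hold its operands, so
       the flags are only materialized on demand (CC_OP_EFLAGS means CC_SRC
       already holds the live flag bits).  DF is kept as +1/-1 instead of a
       flag bit so string instructions can add it directly to their index
       registers. */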
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
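            /* cpu_loop_exit() and cpu_resume_from_signal() come back here
               by longjmp()ing to env->jmp_env with a non-zero value, so
               this branch is the normal entry path and every raised
               exception restarts the outer loop iteration from the top. */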
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
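
            /* Inner execution loop: each iteration services pending
               interrupt work, looks up (or translates) the TB for the
               current PC, optionally chains it to the previously executed
               TB, and runs translated code until something forces a
               cpu_loop_exit(). */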
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
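                    /* The hflags2 tests above follow the AMD SVM rules:
                       HF2_GIF_MASK is the global interrupt flag toggled by
                       STGI/CLGI, and a physical interrupt is delivered
                       either when the guest runs with virtual interrupt
                       masking (HF2_VINTR_MASK) and the saved host IF
                       (HF2_HIF_MASK) allows it, or, without masking, when
                       the guest's own IF is set and delivery is not blocked
                       by a MOV SS/STI interrupt shadow
                       (HF_INHIBIT_IRQ_MASK). */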
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
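                /* tb_add_jump() patches the exit jump of the previous TB
                   (slot next_tb & 3) to branch directly to the new TB, so
                   chained blocks execute back to back without returning to
                   this lookup loop. */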
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
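                /* With -icount, translated code decrements
                   env->icount_decr.u16.low as it executes; the 16-bit
                   decrementer holds only part of the instruction budget,
                   so icount_extra banks the remainder and the code above
                   refills the decrementer in slices of at most 0xffff
                   instructions, using cpu_exec_nocache() to execute the
                   tail when fewer instructions remain than the next block
                   contains. */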
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } /* if (setjmp(env->jmp_env) == 0) */
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
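
/* The function above is the template that every target repeats below:
   first give page_unprotect() a chance to resolve a write fault caused by
   writing to a page that holds translated code (self-modifying code
   support), then ask the target MMU fault handler, and only for a genuine
   guest fault use cpu_restore_state() to rebuild the precise guest CPU
   state from the faulting host PC before raising the guest exception. */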

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */