/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
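/* Illustrative sketch (not part of the original file): cpu_loop_exit()
   pairs with the setjmp(env->jmp_env) in cpu_exec() further down. A minimal
   standalone analogue of that control flow, assuming nothing from QEMU: */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf jmp_env;

static void loop_exit(void)
{
    longjmp(jmp_env, 1);        /* unwind back to the setjmp below */
}

static void demo(void)
{
    if (setjmp(jmp_env) == 0) {
        printf("executing...\n");
        loop_exit();            /* never returns */
    } else {
        printf("resumed after longjmp\n");  /* the "exception" path */
    }
}
#endif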
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
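/* Illustrative sketch (not part of the original file): next_tb packs a
   TranslationBlock pointer together with a 2-bit tag in its low bits
   (TB pointers are at least 4-byte aligned), which is why the code above
   tests (next_tb & 3) == 2 and masks with ~3 to recover the pointer. */
#if 0
static TranslationBlock *next_tb_ptr(unsigned long next_tb)
{
    return (TranslationBlock *)(next_tb & ~3UL);  /* strip the tag bits */
}

static int next_tb_tag(unsigned long next_tb)
{
    return next_tb & 3;  /* 2 here means "instruction count expired" */
}
#endif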
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#if defined(CONFIG_KQEMU)
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#if defined(CONFIG_KQEMU)
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host FPU exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
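
/* Usage sketch (not part of the original file): in user-mode emulation the
   handlers above are installed with sigaction(); SA_SIGINFO delivers the
   siginfo_t/ucontext pair that cpu_signal_handler() expects. The helper
   names below are hypothetical, introduced only for this illustration. */
#if 0
#include <signal.h>

static void host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    if (!cpu_signal_handler(sig, info, puc)) {
        /* not a guest fault: re-raise with the default action */
        signal(sig, SIG_DFL);
        raise(sig);
    }
}

static void install_host_segv_handler(void)
{
    struct sigaction act;

    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_segv_handler;
    sigaction(SIGSEGV, &act, NULL);
}
#endif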