/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
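    /* As used throughout this file, tcg_qemu_tb_exec() returns a TB
       pointer with status packed into its two low bits; a low-bit
       value of 2 means the block stopped early (e.g. the instruction
       counter expired), so the guest PC must be recovered from the
       TB before it is discarded. */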
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
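    /* TBs that collide on tb_phys_hash_func() are chained through
       their phys_hash_next field; ptb1 tracks the link slot of the
       previous entry so the loop below can walk the chain in place. */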
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
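/* Two-level lookup: tb_jmp_cache is a direct-mapped cache indexed by
   the virtual PC; on a miss, tb_find_slow() walks the physical hash
   chain and, if nothing matches, translates a fresh block. */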
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
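    /* EFLAGS.DF is bit 10; the expression above maps it to the +1/-1
       stride used internally by the string operation helpers. */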
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
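            /* when built with kqemu support, hand execution to the
               in-kernel accelerator whenever possible; eflags must be
               converted to and from the CPU temporary format around
               the call */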
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_M68K) || defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
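                    /* i386: interrupts are only taken while GIF is
                       set; SMI, NMI and maskable/virtual interrupts
                       are checked below in decreasing priority order */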
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
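                    /* the low bits of SR hold the CCR: N/Z/V/C in
                       bits 3..0 and X in bit 4 */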
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_HPPA)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
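                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   previously executed TB (next_tb & ~3) to branch
                   directly to tb, so future runs chain blocks without
                   returning to this loop */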
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
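                            /* the icount budget is split between the
                               16-bit decrementer that translated code
                               counts down and icount_extra, which holds
                               whatever exceeds 0xffff */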
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } /* if */
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
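        /* real mode / vm86: the segment base is selector * 16 with a
           fixed 64KB limit and no attribute checks */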
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else

#error unsupported target CPU

#endif
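
/* The host-side handlers below recover the faulting host PC and,
   where the host signal context exposes it, the read/write direction
   of the access, then forward everything to handle_cpu_signal(). */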
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#else /* __APPLE__ */
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
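    /* on a data storage interrupt, DSISR bit 0x02000000 flags a store
       access; trap 0x400 is an instruction storage interrupt, which
       can never be a write */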
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
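        /* bits 31:26 hold the Alpha major opcode; the integer and
           floating-point store opcodes are the cases that set
           is_write = 1 */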
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
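        /* format-3 memory instructions: op3 in bits 24:19
           distinguishes loads from stores; the store opcodes set
           is_write = 1 */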
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */