/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
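
/* Note: cpu_loop_exit() is the common way for helpers and the code below
   to abandon the current TB; execution resumes at the setjmp() point in
   cpu_exec(). */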
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
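
/* Note: unlike cpu_loop_exit(), this variant is meant to be called from a
   host signal handler, so it also restores the signal mask by hand: we
   leave the handler through longjmp() rather than through a normal
   sigreturn. */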
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
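
/* Note: the value returned by tcg_qemu_tb_exec() carries status
   information in its two low bits; the value 2 checked above means the TB
   was exited before its first instruction executed, so the PC has to be
   recovered from the TB itself.  The single-shot TB is invalidated and
   freed immediately since it was generated for one precise cycle
   budget. */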
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
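
/* Note: tb_find_slow() is the second level of TB lookup.  TBs are hashed
   by the physical address of their code so that a block stays valid
   across changes in the virtual mapping; a hit is then cached in
   env->tb_jmp_cache, the virtual-pc table that tb_find_fast() probes
   first. */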
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
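    /* Note: from here on the emulated EFLAGS are kept in a lazy form:
       CC_SRC holds the data from which the flags can be recomputed, CC_OP
       records how (CC_OP_EFLAGS means CC_SRC holds the flag bits
       themselves), and DF is kept as +1/-1 so string instructions can add
       it directly.  helper_cc_compute_all() folds this state back into a
       standard EFLAGS value whenever the full register is needed. */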
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
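
            /* Note: when kqemu has run the guest natively, the flags come
               back in the standard format and are converted to the lazy
               format again above; both the exception case (ret == 1) and
               a plain restart go back through longjmp() so that pending
               exceptions and interrupts are re-examined at the top of the
               loop. */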
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_HPPA)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
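
                /* Note: next_tb encodes both the TB that just exited and,
                   in its two low bits, the index of the jump slot that was
                   taken, which is exactly what tb_add_jump() above needs
                   in order to patch that slot and chain the two TBs
                   directly. */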
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
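
                /* Note on the instruction counter: translated code
                   decrements icount_decr.u16.low, which can only hold a
                   16-bit budget, so any larger instruction balance is
                   parked in icount_extra and transferred in slices of at
                   most 0xffff instructions, as done above. */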
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
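
/* Note: all the handle_cpu_signal() variants below follow the same
   contract: return 1 if the fault was handled (either by unprotecting a
   page that was write-protected to catch self-modifying code, or by
   delivering it as a guest exception), and 0 if it is a real host fault
   that the caller must re-raise. */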
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
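
/* Note: on x86 hosts the direction of the fault is taken from the page
   fault error code (trap 0xe): bit 1 of that code is the W bit, hence the
   (ERROR_sig(uc) >> 1) & 1 above. */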
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1271 /***********************************************************************
1272 * signal context platform-specific definitions
1276 /* All Registers access - only for local access */
1277 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1278 /* Gpr Registers access */
1279 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1280 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1281 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1282 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1283 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1284 # define LR_sig(context) REG_sig(link, context) /* Link register */
1285 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1286 /* Float Registers access */
1287 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1288 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1289 /* Exception Registers access */
1290 # define DAR_sig(context) REG_sig(dar, context)
1291 # define DSISR_sig(context) REG_sig(dsisr, context)
1292 # define TRAP_sig(context) REG_sig(trap, context)
1296 # include <sys/ucontext.h>
1297 typedef struct ucontext SIGCONTEXT
;
1298 /* All Registers access - only for local access */
1299 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1300 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1301 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1302 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1303 /* Gpr Registers access */
1304 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1305 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1306 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1307 # define CTR_sig(context) REG_sig(ctr, context)
1308 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1309 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1310 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1311 /* Float Registers access */
1312 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1313 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1314 /* Exception Registers access */
1315 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1316 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1317 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1318 #endif /* __APPLE__ */
1320 int cpu_signal_handler(int host_signum
, void *pinfo
,
1323 siginfo_t
*info
= pinfo
;
1324 struct ucontext
*uc
= puc
;
1332 if (DSISR_sig(uc
) & 0x00800000)
1335 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1338 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1339 is_write
, &uc
->uc_sigmask
, puc
);
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
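
/* Note: like the alpha version above, the sparc handler has no fault
   status word to consult, so it decodes the faulting instruction and
   marks the access as a write when the opcode is one of the store forms
   listed in the switch. */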
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */