/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
#ifdef __linux__
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
    sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
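/* A note on the restore above: since the longjmp skips the normal
   sigreturn path, the signal mask that the kernel saved when it
   delivered the signal has to be restored by hand first; otherwise the
   signal that brought us here would stay blocked for the rest of the
   run. */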
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
}
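/* On the return value of tcg_qemu_tb_exec() as used above: it is the
   address of the last executed TB with status folded into the two low
   bits (TBs are aligned, so those bits are free). The value 2 in the
   low bits, tested here and again in the main loop below, means the
   block was exited before or while executing, so the guest PC must be
   resynchronized from the TB descriptor. */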
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
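/* TB lookup is two-level: tb_jmp_cache, filled above, is indexed by the
   guest virtual PC and acts as a fast first-chance cache, while
   tb_phys_hash is indexed by the physical address of the code. Hashing
   physically is what keeps a cached TB valid when the same physical page
   is mapped at several virtual addresses, and the page_addr[1] check
   covers blocks that straddle a page boundary. */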
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
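/* The (pc, cs_base, flags) triple is the complete lookup key: two code
   addresses only share a TB when the translation-relevant CPU state
   recorded by cpu_get_tb_cpu_state (on x86 roughly the privilege level,
   code segment base and 16/32-bit mode) also matches, because that
   state is baked into the generated host code. */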
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
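    /* The x86 flags are kept in a lazy form while translated code runs:
       CC_OP records which operation last set the flags and CC_SRC holds
       its operands, so the arithmetic flags are only materialized (via
       helper_cc_compute_all, see below) when actually needed. DF is
       stored as +1/-1 so string instructions can add it directly to the
       index registers; bit 10 of eflags decoded here is the direction
       flag. */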
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            /* kvm vcpu threads */
            longjmp(env->jmp_env, 1);

            longjmp(env->jmp_env, 1);
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
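                /* Direct block chaining: next_tb still holds the address
                   of the previously executed TB with the jump-slot index
                   in its two low bits, so tb_add_jump() can patch that
                   slot to branch straight into the new TB's host code.
                   Chained blocks bypass this lookup path entirely until
                   the chain is reset by setting next_tb = 0, as the
                   interrupt paths above do. */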
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
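                    /* With -icount, each TB decrements icount_decr.u16.low
                       as it runs; crossing zero makes the block exit with
                       status 2 in next_tb. icount_extra banks the
                       instructions that do not fit in the 16-bit
                       decrementer, so execution proceeds in chunks of at
                       most 0xffff instructions. When fewer instructions
                       remain than the current TB would execute,
                       cpu_exec_nocache() retranslates a shorter block so
                       the deadline is hit exactly. */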
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
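/* The wrappers above share one pattern: TCG helpers implicitly operate
   on the global 'env', so the caller's CPUX86State is swapped in, the
   helper is invoked, and the previous 'env' is restored. In real mode
   or vm86 mode (CR0.PE clear or EFLAGS.VM set) the segment cache is
   loaded directly with base = selector << 4; in protected mode the full
   descriptor load and permission checks are delegated to
   helper_load_seg(). */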
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
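/* Contract shared by every handle_cpu_signal variant below: return 1
   when the fault was consumed (a page unprotected for self-modifying
   code, or a guest MMU fault that was serviced or re-raised as a guest
   exception), and return 0 to tell the host signal handler the fault is
   not ours. The raise/resume paths longjmp back into cpu_exec() and
   never return, hence the "never comes here" comments. */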
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 */

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#else /* __APPLE__ */
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */