/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
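/* On most hosts the two macros above are intentionally empty; they
   are redefined below for SPARC hosts whose (old) glibc setjmp/longjmp
   clobbers the global register variables this file relies on. */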
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;

#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)
static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
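/* Control flow note: cpu_exec() runs translated code inside a
   setjmp(env->jmp_env) region. Anything that must abort the current
   block (guest exception, pending interrupt, debug event) funnels
   through cpu_loop_exit() or a direct longjmp, which lands back on
   the setjmp with the CPU state already flushed to env. A typical
   raiser looks like:

       env->exception_index = EXCP_DEBUG;
       cpu_loop_exit();
 */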
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
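    /* The physical hash is keyed on the physical PC so that a cached
       translation stays valid no matter which virtual mapping it is
       entered through. In this QEMU generation tb_phys_hash_func() is
       essentially a mask of the low bits, something like:

           h = phys_pc & (CODE_GEN_PHYS_HASH_SIZE - 1);

       (see exec-all.h for the exact definition). */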
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
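/* A translated block thus ends up in two lookup structures: the
   global tb_phys_hash chain used by the slow path above (physically
   indexed, so it survives virtual-memory remappings) and the per-CPU
   env->tb_jmp_cache array used by tb_find_fast() below (virtually
   indexed, cheap, invalidated when the MMU mappings change). */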
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
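    /* The (pc, cs_base, flags) triple is the lookup key for a
       translated block: any piece of CPU state that influences code
       generation (privilege level, Thumb bit, FPU enables, ...) must
       be folded into flags below, otherwise a stale translation could
       be reused in the wrong mode. */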
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
#define BREAK_CHAIN T0 = 0
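/* T0 doubles as the block-chaining cookie: a TB that exits through
   one of its two direct-jump slots leaves its own address in T0 with
   the slot index encoded in the low two bits (see the tb_add_jump()
   call further down). BREAK_CHAIN clears it whenever control flow
   changed behind the translator's back, so the exiting TB is not
   patched to jump straight into the next one. */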
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
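    /* env (and on some hosts a few temporaries such as T0) live in
       fixed host registers while translated code runs;
       hostregs_helper.h expands to the save/restore code for them,
       driven by the DECLARE_HOST_REGS/SAVE_HOST_REGS defines.
       env_to_regs() then loads the parts of the target CPU state that
       generated code expects to find cached in those globals. */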
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
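                /* tb_add_jump() patches jump slot (T0 & 3) of the
                   block that just exited (address T0 & ~3) so that it
                   branches straight to tb's generated code next time;
                   once chained, hot blocks loop among themselves
                   without returning to this dispatcher at all. */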
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
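                /* Clearing env->current_tb records that no translated
                   block is executing any more; a non-NULL value tells
                   the rest of the emulator (TB invalidation in
                   particular) that a block is live right now and
                   cannot simply be freed or patched. */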
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
766 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
768 CPUX86State
*saved_env
;
772 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
774 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
775 (selector
<< 4), 0xffff, 0);
777 load_seg(seg_reg
, selector
);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
*s
, target_ulong ptr
, int data32
)
796 CPUX86State
*saved_env
;
801 helper_frstor(ptr
, data32
);
806 #endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
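/* Return protocol shared by every handle_cpu_signal() variant below:
   0 means the fault is not ours and should be handed back to the
   host; 1 means it was handled transparently and the faulting
   instruction can be restarted. When the fault maps to a guest
   exception the function does not return at all: it longjmps back
   into cpu_exec() via raise_exception_err()/cpu_loop_exit(). */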
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#elif defined(__APPLE__)
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                   REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
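/* Same instruction-decoding trick as on Alpha: SPARC format-3
   instructions (op field, bits 31:30, equal to 3) are the memory
   ops, and the op3 field in bits 24:19 distinguishes the store
   variants tested above. */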
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */