/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
21 #define CPU_NO_GLOBAL_REGS
26 #if !defined(CONFIG_SOFTMMU)
37 #include <sys/ucontext.h>
/* Set to 1 by tb_find_slow() after generating a new TB invalidated
   existing TBs; tb_find_fast() checks it to recompute its jump-cache
   hash entry (see the tb_invalidated_flag uses further down). */
40 int tb_invalidated_flag
;
/* Opaque value returned by the last executed TB: a TranslationBlock
   pointer with a jump index folded into the low 2 bits (see the
   tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb)
   call below); 0 forces lookup of the first TB. */
41 static unsigned long next_tb
;
44 //#define DEBUG_SIGNAL
/* Default no-op definitions: redefined below for sparc hosts with a
   buggy glibc, where global register variables (env, %i7) must be
   saved/restored by hand around library calls. */
46 #define SAVE_GLOBALS()
47 #define RESTORE_GLOBALS()
49 #if defined(__sparc__) && !defined(HOST_SOLARIS)
51 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
52 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
53 // Work around ugly bugs in glibc that mangle global register contents
55 static volatile void *saved_env
;
56 static volatile unsigned long saved_t0
, saved_i7
;
58 #define SAVE_GLOBALS() do { \
61 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
64 #undef RESTORE_GLOBALS
65 #define RESTORE_GLOBALS() do { \
66 env = (void *)saved_env; \
68 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
71 static int sparc_setjmp(jmp_buf buf
)
81 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
83 static void sparc_longjmp(jmp_buf buf
, int val
)
88 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
/* Abort execution of the current translated block: longjmp() back to
   the setjmp() point established in cpu_exec(), with value 1, so the
   main loop re-enters its exception-dispatch path. */
92 void cpu_loop_exit(void)
94 /* NOTE: the registers at this point must be saved by hand because
95    longjmp restores them */
97 longjmp(env
->jmp_env
, 1);
100 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
104 /* exit the current TB from a signal handler. The host registers are
105    restored in a state compatible with the CPU emulator */
107 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
109 #if !defined(CONFIG_SOFTMMU)
/* 'puc' is the host ucontext passed to the signal handler; it carries
   the signal mask that was in force when the fault was taken. */
110 struct ucontext
*uc
= puc
;
115 /* XXX: restore cpu registers saved in host registers */
117 #if !defined(CONFIG_SOFTMMU)
119 /* XXX: use siglongjmp ? */
/* Restore the pre-handler signal mask by hand: we leave the handler
   via longjmp below, so the normal sigreturn path never runs. */
120 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
/* Jump back to the setjmp() point in cpu_exec(); the faulting TB is
   abandoned.  NOTE(review): the 'env = env1' assignment, matching
   #endif lines and braces are not visible in this extraction. */
123 longjmp(env
->jmp_env
, 1);
126 static TranslationBlock
*tb_find_slow(target_ulong pc
,
127 target_ulong cs_base
,
130 TranslationBlock
*tb
, **ptb1
;
133 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
138 tb_invalidated_flag
= 0;
140 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
142 /* find translated block using physical mappings */
143 phys_pc
= get_phys_addr_code(env
, pc
);
144 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
146 h
= tb_phys_hash_func(phys_pc
);
147 ptb1
= &tb_phys_hash
[h
];
153 tb
->page_addr
[0] == phys_page1
&&
154 tb
->cs_base
== cs_base
&&
155 tb
->flags
== flags
) {
156 /* check next page if needed */
157 if (tb
->page_addr
[1] != -1) {
158 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
160 phys_page2
= get_phys_addr_code(env
, virt_page2
);
161 if (tb
->page_addr
[1] == phys_page2
)
167 ptb1
= &tb
->phys_hash_next
;
170 /* if no translated code available, then translate it now */
173 /* flush must be done */
175 /* cannot fail at this point */
177 /* don't forget to invalidate previous TB info */
178 tb_invalidated_flag
= 1;
180 tc_ptr
= code_gen_ptr
;
182 tb
->cs_base
= cs_base
;
185 cpu_gen_code(env
, tb
, &code_gen_size
);
187 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
189 /* check next page if needed */
190 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
192 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
193 phys_page2
= get_phys_addr_code(env
, virt_page2
);
195 tb_link_phys(tb
, phys_pc
, phys_page2
);
198 /* we add the TB in the virtual pc hash table */
199 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
200 spin_unlock(&tb_lock
);
204 static inline TranslationBlock
*tb_find_fast(void)
206 TranslationBlock
*tb
;
207 target_ulong cs_base
, pc
;
210 /* we record a subset of the CPU state. It will
211 always be the same before a given translated block
213 #if defined(TARGET_I386)
215 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
216 flags
|= env
->intercept
;
217 cs_base
= env
->segs
[R_CS
].base
;
218 pc
= cs_base
+ env
->eip
;
219 #elif defined(TARGET_ARM)
220 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
221 | (env
->vfp
.vec_stride
<< 4);
222 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
224 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
226 flags
|= (env
->condexec_bits
<< 8);
229 #elif defined(TARGET_SPARC)
230 #ifdef TARGET_SPARC64
231 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
232 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
233 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
235 // FPU enable . Supervisor
236 flags
= (env
->psref
<< 4) | env
->psrs
;
240 #elif defined(TARGET_PPC)
244 #elif defined(TARGET_MIPS)
245 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
247 pc
= env
->PC
[env
->current_tc
];
248 #elif defined(TARGET_M68K)
249 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
250 | (env
->sr
& SR_S
) /* Bit 13 */
251 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
254 #elif defined(TARGET_SH4)
258 #elif defined(TARGET_ALPHA)
262 #elif defined(TARGET_CRIS)
263 flags
= env
->pregs
[PR_CCS
] & U_FLAG
;
266 #elif defined(TARGET_HPPA)
267 flags
= env
->psw
& PSW_N
; /* XXX: use more bits? */
268 cs_base
= env
->iaoq
[1];
271 #error unsupported CPU
273 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
274 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
275 tb
->flags
!= flags
, 0)) {
276 tb
= tb_find_slow(pc
, cs_base
, flags
);
277 /* Note: we do it here to avoid a gcc bug on Mac OS X when
278 doing it in tb_find_slow */
279 if (tb_invalidated_flag
) {
280 /* as some TB could have been invalidated because
281 of memory exceptions while generating the code, we
282 must recompute the hash index here */
289 /* main execution loop */
291 int cpu_exec(CPUState
*env1
)
293 #define DECLARE_HOST_REGS 1
294 #include "hostregs_helper.h"
295 #if defined(TARGET_SPARC)
296 #if defined(reg_REGWPTR)
297 uint32_t *saved_regwptr
;
300 int ret
, interrupt_request
;
301 TranslationBlock
*tb
;
304 if (cpu_halted(env1
) == EXCP_HALTED
)
307 cpu_single_env
= env1
;
309 /* first we save global registers */
310 #define SAVE_HOST_REGS 1
311 #include "hostregs_helper.h"
316 #if defined(TARGET_I386)
317 /* put eflags in CPU temporary format */
318 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
319 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
320 CC_OP
= CC_OP_EFLAGS
;
321 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
322 #elif defined(TARGET_SPARC)
323 #if defined(reg_REGWPTR)
324 saved_regwptr
= REGWPTR
;
326 #elif defined(TARGET_M68K)
327 env
->cc_op
= CC_OP_FLAGS
;
328 env
->cc_dest
= env
->sr
& 0xf;
329 env
->cc_x
= (env
->sr
>> 4) & 1;
330 #elif defined(TARGET_ALPHA)
331 #elif defined(TARGET_ARM)
332 #elif defined(TARGET_PPC)
333 #elif defined(TARGET_HPPA)
334 #elif defined(TARGET_MIPS)
335 #elif defined(TARGET_SH4)
336 #elif defined(TARGET_CRIS)
339 #error unsupported target CPU
341 env
->exception_index
= -1;
343 /* prepare setjmp context for exception handling */
345 if (setjmp(env
->jmp_env
) == 0) {
346 env
->current_tb
= NULL
;
347 /* if an exception is pending, we execute it here */
348 if (env
->exception_index
>= 0) {
349 if (env
->exception_index
>= EXCP_INTERRUPT
) {
350 /* exit request from the cpu execution loop */
351 ret
= env
->exception_index
;
353 } else if (env
->user_mode_only
) {
354 /* if user mode only, we simulate a fake exception
355 which will be handled outside the cpu execution
357 #if defined(TARGET_I386)
358 do_interrupt_user(env
->exception_index
,
359 env
->exception_is_int
,
361 env
->exception_next_eip
);
363 ret
= env
->exception_index
;
366 #if defined(TARGET_I386)
367 /* simulate a real cpu exception. On i386, it can
368 trigger new exceptions, but we do not handle
369 double or triple faults yet. */
370 do_interrupt(env
->exception_index
,
371 env
->exception_is_int
,
373 env
->exception_next_eip
, 0);
374 /* successfully delivered */
375 env
->old_exception
= -1;
376 #elif defined(TARGET_PPC)
378 #elif defined(TARGET_MIPS)
380 #elif defined(TARGET_SPARC)
381 do_interrupt(env
->exception_index
);
382 #elif defined(TARGET_ARM)
384 #elif defined(TARGET_SH4)
386 #elif defined(TARGET_ALPHA)
388 #elif defined(TARGET_CRIS)
390 #elif defined(TARGET_M68K)
392 #elif defined(TARGET_HPPA)
396 env
->exception_index
= -1;
399 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
401 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
402 ret
= kqemu_cpu_exec(env
);
403 /* put eflags in CPU temporary format */
404 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
405 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
406 CC_OP
= CC_OP_EFLAGS
;
407 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
410 longjmp(env
->jmp_env
, 1);
411 } else if (ret
== 2) {
412 /* softmmu execution needed */
414 if (env
->interrupt_request
!= 0) {
415 /* hardware interrupt will be executed just after */
417 /* otherwise, we restart */
418 longjmp(env
->jmp_env
, 1);
424 next_tb
= 0; /* force lookup of first TB */
427 interrupt_request
= env
->interrupt_request
;
428 if (__builtin_expect(interrupt_request
, 0)
429 #if defined(TARGET_I386)
430 && env
->hflags
& HF_GIF_MASK
432 && !(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
433 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
434 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
435 env
->exception_index
= EXCP_DEBUG
;
438 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
439 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
440 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
441 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
443 env
->exception_index
= EXCP_HLT
;
447 #if defined(TARGET_I386)
448 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
449 !(env
->hflags
& HF_SMM_MASK
)) {
450 svm_check_intercept(SVM_EXIT_SMI
);
451 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
454 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
455 !(env
->hflags
& HF_NMI_MASK
)) {
456 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
457 env
->hflags
|= HF_NMI_MASK
;
458 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
460 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
461 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
462 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
464 svm_check_intercept(SVM_EXIT_INTR
);
465 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
466 intno
= cpu_get_pic_interrupt(env
);
467 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
468 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
470 do_interrupt(intno
, 0, 0, 0, 1);
471 /* ensure that no TB jump will be modified as
472 the program flow was changed */
474 #if !defined(CONFIG_USER_ONLY)
475 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
476 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
478 /* FIXME: this should respect TPR */
479 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
480 svm_check_intercept(SVM_EXIT_VINTR
);
481 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
482 if (loglevel
& CPU_LOG_TB_IN_ASM
)
483 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
484 do_interrupt(intno
, 0, 0, -1, 1);
485 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
486 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
490 #elif defined(TARGET_PPC)
492 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
496 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
497 ppc_hw_interrupt(env
);
498 if (env
->pending_interrupts
== 0)
499 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
502 #elif defined(TARGET_MIPS)
503 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
504 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
505 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
506 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
507 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
508 !(env
->hflags
& MIPS_HFLAG_DM
)) {
510 env
->exception_index
= EXCP_EXT_INTERRUPT
;
515 #elif defined(TARGET_SPARC)
516 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
518 int pil
= env
->interrupt_index
& 15;
519 int type
= env
->interrupt_index
& 0xf0;
521 if (((type
== TT_EXTINT
) &&
522 (pil
== 15 || pil
> env
->psrpil
)) ||
524 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
525 do_interrupt(env
->interrupt_index
);
526 env
->interrupt_index
= 0;
527 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
532 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
533 //do_interrupt(0, 0, 0, 0, 0);
534 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
536 #elif defined(TARGET_ARM)
537 if (interrupt_request
& CPU_INTERRUPT_FIQ
538 && !(env
->uncached_cpsr
& CPSR_F
)) {
539 env
->exception_index
= EXCP_FIQ
;
543 /* ARMv7-M interrupt return works by loading a magic value
544 into the PC. On real hardware the load causes the
545 return to occur. The qemu implementation performs the
546 jump normally, then does the exception return when the
547 CPU tries to execute code at the magic address.
548 This will cause the magic PC value to be pushed to
549 the stack if an interrupt occured at the wrong time.
550 We avoid this by disabling interrupts when
551 pc contains a magic address. */
552 if (interrupt_request
& CPU_INTERRUPT_HARD
553 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
554 || !(env
->uncached_cpsr
& CPSR_I
))) {
555 env
->exception_index
= EXCP_IRQ
;
559 #elif defined(TARGET_SH4)
560 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
564 #elif defined(TARGET_ALPHA)
565 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
569 #elif defined(TARGET_CRIS)
570 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
574 #elif defined(TARGET_M68K)
575 if (interrupt_request
& CPU_INTERRUPT_HARD
576 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
577 < env
->pending_level
) {
578 /* Real hardware gets the interrupt vector via an
579 IACK cycle at this point. Current emulated
580 hardware doesn't rely on this, so we
581 provide/save the vector when the interrupt is
583 env
->exception_index
= env
->pending_vector
;
587 #elif defined(TARGET_HPPA)
588 if (interrupt_request
& CPU_INTERRUPT_HARD
589 && !(env
->psw
& PSW_I
)) {
590 env
->exception_index
= EXCP_EXTINT
;
594 /* Don't use the cached interupt_request value,
595 do_interrupt may have updated the EXITTB flag. */
596 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
597 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
598 /* ensure that no TB jump will be modified as
599 the program flow was changed */
602 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
603 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
604 env
->exception_index
= EXCP_INTERRUPT
;
609 if ((loglevel
& CPU_LOG_TB_CPU
)) {
610 /* restore flags in standard format */
612 #if defined(TARGET_I386)
613 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
614 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
615 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
616 #elif defined(TARGET_ARM)
617 cpu_dump_state(env
, logfile
, fprintf
, 0);
618 #elif defined(TARGET_SPARC)
619 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
620 env
->regwptr
= REGWPTR
;
621 cpu_dump_state(env
, logfile
, fprintf
, 0);
622 #elif defined(TARGET_PPC)
623 cpu_dump_state(env
, logfile
, fprintf
, 0);
624 #elif defined(TARGET_M68K)
625 cpu_m68k_flush_flags(env
, env
->cc_op
);
626 env
->cc_op
= CC_OP_FLAGS
;
627 env
->sr
= (env
->sr
& 0xffe0)
628 | env
->cc_dest
| (env
->cc_x
<< 4);
629 cpu_dump_state(env
, logfile
, fprintf
, 0);
630 #elif defined(TARGET_MIPS)
631 cpu_dump_state(env
, logfile
, fprintf
, 0);
632 #elif defined(TARGET_SH4)
633 cpu_dump_state(env
, logfile
, fprintf
, 0);
634 #elif defined(TARGET_ALPHA)
635 cpu_dump_state(env
, logfile
, fprintf
, 0);
636 #elif defined(TARGET_CRIS)
637 cpu_dump_state(env
, logfile
, fprintf
, 0);
638 #elif defined(TARGET_HPPA)
639 cpu_dump_state(env
, logfile
, fprintf
, 0);
641 #error unsupported target CPU
647 if ((loglevel
& CPU_LOG_EXEC
)) {
648 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
649 (long)tb
->tc_ptr
, tb
->pc
,
650 lookup_symbol(tb
->pc
));
654 /* see if we can patch the calling TB. When the TB
655 spans two pages, we cannot safely do a direct
660 (env
->kqemu_enabled
!= 2) &&
662 tb
->page_addr
[1] == -1) {
664 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
665 spin_unlock(&tb_lock
);
669 env
->current_tb
= tb
;
670 /* execute the generated code */
671 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
672 env
->current_tb
= NULL
;
673 /* reset soft MMU for next block (it can currently
674 only be set by a memory fault) */
675 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
676 if (env
->hflags
& HF_SOFTMMU_MASK
) {
677 env
->hflags
&= ~HF_SOFTMMU_MASK
;
678 /* do not allow linking to another block */
682 #if defined(USE_KQEMU)
683 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
684 if (kqemu_is_ok(env
) &&
685 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
696 #if defined(TARGET_I386)
697 /* restore flags in standard format */
698 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
699 #elif defined(TARGET_ARM)
700 /* XXX: Save/restore host fpu exception state?. */
701 #elif defined(TARGET_SPARC)
702 #if defined(reg_REGWPTR)
703 REGWPTR
= saved_regwptr
;
705 #elif defined(TARGET_PPC)
706 #elif defined(TARGET_M68K)
707 cpu_m68k_flush_flags(env
, env
->cc_op
);
708 env
->cc_op
= CC_OP_FLAGS
;
709 env
->sr
= (env
->sr
& 0xffe0)
710 | env
->cc_dest
| (env
->cc_x
<< 4);
711 #elif defined(TARGET_MIPS)
712 #elif defined(TARGET_SH4)
713 #elif defined(TARGET_ALPHA)
714 #elif defined(TARGET_CRIS)
715 #elif defined(TARGET_HPPA)
718 #error unsupported target CPU
721 /* restore global registers */
723 #include "hostregs_helper.h"
725 /* fail safe : never use cpu_single_env outside cpu_exec() */
726 cpu_single_env
= NULL
;
730 /* must only be called from the generated code as an exception can be
   generated (remainder of original comment missing from extraction) */
/* Invalidate all translated blocks overlapping the guest-virtual
   range [start, end): resolve the physical code address of 'start'
   and flush the same-length physical span. */
732 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
734 /* XXX: cannot enable it yet because it yields to MMU exception
735    where NIP != read address on PowerPC */
737 target_ulong phys_addr
;
738 phys_addr
= get_phys_addr_code(env
, start
);
/* Final 0 argument: presumably "not a CPU write access" — verify
   against tb_invalidate_phys_page_range(). */
739 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
743 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
745 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
/* User-mode-only helper: load segment register 'seg_reg' with
   'selector' on CPU state 's'.  NOTE(review): the customary
   'saved_env = env; env = s; ... env = saved_env;' swap and the
   else/closing braces are not visible in this extraction. */
747 CPUX86State
*saved_env
;
/* Real mode or vm86 mode (no protected-mode descriptors): load the
   cache directly with base = selector << 4, limit = 0xffff, flags 0. */
751 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
753 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
754 (selector
<< 4), 0xffff, 0);
/* Protected mode: go through the full descriptor load, which may
   raise a fault. */
756 load_seg(seg_reg
, selector
);
761 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
/* User-mode wrapper for the FSAVE helper: saves FPU state of 's' at
   guest address 'ptr'; 'data32' presumably selects the 32- vs 16-bit
   operand-size layout — confirm against helper_fsave().
   NOTE(review): the saved_env/env swap lines are missing from this
   extraction. */
763 CPUX86State
*saved_env
;
768 helper_fsave(ptr
, data32
);
773 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
/* User-mode wrapper for the FRSTOR helper: restores FPU state of 's'
   from guest address 'ptr'; 'data32' presumably selects the 32- vs
   16-bit layout — confirm against helper_frstor().
   NOTE(review): the saved_env/env swap lines are missing from this
   extraction. */
775 CPUX86State
*saved_env
;
780 helper_frstor(ptr
, data32
);
785 #endif /* TARGET_I386 */
787 #if !defined(CONFIG_SOFTMMU)
789 #if defined(TARGET_I386)
791 /* 'pc' is the host PC at which the exception was raised. 'address' is
792 the effective address of the memory exception. 'is_write' is 1 if a
793 write caused the exception and otherwise 0'. 'old_set' is the
794 signal set which should be restored */
795 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
796 int is_write
, sigset_t
*old_set
,
799 TranslationBlock
*tb
;
803 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
804 #if defined(DEBUG_SIGNAL)
805 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
806 pc
, address
, is_write
, *(unsigned long *)old_set
);
808 /* XXX: locking issue */
809 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
813 /* see if it is an MMU fault */
814 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
816 return 0; /* not an MMU fault */
818 return 1; /* the MMU fault was handled without causing real CPU fault */
819 /* now we have a real cpu fault */
822 /* the PC is inside the translated code. It means that we have
823 a virtual CPU fault */
824 cpu_restore_state(tb
, env
, pc
, puc
);
828 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
829 env
->eip
, env
->cr
[2], env
->error_code
);
831 /* we restore the process signal mask as the sigreturn should
832 do it (XXX: use sigsetjmp) */
833 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
834 raise_exception_err(env
->exception_index
, env
->error_code
);
836 /* activate soft MMU for this block */
837 env
->hflags
|= HF_SOFTMMU_MASK
;
838 cpu_resume_from_signal(env
, puc
);
840 /* never comes here */
844 #elif defined(TARGET_ARM)
845 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
846 int is_write
, sigset_t
*old_set
,
849 TranslationBlock
*tb
;
853 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
854 #if defined(DEBUG_SIGNAL)
855 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
856 pc
, address
, is_write
, *(unsigned long *)old_set
);
858 /* XXX: locking issue */
859 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
862 /* see if it is an MMU fault */
863 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
865 return 0; /* not an MMU fault */
867 return 1; /* the MMU fault was handled without causing real CPU fault */
868 /* now we have a real cpu fault */
871 /* the PC is inside the translated code. It means that we have
872 a virtual CPU fault */
873 cpu_restore_state(tb
, env
, pc
, puc
);
875 /* we restore the process signal mask as the sigreturn should
876 do it (XXX: use sigsetjmp) */
877 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
879 /* never comes here */
882 #elif defined(TARGET_SPARC)
883 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
884 int is_write
, sigset_t
*old_set
,
887 TranslationBlock
*tb
;
891 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
892 #if defined(DEBUG_SIGNAL)
893 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
894 pc
, address
, is_write
, *(unsigned long *)old_set
);
896 /* XXX: locking issue */
897 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
900 /* see if it is an MMU fault */
901 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
903 return 0; /* not an MMU fault */
905 return 1; /* the MMU fault was handled without causing real CPU fault */
906 /* now we have a real cpu fault */
909 /* the PC is inside the translated code. It means that we have
910 a virtual CPU fault */
911 cpu_restore_state(tb
, env
, pc
, puc
);
913 /* we restore the process signal mask as the sigreturn should
914 do it (XXX: use sigsetjmp) */
915 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
917 /* never comes here */
920 #elif defined (TARGET_PPC)
921 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
922 int is_write
, sigset_t
*old_set
,
925 TranslationBlock
*tb
;
929 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
930 #if defined(DEBUG_SIGNAL)
931 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
932 pc
, address
, is_write
, *(unsigned long *)old_set
);
934 /* XXX: locking issue */
935 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
939 /* see if it is an MMU fault */
940 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
942 return 0; /* not an MMU fault */
944 return 1; /* the MMU fault was handled without causing real CPU fault */
946 /* now we have a real cpu fault */
949 /* the PC is inside the translated code. It means that we have
950 a virtual CPU fault */
951 cpu_restore_state(tb
, env
, pc
, puc
);
955 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
956 env
->nip
, env
->error_code
, tb
);
958 /* we restore the process signal mask as the sigreturn should
959 do it (XXX: use sigsetjmp) */
960 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
961 do_raise_exception_err(env
->exception_index
, env
->error_code
);
963 /* activate soft MMU for this block */
964 cpu_resume_from_signal(env
, puc
);
966 /* never comes here */
970 #elif defined(TARGET_M68K)
971 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
972 int is_write
, sigset_t
*old_set
,
975 TranslationBlock
*tb
;
979 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
980 #if defined(DEBUG_SIGNAL)
981 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
982 pc
, address
, is_write
, *(unsigned long *)old_set
);
984 /* XXX: locking issue */
985 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
988 /* see if it is an MMU fault */
989 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
991 return 0; /* not an MMU fault */
993 return 1; /* the MMU fault was handled without causing real CPU fault */
994 /* now we have a real cpu fault */
997 /* the PC is inside the translated code. It means that we have
998 a virtual CPU fault */
999 cpu_restore_state(tb
, env
, pc
, puc
);
1001 /* we restore the process signal mask as the sigreturn should
1002 do it (XXX: use sigsetjmp) */
1003 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1005 /* never comes here */
1009 #elif defined (TARGET_MIPS)
1010 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1011 int is_write
, sigset_t
*old_set
,
1014 TranslationBlock
*tb
;
1018 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1019 #if defined(DEBUG_SIGNAL)
1020 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1021 pc
, address
, is_write
, *(unsigned long *)old_set
);
1023 /* XXX: locking issue */
1024 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1028 /* see if it is an MMU fault */
1029 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1031 return 0; /* not an MMU fault */
1033 return 1; /* the MMU fault was handled without causing real CPU fault */
1035 /* now we have a real cpu fault */
1036 tb
= tb_find_pc(pc
);
1038 /* the PC is inside the translated code. It means that we have
1039 a virtual CPU fault */
1040 cpu_restore_state(tb
, env
, pc
, puc
);
1044 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1045 env
->PC
, env
->error_code
, tb
);
1047 /* we restore the process signal mask as the sigreturn should
1048 do it (XXX: use sigsetjmp) */
1049 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1050 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1052 /* activate soft MMU for this block */
1053 cpu_resume_from_signal(env
, puc
);
1055 /* never comes here */
1059 #elif defined (TARGET_SH4)
1060 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1061 int is_write
, sigset_t
*old_set
,
1064 TranslationBlock
*tb
;
1068 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1069 #if defined(DEBUG_SIGNAL)
1070 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1071 pc
, address
, is_write
, *(unsigned long *)old_set
);
1073 /* XXX: locking issue */
1074 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1078 /* see if it is an MMU fault */
1079 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1081 return 0; /* not an MMU fault */
1083 return 1; /* the MMU fault was handled without causing real CPU fault */
1085 /* now we have a real cpu fault */
1086 tb
= tb_find_pc(pc
);
1088 /* the PC is inside the translated code. It means that we have
1089 a virtual CPU fault */
1090 cpu_restore_state(tb
, env
, pc
, puc
);
1093 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1094 env
->nip
, env
->error_code
, tb
);
1096 /* we restore the process signal mask as the sigreturn should
1097 do it (XXX: use sigsetjmp) */
1098 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1100 /* never comes here */
1104 #elif defined (TARGET_ALPHA)
1105 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1106 int is_write
, sigset_t
*old_set
,
1109 TranslationBlock
*tb
;
1113 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1114 #if defined(DEBUG_SIGNAL)
1115 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1116 pc
, address
, is_write
, *(unsigned long *)old_set
);
1118 /* XXX: locking issue */
1119 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1123 /* see if it is an MMU fault */
1124 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1126 return 0; /* not an MMU fault */
1128 return 1; /* the MMU fault was handled without causing real CPU fault */
1130 /* now we have a real cpu fault */
1131 tb
= tb_find_pc(pc
);
1133 /* the PC is inside the translated code. It means that we have
1134 a virtual CPU fault */
1135 cpu_restore_state(tb
, env
, pc
, puc
);
1138 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1139 env
->nip
, env
->error_code
, tb
);
1141 /* we restore the process signal mask as the sigreturn should
1142 do it (XXX: use sigsetjmp) */
1143 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1145 /* never comes here */
1148 #elif defined (TARGET_CRIS)
1149 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1150 int is_write
, sigset_t
*old_set
,
1153 TranslationBlock
*tb
;
1157 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1158 #if defined(DEBUG_SIGNAL)
1159 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1160 pc
, address
, is_write
, *(unsigned long *)old_set
);
1162 /* XXX: locking issue */
1163 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1167 /* see if it is an MMU fault */
1168 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1170 return 0; /* not an MMU fault */
1172 return 1; /* the MMU fault was handled without causing real CPU fault */
1174 /* now we have a real cpu fault */
1175 tb
= tb_find_pc(pc
);
1177 /* the PC is inside the translated code. It means that we have
1178 a virtual CPU fault */
1179 cpu_restore_state(tb
, env
, pc
, puc
);
1181 /* we restore the process signal mask as the sigreturn should
1182 do it (XXX: use sigsetjmp) */
1183 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1185 /* never comes here */
1189 #elif defined(TARGET_HPPA)
1190 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1191 int is_write
, sigset_t
*old_set
,
1194 TranslationBlock
*tb
;
1198 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1199 #if defined(DEBUG_SIGNAL)
1200 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1201 pc
, address
, is_write
, *(unsigned long *)old_set
);
1203 /* XXX: locking issue */
1204 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1207 /* see if it is an MMU fault */
1208 ret
= cpu_hppa_handle_mmu_fault(env
, address
, is_write
, 1, 0);
1210 return 0; /* not an MMU fault */
1212 return 1; /* the MMU fault was handled without causing real CPU fault */
1213 /* now we have a real cpu fault */
1214 tb
= tb_find_pc(pc
);
1216 /* the PC is inside the translated code. It means that we have
1217 a virtual CPU fault */
1218 cpu_restore_state(tb
, env
, pc
, puc
);
1220 /* we restore the process signal mask as the sigreturn should
1221 do it (XXX: use sigsetjmp) */
1222 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1227 #error unsupported target CPU
1230 #if defined(__i386__)
1232 #if defined(__APPLE__)
1233 # include <sys/ucontext.h>
1235 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1236 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1237 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1239 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1240 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1241 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1244 int cpu_signal_handler(int host_signum
, void *pinfo
,
1247 siginfo_t
*info
= pinfo
;
1248 struct ucontext
*uc
= puc
;
1256 #define REG_TRAPNO TRAPNO
1259 trapno
= TRAP_sig(uc
);
1260 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1262 (ERROR_sig(uc
) >> 1) & 1 : 0,
1263 &uc
->uc_sigmask
, puc
);
1266 #elif defined(__x86_64__)
1268 int cpu_signal_handler(int host_signum
, void *pinfo
,
1271 siginfo_t
*info
= pinfo
;
1272 struct ucontext
*uc
= puc
;
1275 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1276 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1277 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1278 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1279 &uc
->uc_sigmask
, puc
);
1282 #elif defined(__powerpc__)
1284 /***********************************************************************
1285 * signal context platform-specific definitions
1289 /* All Registers access - only for local access */
1290 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1291 /* Gpr Registers access */
1292 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1293 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1294 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1295 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1296 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1297 # define LR_sig(context) REG_sig(link, context) /* Link register */
1298 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1299 /* Float Registers access */
1300 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1301 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1302 /* Exception Registers access */
1303 # define DAR_sig(context) REG_sig(dar, context)
1304 # define DSISR_sig(context) REG_sig(dsisr, context)
1305 # define TRAP_sig(context) REG_sig(trap, context)
1309 # include <sys/ucontext.h>
1310 typedef struct ucontext SIGCONTEXT
;
1311 /* All Registers access - only for local access */
1312 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1313 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1314 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1315 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1316 /* Gpr Registers access */
1317 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1318 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1319 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1320 # define CTR_sig(context) REG_sig(ctr, context)
1321 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1322 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1323 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1324 /* Float Registers access */
1325 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1326 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1327 /* Exception Registers access */
1328 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1329 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1330 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1331 #endif /* __APPLE__ */
1333 int cpu_signal_handler(int host_signum
, void *pinfo
,
1336 siginfo_t
*info
= pinfo
;
1337 struct ucontext
*uc
= puc
;
1345 if (DSISR_sig(uc
) & 0x00800000)
1348 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1351 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1352 is_write
, &uc
->uc_sigmask
, puc
);
1355 #elif defined(__alpha__)
1357 int cpu_signal_handler(int host_signum
, void *pinfo
,
1360 siginfo_t
*info
= pinfo
;
1361 struct ucontext
*uc
= puc
;
1362 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1363 uint32_t insn
= *pc
;
1366 /* XXX: need kernel patch to get write flag faster */
1367 switch (insn
>> 26) {
1382 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1383 is_write
, &uc
->uc_sigmask
, puc
);
1385 #elif defined(__sparc__)
1387 int cpu_signal_handler(int host_signum
, void *pinfo
,
1390 siginfo_t
*info
= pinfo
;
1391 uint32_t *regs
= (uint32_t *)(info
+ 1);
1392 void *sigmask
= (regs
+ 20);
1397 /* XXX: is there a standard glibc define ? */
1399 /* XXX: need kernel patch to get write flag faster */
1401 insn
= *(uint32_t *)pc
;
1402 if ((insn
>> 30) == 3) {
1403 switch((insn
>> 19) & 0x3f) {
1415 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1416 is_write
, sigmask
, NULL
);
1419 #elif defined(__arm__)
1421 int cpu_signal_handler(int host_signum
, void *pinfo
,
1424 siginfo_t
*info
= pinfo
;
1425 struct ucontext
*uc
= puc
;
1429 pc
= uc
->uc_mcontext
.arm_pc
;
1430 /* XXX: compute is_write */
1432 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1434 &uc
->uc_sigmask
, puc
);
1437 #elif defined(__mc68000)
1439 int cpu_signal_handler(int host_signum
, void *pinfo
,
1442 siginfo_t
*info
= pinfo
;
1443 struct ucontext
*uc
= puc
;
1447 pc
= uc
->uc_mcontext
.gregs
[16];
1448 /* XXX: compute is_write */
1450 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1452 &uc
->uc_sigmask
, puc
);
1455 #elif defined(__ia64)
1458 /* This ought to be in <bits/siginfo.h>... */
1459 # define __ISR_VALID 1
1462 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1464 siginfo_t
*info
= pinfo
;
1465 struct ucontext
*uc
= puc
;
1469 ip
= uc
->uc_mcontext
.sc_ip
;
1470 switch (host_signum
) {
1476 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1477 /* ISR.W (write-access) is bit 33: */
1478 is_write
= (info
->si_isr
>> 33) & 1;
1484 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1486 &uc
->uc_sigmask
, puc
);
1489 #elif defined(__s390__)
1491 int cpu_signal_handler(int host_signum
, void *pinfo
,
1494 siginfo_t
*info
= pinfo
;
1495 struct ucontext
*uc
= puc
;
1499 pc
= uc
->uc_mcontext
.psw
.addr
;
1500 /* XXX: compute is_write */
1502 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1503 is_write
, &uc
->uc_sigmask
, puc
);
1506 #elif defined(__mips__)
1508 int cpu_signal_handler(int host_signum
, void *pinfo
,
1511 siginfo_t
*info
= pinfo
;
1512 struct ucontext
*uc
= puc
;
1513 greg_t pc
= uc
->uc_mcontext
.pc
;
1516 /* XXX: compute is_write */
1518 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1519 is_write
, &uc
->uc_sigmask
, puc
);
1522 #elif defined(__hppa__)
1524 int cpu_signal_handler(int host_signum
, void *pinfo
,
1527 struct siginfo
*info
= pinfo
;
1528 struct ucontext
*uc
= puc
;
1532 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1533 /* FIXME: compute is_write */
1535 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1537 &uc
->uc_sigmask
, puc
);
1542 #error host CPU specific signal handler needed
1546 #endif /* !defined(CONFIG_SOFTMMU) */