/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
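
/* The value returned by tcg_qemu_tb_exec() is the address of the last
   executed TB with a tag in its low two bits: 0 or 1 name the jump
   slot to patch when chaining TBs directly together, while 2 means
   execution stopped because the instruction counter expired, in which
   case only the PC needs to be restored. */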
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
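
/* TB lookup is two-level: tb_find_fast() below probes the per-CPU
   virtual-PC hash table (tb_jmp_cache); on a miss it falls back to
   tb_find_slow() above, which searches the global physical hash table
   and, as a last resort, translates a new block. */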
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */
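
/* cpu_exec() is organized as two nested for(;;) loops: the outer one
   re-enters via setjmp()/longjmp() whenever a guest exception or an
   exit request unwinds out of translated code, and the inner one
   services pending interrupts, finds (or translates) the next TB,
   optionally chains it to the previous one, and runs it. */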
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
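
    /* hostregs_helper.h is deliberately included several times: with
       DECLARE_HOST_REGS it declares the saved-register variables, with
       SAVE_HOST_REGS it saves the host registers that QEMU reserves
       for fixed purposes (such as 'env'), and a final include near the
       end of cpu_exec() restores them. */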
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
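
            /* From here on, each iteration of the inner loop services
               pending interrupts, looks up the TB for the current PC,
               and executes translated code until something forces an
               exit back through cpu_loop_exit()/longjmp(). */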
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
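                /* Note on the icount fields used above:
                   icount_decr.u16.low is the 16-bit budget decremented
                   by translated code, while icount_extra holds the
                   remainder of the instruction budget, so at most
                   0xffff instructions are committed to the decrementer
                   at a time. */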
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
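
/* The user-only helpers below follow the usual pattern of temporarily
   switching the global 'env' to the caller-supplied CPUX86State, since
   the helper_* functions operate implicitly on 'env', and restoring it
   before returning. */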
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
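
/* Return protocol shared by all handle_cpu_signal() variants: 1 means
   the fault was handled and the faulting host instruction can simply
   be restarted, 0 means the signal was not caused by guest emulation
   and must be handled by the caller; when a guest exception has to be
   raised, the function does not return at all. */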
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
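
/* Trap number 0xe is the x86 page fault; bit 1 of the page-fault error
   code is set for write accesses, which is what the
   (ERROR_sig(uc) >> 1) & 1 expression extracts. */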
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
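
/* Lacking a write flag from the host kernel, the Alpha handler above
   and the SPARC handler below decode the faulting instruction and
   match its opcode against the store instructions of the host ISA to
   compute is_write. */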
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */