/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
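
/* Note: the longjmp() above lands in the setjmp(env->jmp_env) at the top
   of the outer loop in cpu_exec(), which then either delivers the pending
   exception or resumes translated execution. */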
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
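
/* By convention in this loop, the value returned by tcg_qemu_tb_exec() is
   the host address of the TB that last ran, with a tag in its low two bits:
   0 or 1 name the outgoing jump slot that was taken, while 2 marks an exit
   forced before the TB body executed (the instruction-count case tested
   above). */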
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
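
/* TB lookup is two-level: env->tb_jmp_cache is a small direct-mapped cache
   indexed by the hashed virtual PC, and on a miss tb_find_slow() walks the
   tb_phys_hash chain, which is keyed by physical PC so that cached TBs
   remain valid across changes of the virtual address mapping. */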
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
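
/* If the stop was not caused by a watchpoint (watchpoint_hit is NULL),
   stale BP_WATCHPOINT_HIT flags are cleared above so a previous hit is
   not mistakenly reported again on the next debug stop. */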
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
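
    /* exception_index == -1 means "no exception pending".  The setjmp()
       below is the landing point for cpu_loop_exit() and for the longjmps
       in the kqemu/kvm paths; taking that jump restarts the outer loop. */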
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
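            /* next_tb doubles as the TB-chaining state: when non-zero it
               holds the host address of the previously executed TB with the
               outgoing jump slot in its low two bits, which tb_add_jump()
               below uses to patch a direct jump.  Zeroing it suppresses any
               patching. */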
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
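                    /* The per-target blocks below share one pattern: check
                       that the target's conditions for taking the interrupt
                       hold, deliver it with do_interrupt(), and clear
                       next_tb so the interrupted TB is not chained to the
                       next one. */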
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
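                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   previous TB so it branches straight to tb->tc_ptr next
                   time, bypassing this lookup path until the link is
                   reset. */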
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
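                /* icount bookkeeping, in brief: icount_decr.u16.low is the
                   16-bit budget decremented by the translated code, and
                   icount_extra holds the remainder that did not fit in 16
                   bits.  When both are exhausted, any leftover instructions
                   run via cpu_exec_nocache() and the loop exits with
                   EXCP_INTERRUPT. */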
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
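
    /* Execution only reaches this point via the break in the exception
       block above, with ret holding the exit code.  The else branch just
       above is the longjmp landing path: it reloads the register state
       and retries the loop. */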
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
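/* Return convention for every per-target handle_cpu_signal() variant
   below: 1 means the fault was handled (or re-raised as a guest
   exception) and the host signal handler can simply return; 0 means the
   fault does not belong to the guest and must be handled by the host. */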
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
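    /* Host trap number 0xe is the x86 page fault; bit 1 of its error
       code is set when the faulting access was a write, which is what
       the expression below extracts. */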
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */