[qemu/hppa.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
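    /* The low two bits of next_tb encode how the block exited: values
       0 and 1 name the jump slot of the exiting TB (used by the main
       loop to chain blocks via tb_add_jump), while 2 means the block
       was interrupted asynchronously, e.g. by icount expiry, before or
       while running. */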
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
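/* TB lookup is two-level: tb_find_fast probes the per-CPU tb_jmp_cache,
   which is hashed on the virtual PC, and falls back to this slow path,
   which walks tb_phys_hash keyed on the physical PC so that translated
   blocks survive changes in the virtual-to-physical mapping. */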
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */
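/* Structure: an outer for(;;) whose setjmp acts as the exception
   landing pad (cpu_loop_exit longjmps back here), and an inner for(;;)
   that services pending interrupts, looks up or translates the next
   TB, optionally chains it to its predecessor, and runs it. */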
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_HPPA)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
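                        /* The icount decrementer counts down as guest
                           instructions execute; only the low 16 bits
                           live in icount_decr.u16.low, with the rest of
                           the budget parked in icount_extra, so a
                           non-negative value here with icount_extra
                           left means the 16-bit window simply ran out
                           and can be refilled. */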
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
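/* In user-mode emulation, guest memory faults surface as host signals;
   each target below provides a handle_cpu_signal that either fixes up
   the page protection, delivers a guest exception, or reports that the
   fault was not ours. */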
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
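/* The host-side cpu_signal_handler variants below extract the faulting
   PC and, where the host reports it, the write/read direction from the
   host-specific signal context, then hand off to handle_cpu_signal. */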
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)    /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)    /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)    /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)    /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context)   /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)    /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context)   /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)    /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)    /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)     /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)     /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */