Implement preliminary SAM Coupé emulation
[qemu/z80.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
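/* Background (an assumption, not stated in this file): in this era of QEMU,
   "env" is normally a global variable that exec.h pins to a fixed host
   register (AREG0) for speed.  On sparc hosts, glibc can clobber such
   global registers, so the macro above redirects every use of "env" to the
   plain cpu_single_env global instead of trusting the register copy. */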
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
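/* How the exit unwinds (a sketch of the structure used by cpu_exec() below):
   every TB runs under a setjmp(env->jmp_env) checkpoint, so this longjmp
   abandons the host stack frames of the generated code and resumes at the
   top of the main loop.  Roughly:

       if (setjmp(env->jmp_env) == 0) {
           ... translate and execute TBs ...   // may call cpu_loop_exit()
       } else {
           env_to_regs();                      // re-synchronize and retry
       }
*/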
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
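/* Note on the value returned by tcg_qemu_tb_exec(): it is the address of the
   last executed TB with a 2-bit tag folded into the low bits (TB pointers
   are assumed at least 4-byte aligned, leaving those bits free).  The
   decoding pattern used throughout this file looks like:

       TranslationBlock *last_tb = (TranslationBlock *)(next_tb & ~3);
       int tag = next_tb & 3;    // 2 means the instruction counter expired

   so the test above, (next_tb & 3) == 2, detects an icount expiry. */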
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
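/* The TB lookup is two-level: tb_find_fast() probes the small, direct-mapped
   tb_jmp_cache indexed by a hash of the virtual PC; on a miss (or when
   cs_base/flags do not match) tb_find_slow() walks the physically indexed
   tb_phys_hash chain, translating a fresh block if nothing is found, and
   then refills the virtual cache.  The fast-path pattern, in sketch form:

       tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
       if (!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags)
           tb = tb_find_slow(pc, cs_base, flags);   // may call tb_gen_code()

   Indexing the slow path by physical address is what lets two mappings of
   the same physical page share translated code. */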
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
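/* Rationale (inferred from the code above, not stated elsewhere in this
   file): when execution stopped for a reason other than a watchpoint hit
   (env->watchpoint_hit is unset), any stale BP_WATCHPOINT_HIT flags are
   cleared first, so the registered handler only ever sees watchpoints that
   triggered on the current stop. */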
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_Z80)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_Z80)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_Z80)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        /* TODO: Add support for NMIs */
                        do_interrupt(env);
                    }
#endif
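                    /* Z80 note (context, not from this file): this loop only
                       acknowledges the maskable INT line; how the CPU then
                       responds depends on the interrupt mode (IM 0/1/2)
                       selected by the guest program, which is the business
                       of the Z80 target's do_interrupt(), not of this loop.
                       The NMI line is not wired up yet, as the TODO above
                       says.  A machine such as the SAM Coupé this port
                       targets raises INT periodically from its video
                       circuitry (the frame interrupt). */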
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_Z80)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
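                /* Chaining sketch: tb_add_jump() patches the exit of the
                   previously executed TB (next_tb & ~3 is its address,
                   next_tb & 3 selects which of its two jump slots to patch)
                   so that it branches straight into tb->tc_ptr next time,
                   bypassing this lookup entirely.  A TB whose code crosses a
                   page boundary (page_addr[1] != -1) is never used as a
                   chain target, since either of its pages may be
                   invalidated independently. */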
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
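                /* icount bookkeeping sketch: the 16-bit decrementer
                   env->icount_decr.u16.low counts instructions down to the
                   next deadline, with any surplus parked in
                   env->icount_extra.  For example, given a budget of 70000
                   instructions the decrementer would be loaded with 0xffff
                   (65535) and icount_extra would hold the remaining 4465;
                   each expiry handled above refills the decrementer until
                   icount_extra is drained, after which the loop exits with
                   EXCP_INTERRUPT so the pending timer deadline can run. */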
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_Z80)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
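/* Return-value protocol shared by every handle_cpu_signal() variant below:
   0 means "not a guest MMU fault, let the host deal with the signal", and
   1 means "handled" (either the page was unprotected, the MMU fault was
   resolved, or a guest exception was raised, in which case control leaves
   through cpu_loop_exit()/cpu_resume_from_signal() and never actually
   reaches the final return).  The cpu_signal_handler() functions further
   down are the per-host-CPU entry points that extract the faulting PC and
   write flag from the signal context and call this. */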
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
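/* Decoding note: trap number 0xe is the x86 page-fault exception, and bit 1
   of its hardware error code is the W bit (set when the faulting access was
   a write), hence the (ERROR_sig(uc) >> 1) & 1 expression above.  Faults
   other than page faults are conservatively reported as reads. */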
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)           (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
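/* Decoding note: insn >> 26 extracts the 6-bit Alpha major opcode from the
   faulting instruction, and the cases above enumerate the store opcodes
   (byte/word/long/quad stores, the floating-point stores and the
   store-conditionals), so is_write is set exactly for stores.  Lacking
   kernel help, this means re-reading the faulting instruction from memory
   on every SIGSEGV. */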
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
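/* Decoding note: on SPARC, (insn >> 30) == 3 selects the load/store
   instruction format, and (insn >> 19) & 0x3f extracts the 6-bit op3 field
   (bits 24..19) that names the memory opcode; the cases above cover the
   store and atomic (casa/casxa) encodings, which all fault as writes. */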
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */