/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
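/* CPU_NO_GLOBAL_REGS asks exec.h not to map CPU state onto fixed host
   registers in this file: cpu_exec() below relies on setjmp()/longjmp(),
   which would not preserve such register assignments. */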
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
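/* Set whenever translated code is invalidated or the code buffer is
   flushed while a new TB is being generated; the main loop tests it to
   drop a stale chaining target (see the tb_invalidated_flag check in
   cpu_exec()). */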

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
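
    /* The value returned by tcg_qemu_tb_exec() is the address of the
       last executed TB with a 2-bit tag in its low bits; tag value 2
       indicates that the instruction counter expired (see the icount
       handling in cpu_exec() below). */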
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
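
/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-PC
   jump cache (tb_jmp_cache); on a miss, tb_find_slow() walks the
   physically indexed hash chain, and if nothing matches, a fresh block
   is translated with tb_gen_code(). */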
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */
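
/* A minimal caller sketch (hypothetical, for illustration only): an
 * emulator's outer loop keeps re-entering cpu_exec() until it returns
 * an exit reason that must be serviced elsewhere, e.g.:
 *
 *     for (;;) {
 *         int r = cpu_exec(env);
 *         if (r == EXCP_DEBUG)
 *             handle_debug_exit(env);    // hypothetical debugger hook
 *         else if (r == EXCP_HALTED)
 *             wait_for_interrupt(env);   // hypothetical idle wait
 *     }
 */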
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
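    /* Targets with lazy condition codes (i386, m68k) keep the
       architectural flags in a working form (CC_OP/CC_SRC resp.
       cc_op/cc_dest) while translated code runs; the conversion is
       undone on exit from the loop below. */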
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
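
            /* A non-zero next_tb is the address of the previously
               executed TB, tagged in its low two bits with the jump
               slot that was taken; tb_add_jump() below uses it to patch
               that TB so the two blocks chain directly. */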
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_HPPA)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
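
                    /* With icount, translated blocks decrement
                       env->icount_decr; a tag of 2 in next_tb means the
                       counter expired mid-block, so we either refill it
                       from icount_extra or replay the remaining
                       instructions with cpu_exec_nocache(). */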
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
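
/* These user-mode helpers follow the file-wide convention of operating
   on the global 'env': they temporarily point it at the caller-supplied
   CPUX86State and restore the previous value before returning. */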
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
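
/* Every per-target handle_cpu_signal() below implements the same
   contract: return 1 if the fault was resolved (a protected code page
   was unprotected, or the guest MMU accepted the access) so the host
   signal can be ignored, and 0 if it is a genuine fault that must be
   forwarded to the application. */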

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
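
/* Host-side handlers: each cpu_signal_handler() variant below extracts
   the faulting PC and, where the host exposes one, a write/read flag
   from the host-specific signal context, then delegates the real work
   to handle_cpu_signal() above. */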

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
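    /* Assumed DSISR layout: bit 0x02000000 flags a store access, and
       trap 0x400 is an instruction-storage fault (never a store), so it
       is excluded from the write test above. */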
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */