[qemu-kvm/fedora.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
20 #include "config.h"
21 #include "exec.h"
22 #include "disas.h"
23 #if !defined(TARGET_IA64)
24 #include "tcg.h"
25 #endif
26 #include "kvm.h"
28 #if !defined(CONFIG_SOFTMMU)
29 #undef EAX
30 #undef ECX
31 #undef EDX
32 #undef EBX
33 #undef ESP
34 #undef EBP
35 #undef ESI
36 #undef EDI
37 #undef EIP
38 #include <signal.h>
39 #ifdef __linux__
40 #include <sys/ucontext.h>
41 #endif
42 #endif
44 #include "qemu-kvm.h"
46 #if defined(__sparc__) && !defined(HOST_SOLARIS)
47 // Work around ugly bugs in glibc that mangle global register contents
48 #undef env
49 #define env cpu_single_env
50 #endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
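
/* Two-level TB lookup: tb_find_fast() below first probes a small cache
   indexed by the current virtual PC (env->tb_jmp_cache); on a miss,
   tb_find_slow() walks a hash chain keyed on the *physical* address of
   the code, so a translated block stays usable even when the same
   physical page is mapped at a different virtual address. */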
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
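    /* eflags bit 10 is the direction flag; DF caches it as +1 (clear,
       string ops increment) or -1 (set, string ops decrement). */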
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
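        /* cpu_loop_exit() and the fault handlers longjmp() back into
           this setjmp(); it then returns non-zero, so we fall into the
           else branch below, resynchronize the registers and retry. */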
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
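            /* The low two bits of next_tb encode which jump slot of the
               previous TB chained here (consumed by tb_add_jump below);
               the special value 2 reports that the instruction counter
               expired.  next_tb & ~3 is the TranslationBlock pointer. */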
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
574 if (unlikely(env->exit_request)) {
575 env->exit_request = 0;
576 env->exception_index = EXCP_INTERRUPT;
577 cpu_loop_exit();
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
614 spin_lock(&tb_lock);
615 tb = tb_find_fast();
616 /* Note: we do it here to avoid a gcc bug on Mac OS X when
617 doing it in tb_find_slow */
618 if (tb_invalidated_flag) {
619 /* as some TB could have been invalidated because
620 of memory exceptions while generating the code, we
621 must recompute the hash index here */
622 next_tb = 0;
623 tb_invalidated_flag = 0;
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
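                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   previous block so that it branches straight to this
                   block's generated code, bypassing the lookup on
                   future executions. */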
634 if (next_tb != 0 &&
635 #ifdef CONFIG_KQEMU
636 (env->kqemu_enabled != 2) &&
637 #endif
638 tb->page_addr[1] == -1) {
639 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
642 spin_unlock(&tb_lock);
643 env->current_tb = tb;
645 /* cpu_interrupt might be called while translating the
646 TB, but before it is linked into a potentially
647 infinite loop and becomes env->current_tb. Avoid
648 starting execution if there is a pending interrupt. */
649 if (unlikely (env->exit_request))
650 env->current_tb = NULL;
652 while (env->current_tb) {
653 tc_ptr = tb->tc_ptr;
654 /* execute the generated code */
655 #if defined(__sparc__) && !defined(HOST_SOLARIS)
656 #undef env
657 env = cpu_single_env;
658 #define env cpu_single_env
659 #endif
660 next_tb = tcg_qemu_tb_exec(tc_ptr);
661 env->current_tb = NULL;
662 if ((next_tb & 3) == 2) {
663 /* Instruction counter expired. */
664 int insns_left;
665 tb = (TranslationBlock *)(long)(next_tb & ~3);
666 /* Restore PC. */
667 cpu_pc_from_tb(env, tb);
668 insns_left = env->icount_decr.u32;
669 if (env->icount_extra && insns_left >= 0) {
670 /* Refill decrementer and continue execution. */
671 env->icount_extra += insns_left;
672 if (env->icount_extra > 0xffff) {
673 insns_left = 0xffff;
674 } else {
675 insns_left = env->icount_extra;
677 env->icount_extra -= insns_left;
678 env->icount_decr.u16.low = insns_left;
679 } else {
680 if (insns_left > 0) {
681 /* Execute remaining instructions. */
682 cpu_exec_nocache(insns_left, tb);
684 env->exception_index = EXCP_INTERRUPT;
685 next_tb = 0;
686 cpu_loop_exit();
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
750 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
752 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
754 CPUX86State *saved_env;
756 saved_env = env;
757 env = s;
758 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
759 selector &= 0xffff;
760 cpu_x86_load_seg_cache(env, seg_reg, selector,
761 (selector << 4), 0xffff, 0);
762 } else {
763 helper_load_seg(seg_reg, selector);
765 env = saved_env;
768 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
770 CPUX86State *saved_env;
772 saved_env = env;
773 env = s;
775 helper_fsave(ptr, data32);
777 env = saved_env;
780 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
782 CPUX86State *saved_env;
784 saved_env = env;
785 env = s;
787 helper_frstor(ptr, data32);
789 env = saved_env;
792 #endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
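/* Return convention (all targets below): 1 means the fault was handled
   here, either transparently or by raising the corresponding guest
   exception; 0 means it was not an MMU fault and the signal must be
   forwarded to the application's own handler. */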
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
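
/* These macros hide the per-OS differences in how the faulting PC,
   trap number, page-fault error code and saved signal mask are laid
   out in the signal context handed to the handler. */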
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
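    /* Bits 31:26 of an Alpha instruction hold the major opcode; the
       cases below are the store opcodes, so a fault on one of them
       was caused by a write. */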
1421 switch (insn >> 26) {
1422 case 0x0d: // stw
1423 case 0x0e: // stb
1424 case 0x0f: // stq_u
1425 case 0x24: // stf
1426 case 0x25: // stg
1427 case 0x26: // sts
1428 case 0x27: // stt
1429 case 0x2c: // stl
1430 case 0x2d: // stq
1431 case 0x2e: // stl_c
1432 case 0x2f: // stq_c
1433 is_write = 1;
1436 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1437 is_write, &uc->uc_sigmask, puc);
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
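    /* SPARC format-3 (memory) instructions have bits 31:30 == 3; the
       op3 field in bits 24:19 selects the operation, and the cases
       below are the store variants. */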
1467 if ((insn >> 30) == 3) {
1468 switch((insn >> 19) & 0x3f) {
1469 case 0x05: // stb
1470 case 0x15: // stba
1471 case 0x06: // sth
1472 case 0x16: // stha
1473 case 0x04: // st
1474 case 0x14: // sta
1475 case 0x07: // std
1476 case 0x17: // stda
1477 case 0x0e: // stx
1478 case 0x1e: // stxa
1479 case 0x24: // stf
1480 case 0x34: // stfa
1481 case 0x27: // stdf
1482 case 0x37: // stdfa
1483 case 0x26: // stqf
1484 case 0x36: // stqfa
1485 case 0x25: // stfsr
1486 case 0x3c: // casa
1487 case 0x3e: // casxa
1488 is_write = 1;
1489 break;
1492 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1493 is_write, sigmask, NULL);
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */