/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
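/* Presumably these #undefs drop the target register accessors from exec.h
   so they cannot clash with same-named definitions in the host signal
   headers included below. */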
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
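
/* Set by the translator whenever the TB cache may have been flushed while
   generating code, meaning any cached TB pointer, including a pending
   next_tb chain target, can be stale and must be dropped. */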
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;
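    /* The low two bits of next_tb encode why the TB stopped; the value 2
       indicates the exit was taken before the first guest instruction of
       the block ran, hence the PC rollback below. */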
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
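
/* tb_find_fast is the first-level lookup: a direct-mapped cache indexed
   by virtual PC (env->tb_jmp_cache). Only on a miss does it fall back to
   the physically indexed hash chain walked by tb_find_slow above. */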
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
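        /* DF is bit 10 of eflags; map it to the +1/-1 stride used
           internally by the translated string operations */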
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
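            /* Reload env after returning here via longjmp: on hosts that
               keep env in a global register the value may not survive the
               non-local jump. */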
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                           < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
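                        /* icount_decr.u16.low counts down as guest
                           instructions execute; icount_extra holds the
                           part of the budget that did not fit into that
                           16-bit field. */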
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
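
/* Note: one call to cpu_exec() runs translated code until something (an
   exception, an exit request, ...) kicks it back out; callers drive it in
   a loop and act on the return code. A minimal sketch of such a driver
   (illustrative only; the real loops live in the user-mode and
   system-mode main loops, and handle_ret() here is a hypothetical
   helper):

       for (;;) {
           int r = cpu_exec(env);
           handle_ret(env, r);   // e.g. deliver signals, service I/O,
                                 // or stop on EXCP_DEBUG / EXCP_HALTED
       }
*/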

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
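
/* handle_cpu_signal() is reached from the host's SIGSEGV/SIGBUS handling,
   via the per-host cpu_signal_handler() variants below. Those expect to
   be invoked with SA_SIGINFO semantics so that the siginfo_t and context
   pointers are valid. A minimal registration sketch (illustrative only;
   host_sigsegv_wrapper is a hypothetical function that forwards its
   arguments to cpu_signal_handler()):

       struct sigaction act;
       sigfillset(&act.sa_mask);
       act.sa_flags = SA_SIGINFO;
       act.sa_sigaction = host_sigsegv_wrapper;
       sigaction(SIGSEGV, &act, NULL);
*/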

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
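    /* Trap number 0xe is the x86 page-fault vector; bit 1 of the
       page-fault error code is set when the faulting access was a
       write. */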
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)             ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)             ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)             ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)             ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)              ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)              ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)             ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)           ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)            ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(r##reg_num, context)
# define IAR_sig(context)             REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)             REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(lr, context)   /* Link register */
# define CR_sig(context)              REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)           ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)             EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)           EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)            EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */