/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
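
/* Set by tb_gen_code() when the TB cache has been flushed during code
   generation; tells the execution loop that any cached chaining state
   (next_tb) may reference a freed TB and must be discarded. */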
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}
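
/* Unwind to the setjmp() point in cpu_exec(), abandoning the TB that is
   currently executing. Callers must have brought the guest state to a
   consistent point before calling this. */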
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
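
/* Look up a TB by physical address: hash the physical PC into
   tb_phys_hash and walk the collision chain, comparing pc, cs_base,
   flags and, for a TB spanning two pages, the second page's physical
   address. Translates a new TB on a miss. */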
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
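
/* Fast TB lookup via the virtual-PC hash in env->tb_jmp_cache; falls
   back to tb_find_slow() when the cache misses or the cached TB no
   longer matches the current CPU state. */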
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
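
/* Optional hook invoked after the watchpoint bookkeeping below when a
   debug exception is raised; installed via cpu_set_debug_excp_handler(). */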
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
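
/* cpu_exec() runs translated code for one CPU until an exception or an
   exit request is raised. Structure: an outer setjmp() loop acting as
   the exception handler, and an inner loop that services pending
   interrupts, finds (or translates) the next TB, chains it to the
   previous TB when possible, and executes it. */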
int cpu_exec(CPUArchState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;
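
    /* Latch the global, signal-safe exit request into this CPU's own
       exit_request flag so the inner loop below can observe it. */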
    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }
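
            /* next_tb holds the address of the TB that just executed,
               with the low two bits encoding which of its jump slots we
               came through (so the slot can be patched to chain directly
               to the next TB); 0 means "no chaining". */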
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_state_reset(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }

                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }

#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z |
                                     CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
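
                /* tb_lock protects the translation-block caches while we
                   look up or generate the next TB and patch chain jumps. */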
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
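
                    /* A return value with low bits == 2 is the icount
                       convention: the TB was interrupted because the
                       instruction-count decrementer expired mid-block. */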
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}