/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC

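/* Return true if the vCPU has outstanding work to do (e.g. a pending
   interrupt), so the caller should not leave it halted. */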
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

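/* Abandon the TB currently being executed and unwind back into cpu_exec()
   via siglongjmp(). */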
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

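/* Slow-path TB lookup: hash the guest physical PC, walk the physical hash
   chain for a block matching pc/cs_base/flags (checking the second page for
   blocks that span a page boundary), and translate a new block if none is
   found.  The result is also cached in the per-CPU virtual-PC jump cache. */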
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

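/* Fast-path TB lookup: check the virtual-PC jump cache first and fall back
   to tb_find_slow() on a miss. */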
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

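/* Register the hook that runs whenever a debug exception is raised. */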
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

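/* On EXCP_DEBUG: clear the per-watchpoint hit flags (unless a specific
   watchpoint fired) and invoke the registered debug exception handler. */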
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

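/* Set (e.g. from another thread or a signal context) to ask every vCPU to
   leave its execution loop; each CPU latches it into cpu->exit_request. */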
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* As long as cpu_single_env is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the cpu_single_env
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
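                    /* Per-target delivery of pending hardware interrupts:
                       each target checks its own interrupt-enable state and,
                       if the interrupt can be taken, raises the matching
                       exception and clears next_tb so no TB chaining is
                       patched across the change in program flow. */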
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}