Provide a kvm_qemu_memory_alias() function
[qemu-kvm/fedora.git] / cpu-exec.c
blob 6bfd73136dcb870af53362b812a446a67ac860f9
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
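/* Illustrative sketch (not part of the build): cpu_loop_exit() only works
   because cpu_exec() below arms env->jmp_env with setjmp() before entering
   translated code, so the pairing is roughly: */
#if 0
    if (setjmp(env->jmp_env) == 0) {
        /* run translated blocks; any helper that raises a guest exception
           eventually calls cpu_loop_exit() ... */
    } else {
        /* ... and control resumes here, with env->exception_index
           describing what happened */
    }
#endif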
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
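/* Illustrative sketch (not part of the build): the only caller of
   cpu_exec_nocache() is the icount path in cpu_exec() below, used when fewer
   instructions remain in the budget than the cached TB would execute: */
#if 0
    if (insns_left > 0) {
        /* translate a throw-away TB capped at insns_left instructions,
           execute it once, then free it */
        cpu_exec_nocache(insns_left, tb);
    }
#endif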
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
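/* Note: tb_find_slow() is the miss path of a two-level lookup.  Blocks are
   hashed by *physical* PC in tb_phys_hash, so code mapped at several virtual
   addresses is translated only once; a hit is then memoized in the
   per-virtual-PC jump cache that tb_find_fast() below checks first. */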
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                           | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))         /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                            /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
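/* Illustrative sketch (not part of the build): the (pc, cs_base, flags)
   triple computed above acts as the lookup key for a translated block, so a
   jump-cache hit is only trusted after re-checking all three fields: */
#if 0
static int sketch_tb_matches(TranslationBlock *tb, target_ulong pc,
                             target_ulong cs_base, uint64_t flags)
{
    return tb && tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags;
}
#endif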
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
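/* Illustrative sketch (not part of the build): a hypothetical caller of
   cpu_exec().  The real main loop lives elsewhere (vl.c / the user-mode
   main loops); this only shows how the EXCP_* return values are meant to
   be consumed: */
#if 0
static void sketch_vcpu_loop(CPUState *cpu)
{
    for (;;) {
        int ret = cpu_exec(cpu);
        if (ret == EXCP_HALTED)
            break;          /* nothing to run until an event arrives */
        if (ret == EXCP_DEBUG)
            break;          /* hand control to the debugger stub */
        /* EXCP_INTERRUPT: service I/O and timers, then re-enter */
    }
}
#endif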
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it raises an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        /* real or vm86 mode: the segment base is simply selector << 4 */
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
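/* The three wrappers above share one idiom: the TCG helpers operate on the
   global `env`, so a caller-supplied CPUX86State must be swapped in and the
   previous value restored afterwards.  Illustrative sketch (not part of the
   build) of the same pattern for a hypothetical helper: */
#if 0
void sketch_call_helper_with(CPUX86State *s)
{
    CPUX86State *saved_env = env;
    env = s;
    /* ... call helpers that implicitly dereference `env` ... */
    env = saved_env;
}
#endif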
#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
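/* Illustrative sketch (not part of the build): handle_cpu_signal() is
   reached from the host SIGSEGV handler.  A hypothetical sigaction-based
   install (the real wiring lives in the OS support code): */
#if 0
static void sketch_sigsegv_handler(int sig, siginfo_t *info, void *uc)
{
    /* cpu_signal_handler() (defined at the bottom of this file) extracts
       the faulting PC and the read/write flag from the ucontext and calls
       handle_cpu_signal(); a return of 0 means the fault was not ours */
    if (!cpu_signal_handler(sig, info, uc))
        abort();
}
#endif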
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is a page fault; bit 1 of its error code is the write flag */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context) /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
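/* Worked example: for an Alpha `stl` instruction the opcode field
   insn >> 26 is 0x2c, so the switch above sets is_write = 1 and the fault
   is treated as a write; loads fall through with is_write left at 0. */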
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */