/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
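
/* On SPARC hosts built against older glibc, setjmp()/longjmp() can
   clobber the global register variables (env, T0) and the %i7 return
   address register that QEMU keeps live while running translated code.
   The wrappers below spill those registers to memory before the call
   and reload them afterwards; on every other host the SAVE_GLOBALS /
   RESTORE_GLOBALS macros above stay empty no-ops. */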
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}

#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
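
/* cpu_resume_from_signal() below never returns to its caller: it
   restores the signal mask saved in the ucontext (user mode only) and
   then longjmp()s back to the setjmp() point inside cpu_exec(), so the
   main loop restarts cleanly after a fault taken in translated code. */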
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
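
/* Translated blocks are looked up in two steps.  tb_find_fast() first
   probes env->tb_jmp_cache, a direct-mapped cache indexed by a hash of
   the virtual PC.  On a miss, tb_find_slow() below walks the
   tb_phys_hash chain keyed on the *physical* PC, so the same guest
   page mapped at several virtual addresses shares one translation; a
   block only matches if pc, cs_base, the translation flags and both
   physical pages agree.  If nothing matches, the block is translated
   on the spot with cpu_gen_code(). */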
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
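
/* For the fast lookup each target condenses the parts of its CPU
   state that affect code generation (privilege level, FPU enable
   bits, Thumb mode, delay-slot state, ...) into the (pc, cs_base,
   flags) triple computed below; two states that differ in any of
   these bits must never share a translated block. */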
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_HPPA)
    flags = env->psw & PSW_N; /* XXX: use more bits? */
    cs_base = env->iaoq[1];
    pc = env->iaoq[0];
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
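
/* cpu_exec() is structured as two nested loops around a setjmp()
   point.  The outer loop delivers any pending exception, then the
   inner loop repeatedly: checks interrupt_request, finds the next TB
   with tb_find_fast(), optionally chains it to the previous block,
   and runs it via tcg_qemu_tb_exec().  Anything that needs to break
   out of generated code (exceptions, exit requests) longjmp()s back
   to the setjmp() through cpu_loop_exit(). */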
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_HPPA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
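                /* next_tb packs two values: (next_tb & ~3) is the
                   address of the TranslationBlock we just left, and
                   (next_tb & 3) is the index of the jump slot taken
                   out of that block, so tb_add_jump() knows which exit
                   to patch.  A value of 0 means "do not chain". */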
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
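
/* In user-mode emulation, guest faults arrive as host signals.  Each
   target's handle_cpu_signal() below follows the same recipe: first
   try page_unprotect(), which catches writes to pages that were
   write-protected to track self-modifying code; otherwise hand the
   access to the target MMU fault handler; and if that raises a real
   guest fault while we were inside translated code, use
   cpu_restore_state() to resynchronize the CPU state from the host PC
   before raising the exception. */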
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and is 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
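
/* Host-side handlers: each cpu_signal_handler() variant below digs the
   faulting program counter and, where the host reports it, a
   write-access flag out of the host-specific siginfo/ucontext layout,
   then forwards everything to handle_cpu_signal() above.  Hosts that
   cannot report the access type cheaply pass is_write = 0. */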
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
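    /* Trap number 0xe is the x86 page fault; bit 1 of the page-fault
       error code is set when the faulting access was a write, which is
       exactly the is_write flag handle_cpu_signal() expects. */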
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)          REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                   REG_sig(nip, context)    /* Program counter */
# define MSR_sig(context)                   REG_sig(msr, context)    /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)    /* Count register */
# define XER_sig(context)                   REG_sig(xer, context)    /* User's integer exception register */
# define LR_sig(context)                    REG_sig(link, context)   /* Link register */
# define CR_sig(context)                    REG_sig(ccr, context)    /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)        (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                 (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                   REG_sig(dar, context)
# define DSISR_sig(context)                 REG_sig(dsisr, context)
# define TRAP_sig(context)                  REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context)   /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)    /* Count register */
# define XER_sig(context)                   REG_sig(xer, context)    /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)     /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)     /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
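    /* On Alpha the major opcode sits in bits 31:26 of the instruction;
       the cases below cover the integer and floating-point store
       opcodes, so a match means the faulting access was a write. */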
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
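    /* SPARC format-3 (load/store) instructions have op == 3 in the top
       two bits; the op3 field in bits 24:19 then selects the operation,
       and the cases below are the store variants. */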
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */