- compilation fixes for MSVC toolkit 2003
[bochs-mirror.git] / cpu / cpu.cc
blobbf7e7fbcb3ff47d1ffc8fcd5e60400bd38b273ce
1 /////////////////////////////////////////////////////////////////////////
2 // $Id: cpu.cc,v 1.248 2008/10/10 21:09:25 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (C) 2001 MandrakeSoft S.A.
6 //
7 // MandrakeSoft S.A.
8 // 43, rue d'Aboukir
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 /////////////////////////////////////////////////////////////////////////
28 #define NEED_CPU_REG_SHORTCUTS 1
29 #include "bochs.h"
30 #include "cpu.h"
31 #define LOG_THIS BX_CPU_THIS_PTR
33 #include "iodev/iodev.h"
// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
// In 32-bit-only builds alias the 64-bit register names onto the
// 32-bit ones so the main loop can be written once in terms of RIP/RCX.
#define RIP EIP
#define RCX ECX
#endif

// ICACHE instrumentation code
#if BX_SUPPORT_ICACHE

// Compile-time switch: set to 1 to count iCache lookups/misses.
#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

// Print statistics once every (mask+1) lookups.
#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
          iCacheLookups, \
          iCacheMisses, \
          (iCacheLookups-iCacheMisses) * 100.0 / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++

#else
// Instrumentation disabled: the hooks compile away to nothing.
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif

#endif // BX_SUPPORT_ICACHE

// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run.  This is used by bochs internal debugger or when simulating
// multiple processors.
//
// If maximum instructions have been executed, return. The zero-count
// means run forever.
#if BX_SUPPORT_SMP || BX_DEBUGGER
#define CHECK_MAX_INSTRUCTIONS(count) \
  if ((count) > 0) {                  \
    (count)--;                        \
    if ((count) == 0) return;         \
  }
#else
#define CHECK_MAX_INSTRUCTIONS(count)
#endif
87 void BX_CPU_C::cpu_loop(Bit32u max_instr_count)
89 #if BX_DEBUGGER
90 BX_CPU_THIS_PTR break_point = 0;
91 BX_CPU_THIS_PTR magic_break = 0;
92 BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
93 #endif
95 if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
96 // only from exception function we can get here ...
97 BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
98 BX_TICK1_IF_SINGLE_PROCESSOR();
99 #if BX_DEBUGGER || BX_GDBSTUB
100 if (dbg_instruction_epilog()) return;
101 #endif
102 CHECK_MAX_INSTRUCTIONS(max_instr_count);
103 #if BX_GDBSTUB
104 if (bx_dbg.gdbstub_enabled) return;
105 #endif
108 // If the exception() routine has encountered a nasty fault scenario,
109 // the debugger may request that control is returned to it so that
110 // the situation may be examined.
111 #if BX_DEBUGGER
112 if (bx_guard.interrupt_requested) return;
113 #endif
115 // We get here either by a normal function call, or by a longjmp
116 // back from an exception() call. In either case, commit the
117 // new EIP/ESP, and set up other environmental fields. This code
118 // mirrors similar code below, after the interrupt() call.
119 BX_CPU_THIS_PTR prev_rip = RIP; // commit new EIP
120 BX_CPU_THIS_PTR speculative_rsp = 0;
121 BX_CPU_THIS_PTR EXT = 0;
122 BX_CPU_THIS_PTR errorno = 0;
124 while (1) {
126 // check on events which occurred for previous instructions (traps)
127 // and ones which are asynchronous to the CPU (hardware interrupts)
128 if (BX_CPU_THIS_PTR async_event) {
129 if (handleAsyncEvent()) {
130 // If request to return to caller ASAP.
131 return;
135 no_async_event:
137 Bit32u eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
139 if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
140 prefetch();
141 eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
144 #if BX_SUPPORT_ICACHE
145 bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + eipBiased;
146 bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr);
147 bxInstruction_c *i = entry->i;
149 InstrICache_Increment(iCacheLookups);
150 InstrICache_Stats();
152 if ((entry->pAddr == pAddr) &&
153 (entry->writeStamp == *(BX_CPU_THIS_PTR currPageWriteStampPtr)))
155 // iCache hit. An instruction was found in the iCache
157 else {
158 // iCache miss. No validated instruction with matching fetch parameters
159 // is in the iCache.
160 InstrICache_Increment(iCacheMisses);
161 serveICacheMiss(entry, eipBiased, pAddr);
162 i = entry->i;
164 #else
165 bxInstruction_c iStorage, *i = &iStorage;
166 fetchInstruction(i, eipBiased);
167 #endif
169 #if BX_SUPPORT_TRACE_CACHE
170 unsigned length = entry->ilen;
172 for(;;i++) {
173 #endif
175 #if BX_INSTRUMENTATION
176 BX_INSTR_OPCODE(BX_CPU_ID, BX_CPU_THIS_PTR eipFetchPtr + (RIP + BX_CPU_THIS_PTR eipPageBias),
177 i->ilen(), BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
178 #endif
180 #if BX_DEBUGGER || BX_GDBSTUB
181 if (dbg_instruction_prolog()) return;
182 #endif
184 #if BX_DISASM
185 if (BX_CPU_THIS_PTR trace) {
186 // print the instruction that is about to be executed
187 debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
189 #endif
191 // decoding instruction compeleted -> continue with execution
192 BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
193 RIP += i->ilen();
194 BX_CPU_CALL_METHOD(i->execute, (i)); // might iterate repeat instruction
195 BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
196 BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
197 BX_TICK1_IF_SINGLE_PROCESSOR();
199 // inform instrumentation about new instruction
200 BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
202 // note instructions generating exceptions never reach this point
203 #if BX_DEBUGGER || BX_GDBSTUB
204 if (dbg_instruction_epilog()) return;
205 #endif
207 CHECK_MAX_INSTRUCTIONS(max_instr_count);
209 #if BX_SUPPORT_TRACE_CACHE
210 if (BX_CPU_THIS_PTR async_event) {
211 // clear stop trace magic indication that probably was set by repeat or branch32/64
212 BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
213 break;
216 if (--length == 0) goto no_async_event;
218 #endif
219 } // while (1)
222 void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxExecutePtr_tR execute)
224 // non repeated instruction
225 if (! i->repUsedL()) {
226 BX_CPU_CALL_METHOD(execute, (i));
227 return;
230 #if BX_SUPPORT_X86_64
231 if (i->as64L()) {
232 while(1) {
233 if (RCX != 0) {
234 BX_CPU_CALL_METHOD(execute, (i));
235 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
236 RCX --;
238 if (RCX == 0) return;
240 #if BX_DEBUGGER == 0
241 if (BX_CPU_THIS_PTR async_event)
242 #endif
243 break; // exit always if debugger enabled
245 BX_TICK1_IF_SINGLE_PROCESSOR();
248 else
249 #endif
250 if (i->as32L()) {
251 while(1) {
252 if (ECX != 0) {
253 BX_CPU_CALL_METHOD(execute, (i));
254 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
255 RCX = ECX - 1;
257 if (ECX == 0) return;
259 #if BX_DEBUGGER == 0
260 if (BX_CPU_THIS_PTR async_event)
261 #endif
262 break; // exit always if debugger enabled
264 BX_TICK1_IF_SINGLE_PROCESSOR();
267 else // 16bit addrsize
269 while(1) {
270 if (CX != 0) {
271 BX_CPU_CALL_METHOD(execute, (i));
272 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
273 CX --;
275 if (CX == 0) return;
277 #if BX_DEBUGGER == 0
278 if (BX_CPU_THIS_PTR async_event)
279 #endif
280 break; // exit always if debugger enabled
282 BX_TICK1_IF_SINGLE_PROCESSOR();
286 RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP
288 #if BX_SUPPORT_TRACE_CACHE
289 // assert magic async_event to stop trace execution
290 BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
291 #endif
294 void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxExecutePtr_tR execute)
296 unsigned rep = i->repUsedValue();
298 // non repeated instruction
299 if (! rep) {
300 BX_CPU_CALL_METHOD(execute, (i));
301 return;
304 if (rep == 3) { /* repeat prefix 0xF3 */
305 #if BX_SUPPORT_X86_64
306 if (i->as64L()) {
307 while(1) {
308 if (RCX != 0) {
309 BX_CPU_CALL_METHOD(execute, (i));
310 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
311 RCX --;
313 if (! get_ZF() || RCX == 0) return;
315 #if BX_DEBUGGER == 0
316 if (BX_CPU_THIS_PTR async_event)
317 #endif
318 break; // exit always if debugger enabled
320 BX_TICK1_IF_SINGLE_PROCESSOR();
323 else
324 #endif
325 if (i->as32L()) {
326 while(1) {
327 if (ECX != 0) {
328 BX_CPU_CALL_METHOD(execute, (i));
329 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
330 RCX = ECX - 1;
332 if (! get_ZF() || ECX == 0) return;
334 #if BX_DEBUGGER == 0
335 if (BX_CPU_THIS_PTR async_event)
336 #endif
337 break; // exit always if debugger enabled
339 BX_TICK1_IF_SINGLE_PROCESSOR();
342 else // 16bit addrsize
344 while(1) {
345 if (CX != 0) {
346 BX_CPU_CALL_METHOD(execute, (i));
347 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
348 CX --;
350 if (! get_ZF() || CX == 0) return;
352 #if BX_DEBUGGER == 0
353 if (BX_CPU_THIS_PTR async_event)
354 #endif
355 break; // exit always if debugger enabled
357 BX_TICK1_IF_SINGLE_PROCESSOR();
361 else { /* repeat prefix 0xF2 */
362 #if BX_SUPPORT_X86_64
363 if (i->as64L()) {
364 while(1) {
365 if (RCX != 0) {
366 BX_CPU_CALL_METHOD(execute, (i));
367 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
368 RCX --;
370 if (get_ZF() || RCX == 0) return;
372 #if BX_DEBUGGER == 0
373 if (BX_CPU_THIS_PTR async_event)
374 #endif
375 break; // exit always if debugger enabled
377 BX_TICK1_IF_SINGLE_PROCESSOR();
380 else
381 #endif
382 if (i->as32L()) {
383 while(1) {
384 if (ECX != 0) {
385 BX_CPU_CALL_METHOD(execute, (i));
386 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
387 RCX = ECX - 1;
389 if (get_ZF() || ECX == 0) return;
391 #if BX_DEBUGGER == 0
392 if (BX_CPU_THIS_PTR async_event)
393 #endif
394 break; // exit always if debugger enabled
396 BX_TICK1_IF_SINGLE_PROCESSOR();
399 else // 16bit addrsize
401 while(1) {
402 if (CX != 0) {
403 BX_CPU_CALL_METHOD(execute, (i));
404 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
405 CX --;
407 if (get_ZF() || CX == 0) return;
409 #if BX_DEBUGGER == 0
410 if (BX_CPU_THIS_PTR async_event)
411 #endif
412 break; // exit always if debugger enabled
414 BX_TICK1_IF_SINGLE_PROCESSOR();
419 RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP
421 #if BX_SUPPORT_TRACE_CACHE
422 // assert magic async_event to stop trace execution
423 BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
424 #endif
427 unsigned BX_CPU_C::handleAsyncEvent(void)
430 // This area is where we process special conditions and events.
432 if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_SPECIAL) {
433 // I made up the bitmask above to mean HALT state.
434 // for one processor, pass the time as quickly as possible until
435 // an interrupt wakes up the CPU.
436 while (1)
438 if ((BX_CPU_INTR && (BX_CPU_THIS_PTR get_IF() || (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT_IF))) ||
439 BX_CPU_THIS_PTR nmi_pending || BX_CPU_THIS_PTR smi_pending)
441 // interrupt ends the HALT condition
442 #if BX_SUPPORT_MONITOR_MWAIT
443 if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT)
444 BX_MEM(0)->clear_monitor(BX_CPU_THIS_PTR bx_cpuid);
445 #endif
446 BX_CPU_THIS_PTR debug_trap = 0; // clear traps for after resume
447 BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
448 break;
450 if ((BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_SPECIAL) == 0) {
451 BX_INFO(("handleAsyncEvent: reset detected in HLT state"));
452 break;
455 // for multiprocessor simulation, even if this CPU is halted we still
456 // must give the others a chance to simulate. If an interrupt has
457 // arrived, then clear the HALT condition; otherwise just return from
458 // the CPU loop with stop_reason STOP_CPU_HALTED.
459 #if BX_SUPPORT_SMP
460 if (BX_SMP_PROCESSORS > 1) {
461 // HALT condition remains, return so other CPUs have a chance
462 #if BX_DEBUGGER
463 BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
464 #endif
465 return 1; // Return to caller of cpu_loop.
467 #endif
469 #if BX_DEBUGGER
470 if (bx_guard.interrupt_requested)
471 return 1; // Return to caller of cpu_loop.
472 #endif
474 BX_TICK1();
476 } else if (bx_pc_system.kill_bochs_request) {
477 // setting kill_bochs_request causes the cpu loop to return ASAP.
478 return 1; // Return to caller of cpu_loop.
481 // Priority 1: Hardware Reset and Machine Checks
482 // RESET
483 // Machine Check
484 // (bochs doesn't support these)
486 // Priority 2: Trap on Task Switch
487 // T flag in TSS is set
488 if (BX_CPU_THIS_PTR debug_trap & 0x00008000) {
489 BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
490 exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
493 // Priority 3: External Hardware Interventions
494 // FLUSH
495 // STOPCLK
496 // SMI
497 // INIT
498 // (bochs doesn't support these)
499 if (BX_CPU_THIS_PTR smi_pending && ! BX_CPU_THIS_PTR smm_mode())
501 // clear SMI pending flag and disable NMI when SMM was accepted
502 BX_CPU_THIS_PTR smi_pending = 0;
503 BX_CPU_THIS_PTR nmi_disable = 1;
504 enter_system_management_mode();
507 // Priority 4: Traps on Previous Instruction
508 // Breakpoints
509 // Debug Trap Exceptions (TF flag set or data/IO breakpoint)
510 if (BX_CPU_THIS_PTR debug_trap &&
511 !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG))
513 // A trap may be inhibited on this boundary due to an instruction
514 // which loaded SS. If so we clear the inhibit_mask below
515 // and don't execute this code until the next boundary.
516 // Commit debug events to DR6
517 BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
518 exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
521 // Priority 5: External Interrupts
522 // NMI Interrupts
523 // Maskable Hardware Interrupts
524 if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS) {
525 // Processing external interrupts is inhibited on this
526 // boundary because of certain instructions like STI.
527 // inhibit_mask is cleared below, in which case we will have
528 // an opportunity to check interrupts on the next instruction
529 // boundary.
531 else if (BX_CPU_THIS_PTR nmi_pending) {
532 BX_CPU_THIS_PTR nmi_pending = 0;
533 BX_CPU_THIS_PTR nmi_disable = 1;
534 BX_CPU_THIS_PTR errorno = 0;
535 BX_CPU_THIS_PTR EXT = 1; /* external event */
536 BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
537 interrupt(2, 0, 0, 0);
539 else if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF() && BX_DBG_ASYNC_INTR)
541 Bit8u vector;
543 // NOTE: similar code in ::take_irq()
544 #if BX_SUPPORT_APIC
545 if (BX_CPU_THIS_PTR local_apic.INTR)
546 vector = BX_CPU_THIS_PTR local_apic.acknowledge_int();
547 else
548 #endif
549 // if no local APIC, always acknowledge the PIC.
550 vector = DEV_pic_iac(); // may set INTR with next interrupt
551 BX_CPU_THIS_PTR errorno = 0;
552 BX_CPU_THIS_PTR EXT = 1; /* external event */
553 BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
554 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
555 interrupt(vector, 0, 0, 0);
556 // Set up environment, as would be when this main cpu loop gets
557 // invoked. At the end of normal instructions, we always commmit
558 // the new EIP. But here, we call interrupt() much like
559 // it was a sofware interrupt instruction, and need to effect the
560 // commit here. This code mirrors similar code above.
561 BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
562 BX_CPU_THIS_PTR speculative_rsp = 0;
563 BX_CPU_THIS_PTR EXT = 0;
564 BX_CPU_THIS_PTR errorno = 0;
566 else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
567 // NOTE: similar code in ::take_dma()
568 // assert Hold Acknowledge (HLDA) and go into a bus hold state
569 DEV_dma_raise_hlda();
572 // Priority 6: Faults from fetching next instruction
573 // Code breakpoint fault
574 // Code segment limit violation (priority 7 on 486/Pentium)
575 // Code page fault (priority 7 on 486/Pentium)
576 // (handled in main decode loop)
578 // Priority 7: Faults from decoding next instruction
579 // Instruction length > 15 bytes
580 // Illegal opcode
581 // Coprocessor not available
582 // (handled in main decode loop etc)
584 // Priority 8: Faults on executing an instruction
585 // Floating point execution
586 // Overflow
587 // Bound error
588 // Invalid TSS
589 // Segment not present
590 // Stack fault
591 // General protection
592 // Data page fault
593 // Alignment check
594 // (handled by rest of the code)
596 if (BX_CPU_THIS_PTR get_TF())
598 // TF is set before execution of next instruction. Schedule
599 // a debug trap (#DB) after execution. After completion of
600 // next instruction, the code above will invoke the trap.
601 BX_CPU_THIS_PTR debug_trap |= 0x00004000; // BS flag in DR6
604 // Now we can handle things which are synchronous to instruction
605 // execution.
606 if (BX_CPU_THIS_PTR get_RF()) {
607 BX_CPU_THIS_PTR clear_RF();
609 #if BX_X86_DEBUGGER
610 else {
611 // only bother comparing if any breakpoints enabled
612 if (BX_CPU_THIS_PTR dr7 & 0x000000ff) {
613 bx_address iaddr = get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip);
614 Bit32u dr6_bits = hwdebug_compare(iaddr, 1, BX_HWDebugInstruction, BX_HWDebugInstruction);
615 if (dr6_bits)
617 // Add to the list of debug events thus far.
618 BX_CPU_THIS_PTR async_event = 1;
619 BX_CPU_THIS_PTR debug_trap |= dr6_bits;
620 // If debug events are not inhibited on this boundary,
621 // fire off a debug fault. Otherwise handle it on the next
622 // boundary. (becomes a trap)
623 if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG)) {
624 // Commit debug events to DR6
625 #if BX_CPU_LEVEL <= 4
626 // On 386/486 bit12 is settable
627 BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
628 (BX_CPU_THIS_PTR debug_trap & 0x0000f00f);
629 #else
630 // On Pentium+, bit12 is always zero
631 BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
632 (BX_CPU_THIS_PTR debug_trap & 0x0000e00f);
633 #endif
634 exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
639 #endif
641 // We have ignored processing of external interrupts and
642 // debug events on this boundary. Reset the mask so they
643 // will be processed on the next boundary.
644 BX_CPU_THIS_PTR inhibit_mask = 0;
646 if (!((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) ||
647 BX_CPU_THIS_PTR debug_trap ||
648 BX_HRQ ||
649 BX_CPU_THIS_PTR get_TF()
650 #if BX_X86_DEBUGGER
651 || (BX_CPU_THIS_PTR dr7 & 0xff)
652 #endif
654 BX_CPU_THIS_PTR async_event = 0;
656 return 0; // Continue executing cpu_loop.
660 // boundaries of consideration:
662 // * physical memory boundary: 1024k (1Megabyte) (increments of...)
663 // * A20 boundary: 1024k (1Megabyte)
664 // * page boundary: 4k
665 //  * ROM boundary:            2k (don't care since we are only reading)
666 // * segment boundary: any
668 void BX_CPU_C::prefetch(void)
670 bx_address laddr;
671 unsigned pageOffset;
673 #if BX_SUPPORT_X86_64
674 if (Is64BitMode()) {
675 if (! IsCanonical(RIP)) {
676 BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
677 exception(BX_GP_EXCEPTION, 0, 0);
680 // linear address is equal to RIP in 64-bit long mode
681 pageOffset = PAGE_OFFSET(EIP);
682 laddr = RIP;
684 // Calculate RIP at the beginning of the page.
685 BX_CPU_THIS_PTR eipPageBias = pageOffset - RIP;
686 BX_CPU_THIS_PTR eipPageWindowSize = 4096;
688 else
689 #endif
691 BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */
692 laddr = BX_CPU_THIS_PTR get_laddr32(BX_SEG_REG_CS, EIP);
693 pageOffset = PAGE_OFFSET(laddr);
695 // Calculate RIP at the beginning of the page.
696 BX_CPU_THIS_PTR eipPageBias = pageOffset - EIP;
698 Bit32u limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
699 if (EIP > limit) {
700 BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", EIP, limit));
701 exception(BX_GP_EXCEPTION, 0, 0);
703 if (limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
704 BX_CPU_THIS_PTR eipPageWindowSize = limit + BX_CPU_THIS_PTR eipPageBias + 1;
706 else {
707 BX_CPU_THIS_PTR eipPageWindowSize = 4096;
711 bx_address lpf = LPFOf(laddr);
712 unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
713 bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
714 Bit8u *fetchPtr = 0;
716 if ((tlbEntry->lpf == lpf) && !(tlbEntry->accessBits & USER_PL)) {
717 BX_CPU_THIS_PTR pAddrA20Page = A20ADDR(tlbEntry->ppf);
718 #if BX_SupportGuest2HostTLB
719 fetchPtr = (Bit8u*) (tlbEntry->hostPageAddr);
720 #endif
722 else {
723 bx_phy_address pAddr;
725 if (BX_CPU_THIS_PTR cr0.get_PG()) {
726 pAddr = translate_linear(laddr, CPL, BX_READ, CODE_ACCESS);
727 pAddr = A20ADDR(pAddr);
729 else {
730 pAddr = A20ADDR(laddr);
733 BX_CPU_THIS_PTR pAddrA20Page = LPFOf(pAddr);
736 if (fetchPtr) {
737 BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
739 else {
740 BX_CPU_THIS_PTR eipFetchPtr = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS,
741 BX_CPU_THIS_PTR pAddrA20Page, BX_READ, CODE_ACCESS);
744 // Sanity checks
745 if (! BX_CPU_THIS_PTR eipFetchPtr) {
746 bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + pageOffset;
747 if (pAddr >= BX_MEM(0)->get_memory_len()) {
748 BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
750 else {
751 BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x" FMT_PHY_ADDRX, pAddr));
755 #if BX_SUPPORT_ICACHE
756 BX_CPU_THIS_PTR currPageWriteStampPtr = pageWriteStampTable.getPageWriteStampPtr(BX_CPU_THIS_PTR pAddrA20Page);
757 Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);
758 pageWriteStamp &= ~ICacheWriteStampFetchModeMask; // Clear out old fetch mode bits
759 pageWriteStamp |= BX_CPU_THIS_PTR fetchModeMask; // And add new ones
760 pageWriteStampTable.setPageWriteStamp(BX_CPU_THIS_PTR pAddrA20Page, pageWriteStamp);
761 #endif
764 void BX_CPU_C::boundaryFetch(const Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
766 unsigned j;
767 Bit8u fetchBuffer[16]; // Really only need 15
768 unsigned ret;
770 if (remainingInPage >= 15) {
771 BX_ERROR(("boundaryFetch #GP(0): too many instruction prefixes"));
772 exception(BX_GP_EXCEPTION, 0, 0);
775 // Read all leftover bytes in current page up to boundary.
776 for (j=0; j<remainingInPage; j++) {
777 fetchBuffer[j] = *fetchPtr++;
780 // The 2nd chunk of the instruction is on the next page.
781 // Set RIP to the 0th byte of the 2nd page, and force a
782 // prefetch so direct access of that physical page is possible, and
783 // all the associated info is updated.
784 RIP += remainingInPage;
785 prefetch();
787 unsigned fetchBufferLimit = 15;
788 if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
789 BX_DEBUG(("boundaryFetch: small window size after prefetch - %d bytes", BX_CPU_THIS_PTR eipPageWindowSize));
790 fetchBufferLimit = BX_CPU_THIS_PTR eipPageWindowSize;
793 // We can fetch straight from the 0th byte, which is eipFetchPtr;
794 fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;
796 // read leftover bytes in next page
797 for (; j<fetchBufferLimit; j++) {
798 fetchBuffer[j] = *fetchPtr++;
800 #if BX_SUPPORT_X86_64
801 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
802 ret = fetchDecode64(fetchBuffer, i, fetchBufferLimit);
803 else
804 #endif
805 ret = fetchDecode32(fetchBuffer, i, fetchBufferLimit);
807 if (ret==0) {
808 BX_INFO(("boundaryFetch #GP(0): failed to complete instruction decoding"));
809 exception(BX_GP_EXCEPTION, 0, 0);
812 // Restore EIP since we fudged it to start at the 2nd page boundary.
813 RIP = BX_CPU_THIS_PTR prev_rip;
815 // Since we cross an instruction boundary, note that we need a prefetch()
816 // again on the next instruction. Perhaps we can optimize this to
817 // eliminate the extra prefetch() since we do it above, but have to
818 // think about repeated instructions, etc.
819 // invalidate_prefetch_q();
821 BX_INSTR_OPCODE(BX_CPU_ID, fetchBuffer, i->ilen(),
822 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
825 void BX_CPU_C::deliver_NMI(void)
827 BX_CPU_THIS_PTR nmi_pending = 1;
828 BX_CPU_THIS_PTR async_event = 1;
831 void BX_CPU_C::deliver_SMI(void)
833 BX_CPU_THIS_PTR smi_pending = 1;
834 BX_CPU_THIS_PTR async_event = 1;
837 #if BX_DEBUGGER || BX_GDBSTUB
838 bx_bool BX_CPU_C::dbg_instruction_prolog(void)
840 #if BX_DEBUGGER
841 if(dbg_check_begin_instr_bpoint()) return 1;
842 #endif
844 return 0;
847 bx_bool BX_CPU_C::dbg_instruction_epilog(void)
849 #if BX_DEBUGGER
850 if (dbg_check_end_instr_bpoint()) return 1;
851 #endif
853 #if BX_GDBSTUB
854 if (bx_dbg.gdbstub_enabled) {
855 unsigned reason = bx_gdbstub_check(EIP);
856 if (reason != GDBSTUB_STOP_NO_REASON) return 1;
858 #endif
860 return 0;
862 #endif // BX_DEBUGGER || BX_GDBSTUB
864 #if BX_DEBUGGER
865 extern unsigned dbg_show_mask;
867 bx_bool BX_CPU_C::dbg_check_begin_instr_bpoint(void)
869 Bit64u tt = bx_pc_system.time_ticks();
870 bx_address debug_eip = RIP;
871 Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
873 BX_CPU_THIS_PTR guard_found.cs = cs;
874 BX_CPU_THIS_PTR guard_found.eip = debug_eip;
875 BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip);
876 BX_CPU_THIS_PTR guard_found.is_32bit_code =
877 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
878 BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();
880 // support for 'show' command in debugger
881 if(dbg_show_mask) {
882 int rv = bx_dbg_show_symbolic();
883 if (rv) return(rv);
886 // see if debugger is looking for iaddr breakpoint of any type
887 if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
888 #if (BX_DBG_MAX_VIR_BPOINTS > 0)
889 if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
890 if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
891 (tt != BX_CPU_THIS_PTR guard_found.time_tick))
893 for (unsigned n=0; n<bx_guard.iaddr.num_virtual; n++) {
894 if (bx_guard.iaddr.vir[n].enabled &&
895 (bx_guard.iaddr.vir[n].cs == cs) &&
896 (bx_guard.iaddr.vir[n].eip == debug_eip))
898 BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
899 BX_CPU_THIS_PTR guard_found.iaddr_index = n;
900 BX_CPU_THIS_PTR guard_found.time_tick = tt;
901 return(1); // on a breakpoint
906 #endif
907 #if (BX_DBG_MAX_LIN_BPOINTS > 0)
908 if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
909 if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
910 (tt != BX_CPU_THIS_PTR guard_found.time_tick))
912 for (unsigned n=0; n<bx_guard.iaddr.num_linear; n++) {
913 if (bx_guard.iaddr.lin[n].enabled &&
914 (bx_guard.iaddr.lin[n].addr == BX_CPU_THIS_PTR guard_found.laddr))
916 BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
917 BX_CPU_THIS_PTR guard_found.iaddr_index = n;
918 BX_CPU_THIS_PTR guard_found.time_tick = tt;
919 return(1); // on a breakpoint
924 #endif
925 #if (BX_DBG_MAX_PHY_BPOINTS > 0)
926 if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
927 bx_phy_address phy;
928 bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
929 // The "guard_found.icount!=0" condition allows you to step or
930 // continue beyond a breakpoint. Bryce tried removing it once,
931 // and once you get to a breakpoint you are stuck there forever.
932 // Not pretty.
933 if (valid && ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
934 (tt != BX_CPU_THIS_PTR guard_found.time_tick)))
936 for (unsigned n=0; n<bx_guard.iaddr.num_physical; n++) {
937 if (bx_guard.iaddr.phy[n].enabled && (bx_guard.iaddr.phy[n].addr == phy))
939 BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
940 BX_CPU_THIS_PTR guard_found.iaddr_index = n;
941 BX_CPU_THIS_PTR guard_found.time_tick = tt;
942 return(1); // on a breakpoint
947 #endif
950 return(0); // not on a breakpoint
953 bx_bool BX_CPU_C::dbg_check_end_instr_bpoint(void)
955 BX_CPU_THIS_PTR guard_found.icount++;
956 BX_CPU_THIS_PTR guard_found.cs =
957 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
958 BX_CPU_THIS_PTR guard_found.eip = RIP;
959 BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, RIP);
960 BX_CPU_THIS_PTR guard_found.is_32bit_code =
961 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
962 BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();
964 // Check if we hit read/write or time breakpoint
965 if (BX_CPU_THIS_PTR break_point) {
966 switch (BX_CPU_THIS_PTR break_point) {
967 case BREAK_POINT_TIME:
968 BX_INFO(("[" FMT_LL "d] Caught time breakpoint", bx_pc_system.time_ticks()));
969 BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
970 return(1); // on a breakpoint
971 case BREAK_POINT_READ:
972 BX_INFO(("[" FMT_LL "d] Caught read watch point", bx_pc_system.time_ticks()));
973 BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
974 return(1); // on a breakpoint
975 case BREAK_POINT_WRITE:
976 BX_INFO(("[" FMT_LL "d] Caught write watch point", bx_pc_system.time_ticks()));
977 BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
978 return(1); // on a breakpoint
979 default:
980 BX_PANIC(("Weird break point condition"));
984 if (BX_CPU_THIS_PTR magic_break) {
985 BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
986 BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
987 return(1); // on a breakpoint
990 // convenient point to see if user requested debug break or typed Ctrl-C
991 if (bx_guard.interrupt_requested) {
992 return(1);
995 return(0); // no breakpoint
998 void BX_CPU_C::dbg_take_irq(void)
1000 // NOTE: similar code in ::cpu_loop()
1002 if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
1003 if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
1004 // normal return from setjmp setup
1005 unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
1006 BX_CPU_THIS_PTR errorno = 0;
1007 BX_CPU_THIS_PTR EXT = 1; // external event
1008 BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
1009 interrupt(vector, 0, 0, 0);
1014 void BX_CPU_C::dbg_force_interrupt(unsigned vector)
1016 // Used to force simulator to take an interrupt, without
1017 // regard to IF
1019 if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
1020 // normal return from setjmp setup
1021 BX_CPU_THIS_PTR errorno = 0;
1022 BX_CPU_THIS_PTR EXT = 1; // external event
1023 BX_CPU_THIS_PTR async_event = 1; // probably don't need this
1024 interrupt(vector, 0, 0, 0);
1028 void BX_CPU_C::dbg_take_dma(void)
1030 // NOTE: similar code in ::cpu_loop()
1031 if (BX_HRQ) {
1032 BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
1033 DEV_dma_raise_hlda();
1037 #endif // #if BX_DEBUGGER