/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.257 2008/12/11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"
// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RIP EIP
#define RSP ESP
#endif
// ICACHE instrumentation code
#if BX_SUPPORT_ICACHE

#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
          iCacheLookups, \
          iCacheMisses,  \
          (iCacheLookups-iCacheMisses) * 100.0 / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++

#else
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif

#endif // BX_SUPPORT_ICACHE
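
// Note on the sampling above: InstrICache_StatsMask is 0xffffff, so the stats
// print once every 2^24 lookups (whenever the low 24 bits of the counter are
// zero) and both counters are then reset.  For example, 16M lookups with 1M
// misses would report a hit rate of about 93.75%.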
// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run.  This is used by the Bochs internal debugger and when simulating
// multiple processors.
//
// If the maximum number of instructions has been executed, return.  A zero
// count means run forever.
#if BX_SUPPORT_SMP || BX_DEBUGGER
  #define CHECK_MAX_INSTRUCTIONS(count) \
    if ((count) > 0) {                  \
      (count)--;                        \
      if ((count) == 0) return;         \
    }
#else
  #define CHECK_MAX_INSTRUCTIONS(count)
#endif
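
// For illustration only (not part of this file): roughly how an SMP-aware
// outer loop could use the count, assuming a hypothetical round-robin
// scheduler with a fixed per-CPU quantum.  The real scheduling logic lives
// in the Bochs main simulation loop.
#if 0
static void run_cpus_round_robin(unsigned ncpus, Bit32u quantum)
{
  while (1) {
    for (unsigned cpu = 0; cpu < ncpus; cpu++) {
      // Each CPU retires at most 'quantum' instructions before
      // CHECK_MAX_INSTRUCTIONS forces cpu_loop to return, so the
      // next CPU gets a turn.
      BX_CPU(cpu)->cpu_loop(quantum);
    }
  }
}
#endif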
void BX_CPU_C::cpu_loop(Bit32u max_instr_count)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif
  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
    // we can get here only from the exception() function ...
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
    BX_TICK1_IF_SINGLE_PROCESSOR();
#if BX_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_epilog()) return;
#endif
    CHECK_MAX_INSTRUCTIONS(max_instr_count);
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) return;
#endif
  }
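
  // For reference: the longjmp that lands in the setjmp above is issued at
  // the end of BX_CPU_C::exception().  A minimal sketch of the pairing
  // (exception() also builds the exception frame and picks fault vs. trap
  // semantics before unwinding; signature abbreviated):
  //
  //   void BX_CPU_C::exception(unsigned vector, Bit16u error_code, ...)
  //   {
  //     // ... deliver the exception to the guest ...
  //     longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // resume in cpu_loop
  //   }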
  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control be returned to it so that
  // the situation may be examined.
#if BX_DEBUGGER
  if (bx_guard.interrupt_requested) return;
#endif
  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_rip = RIP; // commit new EIP
  BX_CPU_THIS_PTR speculative_rsp = 0;
  BX_CPU_THIS_PTR EXT = 0;
  BX_CPU_THIS_PTR errorno = 0;

  while (1) {
    // check on events which occurred for previous instructions (traps)
    // and ones which are asynchronous to the CPU (hardware interrupts)
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // If request to return to caller ASAP.
        return;
      }
    }

no_async_event:

    Bit32u eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

    if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
      prefetch();
      eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
    }
#if BX_SUPPORT_ICACHE
    bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + eipBiased;
    bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr);
    bxInstruction_c *i = entry->i;

    InstrICache_Increment(iCacheLookups);
    InstrICache_Stats();

    if ((entry->pAddr == pAddr) &&
        (entry->writeStamp == *(BX_CPU_THIS_PTR currPageWriteStampPtr)))
    {
      // iCache hit. An instruction was found in the iCache.
    }
    else {
      // iCache miss. No validated instruction with matching fetch parameters
      // is in the iCache.
      InstrICache_Increment(iCacheMisses);
      serveICacheMiss(entry, eipBiased, pAddr);
      i = entry->i;
    }
#else
    bxInstruction_c iStorage, *i = &iStorage;
    fetchInstruction(i, eipBiased);
#endif

#if BX_SUPPORT_TRACE_CACHE
    unsigned length = entry->ilen;

    for(;;i++) {
#endif

#if BX_INSTRUMENTATION
      BX_INSTR_OPCODE(BX_CPU_ID, BX_CPU_THIS_PTR eipFetchPtr + (RIP + BX_CPU_THIS_PTR eipPageBias),
         i->ilen(), BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif

#if BX_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_prolog()) return;
#endif

#if BX_DISASM
      if (BX_CPU_THIS_PTR trace) {
        // print the instruction that is about to be executed
        debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
      }
#endif

      // decoding instruction completed -> continue with execution
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      BX_CPU_CALL_METHOD(i->execute, (i)); // might iterate repeat instruction
      BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
      BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
      BX_TICK1_IF_SINGLE_PROCESSOR();

      // inform instrumentation about new instruction
      BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);

      // note instructions generating exceptions never reach this point
#if BX_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_epilog()) return;
#endif

      CHECK_MAX_INSTRUCTIONS(max_instr_count);

#if BX_SUPPORT_TRACE_CACHE
      if (BX_CPU_THIS_PTR async_event) {
        // clear stop trace magic indication that probably was set by repeat or branch32/64
        BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
        break;
      }

      if (--length == 0) goto no_async_event;
    }
#endif
  }  // while (1)
}
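
// A note on the trace-cache path in cpu_loop(): each iCache entry holds a
// short trace of entry->ilen decoded instructions that execute back to back.
// The trace is abandoned early when async_event becomes set (for example by
// a REP iteration asserting BX_ASYNC_EVENT_STOP_TRACE); otherwise, after the
// last instruction of the trace, control jumps to no_async_event to look up
// the next trace without re-checking async events.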

void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  // non-repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    while(1) {
      if (RCX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (RCX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else
#endif
  if (i->as32L()) {
    while(1) {
      if (ECX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if (ECX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else  // 16bit addrsize
  {
    while(1) {
      if (CX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if (CX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

#if BX_SUPPORT_TRACE_CACHE
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
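
// For illustration only (not part of the emulator): ignoring the debugger
// hooks, the loops above implement the REP prefix essentially like this
// 32-bit pseudo-code:
//
//   while (ECX != 0) {
//     execute_one_iteration();    // e.g. one MOVSD step
//     ECX--;
//     if (async_event) break;     // service interrupts between iterations
//   }
//   if (ECX != 0) RIP = prev_rip; // not done: REP insn re-executes later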

void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  unsigned rep = i->repUsedValue();

  // non-repeated instruction
  if (! rep) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

  if (rep == 3) { /* repeat prefix 0xF3 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (! get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
        }
        if (! get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else  // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
        }
        if (! get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
  }
  else { /* repeat prefix 0xF2 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
        }
        if (get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else  // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
        }
        if (get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
  }

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

#if BX_SUPPORT_TRACE_CACHE
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
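
// For illustration: repeat_ZF() adds the CMPS/SCAS termination flag to the
// plain REP loop above.  With the 0xF3 (REPE) prefix the loop also stops as
// soon as ZF becomes 0, and with 0xF2 (REPNE) as soon as ZF becomes 1,
// hence the "! get_ZF()" and "get_ZF()" early returns.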

unsigned BX_CPU_C::handleAsyncEvent(void)
{
  // This area is where we process special conditions and events.
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_SPECIAL) {
    // I made up the bitmask above to mean HALT state.
    // for one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
    while (1)
    {
      if ((BX_CPU_INTR && (BX_CPU_THIS_PTR get_IF() || (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT_IF))) ||
           BX_CPU_THIS_PTR pending_NMI || BX_CPU_THIS_PTR pending_SMI)
      {
        // interrupt ends the HALT condition
#if BX_SUPPORT_MONITOR_MWAIT
        if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT)
          BX_MEM(0)->clear_monitor(BX_CPU_THIS_PTR bx_cpuid);
#endif
        BX_CPU_THIS_PTR debug_trap = 0;   // clear traps for after resume
        BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
        break;
      }
      if ((BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_SPECIAL) == 0) {
        BX_INFO(("handleAsyncEvent: reset detected in HLT state"));
        break;
      }

      // for multiprocessor simulation, even if this CPU is halted we still
      // must give the others a chance to simulate.  If an interrupt has
      // arrived, then clear the HALT condition; otherwise just return from
      // the CPU loop with stop_reason STOP_CPU_HALTED.
#if BX_SUPPORT_SMP
      if (BX_SMP_PROCESSORS > 1) {
        // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
        BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
        return 1; // Return to caller of cpu_loop.
      }
#endif

#if BX_DEBUGGER
      if (bx_guard.interrupt_requested)
        return 1; // Return to caller of cpu_loop.
#endif

      BX_TICK1();
    }
  } else if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & 0x00008000) {
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  // (bochs doesn't support these)
  if (BX_CPU_THIS_PTR pending_SMI && ! BX_CPU_THIS_PTR smm_mode())
  {
    // clear SMI pending flag and disable NMI when SMM was accepted
    BX_CPU_THIS_PTR pending_SMI = 0;
    BX_CPU_THIS_PTR disable_NMI = 1;
    enter_system_management_mode();
  }
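
  // Note the hand-off: deliver_SMI() (later in this file) only sets
  // pending_SMI and async_event; the actual switch into System Management
  // Mode happens right above, on the next instruction boundary after the
  // SMI was delivered.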

  // Priority 4: Traps on Previous Instruction
  //   Breakpoint Debug Trap
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (BX_CPU_THIS_PTR debug_trap &&
      !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG))
  {
    // A trap may be inhibited on this boundary due to an instruction
    // which loaded SS.  If so we clear the inhibit_mask below
    // and don't execute this code until the next boundary.
    // Commit debug events to DR6
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 5: External Interrupts
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
    // inhibit_mask is cleared below, in which case we will have
    // an opportunity to check interrupts on the next instruction
    // boundary.
  }
  else if (BX_CPU_THIS_PTR pending_NMI) {
    BX_CPU_THIS_PTR pending_NMI = 0;
    BX_CPU_THIS_PTR disable_NMI = 1;
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, 0, 0, 0);
  }
  else if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF() && BX_DBG_ASYNC_INTR)
  {
    Bit8u vector;

    // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
    if (BX_CPU_THIS_PTR local_apic.INTR)
      vector = BX_CPU_THIS_PTR local_apic.acknowledge_int();
    else
#endif
      // if no local APIC, always acknowledge the PIC.
      vector = DEV_pic_iac(); // may set INTR with next interrupt
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(vector, 0, 0, 0);
    // Set up environment, as would be when this main cpu loop gets
    // invoked.  At the end of normal instructions, we always commit
    // the new EIP.  But here, we call interrupt() much like
    // it was a software interrupt instruction, and need to effect the
    // commit here.  This code mirrors similar code above.
    BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
    BX_CPU_THIS_PTR speculative_rsp = 0;
    BX_CPU_THIS_PTR EXT = 0;
    BX_CPU_THIS_PTR errorno = 0;
  }
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= 0x00004000; // BS flag in DR6
  }

  // Now we can handle things which are synchronous to instruction
  // execution.
  if (BX_CPU_THIS_PTR get_RF()) {
    BX_CPU_THIS_PTR clear_RF();
  }
#if BX_X86_DEBUGGER
  else {
    // only bother comparing if any breakpoints enabled
    if (BX_CPU_THIS_PTR dr7 & 0x000000ff) {
      bx_address iaddr = get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip);
      Bit32u dr6_bits = hwdebug_compare(iaddr, 1, BX_HWDebugInstruction, BX_HWDebugInstruction);
      if (dr6_bits)
      {
        // Add to the list of debug events thus far.
        BX_CPU_THIS_PTR async_event = 1;
        BX_CPU_THIS_PTR debug_trap |= dr6_bits;
        // If debug events are not inhibited on this boundary,
        // fire off a debug fault.  Otherwise handle it on the next
        // boundary. (becomes a trap)
        if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG)) {
          // Commit debug events to DR6
#if BX_CPU_LEVEL <= 4
          // On 386/486 bit12 is settable
          BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
                                (BX_CPU_THIS_PTR debug_trap & 0x0000f00f);
#else
          // On Pentium+, bit12 is always zero
          BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
                                (BX_CPU_THIS_PTR debug_trap & 0x0000e00f);
#endif
          exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
        }
      }
    }
  }
#endif

  // We have ignored processing of external interrupts and
  // debug events on this boundary.  Reset the mask so they
  // will be processed on the next boundary.
  BX_CPU_THIS_PTR inhibit_mask = 0;

  if (!((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) ||
        BX_CPU_THIS_PTR debug_trap ||
        BX_CPU_THIS_PTR get_TF()
#if BX_X86_DEBUGGER
        || (BX_CPU_THIS_PTR dr7 & 0xff)
#endif
        ))
    BX_CPU_THIS_PTR async_event = 0;

  return 0; // Continue executing cpu_loop.
}
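
// A note on handleAsyncEvent()'s return value: non-zero asks cpu_loop to
// return to its caller (CPU halted, debugger break, or kill request), zero
// means "events processed, keep executing".  cpu_loop consumes it as:
//
//   if (BX_CPU_THIS_PTR async_event) {
//     if (handleAsyncEvent())
//       return; // give the caller (debugger / other CPUs) a chance
//   }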

// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1Megabyte) (increments of...)
//  * A20 boundary:             1024k (1Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any

void BX_CPU_C::prefetch(void)
{
  bx_address laddr;
  unsigned pageOffset;

#if BX_SUPPORT_X86_64
  if (Is64BitMode()) {
    if (! IsCanonical(RIP)) {
      BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    // linear address is equal to RIP in 64-bit long mode
    pageOffset = PAGE_OFFSET(EIP);
    laddr = RIP;

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = pageOffset - RIP;
    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
  }
  else
#endif
  {
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */
    laddr = BX_CPU_THIS_PTR get_laddr32(BX_SEG_REG_CS, EIP);
    pageOffset = PAGE_OFFSET(laddr);

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = (bx_address) pageOffset - EIP;

    Bit32u limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (EIP > limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", EIP, limit));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
    if (limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = limit + BX_CPU_THIS_PTR eipPageBias + 1;
    }
  }
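
  // Worked example of the page-window math above, assuming a flat zero-based
  // CS for simplicity: EIP = 0x1234 gives pageOffset = 0x234, so eipPageBias
  // = 0x234 - 0x1234 = -0x1000.  In the fetch loop, eipBiased = RIP +
  // eipPageBias then yields the instruction's offset within the current 4K
  // page, and reaching eipPageWindowSize forces a new prefetch().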

  bx_address lpf = LPFOf(laddr);
  unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
  Bit8u *fetchPtr = 0;

  if ((tlbEntry->lpf == lpf) && !(tlbEntry->accessBits & USER_PL)) {
    BX_CPU_THIS_PTR pAddrA20Page = A20ADDR(tlbEntry->ppf);
    fetchPtr = (Bit8u*) (tlbEntry->hostPageAddr);
  }
  else {
    bx_phy_address pAddr;

    if (BX_CPU_THIS_PTR cr0.get_PG()) {
      pAddr = translate_linear(laddr, CPL, BX_EXECUTE);
      pAddr = A20ADDR(pAddr);
    }
    else {
      pAddr = A20ADDR(laddr);
    }

    BX_CPU_THIS_PTR pAddrA20Page = LPFOf(pAddr);
  }

  if (fetchPtr) {
    BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
  }
  else {
    BX_CPU_THIS_PTR eipFetchPtr = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS,
        BX_CPU_THIS_PTR pAddrA20Page, BX_EXECUTE);
  }

  // Sanity checks
  if (! BX_CPU_THIS_PTR eipFetchPtr) {
    bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + pageOffset;
    if (pAddr >= BX_MEM(0)->get_memory_len()) {
      BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
    }
    else {
      BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x" FMT_PHY_ADDRX, pAddr));
    }
  }

#if BX_SUPPORT_ICACHE
  BX_CPU_THIS_PTR currPageWriteStampPtr = pageWriteStampTable.getPageWriteStampPtr(BX_CPU_THIS_PTR pAddrA20Page);
  Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);
  pageWriteStamp &= ~ICacheWriteStampFetchModeMask; // Clear out old fetch mode bits
  pageWriteStamp |=  BX_CPU_THIS_PTR fetchModeMask; // And add new ones
  pageWriteStampTable.setPageWriteStamp(BX_CPU_THIS_PTR pAddrA20Page, pageWriteStamp);
#endif
}

void BX_CPU_C::boundaryFetch(const Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
{
  unsigned j, ret;
  Bit8u fetchBuffer[16]; // Really only need 15

  if (remainingInPage >= 15) {
    BX_ERROR(("boundaryFetch #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Read all leftover bytes in current page up to boundary.
  for (j=0; j<remainingInPage; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

  // The 2nd chunk of the instruction is on the next page.
  // Set RIP to the 0th byte of the 2nd page, and force a
  // prefetch so direct access of that physical page is possible, and
  // all the associated info is updated.
  RIP += remainingInPage;
  prefetch();

  unsigned fetchBufferLimit = 15;
  if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
    BX_DEBUG(("boundaryFetch: small window size after prefetch - %d bytes", BX_CPU_THIS_PTR eipPageWindowSize));
    fetchBufferLimit = BX_CPU_THIS_PTR eipPageWindowSize;
  }

  // We can fetch straight from the 0th byte, which is eipFetchPtr;
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;

  // read leftover bytes in next page
  for (; j<fetchBufferLimit; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    ret = fetchDecode64(fetchBuffer, i, fetchBufferLimit);
  else
#endif
    ret = fetchDecode32(fetchBuffer, i, fetchBufferLimit);

  if (ret == 0) {
    BX_INFO(("boundaryFetch #GP(0): failed to complete instruction decoding"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Restore EIP since we fudged it to start at the 2nd page boundary.
  RIP = BX_CPU_THIS_PTR prev_rip;

  // Since we cross an instruction boundary, note that we need a prefetch()
  // again on the next instruction.  Perhaps we can optimize this to
  // eliminate the extra prefetch() since we do it above, but have to
  // think about repeated instructions, etc.
  // invalidate_prefetch_q();

  BX_INSTR_OPCODE(BX_CPU_ID, fetchBuffer, i->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
}
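
// Worked example of the split fetch above: suppose an instruction starts 5
// bytes before a page boundary.  The first loop copies those 5 bytes from
// the old page into fetchBuffer[0..4], RIP is advanced so prefetch() maps
// the next page, and the second loop copies up to 10 more bytes into
// fetchBuffer[5..14] before the buffer is handed to fetchDecode32/64.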

void BX_CPU_C::deliver_INIT(void)
{
  if (! BX_CPU_THIS_PTR disable_INIT)
    BX_CPU_THIS_PTR reset(BX_RESET_SOFTWARE);
}

void BX_CPU_C::deliver_NMI(void)
{
  BX_CPU_THIS_PTR pending_NMI = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::deliver_SMI(void)
{
  BX_CPU_THIS_PTR pending_SMI = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::set_INTR(bx_bool value)
{
  BX_CPU_THIS_PTR INTR = value;
  BX_CPU_THIS_PTR async_event = 1;
}
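
// For illustration only: device models assert the INTR line through
// set_INTR().  The PIC wiring typically looks something like
//
//   BX_CPU(0)->set_INTR(1); // raise INTR; async_event wakes cpu_loop
//
// after which handleAsyncEvent() acknowledges the interrupt by calling
// DEV_pic_iac() and invokes interrupt() with the returned vector.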

#if BX_DEBUGGER || BX_GDBSTUB
bx_bool BX_CPU_C::dbg_instruction_prolog(void)
{
#if BX_DEBUGGER
  if (dbg_check_begin_instr_bpoint()) return 1;
#endif

  return 0;
}

bx_bool BX_CPU_C::dbg_instruction_epilog(void)
{
#if BX_DEBUGGER
  if (dbg_check_end_instr_bpoint()) return 1;
#endif

#if BX_GDBSTUB
  if (bx_dbg.gdbstub_enabled) {
    unsigned reason = bx_gdbstub_check(EIP);
    if (reason != GDBSTUB_STOP_NO_REASON) return 1;
  }
#endif

  return 0;
}
#endif // BX_DEBUGGER || BX_GDBSTUB

#if BX_DEBUGGER
extern unsigned dbg_show_mask;

bx_bool BX_CPU_C::dbg_check_begin_instr_bpoint(void)
{
  Bit64u tt = bx_pc_system.time_ticks();
  bx_address debug_eip = RIP;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip);
  BX_CPU_THIS_PTR guard_found.is_32bit_code =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // support for 'show' command in debugger
  if (dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }

  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if (BX_DBG_MAX_VIR_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned n=0; n<bx_guard.iaddr.num_virtual; n++) {
          if (bx_guard.iaddr.vir[n].enabled &&
             (bx_guard.iaddr.vir[n].cs  == cs) &&
             (bx_guard.iaddr.vir[n].eip == debug_eip))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if (BX_DBG_MAX_LIN_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned n=0; n<bx_guard.iaddr.num_linear; n++) {
          if (bx_guard.iaddr.lin[n].enabled &&
             (bx_guard.iaddr.lin[n].addr == BX_CPU_THIS_PTR guard_found.laddr))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if (BX_DBG_MAX_PHY_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      // The "guard_found.icount!=0" condition allows you to step or
      // continue beyond a breakpoint.  Bryce tried removing it once,
      // and once you get to a breakpoint you are stuck there forever.
      // Not pretty.
      if (valid && ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick)))
      {
        for (unsigned n=0; n<bx_guard.iaddr.num_physical; n++) {
          if (bx_guard.iaddr.phy[n].enabled && (bx_guard.iaddr.phy[n].addr == phy))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
  }

  return(0); // not on a breakpoint
}

bx_bool BX_CPU_C::dbg_check_end_instr_bpoint(void)
{
  BX_CPU_THIS_PTR guard_found.icount++;
  BX_CPU_THIS_PTR guard_found.cs  =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  BX_CPU_THIS_PTR guard_found.eip = RIP;
  BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, RIP);
  BX_CPU_THIS_PTR guard_found.is_32bit_code =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // Check if we hit read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }

  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }

  // convenient point to see if user requested debug break or typed Ctrl-C
  if (bx_guard.interrupt_requested) {
    return(1); // on a breakpoint
  }

  return(0); // no breakpoint
}

void BX_CPU_C::dbg_take_irq(void)
{
  // NOTE: similar code in ::cpu_loop()

  if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
    if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
      // normal return from setjmp setup
      unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
      BX_CPU_THIS_PTR errorno = 0;
      BX_CPU_THIS_PTR EXT = 1; // external event
      BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
      interrupt(vector, 0, 0, 0);
    }
  }
}

void BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
  // Used to force simulator to take an interrupt, without
  // regard to IF

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
    // normal return from setjmp setup
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; // external event
    BX_CPU_THIS_PTR async_event = 1; // probably don't need this
    interrupt(vector, 0, 0, 0);
  }
}

void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if (BX_HRQ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    DEV_dma_raise_hlda();
  }
}

#endif // #if BX_DEBUGGER