/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.172 2007/07/09 15:16:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"

#if BX_EXTERNAL_DEBUGGER
#include "extdb.h"
#endif

#if BX_PROVIDE_CPU_MEMORY
#if BX_ADDRESS_SPACES==1
BOCHSAPI BX_MEM_C bx_mem;
#else
BOCHSAPI BX_MEM_C bx_mem_array[BX_ADDRESS_SPACES];
#endif
#endif
#if BX_SUPPORT_ICACHE

bxPageWriteStampTable pageWriteStampTable;
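
// Two flavors of iCache invalidation below: purgeICaches() purges every
// CPU's iCache entries and the page write stamps; flushICaches() flushes
// the entries and only resets the write stamps.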
void purgeICaches(void)
{
#if BX_SUPPORT_SMP
  for (unsigned i=0; i<BX_SMP_PROCESSORS; i++)
    BX_CPU(i)->iCache.purgeICacheEntries();
#else
  BX_CPU(0)->iCache.purgeICacheEntries();
#endif

  pageWriteStampTable.purgeWriteStamps();
}
void flushICaches(void)
{
#if BX_SUPPORT_SMP
  for (unsigned i=0; i<BX_SMP_PROCESSORS; i++)
    BX_CPU(i)->iCache.flushICacheEntries();
#else
  BX_CPU(0)->iCache.flushICacheEntries();
#endif

  pageWriteStampTable.resetWriteStamps();
}
#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
        iCacheLookups, iCacheMisses, \
        (iCacheLookups-iCacheMisses) * 100.0 / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++
#else
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif

#endif // BX_SUPPORT_ICACHE
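
// With InstrICache_StatsMask = 0xffffff, an instrumented build prints the
// hit-rate line above once every 2^24 (~16.7M) iCache lookups, then
// restarts both counters.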
// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run. This is used only when simulating multiple processors.
//
// If the maximum number of instructions has been executed, return;
// a count of zero means run forever.
#define CHECK_MAX_INSTRUCTIONS(count) \
  if (count > 0) {                    \
    count--;                          \
    if (count == 0) return;           \
  }
// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RIP EIP
#define RSP ESP
#endif
BX_CPP_INLINE bxInstruction_c* BX_CPU_C::fetchInstruction(bxInstruction_c *iStorage, bx_address eipBiased)
{
  unsigned ret;
  bxInstruction_c *i = iStorage;

#if BX_SUPPORT_ICACHE
  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + eipBiased;
  unsigned iCacheHash = BX_CPU_THIS_PTR iCache.hash(pAddr);
  bxICacheEntry_c *cache_entry = &(BX_CPU_THIS_PTR iCache.entry[iCacheHash]);
  i = &(cache_entry->i);

  Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);

  InstrICache_Increment(iCacheLookups);
  InstrICache_Stats();
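
  // The hit test below demands an exact physical address match and an
  // unchanged page write stamp; guest writes to this page bump the stamp,
  // so entries made stale by self-modifying code fail the compare.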
  if ((cache_entry->pAddr == pAddr) &&
      (cache_entry->writeStamp == pageWriteStamp))
  {
    // iCache hit. Instruction is already decoded and stored in the
    // instruction cache.
#if BX_INSTRUMENTATION
    // An instruction was found in the iCache.
    BX_INSTR_OPCODE(BX_CPU_ID, BX_CPU_THIS_PTR eipFetchPtr + eipBiased,
       i->ilen(), BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif
    return i;
  }
#endif
  // iCache miss. No validated instruction with matching fetch parameters
  // is in the iCache. Or we're not compiling iCache support in, in which
  // case we always have an iCache miss. :^)
  bx_address remainingInPage = (BX_CPU_THIS_PTR eipPageWindowSize - eipBiased);
  unsigned maxFetch = 15;
  if (remainingInPage < 15) maxFetch = remainingInPage;
  Bit8u *fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;

#if BX_SUPPORT_ICACHE
  // The entry will be marked valid only if fetchdecode succeeds.
  cache_entry->writeStamp = ICacheWriteStampInvalid;
  InstrICache_Increment(iCacheMisses);
#endif
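
  // x86 instructions are at most 15 bytes long, so at most 15 bytes are
  // handed to the decoder; when fewer remain in the page, a failed decode
  // falls through to boundaryFetch() to stitch the instruction together
  // across the page boundary.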
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    ret = fetchDecode64(fetchPtr, i, maxFetch);
  else
#endif
    ret = fetchDecode32(fetchPtr, i, maxFetch);

  if (ret==0) {
    // return iStorage and leave icache entry invalid (do not cache instr)
    boundaryFetch(fetchPtr, remainingInPage, iStorage);
    return iStorage;
  }

#if BX_SUPPORT_ICACHE
  cache_entry->pAddr = pAddr;
  cache_entry->writeStamp = pageWriteStamp;
#endif

#if BX_INSTRUMENTATION
  // An instruction was either fetched, or found in the iCache.
  BX_INSTR_OPCODE(BX_CPU_ID, fetchPtr, i->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif

  return i;
}
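
// cpu_loop() is the heart of the emulation: service pending asynchronous
// events, fetch and decode (through the iCache when enabled), execute,
// commit RIP/RSP, and repeat until a debugger stop or instruction limit.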
void BX_CPU_C::cpu_loop(Bit32u max_instr_count)
{
  bxInstruction_c iStorage BX_CPP_AlignN(32);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
#if BX_MAGIC_BREAKPOINT
  BX_CPU_THIS_PTR magic_break = 0;
#endif
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env))
  {
    // We can only get here from the exception function, via longjmp().
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) {
      return;
    }
#endif
  }

#if BX_DEBUGGER
  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control is returned to it so that
  // the situation may be examined.
  if (bx_guard.interrupt_requested) {
    BX_ERROR(("CPU_LOOP bx_guard.interrupt_requested=%d", bx_guard.interrupt_requested));
    return;
  }
#endif

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call. In either case, commit the
  // new EIP/ESP, and set up other environmental fields. This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
  BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
  BX_CPU_THIS_PTR EXT = 0;
  BX_CPU_THIS_PTR errorno = 0;
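
  // Main fetch/decode/execute loop. Exceptions raised while an instruction
  // executes longjmp() back to the setjmp() above and fall through to this
  // loop again with freshly committed EIP/ESP values.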
  while (1) {

    // First check on events which occurred for previous instructions
    // (traps) and ones which are asynchronous to the CPU
    // (hardware interrupts).
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // If request to return to caller ASAP.
        return;
      }
    }

    bx_address eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

    if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
      prefetch();
      eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
    }
    // fetch and decode next instruction
    bxInstruction_c *i = fetchInstruction(&iStorage, eipBiased);
    BxExecutePtr_tR resolveModRM = i->ResolveModrm; // get as soon as possible for speculation
    BxExecutePtr_t execute = i->execute;            // fetch as soon as possible for speculation

    BX_CPU_CALL_METHODR(resolveModRM, (i));

    // An instruction will have been fetched using either the normal case,
    // or the boundary fetch (across pages), by this point.
    BX_INSTR_FETCH_DECODE_COMPLETED(BX_CPU_ID, i);
#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_prolog()) return;
#endif

#if BX_DISASM
    if (BX_CPU_THIS_PTR trace) {
      // print the instruction that is about to be executed
#if BX_DEBUGGER
      bx_dbg_disassemble_current(BX_CPU_ID, 1); // only one cpu, print time stamp
#else
      debug_disasm_instruction(BX_CPU_THIS_PTR prev_eip);
#endif
    }
#endif
    // decoding the instruction completed -> continue with execution
    BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
    BX_CPU_CALL_METHOD(execute, (i)); // might iterate repeat instruction
    BX_CPU_THIS_PTR prev_eip = RIP; // commit new RIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new RSP
    BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
    BX_TICK1_IF_SINGLE_PROCESSOR();

    // inform instrumentation about new instruction
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
    // note: instructions that generate exceptions never reach this point
#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_epilog()) return;
#endif

#if BX_SUPPORT_SMP || BX_DEBUGGER
    // The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
    // instructions and then return so that the other processors have a chance
    // to run. This is used only when simulating multiple processors. If only
    // one processor, don't waste any cycles on it!
    CHECK_MAX_INSTRUCTIONS(max_instr_count);
#endif

  }  // while (1)
}
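
// repeat() implements the REP prefix for string instructions: each pass
// executes the instruction body once and steps the count register (CX, ECX
// or RCX, depending on the effective address size), returning when the
// count is exhausted or breaking out when an asynchronous event is pending.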
void BX_CPU_C::repeat(bxInstruction_c *i, BxExecutePtr_t execute)
{
  // non repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

  while (1) {

#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      if (RCX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (RCX == 0) return;
    }
    else
#endif
    if (i->as32L()) {
      if (ECX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if (ECX == 0) return;
    }
    else { // 16bit addrsize
      if (CX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if (CX == 0) return;
    }

    BX_TICK1_IF_SINGLE_PROCESSOR();

#if BX_DEBUGGER == 0
    if (BX_CPU_THIS_PTR async_event)
#endif
      break; // exit always if debugger enabled
  }

  RIP = BX_CPU_THIS_PTR prev_eip; // repeat loop not done, restore RIP
}
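
// repeat_ZFL() is the REPE/REPNE variant used by the string instructions
// that set ZF (CMPS, SCAS): repUsedValue()==3 denotes REPE/REPZ (stop once
// ZF is clear) and ==2 denotes REPNE/REPNZ (stop once ZF is set), on top
// of the usual count-exhausted and async-event exits.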
void BX_CPU_C::repeat_ZFL(bxInstruction_c *i, BxExecutePtr_t execute)
{
  // non repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

  while (1) {

#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      if (RCX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if ((i->repUsedValue()==3) && (get_ZF()==0)) return;
      if ((i->repUsedValue()==2) && (get_ZF()!=0)) return;
      if (RCX == 0) return;
    }
    else
#endif
    if (i->as32L()) {
      if (ECX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if ((i->repUsedValue()==3) && (get_ZF()==0)) return;
      if ((i->repUsedValue()==2) && (get_ZF()!=0)) return;
      if (ECX == 0) return;
    }
    else { // 16bit addrsize
      if (CX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if ((i->repUsedValue()==3) && (get_ZF()==0)) return;
      if ((i->repUsedValue()==2) && (get_ZF()!=0)) return;
      if (CX == 0) return;
    }

    BX_TICK1_IF_SINGLE_PROCESSOR();

#if BX_DEBUGGER == 0
    if (BX_CPU_THIS_PTR async_event)
#endif
      break; // exit always if debugger enabled
  }

  RIP = BX_CPU_THIS_PTR prev_eip; // repeat loop not done, restore RIP
}
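
// handleAsyncEvent() walks the architectural event priority ladder below.
// It returns 1 when cpu_loop should return to its caller (HLT with nothing
// to wake it, or a kill request) and 0 to continue executing instructions.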
unsigned BX_CPU_C::handleAsyncEvent(void)
{
  // This area is where we process special conditions and events.
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_HALT_STATE) {
    // I made up the bitmask above to mean HALT state.
#if BX_SUPPORT_SMP == 0
    // for one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
#if BX_DEBUGGER
    while (bx_guard.interrupt_requested != 1)
#else
    while (1)
#endif
    {
      if ((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) ||
           BX_CPU_THIS_PTR nmi_pending || BX_CPU_THIS_PTR smi_pending)
      {
        // interrupt ends the HALT condition
        BX_CPU_THIS_PTR debug_trap = 0;   // clear traps for after resume
        BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
        break;
      }
      if ((BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_HALT_STATE) == 0) {
        BX_INFO(("handleAsyncEvent: reset detected in HLT state"));
        break;
      }
      BX_TICK1(); // pass simulated time while halted
    }
#else   /* BX_SUPPORT_SMP */
    // for multiprocessor simulation, even if this CPU is halted we still
    // must give the others a chance to simulate. If an interrupt has
    // arrived, then clear the HALT condition; otherwise just return from
    // the CPU loop with stop_reason STOP_CPU_HALTED.
    if ((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) ||
         BX_CPU_THIS_PTR nmi_pending || BX_CPU_THIS_PTR smi_pending)
    {
      // interrupt ends the HALT condition
      BX_CPU_THIS_PTR debug_trap = 0;   // clear traps for after resume
      BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
    }
    else {
      // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
      BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
      return 1; // Return to caller of cpu_loop.
    }
#endif
  }
  else if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }
  // Priority 1: Hardware Reset and Machine Checks
  // (bochs doesn't support these)

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & 0x00008000) {
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 3: External Hardware Interventions
  // (bochs doesn't support these)
  if (BX_CPU_THIS_PTR smi_pending && ! BX_CPU_THIS_PTR smm_mode())
  {
    // clear SMI pending flag and disable NMI when SMM was accepted
    BX_CPU_THIS_PTR smi_pending = 0;
    BX_CPU_THIS_PTR nmi_disable = 1;
    enter_system_management_mode();
  }
  // Priority 4: Traps on Previous Instruction
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (BX_CPU_THIS_PTR debug_trap &&
      !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG))
  {
    // A trap may be inhibited on this boundary due to an instruction
    // which loaded SS. If so we clear the inhibit_mask below
    // and don't execute this code until the next boundary.
    // Commit debug events to DR6
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }
  // Priority 5: External Interrupts
  //   Maskable Hardware Interrupts
  if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
    // inhibit_mask is cleared below, in which case we will have
    // an opportunity to check interrupts on the next instruction
    // boundary.
  }
  else if (BX_CPU_THIS_PTR nmi_pending) {
    BX_CPU_THIS_PTR nmi_pending = 0;
    BX_CPU_THIS_PTR nmi_disable = 1;
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, 0, 0, 0);
  }
  else if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF() && BX_DBG_ASYNC_INTR)
  {
    Bit8u vector;

    // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
    if (BX_CPU_THIS_PTR local_apic.INTR)
      vector = BX_CPU_THIS_PTR local_apic.acknowledge_int();
    else
      vector = DEV_pic_iac(); // may set INTR with next interrupt
#else
    // if no local APIC, always acknowledge the PIC.
    vector = DEV_pic_iac(); // may set INTR with next interrupt
#endif
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(vector, 0, 0, 0);
    // Set up environment, as would be when this main cpu loop gets
    // invoked. At the end of normal instructions, we always commit
    // the new EIP/ESP values. But here, we call interrupt() much like
    // it was a software interrupt instruction, and need to effect the
    // commit here. This code mirrors similar code above.
    BX_CPU_THIS_PTR prev_eip = RIP; // commit new RIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new RSP
    BX_CPU_THIS_PTR EXT = 0;
    BX_CPU_THIS_PTR errorno = 0;
  }
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }
  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Segment not present
  //   General protection
  // (handled by rest of the code)

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of the next instruction. Schedule
    // a debug trap (#DB) after execution. After completion of the
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= 0x00004000; // BS flag in DR6
  }
  // Now we can handle things which are synchronous to instruction
  // execution.
  if (BX_CPU_THIS_PTR get_RF()) {
    BX_CPU_THIS_PTR clear_RF();
  }
#if BX_X86_DEBUGGER
  else {
    // only bother comparing if any breakpoints enabled
    if (BX_CPU_THIS_PTR dr7 & 0x000000ff) {
      bx_address iaddr = BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + BX_CPU_THIS_PTR prev_eip;
      Bit32u dr6_bits = hwdebug_compare(iaddr, 1, BX_HWDebugInstruction, BX_HWDebugInstruction);
      if (dr6_bits)
      {
        // Add to the list of debug events thus far.
        BX_CPU_THIS_PTR async_event = 1;
        BX_CPU_THIS_PTR debug_trap |= dr6_bits;
        // If debug events are not inhibited on this boundary,
        // fire off a debug fault. Otherwise handle it on the next
        // boundary (it becomes a trap).
        if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG)) {
          // Commit debug events to DR6
          BX_CPU_THIS_PTR dr6 = BX_CPU_THIS_PTR debug_trap;
          exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
        }
      }
    }
  }
#endif

  // We have ignored processing of external interrupts and
  // debug events on this boundary. Reset the mask so they
  // will be processed on the next boundary.
  BX_CPU_THIS_PTR inhibit_mask = 0;

  if ( !(BX_CPU_INTR ||
         BX_CPU_THIS_PTR debug_trap ||
         BX_HRQ ||
         BX_CPU_THIS_PTR get_TF()
#if BX_X86_DEBUGGER
         || (BX_CPU_THIS_PTR dr7 & 0xff)
#endif
        ))
    BX_CPU_THIS_PTR async_event = 0;

  return 0; // Continue executing cpu_loop.
}
// boundaries of consideration:
//   * physical memory boundary: 1024k (1Megabyte) (increments of...)
//   * A20 boundary:             1024k (1Megabyte)
//   * page boundary:            4k
//   * ROM boundary:             2k (don't care since we are only reading)
//   * segment boundary:         any
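
// A worked example of the biasing arithmetic below: with CS.base = 0 and
// RIP = 0x12345, laddr = 0x12345, eipPageOffset0 = 0x12345 - 0x345 = 0x12000
// and eipPageBias = -0x12000, so cpu_loop's eipBiased = RIP + eipPageBias =
// 0x345, a valid offset into the 4K fetch window.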
void BX_CPU_C::prefetch(void)
{
  bx_address temp_rip = RIP;
  bx_address laddr = BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + temp_rip;
  bx_phy_address pAddr;

  // Calculate RIP at the beginning of the page.
  bx_address eipPageOffset0 = RIP - (laddr & 0xfff);
  BX_CPU_THIS_PTR eipPageBias = -eipPageOffset0;
  BX_CPU_THIS_PTR eipPageWindowSize = 4096;
  if (! Is64BitMode()) {
    Bit32u temp_limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (((Bit32u) temp_rip) > temp_limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", (Bit32u) temp_rip, temp_limit));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
    if (temp_limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = temp_limit + BX_CPU_THIS_PTR eipPageBias + 1;
    }
  }
  if (BX_CPU_THIS_PTR cr0.get_PG()) {
    // aligned block guaranteed to be all in one page, same A20 address
    pAddr = itranslate_linear(laddr, CPL==3);
    pAddr = A20ADDR(pAddr);
  }
  else
  {
    pAddr = A20ADDR(laddr);
  }

  BX_CPU_THIS_PTR pAddrA20Page = pAddr & 0xfffff000;
  BX_CPU_THIS_PTR eipFetchPtr =
    BX_CPU_THIS_PTR mem->getHostMemAddr(BX_CPU_THIS,
        BX_CPU_THIS_PTR pAddrA20Page, BX_READ, CODE_ACCESS);

  if (! BX_CPU_THIS_PTR eipFetchPtr) {
    if (pAddr >= BX_CPU_THIS_PTR mem->len) {
      BX_PANIC(("prefetch: running in bogus memory, pAddr=0x%08x", pAddr));
    }
    else {
      BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x%08x", pAddr));
    }
  }
#if BX_SUPPORT_ICACHE
  BX_CPU_THIS_PTR currPageWriteStampPtr = pageWriteStampTable.getPageWriteStampPtr(pAddr);
  Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);
  Bit32u fetchModeMask = BX_CPU_THIS_PTR fetchModeMask;
  if ((pageWriteStamp & ICacheFetchModeMask) != fetchModeMask)
  {
    // The current CPU mode does not match iCache entries for this
    // physical page.
    pageWriteStamp &= ICacheWriteStampMask; // Clear out old fetch mode bits.
    pageWriteStamp |= fetchModeMask;        // Add in new ones.
    pageWriteStampTable.setPageWriteStamp(pAddr, pageWriteStamp);
  }
#endif
}
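
// Rewriting the stamp's fetch mode bits above implicitly invalidates any
// iCache entries decoded for this page under a different CPU mode, since
// fetchInstruction() requires an exact write stamp match.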
void BX_CPU_C::boundaryFetch(Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
{
  unsigned j;
  Bit8u fetchBuffer[16]; // Really only need 15
  unsigned ret;

  if (remainingInPage >= 15) {
    BX_INFO(("fetchDecode #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Read all leftover bytes in current page up to boundary.
  for (j=0; j<remainingInPage; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

  // The 2nd chunk of the instruction is on the next page.
  // Set RIP to the 0th byte of the 2nd page, and force a
  // prefetch so direct access of that physical page is possible, and
  // all the associated info is updated.
  RIP += remainingInPage;
  prefetch();

  if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
    BX_PANIC(("fetch_decode: small window size after prefetch"));
  }

  // We can fetch straight from the 0th byte, which is eipFetchPtr;
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;

  // read leftover bytes in next page
  for (; j<15; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    ret = fetchDecode64(fetchBuffer, i, 15);
  }
  else
#endif
  {
    ret = fetchDecode32(fetchBuffer, i, 15);
  }

  if (ret==0) {
    BX_INFO(("fetchDecode #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Restore EIP since we fudged it to start at the 2nd page boundary.
  RIP = BX_CPU_THIS_PTR prev_eip;

  // Since we cross an instruction boundary, note that we need a prefetch()
  // again on the next instruction. Perhaps we can optimize this to
  // eliminate the extra prefetch() since we do it above, but have to
  // think about repeated instructions, etc.
  invalidate_prefetch_q();

  BX_INSTR_OPCODE(BX_CPU_ID, fetchBuffer, i->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
}
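
// deliver_NMI()/deliver_SMI() below are the device-side entry points: they
// simply latch the pending flag and raise async_event so handleAsyncEvent()
// picks the event up at the next instruction boundary.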
void BX_CPU_C::deliver_NMI(void)
{
  BX_CPU_THIS_PTR nmi_pending = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::deliver_SMI(void)
{
  BX_CPU_THIS_PTR smi_pending = 1;
  BX_CPU_THIS_PTR async_event = 1;
}
#if BX_EXTERNAL_DEBUGGER
void BX_CPU_C::ask(int level, const char *prefix, const char *fmt, va_list ap)
{
  char buf1[1024]; // buffer size assumed; original declaration elided
  vsprintf(buf1, fmt, ap);
  printf("%s %s\n", prefix, buf1);
}
#endif
#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB

bx_bool BX_CPU_C::dbg_instruction_prolog(void)
{
#if BX_DEBUGGER
  if (dbg_check_begin_instr_bpoint()) return 1;
#endif

#if BX_EXTERNAL_DEBUGGER
  bx_external_debugger(BX_CPU_THIS);
#endif

  return 0;
}

bx_bool BX_CPU_C::dbg_instruction_epilog(void)
{
#if BX_DEBUGGER
  if (dbg_check_end_instr_bpoint()) return 1;
#endif

#if BX_GDBSTUB
  if (bx_dbg.gdbstub_enabled) {
    unsigned reason = bx_gdbstub_check(EIP);
    if (reason != GDBSTUB_STOP_NO_REASON) return 1;
  }
#endif

  return 0;
}

#endif // BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
#if BX_DEBUGGER
extern unsigned dbg_show_mask;
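
// dbg_check_begin_instr_bpoint() runs before an instruction executes (via
// dbg_instruction_prolog) and dbg_check_end_instr_bpoint() runs after it
// (via dbg_instruction_epilog); returning 1 stops the simulation loop.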
bx_bool BX_CPU_C::dbg_check_begin_instr_bpoint(void)
{
  Bit64u tt = bx_pc_system.time_ticks();
  bx_address debug_eip = BX_CPU_THIS_PTR prev_eip;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr =
    BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + debug_eip;
  BX_CPU_THIS_PTR guard_found.is_32bit_code =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // mode switch breakpoint
  // Instructions which generate exceptions never reach the end of the
  // loop due to a long jump. That's why we check at the start of the
  // instruction. The downside is that we show the instruction about to
  // be executed (not the one generating the mode switch).
  if (BX_CPU_THIS_PTR mode_break &&
      (BX_CPU_THIS_PTR dbg_cpu_mode != BX_CPU_THIS_PTR get_cpu_mode()))
  {
    BX_INFO(("[" FMT_LL "d] Caught mode switch breakpoint, switching from '%s' to '%s'",
      bx_pc_system.time_ticks(), cpu_mode_string(BX_CPU_THIS_PTR dbg_cpu_mode),
      cpu_mode_string(BX_CPU_THIS_PTR get_cpu_mode())));
    BX_CPU_THIS_PTR dbg_cpu_mode = BX_CPU_THIS_PTR get_cpu_mode();
    BX_CPU_THIS_PTR stop_reason = STOP_MODE_BREAK_POINT;
    return(1);
  }

  // support for 'show' command in debugger
  if (dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }
  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if BX_DBG_SUPPORT_VIR_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_virtual; i++) {
          if (bx_guard.iaddr.vir[i].enabled &&
             (bx_guard.iaddr.vir[i].cs  == cs) &&
             (bx_guard.iaddr.vir[i].eip == debug_eip))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if BX_DBG_SUPPORT_LIN_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_linear; i++) {
          if (bx_guard.iaddr.lin[i].enabled &&
             (bx_guard.iaddr.lin[i].addr == BX_CPU_THIS_PTR guard_found.laddr))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if BX_DBG_SUPPORT_PHY_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      // The "guard_found.icount!=0" condition allows you to step or
      // continue beyond a breakpoint. Bryce tried removing it once,
      // and once you get to a breakpoint you are stuck there forever.
      if (valid && ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick)))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_physical; i++) {
          if (bx_guard.iaddr.phy[i].enabled && (bx_guard.iaddr.phy[i].addr == phy))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
  }

  return(0); // not on a breakpoint
}
bx_bool BX_CPU_C::dbg_check_end_instr_bpoint(void)
{
  bx_address debug_eip = BX_CPU_THIS_PTR prev_eip;
  BX_CPU_THIS_PTR guard_found.icount++;
  BX_CPU_THIS_PTR guard_found.cs  =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr =
    BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + debug_eip;
  BX_CPU_THIS_PTR guard_found.is_32bit_code =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();
  // Check if we hit read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }
#if BX_MAGIC_BREAKPOINT
  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }
#endif

  // convenient point to see if user typed Ctrl-C
  if (bx_guard.interrupt_requested &&
     (bx_guard.guard_for & BX_DBG_GUARD_CTRL_C))
  {
    BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_CTRL_C;
    return(1); // Ctrl-C pressed
  }

  return(0); // no breakpoint
}
void BX_CPU_C::dbg_take_irq(void)
{
  // NOTE: similar code in ::cpu_loop()

  if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
    if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
      // normal return from setjmp setup
      unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
      BX_CPU_THIS_PTR errorno = 0;
      BX_CPU_THIS_PTR EXT = 1; // external event
      BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
      interrupt(vector, 0, 0, 0);
    }
  }
}
void BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
  // Used to force simulator to take an interrupt, without
  // regard to IF.

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
    // normal return from setjmp setup
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; // external event
    BX_CPU_THIS_PTR async_event = 1; // probably don't need this
    interrupt(vector, 0, 0, 0);
  }
}
void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
  DEV_dma_raise_hlda();
}

#endif // #if BX_DEBUGGER