- added instructions how to update the online documentation
[bochs-mirror.git] / cpu / proc_ctrl.cc
blob06da2eb6508b8f9c430fe63d48fe4ad0e251e93b
1 /////////////////////////////////////////////////////////////////////////
2 // $Id: proc_ctrl.cc,v 1.267 2008/12/13 18:40:39 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (C) 2001 MandrakeSoft S.A.
6 //
7 // MandrakeSoft S.A.
8 // 43, rue d'Aboukir
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 /////////////////////////////////////////////////////////////////////////
29 #define NEED_CPU_REG_SHORTCUTS 1
30 #include "bochs.h"
31 #include "cpu.h"
32 #define LOG_THIS BX_CPU_THIS_PTR
34 #if BX_SUPPORT_X86_64==0
35 // Make life easier for merging code.
36 #define RAX EAX
37 #define RCX ECX
38 #define RDX EDX
39 #define RIP EIP
40 #endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::UndefinedOpcode(bxInstruction_c *i)
{
  // Common handler for undefined/unimplemented opcodes: log the leading
  // opcode byte, then raise #UD (exception() does not return).
  BX_DEBUG(("UndefinedOpcode: b1 = 0x%02x causes #UD exception", i->b1()));
  exception(BX_UD_EXCEPTION, 0, 0);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::NOP(bxInstruction_c *i)
{
  // No operation - NOP has no architectural effect.
}
53 void BX_CPP_AttrRegparmN(1) BX_CPU_C::PREFETCH(bxInstruction_c *i)
55 #if BX_INSTRUMENTATION
56 bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
57 BX_INSTR_PREFETCH_HINT(BX_CPU_ID, i->nnn(), i->seg(), eaddr);
58 #endif
// The shutdown state is very similar to the state following the execution
// of an HLT instruction. In this mode the processor stops executing
// instructions until #NMI, #SMI, #RESET or #INIT is received. If
// shutdown occurs while in the NMI interrupt handler or in SMM, a hardware
// reset must be used to restart the processor execution.
void BX_CPU_C::shutdown(void)
{
  // Enter the shutdown state (triple fault etc.). Not fully implemented
  // yet, hence the panic so the condition is never silently reached.
  BX_PANIC(("Entering to shutdown state still not implemented"));

  BX_CPU_THIS_PTR clear_IF();

  // Shutdown is modeled via the same artificial debug-trap mechanism
  // used for HLT rather than a dedicated state variable.
  BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_TRAP_SHUTDOWN; // artificial trap
  BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
  // Execution of this instruction completes. The processor
  // will remain in a halt state until one of the above conditions
  // is met.

  BX_INSTR_HLT(BX_CPU_ID);

#if BX_DEBUGGER
  bx_dbg_halt(BX_CPU_ID);
#endif

#if BX_USE_IDLE_HACK
  bx_gui->sim_is_idle();
#endif

  // Abandon the current instruction and return to the main decode loop.
  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
}
94 void BX_CPP_AttrRegparmN(1) BX_CPU_C::HLT(bxInstruction_c *i)
96 if (!real_mode() && CPL!=0) {
97 BX_DEBUG(("HLT: %s priveledge check failed, CPL=%d, generate #GP(0)",
98 cpu_mode_string(BX_CPU_THIS_PTR cpu_mode), CPL));
99 exception(BX_GP_EXCEPTION, 0, 0);
102 if (! BX_CPU_THIS_PTR get_IF()) {
103 BX_INFO(("WARNING: HLT instruction with IF=0!"));
106 // stops instruction execution and places the processor in a
107 // HALT state. An enabled interrupt, NMI, or reset will resume
108 // execution. If interrupt (including NMI) is used to resume
109 // execution after HLT, the saved CS:eIP points to instruction
110 // following HLT.
112 // artificial trap bit, why use another variable.
113 BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_TRAP_HALT; // artificial trap
114 BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
115 // Execution of this instruction completes. The processor
116 // will remain in a halt state until one of the above conditions
117 // is met.
119 BX_INSTR_HLT(BX_CPU_ID);
121 #if BX_DEBUGGER
122 bx_dbg_halt(BX_CPU_ID);
123 #endif
125 #if BX_USE_IDLE_HACK
126 bx_gui->sim_is_idle();
127 #endif
130 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLTS(bxInstruction_c *i)
132 if (!real_mode() && CPL!=0) {
133 BX_ERROR(("CLTS: priveledge check failed, generate #GP(0)"));
134 exception(BX_GP_EXCEPTION, 0, 0);
137 BX_CPU_THIS_PTR cr0.set_TS(0);
140 /* 0F 08 INVD */
141 /* 0F 09 WBINVD */
142 void BX_CPP_AttrRegparmN(1) BX_CPU_C::WBINVD(bxInstruction_c *i)
144 #if BX_CPU_LEVEL >= 4
145 if (!real_mode() && CPL!=0) {
146 BX_ERROR(("INVD/WBINVD: priveledge check failed, generate #GP(0)"));
147 exception(BX_GP_EXCEPTION, 0, 0);
150 invalidate_prefetch_q();
152 BX_DEBUG(("INVD/WBINVD: Flush internal caches !"));
153 #if BX_INSTRUMENTATION
154 if (i->b1() == 0x08)
155 BX_INSTR_CACHE_CNTRL(BX_CPU_ID, BX_INSTR_INVD);
156 else
157 BX_INSTR_CACHE_CNTRL(BX_CPU_ID, BX_INSTR_WBINVD);
158 #endif
160 #if BX_SUPPORT_ICACHE
161 flushICaches();
162 #endif
164 #else
165 BX_INFO(("INVD/WBINVD: required 486 support, use --enable-cpu-level=4 option"));
166 exception(BX_UD_EXCEPTION, 0, 0);
167 #endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
{
  // CLFLUSH m8: flush the cache line containing the addressed byte.
  // Bochs does not model data caches, so only the architectural access
  // checks and the instrumentation callback are performed.
#if BX_SUPPORT_CLFLUSH
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), eaddr);

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Long mode: no segment limit; only a canonical-address check.
    if (! IsCanonical(laddr)) {
      BX_ERROR(("CLFLUSH: non-canonical access !"));
      exception(int_number(i->seg()), 0, 0);
    }
  }
  else
#endif
  {
    // check if we could access the memory segment
    if (!(seg->cache.valid & SegAccessROK)) {
      if (! execute_virtual_checks(seg, eaddr, 1))
        exception(int_number(i->seg()), 0, 0);
    }
    else {
      if (eaddr > seg->cache.u.segment.limit_scaled) {
        BX_ERROR(("CLFLUSH: segment limit violation"));
        exception(int_number(i->seg()), 0, 0);
      }
    }
  }

  // Translate to a physical address (may raise #PF when paging is on),
  // then apply A20 masking, purely for the instrumentation callback.
  bx_phy_address paddr;

  if (BX_CPU_THIS_PTR cr0.get_PG()) {
    paddr = dtranslate_linear(laddr, CPL, BX_READ);
    paddr = A20ADDR(paddr);
  }
  else
  {
    paddr = A20ADDR(laddr);
  }

  BX_INSTR_CLFLUSH(BX_CPU_ID, laddr, paddr);

#else
  BX_INFO(("CLFLUSH: not supported, enable with SSE2"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
220 #if BX_CPU_LEVEL >= 3
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_DdRd(bxInstruction_c *i)
{
  // MOV DRx, r32: load a 32-bit general register into a debug register.
#if BX_CPU_LEVEL >= 4
  // With CR4.DE set, the legacy DR4/DR5 aliases of DR6/DR7 are disabled
  // and any access to them raises #UD.
  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->nnn() & 0xE) == 4) {
      BX_ERROR(("MOV_DdRd: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0, 0);
    }
  }
#endif

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7 & 0x2000) { // GD bit set
    BX_ERROR(("MOV_DdRd: DR7 GD bit is set"));
    BX_CPU_THIS_PTR dr6 |= 0x2000;
    exception(BX_DB_EXCEPTION, 0, 0);
  }

  // Privileged: #GP(0) when executed above CPL 0 outside real mode.
  if (!real_mode() && CPL!=0) {
    BX_ERROR(("MOV_DdRd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  /* NOTES:
   *   32bit operands always used
   *   r/m field specifies general register
   *   reg field specifies which special register
   */

  invalidate_prefetch_q();

  /* This instruction is always treated as a register-to-register,
   * regardless of the encoding of the MOD field in the MODRM byte.
   */
  if (!i->modC0())
    BX_PANIC(("MOV_DdRd(): rm field not a register!"));

  Bit32u val_32 = BX_READ_32BIT_REG(i->rm());

  switch (i->nnn()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      // Drop any TLB entry for the new breakpoint linear address.
      TLB_invlpg(val_32);
      BX_CPU_THIS_PTR dr[i->nnn()] = val_32;
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions on,
      // access to DR4 causes #UD
    case 6: // DR6
#if BX_CPU_LEVEL <= 4
      // On 386/486 bit12 is settable
      BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
                            (val_32 & 0x0000f00f);
#else
      // On Pentium+, bit12 is always zero
      BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
                            (val_32 & 0x0000e00f);
#endif
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions on,
      // access to DR5 causes #UD
    case 7: // DR7
      // Note: 486+ ignore GE and LE flags. On the 386, exact
      // data breakpoint matching does not occur unless it is enabled
      // by setting the LE and/or GE flags.

      // Some sanity checks: for each breakpoint, R/W=00b (instruction
      // breakpoint) requires LEN=00b (1-byte). Log only, don't fault.
      if (((((val_32>>16) & 3)==0) && (((val_32>>18) & 3)!=0)) ||
          ((((val_32>>20) & 3)==0) && (((val_32>>22) & 3)!=0)) ||
          ((((val_32>>24) & 3)==0) && (((val_32>>26) & 3)!=0)) ||
          ((((val_32>>28) & 3)==0) && (((val_32>>30) & 3)!=0)))
      {
        // Instruction breakpoint with LENx not 00b (1-byte length)
        BX_ERROR(("MOV_DdRd: write of %08x, R/W=00b LEN!=00b", val_32));
      }

#if BX_CPU_LEVEL <= 4
      // 386/486: you can play with all the bits except b10 is always 1
      BX_CPU_THIS_PTR dr7 = val_32 | 0x00000400;
#else
      // Pentium+: bits15,14,12 are hardwired to 0, rest are settable.
      // Even bits 11,10 are changeable though reserved.
      BX_CPU_THIS_PTR dr7 = (val_32 & 0xffff2fff) | 0x00000400;
#endif
      // if we have breakpoints enabled then we must check
      // breakpoints condition in cpu loop
      if(BX_CPU_THIS_PTR dr7 & 0xff)
        BX_CPU_THIS_PTR async_event = 1;
      break;

    default:
      BX_ERROR(("MOV_DdRd: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0, 0);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdDd(bxInstruction_c *i)
{
  // MOV r32, DRx: read a debug register into a 32-bit general register.
  Bit32u val_32;

#if BX_CPU_LEVEL >= 4
  // With CR4.DE set, the DR4/DR5 aliases are disabled; access -> #UD.
  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->nnn() & 0xE) == 4) {
      BX_ERROR(("MOV_RdDd: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0, 0);
    }
  }
#endif

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7 & 0x2000) { // GD bit set
    BX_ERROR(("MOV_RdDd: DR7 GD bit is set"));
    BX_CPU_THIS_PTR dr6 |= 0x2000;
    exception(BX_DB_EXCEPTION, 0, 0);
  }

  // Privileged: #GP(0) when executed above CPL 0 outside real mode.
  if (!real_mode() && CPL!=0) {
    BX_ERROR(("MOV_RdDd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  /* This instruction is always treated as a register-to-register,
   * regardless of the encoding of the MOD field in the MODRM byte.
   */
  if (!i->modC0())
    BX_PANIC(("MOV_RdDd(): rm field not a register!"));

  switch (i->nnn()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      val_32 = BX_CPU_THIS_PTR dr[i->nnn()];
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions ON,
      // access to DR4 causes #UD
    case 6: // DR6
      val_32 = BX_CPU_THIS_PTR dr6;
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions ON,
      // access to DR5 causes #UD
    case 7: // DR7
      val_32 = BX_CPU_THIS_PTR dr7;
      break;

    default:
      BX_ERROR(("MOV_RdDd: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0, 0);
  }

  BX_WRITE_32BIT_REGZ(i->rm(), val_32);
}
384 #if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_DqRq(bxInstruction_c *i)
{
  // MOV DRx, r64 (long mode): load a 64-bit general register into a
  // debug register.
  /* NOTES:
   *   64bit operands always used
   *   r/m field specifies general register
   *   reg field specifies which special register
   */

  // With CR4.DE set, the DR4/DR5 aliases of DR6/DR7 raise #UD.
  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->nnn() & 0xE) == 4) {
      BX_ERROR(("MOV_DqRq: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0, 0);
    }
  }

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7 & 0x2000) { // GD bit set
    BX_ERROR(("MOV_DqRq: DR7 GD bit is set"));
    BX_CPU_THIS_PTR dr6 |= 0x2000;
    exception(BX_DB_EXCEPTION, 0, 0);
  }

  /* #GP(0) if CPL is not 0 */
  if (CPL != 0) {
    BX_ERROR(("MOV_DqRq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  invalidate_prefetch_q();

  /* This instruction is always treated as a register-to-register,
   * regardless of the encoding of the MOD field in the MODRM byte.
   */
  if (!i->modC0())
    BX_PANIC(("MOV_DqRq(): rm field not a register!"));

  Bit64u val_64 = BX_READ_64BIT_REG(i->rm());

  switch (i->nnn()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      // Drop any TLB entry for the new breakpoint linear address.
      TLB_invlpg(val_64);
      BX_CPU_THIS_PTR dr[i->nnn()] = val_64;
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions ON,
      // access to DR4 causes #UD
    case 6: // DR6
      // On Pentium+, bit12 is always zero
      BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
                            (val_64 & 0x0000e00f);
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions ON,
      // access to DR5 causes #UD
    case 7: // DR7
      // Note: 486+ ignore GE and LE flags. On the 386, exact
      // data breakpoint matching does not occur unless it is enabled
      // by setting the LE and/or GE flags.

      // Some sanity checks: R/W=00b (instruction breakpoint) requires
      // LEN=00b (1-byte). Log only, don't fault.
      if (((((val_64>>16) & 3)==0) && (((val_64>>18) & 3)!=0)) ||
          ((((val_64>>20) & 3)==0) && (((val_64>>22) & 3)!=0)) ||
          ((((val_64>>24) & 3)==0) && (((val_64>>26) & 3)!=0)) ||
          ((((val_64>>28) & 3)==0) && (((val_64>>30) & 3)!=0)))
      {
        // Instruction breakpoint with LENx not 00b (1-byte length)
        BX_ERROR(("MOV_DqRq: write of %08x:%08x, R/W=00b LEN!=00b",
          (Bit32u)(val_64 >> 32), (Bit32u)(val_64 & 0xFFFFFFFF)));
      }

      // Pentium+: bits15,14,12 are hardwired to 0, rest are settable.
      // Even bits 11,10 are changeable though reserved.
      BX_CPU_THIS_PTR dr7 = (val_64 & 0xffff2fff) | 0x00000400;
      break;

    default:
      BX_ERROR(("MOV_DqRq: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0, 0);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqDq(bxInstruction_c *i)
{
  // MOV r64, DRx (long mode): read a debug register into a 64-bit
  // general register.
  Bit64u val_64;

  // With CR4.DE set, the DR4/DR5 aliases are disabled; access -> #UD.
  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->nnn() & 0xE) == 4) {
      BX_ERROR(("MOV_RqDq: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0, 0);
    }
  }

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7 & 0x2000) { // GD bit set
    BX_ERROR(("MOV_RqDq: DR7 GD bit is set"));
    BX_CPU_THIS_PTR dr6 |= 0x2000;
    exception(BX_DB_EXCEPTION, 0, 0);
  }

  /* #GP(0) if CPL is not 0 */
  if (CPL != 0) {
    BX_ERROR(("MOV_RqDq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  /* This instruction is always treated as a register-to-register,
   * regardless of the encoding of the MOD field in the MODRM byte.
   */
  if (!i->modC0())
    BX_PANIC(("MOV_RqDq(): rm field not a register!"));

  switch (i->nnn()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      val_64 = BX_CPU_THIS_PTR dr[i->nnn()];
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions ON,
      // access to DR4 causes #UD
    case 6: // DR6
      val_64 = BX_CPU_THIS_PTR dr6;
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions ON,
      // access to DR5 causes #UD
    case 7: // DR7
      val_64 = BX_CPU_THIS_PTR dr7;
      break;

    default:
      BX_ERROR(("MOV_RqDq: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0, 0);
  }

  BX_WRITE_64BIT_REG(i->rm(), val_64);
}
532 #endif // #if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CdRd(bxInstruction_c *i)
{
  // MOV CRx, r32: load a 32-bit general register into a control register.
  // Privileged: #GP(0) when executed above CPL 0 outside real mode.
  if (!real_mode() && CPL!=0) {
    BX_ERROR(("MOV_CdRd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  /* NOTES:
   *   32bit operands always used
   *   r/m field specifies general register
   *   reg field specifies which special register
   */

  /* This instruction is always treated as a register-to-register,
   * regardless of the encoding of the MOD field in the MODRM byte.
   */
  if (!i->modC0())
    BX_PANIC(("MOV_CdRd(): rm field not a register!"));

  Bit32u val_32 = BX_READ_32BIT_REG(i->rm());

  switch (i->nnn()) {
    case 0: // CR0 (MSW)
      SetCR0(val_32);
      break;
    case 2: /* CR2 */
      BX_DEBUG(("MOV_CdRd:CR2 = %08x", val_32));
      BX_CPU_THIS_PTR cr2 = val_32;
      break;
    case 3: // CR3
      BX_DEBUG(("MOV_CdRd:CR3 = %08x", val_32));
      // Reserved bits take on value of MOV instruction
      SetCR3(val_32);
      BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR3, val_32);
      break;
#if BX_CPU_LEVEL > 3
    case 4: // CR4
      // Protected mode: #GP(0) if attempt to write a 1 to
      // any reserved bit of CR4
      if (! SetCR4(val_32))
        exception(BX_GP_EXCEPTION, 0, 0);
      break;
#endif
    default:
      BX_ERROR(("MOV_CdRd: #UD - control register %d index out of range", i->nnn()));
      exception(BX_UD_EXCEPTION, 0, 0);
  }
}
583 void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdCd(bxInstruction_c *i)
585 // mov control register data to register
586 Bit32u val_32 = 0;
588 if (!real_mode() && CPL!=0) {
589 BX_ERROR(("MOV_RdCd: CPL!=0 not in real mode"));
590 exception(BX_GP_EXCEPTION, 0, 0);
593 /* NOTES:
594 * 32bit operands always used
595 * r/m field specifies general register
596 * reg field specifies which special register
599 /* This instruction is always treated as a register-to-register,
600 * regardless of the encoding of the MOD field in the MODRM byte.
602 if (!i->modC0())
603 BX_PANIC(("MOV_RdCd(): rm field not a register!"));
605 switch (i->nnn()) {
606 case 0: // CR0 (MSW)
607 val_32 = BX_CPU_THIS_PTR cr0.get32();
608 break;
609 case 2: /* CR2 */
610 BX_DEBUG(("MOV_RdCd: reading CR2"));
611 val_32 = (Bit32u) BX_CPU_THIS_PTR cr2;
612 break;
613 case 3: // CR3
614 BX_DEBUG(("MOV_RdCd: reading CR3"));
615 val_32 = (Bit32u) BX_CPU_THIS_PTR cr3;
616 break;
617 case 4: // CR4
618 #if BX_CPU_LEVEL > 3
619 BX_DEBUG(("MOV_RdCd: read of CR4"));
620 val_32 = BX_CPU_THIS_PTR cr4.get32();
621 #endif
622 break;
623 default:
624 BX_ERROR(("MOV_RdCd: #UD - control register %d index out of range", i->nnn()));
625 exception(BX_UD_EXCEPTION, 0, 0);
628 BX_WRITE_32BIT_REGZ(i->rm(), val_32);
631 #if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CqRq(bxInstruction_c *i)
{
  // MOV CRx, r64 (long mode): load a 64-bit general register into a
  // control register. Long mode implies protected mode.
  BX_ASSERT(protected_mode());

  /* NOTES:
   *   64bit operands always used
   *   r/m field specifies general register
   *   reg field specifies which special register
   */

  /* #GP(0) if CPL is not 0 */
  if (CPL!=0) {
    BX_ERROR(("MOV_CqRq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  /* This instruction is always treated as a register-to-register,
   * regardless of the encoding of the MOD field in the MODRM byte.
   */
  if (!i->modC0())
    BX_PANIC(("MOV_CqRq(): rm field not a register!"));

  Bit64u val_64 = BX_READ_64BIT_REG(i->rm());

  switch (i->nnn()) {
    case 0: // CR0 (MSW)
      // Only the low 32 bits of CR0 are architecturally defined here.
      SetCR0((Bit32u) val_64);
      break;
    case 2: /* CR2 */
      BX_DEBUG(("MOV_CqRq: write to CR2 of %08x:%08x", GET32H(val_64), GET32L(val_64)));
      BX_CPU_THIS_PTR cr2 = val_64;
      break;
    case 3: // CR3
      BX_DEBUG(("MOV_CqRq: write to CR3 of %08x:%08x", GET32H(val_64), GET32L(val_64)));
      // Reserved bits take on value of MOV instruction
      SetCR3(val_64);
      BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR3, val_64);
      break;
    case 4: // CR4
      // Protected mode: #GP(0) if attempt to write a 1 to
      // any reserved bit of CR4
      BX_DEBUG(("MOV_CqRq: write to CR4 of %08x:%08x", GET32H(val_64), GET32L(val_64)));
      if (! SetCR4(val_64))
        exception(BX_GP_EXCEPTION, 0, 0);
      break;
#if BX_SUPPORT_APIC
    case 8: // CR8
      // CR8 is aliased to APIC->TASK PRIORITY register
      //   APIC.TPR[7:4] = CR8[3:0]
      //   APIC.TPR[3:0] = 0
      // Reads of CR8 return zero extended APIC.TPR[7:4]
      // Write to CR8 update APIC.TPR[7:4]
      if (val_64 & BX_CONST64(0xfffffffffffffff0)) {
        BX_ERROR(("MOV_CqRq: Attempt to set reserved bits of CR8"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
      BX_CPU_THIS_PTR local_apic.set_tpr((val_64 & 0xF) << 0x4);
      break;
#endif
    default:
      BX_ERROR(("MOV_CqRq: #UD - control register %d index out of range", i->nnn()));
      exception(BX_UD_EXCEPTION, 0, 0);
  }
}
697 void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqCq(bxInstruction_c *i)
699 // mov control register data to register
700 Bit64u val_64 = 0;
702 BX_ASSERT(protected_mode());
704 /* NOTES:
705 * 64bit operands always used
706 * r/m field specifies general register
707 * reg field specifies which special register
710 /* #GP(0) if CPL is not 0 */
711 if (CPL!=0) {
712 BX_ERROR(("MOV_RqCq: #GP(0) if CPL is not 0"));
713 exception(BX_GP_EXCEPTION, 0, 0);
716 /* This instruction is always treated as a register-to-register,
717 * regardless of the encoding of the MOD field in the MODRM byte.
719 if (!i->modC0())
720 BX_PANIC(("MOV_RqCq(): rm field not a register!"));
722 switch (i->nnn()) {
723 case 0: // CR0 (MSW)
724 val_64 = BX_CPU_THIS_PTR cr0.get32();
725 break;
726 case 2: /* CR2 */
727 BX_DEBUG(("MOV_RqCq: read of CR2"));
728 val_64 = BX_CPU_THIS_PTR cr2;
729 break;
730 case 3: // CR3
731 BX_DEBUG(("MOV_RqCq: read of CR3"));
732 val_64 = BX_CPU_THIS_PTR cr3;
733 break;
734 case 4: // CR4
735 BX_DEBUG(("MOV_RqCq: read of CR4"));
736 val_64 = BX_CPU_THIS_PTR cr4.get32();
737 break;
738 #if BX_SUPPORT_APIC
739 case 8: // CR8
740 // CR8 is aliased to APIC->TASK PRIORITY register
741 // APIC.TPR[7:4] = CR8[3:0]
742 // APIC.TPR[3:0] = 0
743 // Reads of CR8 return zero extended APIC.TPR[7:4]
744 // Write to CR8 update APIC.TPR[7:4]
745 val_64 = (BX_CPU_THIS_PTR local_apic.get_tpr() & 0xF) >> 4;
746 break;
747 #endif
748 default:
749 BX_ERROR(("MOV_RqCq: #UD - control register %d index out of range", i->nnn()));
750 exception(BX_UD_EXCEPTION, 0, 0);
753 BX_WRITE_64BIT_REG(i->rm(), val_64);
755 #endif // #if BX_SUPPORT_X86_64
757 #endif // #if BX_CPU_LEVEL >= 3
759 #if BX_CPU_LEVEL >= 2
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LMSW_Ew(bxInstruction_c *i)
{
  // LMSW r/m16: load the machine status word (low 16 bits of CR0).
  Bit16u msw;

  // Privileged: #GP(0) when executed above CPL 0 outside real mode.
  if (!real_mode() && CPL!=0) {
    BX_ERROR(("LMSW: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Source operand may be a register or a 16-bit memory word.
  if (i->modC0()) {
    msw = BX_READ_16BIT_REG(i->rm());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    msw = read_virtual_word(i->seg(), eaddr);
  }

  // LMSW does not affect PG,CD,NW,AM,WP,NE,ET bits, and cannot clear PE

  // LMSW cannot clear PE
  if (BX_CPU_THIS_PTR cr0.get_PE())
    msw |= 0x0001; // adjust PE bit to current value of 1

  msw &= 0xf; // LMSW only affects last 4 flags

  // Merge the 4 writable flags into CR0 and apply via SetCR0 so mode
  // changes are handled centrally.
  Bit32u cr0 = (BX_CPU_THIS_PTR cr0.get32() & 0xfffffff0) | msw;
  SetCR0(cr0);
}
789 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SMSW_EwR(bxInstruction_c *i)
791 if (i->os32L()) {
792 BX_WRITE_32BIT_REGZ(i->rm(), BX_CPU_THIS_PTR cr0.get32());
794 else {
795 BX_WRITE_16BIT_REG(i->rm(), BX_CPU_THIS_PTR cr0.get32() & 0xffff);
799 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SMSW_EwM(bxInstruction_c *i)
801 Bit16u msw = BX_CPU_THIS_PTR cr0.get32() & 0xffff;
802 bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
803 /* pointer, segment address pair */
804 write_virtual_word(i->seg(), eaddr, msw);
806 #endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_TdRd(bxInstruction_c *i)
{
  // MOV TRx, r32: write to a test register (386/486 only).
#if BX_CPU_LEVEL <= 4
  BX_PANIC(("MOV_TdRd: Still not implemented"));
#else
  // Pentium+ does not have TRx. They were redesigned using the MSRs.
  BX_INFO(("MOV_TdRd: causes #UD"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdTd(bxInstruction_c *i)
{
  // MOV r32, TRx: read a test register (386/486 only).
#if BX_CPU_LEVEL <= 4
  BX_PANIC(("MOV_RdTd: Still not implemented"));
#else
  // Pentium+ does not have TRx. They were redesigned using the MSRs.
  BX_INFO(("MOV_RdTd: causes #UD"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
830 #if BX_CPU_LEVEL == 2
831 void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOADALL(bxInstruction_c *i)
833 Bit16u msw, tr, flags, ip, ldtr;
834 Bit16u ds_raw, ss_raw, cs_raw, es_raw;
835 Bit16u base_15_0, limit;
836 Bit8u base_23_16, access;
838 if (v8086_mode()) BX_PANIC(("proc_ctrl: LOADALL in v8086 mode unsupported"));
840 if (BX_CPU_THIS_PTR cr0.get_PE())
842 BX_PANIC(("LOADALL not yet supported for protected mode"));
845 BX_PANIC(("LOADALL: handle CR0.val32"));
846 /* MSW */
847 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x806, 2, &msw);
848 BX_CPU_THIS_PTR cr0.set_PE(msw & 0x01); msw >>= 1;
849 BX_CPU_THIS_PTR cr0.set_MP(msw & 0x01); msw >>= 1;
850 BX_CPU_THIS_PTR cr0.set_EM(msw & 0x01); msw >>= 1;
851 BX_CPU_THIS_PTR cr0.set_TS(msw & 0x01);
853 if (BX_CPU_THIS_PTR cr0.get_PE() || BX_CPU_THIS_PTR cr0.get_MP() || BX_CPU_THIS_PTR cr0.get_EM() || BX_CPU_THIS_PTR cr0.get_TS())
854 BX_PANIC(("LOADALL set PE, MP, EM or TS bits in MSW!"));
856 /* TR */
857 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x816, 2, &tr);
858 BX_CPU_THIS_PTR tr.selector.value = tr;
859 BX_CPU_THIS_PTR tr.selector.rpl = (tr & 0x03); tr >>= 2;
860 BX_CPU_THIS_PTR tr.selector.ti = (tr & 0x01); tr >>= 1;
861 BX_CPU_THIS_PTR tr.selector.index = tr;
862 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x860, 2, &base_15_0);
863 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x862, 1, &base_23_16);
864 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x863, 1, &access);
865 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x864, 2, &limit);
867 BX_CPU_THIS_PTR tr.cache.valid =
868 BX_CPU_THIS_PTR tr.cache.p = (access & 0x80) >> 7;
869 BX_CPU_THIS_PTR tr.cache.dpl = (access & 0x60) >> 5;
870 BX_CPU_THIS_PTR tr.cache.segment = (access & 0x10) >> 4;
871 // don't allow busy bit in tr.cache.type, so bit 2 is masked away too.
872 BX_CPU_THIS_PTR tr.cache.type = (access & 0x0d);
873 BX_CPU_THIS_PTR tr.cache.u.system.base = (base_23_16 << 16) | base_15_0;
874 BX_CPU_THIS_PTR tr.cache.u.system.limit = limit;
876 if ((BX_CPU_THIS_PTR tr.selector.value & 0xfffc) == 0) {
877 BX_CPU_THIS_PTR tr.cache.valid = 0;
879 if (BX_CPU_THIS_PTR tr.cache.u.system.limit < 43 ||
880 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_AVAIL_286_TSS ||
881 BX_CPU_THIS_PTR tr.cache.segment)
883 BX_CPU_THIS_PTR tr.cache.valid = 0;
885 if (BX_CPU_THIS_PTR tr.cache.valid==0)
887 BX_CPU_THIS_PTR tr.selector.value = 0;
888 BX_CPU_THIS_PTR tr.selector.index = 0;
889 BX_CPU_THIS_PTR tr.selector.ti = 0;
890 BX_CPU_THIS_PTR tr.selector.rpl = 0;
891 BX_CPU_THIS_PTR tr.cache.u.system.base = 0;
892 BX_CPU_THIS_PTR tr.cache.u.system.limit = 0;
893 BX_CPU_THIS_PTR tr.cache.p = 0;
896 /* FLAGS */
897 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x818, 2, &flags);
898 write_flags(flags, 1, 1);
900 /* IP */
901 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x81a, 2, &IP);
903 /* LDTR */
904 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x81c, 2, &ldtr);
905 BX_CPU_THIS_PTR ldtr.selector.value = ldtr;
906 BX_CPU_THIS_PTR ldtr.selector.rpl = (ldtr & 0x03); ldtr >>= 2;
907 BX_CPU_THIS_PTR ldtr.selector.ti = (ldtr & 0x01); ldtr >>= 1;
908 BX_CPU_THIS_PTR ldtr.selector.index = ldtr;
909 if ((BX_CPU_THIS_PTR ldtr.selector.value & 0xfffc) == 0)
911 BX_CPU_THIS_PTR ldtr.cache.valid = 0;
912 BX_CPU_THIS_PTR ldtr.cache.p = 0;
913 BX_CPU_THIS_PTR ldtr.cache.segment = 0;
914 BX_CPU_THIS_PTR ldtr.cache.type = 0;
915 BX_CPU_THIS_PTR ldtr.cache.u.system.base = 0;
916 BX_CPU_THIS_PTR ldtr.cache.u.system.limit = 0;
917 BX_CPU_THIS_PTR ldtr.selector.value = 0;
918 BX_CPU_THIS_PTR ldtr.selector.index = 0;
919 BX_CPU_THIS_PTR ldtr.selector.ti = 0;
921 else {
922 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x854, 2, &base_15_0);
923 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x856, 1, &base_23_16);
924 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x857, 1, &access);
925 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x858, 2, &limit);
926 BX_CPU_THIS_PTR ldtr.cache.valid =
927 BX_CPU_THIS_PTR ldtr.cache.p = access >> 7;
928 BX_CPU_THIS_PTR ldtr.cache.dpl = (access >> 5) & 0x03;
929 BX_CPU_THIS_PTR ldtr.cache.segment = (access >> 4) & 0x01;
930 BX_CPU_THIS_PTR ldtr.cache.type = (access & 0x0f);
931 BX_CPU_THIS_PTR ldtr.cache.u.system.base = (base_23_16 << 16) | base_15_0;
932 BX_CPU_THIS_PTR ldtr.cache.u.system.limit = limit;
934 if (access == 0) {
935 BX_PANIC(("loadall: LDTR case access byte=0"));
937 if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
938 BX_PANIC(("loadall: ldtr.valid=0"));
940 if (BX_CPU_THIS_PTR ldtr.cache.segment) { /* not a system segment */
941 BX_INFO((" AR byte = %02x", (unsigned) access));
942 BX_PANIC(("loadall: LDTR descriptor cache loaded with non system segment"));
944 if (BX_CPU_THIS_PTR ldtr.cache.type != BX_SYS_SEGMENT_LDT) {
945 BX_PANIC(("loadall: LDTR.type(%u) != LDT", (unsigned) (access & 0x0f)));
949 /* DS */
950 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x81e, 2, &ds_raw);
951 parse_selector(ds_raw, &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector);
952 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x848, 2, &base_15_0);
953 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x84a, 1, &base_23_16);
954 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x84b, 1, &access);
955 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x84c, 2, &limit);
956 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
957 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit = limit;
958 set_ar_byte(BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache, access);
960 if ((BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value & 0xfffc) == 0) {
961 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 0;
963 else {
964 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 1;
966 if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid==0 ||
967 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment==0)
969 BX_PANIC(("loadall: DS invalid"));
972 /* SS */
973 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x820, 2, &ss_raw);
974 parse_selector(ss_raw, &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
975 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x842, 2, &base_15_0);
976 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x844, 1, &base_23_16);
977 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x845, 1, &access);
978 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x846, 2, &limit);
979 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
980 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit = limit;
981 set_ar_byte(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, access);
983 if ((BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value & 0xfffc) == 0) {
984 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 0;
986 else {
987 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 1;
989 if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid==0 ||
990 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment==0)
992 BX_PANIC(("loadall: SS invalid"));
995 /* CS */
996 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x822, 2, &cs_raw);
997 parse_selector(cs_raw, &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
998 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x83c, 2, &base_15_0);
999 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x83e, 1, &base_23_16);
1000 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x83f, 1, &access);
1001 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x840, 2, &limit);
1002 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
1003 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = limit;
1004 set_ar_byte(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache, access);
1006 if ((BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value & 0xfffc) == 0) {
1007 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 0;
1009 else {
1010 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 1;
1012 if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid==0 ||
1013 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment==0)
1015 BX_PANIC(("loadall: CS invalid"));
1018 updateFetchModeMask();
1019 handleCpuModeChange();
1021 /* ES */
1022 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x824, 2, &es_raw);
1023 parse_selector(es_raw, &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector);
1024 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x836, 2, &base_15_0);
1025 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x838, 1, &base_23_16);
1026 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x839, 1, &access);
1027 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x83a, 2, &limit);
1028 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
1029 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.limit = limit;
1030 set_ar_byte(BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache, access);
1032 if ((BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value & 0xfffc) == 0) {
1033 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 0;
1035 else {
1036 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 1;
1038 if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid==0 ||
1039 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.segment==0)
1041 BX_PANIC(("loadall: ES invalid"));
1044 #if 0
1045 BX_INFO(("cs.dpl = %02x", (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl));
1046 BX_INFO(("ss.dpl = %02x", (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl));
1047 BX_INFO(("BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].dpl = 0x%02x", (unsigned) BX_CPU_THIS_PTR ds.cache.dpl));
1048 BX_INFO(("BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].dpl = 0x%02x", (unsigned) BX_CPU_THIS_PTR es.cache.dpl));
1049 BX_INFO(("LOADALL: setting cs.selector.rpl to %u",
1050 (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl));
1051 BX_INFO(("LOADALL: setting ss.selector.rpl to %u",
1052 (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl));
1053 BX_INFO(("LOADALL: setting ds.selector.rpl to %u",
1054 (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.rpl));
1055 BX_INFO(("LOADALL: setting es.selector.rpl to %u",
1056 (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.rpl));
1057 #endif
1059 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x826, 2, &DI);
1060 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x828, 2, &SI);
1061 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x82a, 2, &BP);
1062 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x82c, 2, &SP);
1063 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x82e, 2, &BX);
1064 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x830, 2, &DX);
1065 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x832, 2, &CX);
1066 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x834, 2, &AX);
1068 /* GDTR */
1069 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x84e, 2, &base_15_0);
1070 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x850, 1, &base_23_16);
1071 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x851, 1, &access);
1072 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x852, 2, &limit);
1073 BX_CPU_THIS_PTR gdtr.base = (base_23_16 << 16) | base_15_0;
1074 BX_CPU_THIS_PTR gdtr.limit = limit;
1076 /* IDTR */
1077 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x85a, 2, &base_15_0);
1078 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x85c, 1, &base_23_16);
1079 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x85d, 1, &access);
1080 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, 0x85e, 2, &limit);
1081 BX_CPU_THIS_PTR idtr.base = (base_23_16 << 16) | base_15_0;
1082 BX_CPU_THIS_PTR idtr.limit = limit;
1084 #endif
1086 void BX_CPU_C::handleCpuModeChange(void)
1088 unsigned mode = BX_CPU_THIS_PTR cpu_mode;
1090 #if BX_SUPPORT_X86_64
1091 if (BX_CPU_THIS_PTR efer.get_LMA()) {
1092 if (! BX_CPU_THIS_PTR cr0.get_PE()) {
1093 BX_PANIC(("change_cpu_mode: EFER.LMA is set when CR0.PE=0 !"));
1095 if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
1096 BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_64;
1098 else {
1099 BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_COMPAT;
1100 // clear upper part of RIP/RSP when leaving 64-bit long mode
1101 BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP);
1102 BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSP);
1105 else
1106 #endif
1108 if (BX_CPU_THIS_PTR cr0.get_PE()) {
1109 if (BX_CPU_THIS_PTR get_VM()) {
1110 BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_V8086;
1112 else
1113 BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_PROTECTED;
1115 else {
1116 BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_REAL;
1117 BX_ASSERT(CPL == 0);
1121 if (mode != BX_CPU_THIS_PTR cpu_mode) {
1122 BX_DEBUG(("%s activated", cpu_mode_string(BX_CPU_THIS_PTR cpu_mode)));
1123 #if BX_DEBUGGER
1124 if (BX_CPU_THIS_PTR mode_break) {
1125 BX_CPU_THIS_PTR stop_reason = STOP_MODE_BREAK_POINT;
1126 bx_debug_break(); // trap into debugger
1128 #endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
// Recompute alignment_check_mask after a change to CPL, CR0.AM or
// EFLAGS.AC: the mask is 0xF only when all three enable #AC checking
// (CPL=3, CR0.AM=1, EFLAGS.AC=1), 0 otherwise. The iCache is flushed
// whenever the mask transitions (as the original code did on each
// enable/disable).
void BX_CPU_C::handleAlignmentCheck(void)
{
  bx_bool ac_active = (CPL == 3) && BX_CPU_THIS_PTR cr0.get_AM()
                                 && BX_CPU_THIS_PTR get_AC();

  if (ac_active) {
    if (BX_CPU_THIS_PTR alignment_check_mask != 0) return; // already enabled
    BX_CPU_THIS_PTR alignment_check_mask = 0xF;
    BX_INFO(("Enable alignment check (#AC exception)"));
  }
  else {
    if (BX_CPU_THIS_PTR alignment_check_mask == 0) return; // already disabled
    BX_CPU_THIS_PTR alignment_check_mask = 0;
    BX_INFO(("Disable alignment check (#AC exception)"));
  }

#if BX_SUPPORT_ICACHE
  BX_CPU_THIS_PTR iCache.flushICacheEntries();
#endif
}
#endif
// Write a new value into CR0: perform the architectural consistency
// checks (raising #GP(0) on illegal combinations), handle long-mode
// activation/deactivation (EFER.LMA), force the reserved-bits behaviour
// of the configured CPU level, then commit the value and notify the
// alignment-check, cpu-mode and paging machinery.
1156 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SetCR0(Bit32u val_32)
// extract the individual control bits that need cross-checking
1158 bx_bool pe = val_32 & 0x01;
1159 bx_bool nw = (val_32 >> 29) & 0x01;
1160 bx_bool cd = (val_32 >> 30) & 0x01;
1161 bx_bool pg = (val_32 >> 31) & 0x01;
// paging cannot be enabled without protected mode
1163 if (pg && !pe) {
1164 BX_ERROR(("SetCR0: GP(0) when attempt to set CR0.PG with CR0.PE cleared !"));
1165 exception(BX_GP_EXCEPTION, 0, 0);
// not-write-through requires cache-disable to be set as well
1168 if (nw && !cd) {
1169 BX_ERROR(("SetCR0: GP(0) when attempt to set CR0.NW with CR0.CD cleared !"));
1170 exception(BX_GP_EXCEPTION, 0, 0);
// entering protected mode while EFLAGS.VM=1 is not handled here
1173 if (pe && BX_CPU_THIS_PTR get_VM()) BX_PANIC(("EFLAGS.VM=1, enter_PM"));
1175 // from either MOV_CdRd() or debug functions
1176 // protection checks made already or forcing from debug
1177 Bit32u oldCR0 = BX_CPU_THIS_PTR cr0.get32();
1179 #if BX_SUPPORT_X86_64
1180 bx_bool prev_pg = BX_CPU_THIS_PTR cr0.get_PG();
// PG 0->1 with EFER.LME set means the guest is activating long mode;
// validate the preconditions (CR4.PAE, CS.L clear, no 286 TSS in TR)
1182 if (prev_pg==0 && pg) {
1183 if (BX_CPU_THIS_PTR efer.get_LME()) {
1184 if (!BX_CPU_THIS_PTR cr4.get_PAE()) {
1185 BX_ERROR(("SetCR0: attempt to enter x86-64 long mode without enabling CR4.PAE !"));
1186 exception(BX_GP_EXCEPTION, 0, 0);
1188 if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
1189 BX_ERROR(("SetCR0: attempt to enter x86-64 long mode with CS.L !"));
1190 exception(BX_GP_EXCEPTION, 0, 0);
1192 if (BX_CPU_THIS_PTR tr.cache.type <= 3) {
1193 BX_ERROR(("SetCR0: attempt to enter x86-64 long mode with TSS286 in TR !"));
1194 exception(BX_GP_EXCEPTION, 0, 0);
1196 BX_CPU_THIS_PTR efer.set_LMA(1);
// PG 1->0 while LMA=1 means the guest is deactivating long mode
1199 else if (prev_pg==1 && ! pg) {
1200 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
1201 BX_ERROR(("SetCR0: attempt to leave 64 bit mode directly to legacy mode !"));
1202 exception(BX_GP_EXCEPTION, 0, 0);
1204 if (BX_CPU_THIS_PTR efer.get_LMA()) {
1205 if (BX_CPU_THIS_PTR gen_reg[BX_64BIT_REG_RIP].dword.hrx != 0) {
1206 BX_PANIC(("SetCR0: attempt to leave x86-64 LONG mode with RIP upper != 0 !!!"));
1208 BX_CPU_THIS_PTR efer.set_LMA(0);
1211 #endif // #if BX_SUPPORT_X86_64
1213 // handle reserved bits behaviour
1214 #if BX_CPU_LEVEL == 3
1215 val_32 = val_32 | 0x7ffffff0;
1216 #elif BX_CPU_LEVEL == 4
1217 val_32 = (val_32 | 0x00000010) & 0xe005003f;
1218 #elif BX_CPU_LEVEL == 5
1219 val_32 = val_32 | 0x00000010;
1220 #elif BX_CPU_LEVEL == 6
1221 val_32 = (val_32 | 0x00000010) & 0xe005003f;
1222 #else
1223 #error "SetCR0: implement reserved bits behaviour for this CPU_LEVEL"
1224 #endif
// commit the (possibly adjusted) value
1225 BX_CPU_THIS_PTR cr0.set32(val_32);
// CR0.AM participates in the alignment-check condition
1227 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1228 handleAlignmentCheck();
1229 #endif
// CR0.PE/PG may have changed the operating mode
1231 handleCpuModeChange();
1233 // Give the paging unit a chance to look for changes in bits
1234 // it cares about, like {PG,PE}, so it can flush cache entries etc.
1235 pagingCR0Changed(oldCR0, val_32);
1238 #if BX_CPU_LEVEL >= 4
// Write a new value into CR4. Builds the mask of bits writable for the
// configured CPU level / feature set, rejects writes of unsupported
// bits or PAE changes while in long mode, and returns 0 (caller raises
// #GP) on failure, 1 on success.
1239 bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetCR4(bx_address val)
1241 Bit32u oldCR4 = BX_CPU_THIS_PTR cr4.get32();
// set of CR4 bits the emulated CPU allows the guest to write
1242 bx_address allowMask = 0;
1244 // CR4 bits definitions:
1245 // [31-19] Reserved, Must be Zero
1246 // [18] OSXSAVE: Operating System XSAVE Support R/W
1247 // [17-15] Reserved, Must be Zero
1248 // [14] SMXE: SMX Extensions R/W
1249 // [13] VMXE: VMX Extensions R/W
1250 // [12-11] Reserved, Must be Zero
1251 // [10] OSXMMEXCPT: Operating System Unmasked Exception Support R/W
1252 // [9] OSFXSR: Operating System FXSAVE/FXRSTOR Support R/W
1253 // [8] PCE: Performance-Monitoring Counter Enable R/W
1254 // [7] PGE: Page-Global Enable R/W
1255 // [6] MCE: Machine Check Enable R/W
1256 // [5] PAE: Physical-Address Extension R/W
1257 // [4] PSE: Page Size Extensions R/W
1258 // [3] DE: Debugging Extensions R/W
1259 // [2] TSD: Time Stamp Disable R/W
1260 // [1] PVI: Protected-Mode Virtual Interrupts R/W
1261 // [0] VME: Virtual-8086 Mode Extensions R/W
1263 #if BX_SUPPORT_VME
1264 allowMask |= (1<<0) | (1<<1); /* VME */
1265 #endif
1267 #if BX_CPU_LEVEL >= 5
1268 allowMask |= (1<<2); /* TSD */
1269 #endif
1271 allowMask |= (1<<3); /* DE */
1273 #if BX_SUPPORT_LARGE_PAGES
1274 allowMask |= (1<<4);
1275 #endif
1277 #if BX_SUPPORT_PAE
1278 allowMask |= (1<<5);
1279 #endif
1281 #if BX_CPU_LEVEL >= 5
1282 // NOTE: exception 18 (#MC) never appears in Bochs
1283 allowMask |= (1<<6); /* MCE */
1284 #endif
1286 #if BX_SUPPORT_GLOBAL_PAGES
1287 allowMask |= (1<<7);
1288 #endif
1290 #if BX_CPU_LEVEL >= 6
1291 allowMask |= (1<<8); /* PCE */
1292 allowMask |= (1<<9); /* OSFXSR */
1293 #endif
1295 #if BX_SUPPORT_SSE
1296 allowMask |= (1<<10); /* OSXMMECPT */
1297 #endif
1299 #if BX_SUPPORT_XSAVE
1300 allowMask |= (1<<18); /* OSXSAVE */
1301 #endif
1303 #if BX_SUPPORT_X86_64
1304 // need to GP(0) if LMA=1 and PAE=1->0
1305 if (BX_CPU_THIS_PTR efer.get_LMA()) {
1306 if(!(val & (1<<5)) && BX_CPU_THIS_PTR cr4.get_PAE()) {
1307 BX_ERROR(("SetCR4: attempt to change PAE when EFER.LMA=1"));
1308 return 0;
1311 #endif
1313 // Need to GPF if trying to set undefined bits.
1314 if (val & ~allowMask) {
1315 BX_ERROR(("#GP(0): SetCR4: Write of 0x%08x not supported (allowMask=0x%x)", val, allowMask));
1316 return 0;
// commit and let the paging unit react to PAE/PSE/PGE changes
1319 BX_CPU_THIS_PTR cr4.set32(val);
1320 pagingCR4Changed(oldCR4, BX_CPU_THIS_PTR cr4.get32());
1321 return 1;
1323 #endif
// RDPMC - read performance-monitoring counter selected by ECX into
// EDX:EAX. Usage is permitted when CR4.PCE=1, at CPL=0, or in real
// mode; otherwise #GP(0). Counter values themselves are not emulated
// and always read back as zero.
1325 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPMC(bxInstruction_c *i)
1327 /* We need to be Pentium with MMX or later */
1328 #if ((BX_CPU_LEVEL >= 6) || (BX_SUPPORT_MMX && BX_CPU_LEVEL == 5))
1329 bx_bool pce = BX_CPU_THIS_PTR cr4.get_PCE();
1331 if ((pce==1) || (CPL==0) || real_mode())
1333 /* According to manual, Pentium 4 has 18 counters,
1334 * previous versions have two. And the P4 also can do
1335 * short read-out (EDX always 0). Otherwise it is
1336 * limited to 40 bits.
// counter index out of range for the emulated CPU model -> #GP
1339 #if (BX_CPU_LEVEL == 6 && BX_SUPPORT_SSE >= 2) // Pentium 4 processor (see cpuid.cc)
1340 if ((ECX & 0x7fffffff) >= 18)
1341 exception(BX_GP_EXCEPTION, 0, 0);
1342 #else //
1343 if ((ECX & 0xffffffff) >= 2)
1344 exception(BX_GP_EXCEPTION, 0, 0);
1345 #endif
1346 // Most counters are for hardware specific details, which
1347 // we anyhow do not emulate (like pipeline stalls etc)
1349 // Could be interesting to count number of memory reads,
1350 // writes. Misaligned etc... But to monitor bochs, this
1351 // is easier done from the host.
// counters are not emulated: always return zero
1353 RAX = 0;
1354 RDX = 0; // if P4 and ECX & 0x10000000, then always 0 (short read 32 bits)
1356 BX_ERROR(("RDPMC: Performance Counters Support not reasonably implemented yet"));
1357 } else {
1358 // not allowed to use RDPMC!
1359 exception(BX_GP_EXCEPTION, 0, 0);
// instruction not present on this CPU model -> #UD
1361 #else
1362 exception(BX_UD_EXCEPTION, 0, 0);
1363 #endif
#if BX_CPU_LEVEL >= 5
// Current value of the time-stamp counter: elapsed emulator ticks
// relative to the reference point recorded at the last TSC write.
BX_CPP_INLINE Bit64u BX_CPU_C::get_TSC(void)
{
  Bit64u now = bx_pc_system.time_ticks();
  return now - BX_CPU_THIS_PTR msr.tsc_last_reset;
}

// Load the time-stamp counter with newval by adjusting the reference
// point tsc_last_reset so that a subsequent get_TSC() returns newval.
void BX_CPU_C::set_TSC(Bit64u newval)
{
  BX_CPU_THIS_PTR msr.tsc_last_reset = bx_pc_system.time_ticks() - newval;
  // verify
  BX_ASSERT (get_TSC() == newval);
}
#endif
1383 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSC(bxInstruction_c *i)
1385 #if BX_CPU_LEVEL >= 5
1386 if (! BX_CPU_THIS_PTR cr4.get_TSD() || CPL==0) {
1387 // return ticks
1388 Bit64u ticks = BX_CPU_THIS_PTR get_TSC();
1389 RAX = (Bit32u) (ticks & 0xffffffff);
1390 RDX = (Bit32u) ((ticks >> 32) & 0xffffffff);
1391 } else {
1392 // not allowed to use RDTSC!
1393 BX_ERROR(("RDTSC: incorrect usage of RDTSC instruction !"));
1394 exception(BX_GP_EXCEPTION, 0, 0);
1396 #else
1397 BX_INFO(("RDTSC: Pentium CPU required, use --enable-cpu=5"));
1398 exception(BX_UD_EXCEPTION, 0, 0);
1399 #endif
#if BX_SUPPORT_X86_64
// RDTSCP behaves like RDTSC (EDX:EAX = tick count, including the TSD
// privilege check) and additionally returns the TSC_AUX MSR in ECX.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSCP(bxInstruction_c *i)
{
  RDTSC(i);
  RCX = MSR_TSC_AUX;
}
#endif
1410 #if BX_SUPPORT_MONITOR_MWAIT
1411 bx_bool BX_CPU_C::is_monitor(bx_phy_address begin_addr, unsigned len)
1413 bx_phy_address end_addr = begin_addr + len;
1414 if (begin_addr >= BX_CPU_THIS_PTR monitor.monitor_end || end_addr <= BX_CPU_THIS_PTR monitor.monitor_begin)
1415 return 0;
1416 else
1417 return 1;
1420 void BX_CPU_C::check_monitor(bx_phy_address begin_addr, unsigned len)
1422 if (is_monitor(begin_addr, len)) {
1423 // wakeup from MWAIT state
1424 BX_ASSERT(BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT);
1425 BX_CPU_THIS_PTR debug_trap &= ~BX_DEBUG_TRAP_SPECIAL;
1426 // clear monitor
1427 BX_MEM(0)->clear_monitor(BX_CPU_THIS_PTR bx_cpuid);
1428 BX_CPU_THIS_PTR monitor.reset_monitor();
1431 #endif
1433 void BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
1435 #if BX_SUPPORT_MONITOR_MWAIT
1436 // TODO: #UD when CPL > 0 and
1437 // MSR 0xC0010015[MONITOR_MWAIT_USER_UNABLE] = 1
1438 BX_DEBUG(("MONITOR instruction executed EAX = 0x08x", (unsigned) EAX));
1440 if (RCX != 0) {
1441 BX_ERROR(("MONITOR: no optional extensions supported"));
1442 exception(BX_GP_EXCEPTION, 0, 0);
1445 bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];
1447 bx_address offset;
1449 #if BX_SUPPORT_X86_64
1450 if (i->as64L()) {
1451 offset = RAX;
1453 else
1454 #endif
1455 if (i->as32L()) {
1456 offset = EAX;
1458 else {
1459 offset = AX;
1462 // set MONITOR
1463 bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), offset);
1465 #if BX_SUPPORT_X86_64
1466 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
1467 if (! IsCanonical(laddr)) {
1468 BX_ERROR(("MONITOR: non-canonical access !"));
1469 exception(int_number(i->seg()), 0, 0);
1472 else
1473 #endif
1475 // check if we could access the memory segment
1476 if (!(seg->cache.valid & SegAccessROK)) {
1477 if (! read_virtual_checks(seg, offset, 1))
1478 exception(int_number(i->seg()), 0, 0);
1480 else {
1481 if (offset > seg->cache.u.segment.limit_scaled) {
1482 BX_ERROR(("MONITOR: segment limit violation"));
1483 exception(int_number(i->seg()), 0, 0);
1488 bx_phy_address paddr;
1490 if (BX_CPU_THIS_PTR cr0.get_PG()) {
1491 paddr = dtranslate_linear(laddr, CPL, BX_READ);
1492 paddr = A20ADDR(paddr);
1494 else
1496 paddr = A20ADDR(laddr);
1499 BX_CPU_THIS_PTR monitor.monitor_begin = paddr;
1500 BX_CPU_THIS_PTR monitor.monitor_end = paddr + CACHE_LINE_SIZE;
1502 // Set the monitor immediately. If monitor is still armed when we MWAIT,
1503 // the processor will stall.
1504 bx_pc_system.invlpg(BX_CPU_THIS_PTR monitor.monitor_begin);
1505 if ((BX_CPU_THIS_PTR monitor.monitor_end & ~0xfff) != (BX_CPU_THIS_PTR monitor.monitor_begin & ~0xfff))
1506 bx_pc_system.invlpg(BX_CPU_THIS_PTR monitor.monitor_end);
1507 BX_DEBUG(("MONITOR for phys_addr=0x" FMT_PHY_ADDRX, BX_CPU_THIS_PTR monitor.monitor_begin));
1508 BX_MEM(0)->set_monitor(BX_CPU_THIS_PTR bx_cpuid);
1509 #else
1510 BX_INFO(("MONITOR: use --enable-monitor-mwait to enable MONITOR/MWAIT support"));
1511 exception(BX_UD_EXCEPTION, 0, 0);
1512 #endif
// MWAIT - if a valid monitor is armed, stop executing and put the CPU
// into an optimized wait state (modelled with an artificial debug trap)
// until a monitored store or an interrupt-class event wakes it. ECX[0]
// is the only supported extension (wake on interrupt even if IF=0);
// any other extension bit raises #GP(0).
1515 void BX_CPP_AttrRegparmN(1) BX_CPU_C::MWAIT(bxInstruction_c *i)
1517 #if BX_SUPPORT_MONITOR_MWAIT
1518 // TODO: #UD when CPL > 0 and
1519 // MSR 0xC0010015[MONITOR_MWAIT_USER_UNABLE] = 1
1520 BX_DEBUG(("MWAIT instruction executed ECX = 0x%08x", ECX));
1522 // only one extension is supported
1523 // ECX[0] - interrupt MWAIT even if EFLAGS.IF = 0
1524 if (RCX & ~(BX_CONST64(1))) {
1525 BX_ERROR(("MWAIT: incorrect optional extensions in RCX"));
1526 exception(BX_GP_EXCEPTION, 0, 0);
1529 // Do not enter optimized state if MONITOR wasn't properly set
// (an empty range means no MONITOR has armed anything)
1530 if (BX_CPU_THIS_PTR monitor.monitor_begin == BX_CPU_THIS_PTR monitor.monitor_end) {
1531 BX_DEBUG(("MWAIT: incorrect MONITOR settings"));
1532 return;
1535 // If monitor has already triggered, we just return.
1536 if (!BX_CPU_THIS_PTR monitor.armed) {
1537 BX_DEBUG(("MWAIT: the MONITOR was already triggered"));
1538 return;
1541 // stops instruction execution and places the processor in a optimized
1542 // state. Events that cause exit from MWAIT state are:
1543 // A store from another processor to monitored range, any unmasked
1544 // interrupt, including INTR, NMI, SMI, INIT or reset will resume
1545 // the execution. Any far control transfer between MONITOR and MWAIT
1546 // resets the monitoring logic.
1548 // artificial trap bit, why use another variable.
1549 BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_TRAP_MWAIT; // artificial trap
1550 if (ECX & 1)
1551 BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_TRAP_MWAIT_IF;
1552 BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
1553 // Execution of this instruction completes. The processor
1554 // will remain in a optimized state until one of the above
1555 // conditions is met.
// notify instrumentation of the wait entry
1557 BX_INSTR_MWAIT(BX_CPU_ID, BX_CPU_THIS_PTR monitor.monitor_begin, CACHE_LINE_SIZE, ECX);
1559 #if BX_USE_IDLE_HACK
1560 bx_gui->sim_is_idle();
1561 #endif
1563 #else
1564 BX_INFO(("MWAIT: use --enable-monitor-mwait to enable MONITOR/MWAIT support"));
1565 exception(BX_UD_EXCEPTION, 0, 0);
1566 #endif
// SYSENTER - fast transition to a CPL=0 system call handler. Loads a
// flat CS/SS pair derived from the SYSENTER_CS MSR and jumps to
// SYSENTER_EIP with stack SYSENTER_ESP. #GP(0) in real mode or when
// SYSENTER_CS is zero; #UD when SEP support is compiled out.
1569 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSENTER(bxInstruction_c *i)
1571 #if BX_SUPPORT_SEP
1572 if (real_mode()) {
1573 BX_ERROR(("SYSENTER not recognized in real mode !"));
1574 exception(BX_GP_EXCEPTION, 0, 0);
1576 if ((BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK) == 0) {
1577 BX_ERROR(("SYSENTER with zero sysenter_cs_msr !"));
1578 exception(BX_GP_EXCEPTION, 0, 0);
1581 invalidate_prefetch_q();
// kernel entry: leave v8086 mode and mask interrupts
1583 BX_CPU_THIS_PTR clear_VM(); // do this just like the book says to do
1584 BX_CPU_THIS_PTR clear_IF();
1585 BX_CPU_THIS_PTR clear_RF();
// in long mode the MSR target addresses must be canonical
1587 #if BX_SUPPORT_X86_64
1588 if (long_mode()) {
1589 if (!IsCanonical(BX_CPU_THIS_PTR msr.sysenter_eip_msr)) {
1590 BX_ERROR(("SYSENTER with non-canonical SYSENTER_EIP_MSR !"));
1591 exception(BX_GP_EXCEPTION, 0, 0);
1593 if (!IsCanonical(BX_CPU_THIS_PTR msr.sysenter_esp_msr)) {
1594 BX_ERROR(("SYSENTER with non-canonical SYSENTER_ESP_MSR !"));
1595 exception(BX_SS_EXCEPTION, 0, 0);
1598 #endif
// load CS with a flat, DPL=0 descriptor synthesized from SYSENTER_CS
1600 parse_selector(BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK,
1601 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1603 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1604 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1605 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
1606 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
// NOTE(review): CS descriptor type is set to BX_DATA_READ_WRITE_ACCESSED
// here (and in SYSEXIT/SYSCALL below); a code-segment type would be
// expected for CS - confirm against later revisions of this file.
1607 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1608 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; // base address
1609 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; // segment limit
1610 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
1611 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; // 4k granularity
1612 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; // available for use by system
1613 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = !long_mode();
1614 #if BX_SUPPORT_X86_64
1615 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = long_mode();
1616 #endif
1618 updateFetchModeMask();
1620 #if BX_SUPPORT_X86_64
1621 handleCpuModeChange(); // mode change could happen only when in long_mode()
1622 #endif
1624 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1625 BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
1626 #endif
// load SS = SYSENTER_CS + 8, flat 32-bit DPL=0 stack segment
1628 parse_selector((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 8) & BX_SELECTOR_RPL_MASK,
1629 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
1631 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1632 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
1633 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
1634 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
1635 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1636 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = 0; // base address
1637 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit = 0xFFFF; // segment limit
1638 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
1639 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g = 1; // 4k granularity
1640 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b = 1; // 32-bit mode
1641 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl = 0; // available for use by system
1642 #if BX_SUPPORT_X86_64
1643 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l = 0;
1644 #endif
// branch to the handler: full 64-bit MSR values in long mode,
// truncated to 32 bits otherwise
1646 #if BX_SUPPORT_X86_64
1647 if (long_mode()) {
1648 RSP = BX_CPU_THIS_PTR msr.sysenter_esp_msr;
1649 RIP = BX_CPU_THIS_PTR msr.sysenter_eip_msr;
1651 else
1652 #endif
1654 ESP = (Bit32u) BX_CPU_THIS_PTR msr.sysenter_esp_msr;
1655 EIP = (Bit32u) BX_CPU_THIS_PTR msr.sysenter_eip_msr;
1658 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSENTER,
1659 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
1660 #else
1661 BX_INFO(("SYSENTER: use --enable-sep to enable SYSENTER/SYSEXIT support"));
1662 exception(BX_UD_EXCEPTION, 0, 0);
1663 #endif
// SYSEXIT - fast return from a SYSENTER system call to CPL=3 user code.
// Loads flat CS/SS derived from the SYSENTER_CS MSR (+32/+40 for the
// 64-bit form, +16/+24 for the 32-bit form) and continues at EDX/RDX
// with stack ECX/RCX. #GP(0) in real mode, at CPL!=0, or when
// SYSENTER_CS is zero; #UD when SEP support is compiled out.
1666 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSEXIT(bxInstruction_c *i)
1668 #if BX_SUPPORT_SEP
1669 if (real_mode() || CPL != 0) {
1670 BX_ERROR(("SYSEXIT from real mode or with CPL<>0 !"));
1671 exception(BX_GP_EXCEPTION, 0, 0);
1673 if ((BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK) == 0) {
1674 BX_ERROR(("SYSEXIT with zero sysenter_cs_msr !"));
1675 exception(BX_GP_EXCEPTION, 0, 0);
// 64-bit form: the user RIP/RSP come from RDX/RCX and must be canonical
1678 #if BX_SUPPORT_X86_64
1679 if (i->os64L()) {
1680 if (!IsCanonical(RDX)) {
1681 BX_ERROR(("SYSEXIT with non-canonical RDX (RIP) pointer !"));
1682 exception(BX_GP_EXCEPTION, 0, 0);
1684 if (!IsCanonical(RCX)) {
1685 BX_ERROR(("SYSEXIT with non-canonical RCX (RSP) pointer !"));
1686 exception(BX_SS_EXCEPTION, 0, 0);
1689 #endif
1691 invalidate_prefetch_q();
// load CS: flat DPL=3 descriptor, 64-bit (l=1) for the os64 form
1693 #if BX_SUPPORT_X86_64
1694 if (i->os64L()) {
1695 parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 32) & BX_SELECTOR_RPL_MASK) | 3,
1696 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1698 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1699 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1700 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
1701 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
// NOTE(review): CS type set to a data-segment type here as in SYSENTER -
// confirm against later revisions.
1702 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1703 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; // base address
1704 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; // segment limit
1705 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
1706 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; // 4k granularity
1707 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; // available for use by system
1708 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0;
1709 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 1;
1711 RSP = RCX;
1712 RIP = RDX;
1714 else
1715 #endif
// 32-bit form: CS = SYSENTER_CS + 16, return to EDX with stack ECX
1717 parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 16) & BX_SELECTOR_RPL_MASK) | 3,
1718 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1720 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1721 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1722 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
1723 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
1724 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1725 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; // base address
1726 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; // segment limit
1727 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
1728 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; // 4k granularity
1729 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; // available for use by system
1730 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 1;
1731 #if BX_SUPPORT_X86_64
1732 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0;
1733 #endif
1735 ESP = ECX;
1736 EIP = EDX;
1739 updateFetchModeMask();
1741 #if BX_SUPPORT_X86_64
1742 handleCpuModeChange(); // mode change could happen only when in long_mode()
1743 #endif
1745 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1746 handleAlignmentCheck(); // CPL was modified
1747 #endif
// load SS: flat DPL=3 32-bit stack segment (SYSENTER_CS + 40 or + 24)
1749 parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + (i->os64L() ? 40:24)) & BX_SELECTOR_RPL_MASK) | 3,
1750 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
1752 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1753 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
1754 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 3;
1755 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
1756 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1757 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = 0; // base address
1758 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit = 0xFFFF; // segment limit
1759 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
1760 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g = 1; // 4k granularity
1761 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b = 1; // 32-bit mode
1762 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl = 0; // available for use by system
1763 #if BX_SUPPORT_X86_64
1764 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l = 0;
1765 #endif
1767 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSEXIT,
1768 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
1769 #else
1770 BX_INFO(("SYSEXIT: use --enable-sep to enable SYSENTER/SYSEXIT support"));
1771 exception(BX_UD_EXCEPTION, 0, 0);
1772 #endif
1775 #if BX_SUPPORT_X86_64
// SYSCALL - fast system call. In long mode: saves RIP in RCX and
// RFLAGS in R11, masks RFLAGS with MSR_FMASK, loads flat CS/SS from
// MSR_STAR[47:32] and jumps to MSR_LSTAR (64-bit mode) or MSR_CSTAR
// (compatibility mode). In legacy mode: saves EIP in ECX, clears
// VM/IF/RF and jumps to MSR_STAR[31:0]. #UD when EFER.SCE is clear.
1776 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
1778 bx_address temp_RIP;
1780 BX_DEBUG(("Execute SYSCALL instruction"));
// SYSCALL is only available when enabled via EFER.SCE
1782 if (!BX_CPU_THIS_PTR efer.get_SCE()) {
1783 exception(BX_UD_EXCEPTION, 0, 0);
1786 invalidate_prefetch_q();
1788 if (long_mode())
// save return address and flags for SYSRET
1790 RCX = RIP;
1791 R11 = read_eflags() & ~(EFlagsRFMask);
1793 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
1794 temp_RIP = MSR_LSTAR;
1796 else {
1797 temp_RIP = MSR_CSTAR;
1800 // set up CS segment, flat, 64-bit DPL=0
1801 parse_selector((MSR_STAR >> 32) & BX_SELECTOR_RPL_MASK,
1802 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1804 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1805 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1806 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
1807 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
// NOTE(review): CS type set to a data-segment type here (also in
// SYSENTER/SYSEXIT) - confirm against later revisions.
1808 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1809 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; /* base address */
1810 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1811 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
1812 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* 4k granularity */
1813 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0;
1814 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 1; /* 64-bit code */
1815 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; /* available for use by system */
1817 updateFetchModeMask();
1818 handleCpuModeChange(); // mode change could only happen when in long_mode()
1820 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1821 BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
1822 #endif
1824 // set up SS segment, flat, 64-bit DPL=0
1825 parse_selector(((MSR_STAR >> 32) + 8) & BX_SELECTOR_RPL_MASK,
1826 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
1828 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1829 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
1830 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
1831 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
1832 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1833 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = 0; /* base address */
1834 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1835 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
1836 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g = 1; /* 4k granularity */
1837 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b = 1; /* 32 bit stack */
1838 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l = 0;
1839 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl = 0; /* available for use by system */
// mask flags per MSR_FMASK and branch to the handler
1841 writeEFlags(read_eflags() & (~MSR_FMASK), EFlagsValidMask);
1842 BX_CPU_THIS_PTR clear_RF();
1843 RIP = temp_RIP;
1845 else {
1846 // legacy mode
1848 ECX = EIP;
1849 temp_RIP = MSR_STAR & 0xFFFFFFFF;
1851 // set up CS segment, flat, 32-bit DPL=0
1852 parse_selector((MSR_STAR >> 32) & BX_SELECTOR_RPL_MASK,
1853 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1855 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1856 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1857 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
1858 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
1859 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1860 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; /* base address */
1861 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1862 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
1863 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* 4k granularity */
1864 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 1;
1865 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0; /* 32-bit code */
1866 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; /* available for use by system */
1868 updateFetchModeMask();
1870 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1871 BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
1872 #endif
1874 // set up SS segment, flat, 32-bit DPL=0
1875 parse_selector(((MSR_STAR >> 32) + 8) & BX_SELECTOR_RPL_MASK,
1876 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
1878 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1879 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
1880 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
1881 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
1882 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1883 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = 0; /* base address */
1884 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1885 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
1886 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g = 1; /* 4k granularity */
1887 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b = 1; /* 32 bit stack */
1888 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l = 0;
1889 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl = 0; /* available for use by system */
// legacy SYSCALL clears VM/IF/RF instead of applying MSR_FMASK
1891 BX_CPU_THIS_PTR clear_VM();
1892 BX_CPU_THIS_PTR clear_IF();
1893 BX_CPU_THIS_PTR clear_RF();
1894 RIP = temp_RIP;
1897 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSCALL,
1898 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
1901 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
1903 bx_address temp_RIP;
1905 BX_DEBUG(("Execute SYSRET instruction"));
1907 if (!BX_CPU_THIS_PTR efer.get_SCE()) {
1908 exception(BX_UD_EXCEPTION, 0, 0);
1911 if(!protected_mode() || CPL != 0) {
1912 BX_ERROR(("SYSRET: priveledge check failed, generate #GP(0)"));
1913 exception(BX_GP_EXCEPTION, 0, 0);
1916 #if BX_SUPPORT_X86_64
1917 if (!IsCanonical(RCX)) {
1918 BX_ERROR(("SYSRET: canonical failure for RCX (RIP)"));
1919 exception(BX_GP_EXCEPTION, 0, 0);
1921 #endif
1923 invalidate_prefetch_q();
1925 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
1927 if (i->os64L()) {
1928 // Return to 64-bit mode, set up CS segment, flat, 64-bit DPL=3
1929 parse_selector((((MSR_STAR >> 48) + 16) & BX_SELECTOR_RPL_MASK) | 3,
1930 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1932 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1933 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1934 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
1935 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
1936 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1937 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; /* base address */
1938 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1939 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
1940 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* 4k granularity */
1941 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0;
1942 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 1; /* 64-bit code */
1943 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; /* available for use by system */
1945 temp_RIP = RCX;
1947 else {
1948 // Return to 32-bit compatibility mode, set up CS segment, flat, 32-bit DPL=3
1949 parse_selector((MSR_STAR >> 48) | 3,
1950 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1952 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1953 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1954 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
1955 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
1956 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1957 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; /* base address */
1958 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1959 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
1960 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* 4k granularity */
1961 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 1;
1962 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0; /* 32-bit code */
1963 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; /* available for use by system */
1965 temp_RIP = ECX;
1968 updateFetchModeMask();
1969 handleCpuModeChange(); // mode change could only happen when in long64 mode
1971 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1972 handleAlignmentCheck(); // CPL was modified
1973 #endif
1975 // SS base, limit, attributes unchanged
1976 parse_selector((Bit16u)((MSR_STAR >> 48) + 8),
1977 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
1979 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1980 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
1981 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 3;
1982 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
1983 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1985 writeEFlags((Bit32u) R11, EFlagsValidMask);
1987 else { // (!64BIT_MODE)
1988 // Return to 32-bit legacy mode, set up CS segment, flat, 32-bit DPL=3
1989 parse_selector((MSR_STAR >> 48) | 3,
1990 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
1992 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
1993 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
1994 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
1995 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
1996 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
1997 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0; /* base address */
1998 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF; /* segment limit */
1999 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; /* scaled segment limit */
2000 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* 4k granularity */
2001 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 1;
2002 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0; /* 32-bit code */
2003 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0; /* available for use by system */
2005 updateFetchModeMask();
2007 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
2008 handleAlignmentCheck(); // CPL was modified
2009 #endif
2011 // SS base, limit, attributes unchanged
2012 parse_selector((Bit16u)((MSR_STAR >> 48) + 8),
2013 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);
2015 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
2016 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
2017 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 3;
2018 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
2019 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
2021 BX_CPU_THIS_PTR assert_IF();
2022 temp_RIP = ECX;
2025 handleCpuModeChange();
2027 RIP = temp_RIP;
2029 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSRET,
2030 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
2033 void BX_CPP_AttrRegparmN(1) BX_CPU_C::SWAPGS(bxInstruction_c *i)
2035 Bit64u temp_GS_base;
2037 BX_ASSERT(protected_mode());
2039 if(CPL != 0)
2040 exception(BX_GP_EXCEPTION, 0, 0);
2042 temp_GS_base = MSR_GSBASE;
2043 MSR_GSBASE = MSR_KERNELGSBASE;
2044 MSR_KERNELGSBASE = temp_GS_base;
2046 #endif
2048 #if BX_X86_DEBUGGER
2049 bx_bool BX_CPU_C::hwbreakpoint_check(bx_address laddr)
2051 laddr = LPFOf(laddr);
2053 for (int i=0;i<4;i++) {
2054 if (laddr == LPFOf(BX_CPU_THIS_PTR dr[i]))
2055 return 1;
2058 return 0;
2061 void BX_CPU_C::hwbreakpoint_match(bx_address laddr, unsigned len, unsigned rw)
2063 if (BX_CPU_THIS_PTR dr7 & 0x000000ff) {
2064 // Only compare debug registers if any breakpoints are enabled
2065 unsigned opa, opb, write = rw & 1;
2066 opa = BX_HWDebugMemRW; // Read or Write always compares vs 11b
2067 if (! write) // only compares vs 11b
2068 opb = opa;
2069 else // BX_WRITE or BX_RW; also compare vs 01b
2070 opb = BX_HWDebugMemW;
2071 Bit32u dr6_bits = hwdebug_compare(laddr, len, opa, opb);
2072 if (dr6_bits) {
2073 BX_CPU_THIS_PTR debug_trap |= dr6_bits;
2074 BX_CPU_THIS_PTR async_event = 1;
2079 Bit32u BX_CPU_C::hwdebug_compare(bx_address laddr_0, unsigned size,
2080 unsigned opa, unsigned opb)
2082 // Support x86 hardware debug facilities (DR0..DR7)
2083 Bit32u dr7 = BX_CPU_THIS_PTR dr7;
2085 static bx_address alignment_mask[4] =
2086 // 00b=1 01b=2 10b=undef(8) 11b=4
2087 { 0x0, 0x1, 0x7, 0x3 };
2089 bx_address laddr_n = laddr_0 + (size - 1);
2090 bx_address dr[4], dr_n[4];
2091 Bit32u dr_op[4], len[4];
2092 bx_bool ibpoint_found_n[4], ibpoint_found = 0;
2094 len[0] = (dr7>>18) & 3;
2095 len[1] = (dr7>>22) & 3;
2096 len[2] = (dr7>>26) & 3;
2097 len[3] = (dr7>>30) & 3;
2099 dr_op[0] = (dr7>>16) & 3;
2100 dr_op[1] = (dr7>>20) & 3;
2101 dr_op[2] = (dr7>>24) & 3;
2102 dr_op[3] = (dr7>>28) & 3;
2104 for (unsigned n=0;n<4;n++) {
2105 dr[n] = BX_CPU_THIS_PTR dr[n] & ~alignment_mask[len[n]];
2106 dr_n[n] = dr[n] + alignment_mask[len[n]];
2107 ibpoint_found_n[n] = 0;
2109 // See if this instruction address matches any breakpoints
2110 if (dr7 & (3 << n*2)) {
2111 if ((dr_op[n]==opa || dr_op[n]==opb) &&
2112 (laddr_0 <= dr_n[n]) &&
2113 (laddr_n >= dr[n])) {
2114 ibpoint_found_n[n] = 1;
2115 ibpoint_found = 1;
2120 // If *any* enabled breakpoints matched, then we need to
2121 // set status bits for *all* breakpoints, even disabled ones,
2122 // as long as they meet the other breakpoint criteria.
2123 // dr6_mask is the return value. These bits represent the bits
2124 // to be OR'd into DR6 as a result of the debug event.
2125 Bit32u dr6_mask = 0;
2127 if (ibpoint_found) {
2128 if (ibpoint_found_n[0]) dr6_mask |= 0x1;
2129 if (ibpoint_found_n[1]) dr6_mask |= 0x2;
2130 if (ibpoint_found_n[2]) dr6_mask |= 0x4;
2131 if (ibpoint_found_n[3]) dr6_mask |= 0x8;
2134 return dr6_mask;
2137 void BX_CPU_C::iobreakpoint_match(unsigned port, unsigned len)
2139 // Only compare debug registers if any breakpoints are enabled
2140 if (BX_CPU_THIS_PTR cr4.get_DE() && (BX_CPU_THIS_PTR dr7 & 0x000000ff))
2142 Bit32u dr_op[4], dr_len[4];
2143 Bit32u dr7 = BX_CPU_THIS_PTR dr7;
2145 dr_len[0] = 1 + (dr7>>18) & 3;
2146 dr_len[1] = 1 + (dr7>>22) & 3;
2147 dr_len[2] = 1 + (dr7>>26) & 3;
2148 dr_len[3] = 1 + (dr7>>30) & 3;
2150 dr_op[0] = (dr7>>16) & 3;
2151 dr_op[1] = (dr7>>20) & 3;
2152 dr_op[2] = (dr7>>24) & 3;
2153 dr_op[3] = (dr7>>28) & 3;
2155 for (unsigned n=0;n<4;n++) {
2156 if (dr_op[n] == 2 && dr_len[n] == len && BX_CPU_THIS_PTR dr[n] == port) {
2157 BX_CPU_THIS_PTR debug_trap |= (1<<n);
2158 BX_CPU_THIS_PTR async_event = 1;
2163 #endif