1 /////////////////////////////////////////////////////////////////////////
2 // $Id: smm.cc,v 1.45 2008/09/08 20:47:33 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
5 // Copyright (c) 2006 Stanislav Shwartsman
6 // Written by Stanislav Shwartsman [sshwarts at sourceforge net]
8 // This library is free software; you can redistribute it and/or
9 // modify it under the terms of the GNU Lesser General Public
10 // License as published by the Free Software Foundation; either
11 // version 2 of the License, or (at your option) any later version.
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
20 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 /////////////////////////////////////////////////////////////////////////
23 #define NEED_CPU_REG_SHORTCUTS 1
27 #define LOG_THIS BX_CPU_THIS_PTR
31 #if BX_SUPPORT_X86_64==0
36 // Some of the CPU field must be saved and restored in order to continue the
37 // simulation correctly after the RSM instruction:
39 // ---------------------------------------------------------------
41 // 1. General purpose registers: EAX-EDI, R8-R15
43 // 3. Segment registers CS, DS, SS, ES, FS, GS
44 // fields: valid - not required, initialized according to selector value
45 // p - must be saved/restored
46 // dpl - must be saved/restored
47 // segment - must be 1 for seg registers, not required to save
48 // type - must be saved/restored
49 // base - must be saved/restored
50 // limit - must be saved/restored
51 // g - must be saved/restored
52 // d_b - must be saved/restored
53 // l - must be saved/restored
54 // avl - must be saved/restored
// 4. GDTR, IDTR
//        fields: base, limit
// 5. LDTR, TR
//        fields: base, limit, anything else ?
59 // 6. Debug Registers DR0-DR7, only DR6 and DR7 are saved
60 // 7. Control Registers: CR0, CR1 is always 0, CR2 is NOT saved, CR3, CR4, EFER
// 9. MSR/FPU/XMM/APIC are NOT saved according to Intel docs
65 #define SMM_SAVE_STATE_MAP_SIZE 128
67 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RSM(bxInstruction_c
*i
)
69 /* If we are not in System Management Mode, then #UD should be generated */
70 if (! BX_CPU_THIS_PTR
smm_mode()) {
71 BX_INFO(("RSM not in System Management Mode !"));
72 exception(BX_UD_EXCEPTION
, 0, 0);
75 invalidate_prefetch_q();
77 BX_INFO(("RSM: Resuming from System Management Mode"));
79 BX_CPU_THIS_PTR nmi_disable
= 0;
81 Bit32u saved_state
[SMM_SAVE_STATE_MAP_SIZE
], n
;
82 // reset reserved bits
83 for(n
=0;n
<SMM_SAVE_STATE_MAP_SIZE
;n
++) saved_state
[n
] = 0;
85 bx_phy_address base
= BX_CPU_THIS_PTR smbase
+ 0x10000;
86 // could be optimized with reading of only non-reserved bytes
87 for(n
=0;n
<SMM_SAVE_STATE_MAP_SIZE
;n
++) {
89 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS
, base
, 4, &saved_state
[n
]);
90 BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID
, base
, 4, BX_READ
, (Bit8u
*)(&saved_state
[n
]));
92 BX_CPU_THIS_PTR in_smm
= 0;
94 // restore the CPU state from SMRAM
95 if (! smram_restore_state(saved_state
)) {
96 BX_PANIC(("RSM: Incorrect state when restoring CPU state - shutdown !"));
103 void BX_CPU_C::enter_system_management_mode(void)
105 invalidate_prefetch_q();
107 BX_INFO(("Enter to System Management Mode"));
109 // debug(BX_CPU_THIS_PTR prev_rip);
111 BX_CPU_THIS_PTR in_smm
= 1;
113 Bit32u saved_state
[SMM_SAVE_STATE_MAP_SIZE
], n
;
114 // reset reserved bits
115 for(n
=0;n
<SMM_SAVE_STATE_MAP_SIZE
;n
++) saved_state
[n
] = 0;
116 // prepare CPU state to be saved in the SMRAM
117 BX_CPU_THIS_PTR
smram_save_state(saved_state
);
119 bx_phy_address base
= BX_CPU_THIS_PTR smbase
+ 0x10000;
120 // could be optimized with reading of only non-reserved bytes
121 for(n
=0;n
<SMM_SAVE_STATE_MAP_SIZE
;n
++) {
123 BX_MEM(0)->writePhysicalPage(BX_CPU_THIS
, base
, 4, &saved_state
[n
]);
124 BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID
, base
, 4, BX_WRITE
, (Bit8u
*)(&saved_state
[n
]));
127 BX_CPU_THIS_PTR
setEFlags(0x2); // Bit1 is always set
128 BX_CPU_THIS_PTR prev_rip
= RIP
= 0x00008000;
129 BX_CPU_THIS_PTR dr7
= 0x00000400;
131 // CR0 - PE, EM, TS, and PG flags set to 0; others unmodified
132 BX_CPU_THIS_PTR cr0
.set_PE(0); // real mode (bit 0)
133 BX_CPU_THIS_PTR cr0
.set_EM(0); // emulate math coprocessor (bit 2)
134 BX_CPU_THIS_PTR cr0
.set_TS(0); // no task switch (bit 3)
135 BX_CPU_THIS_PTR cr0
.set_PG(0); // paging disabled (bit 31)
137 // paging mode was changed - flush TLB
138 TLB_flush(); // Flush Global entries also
140 #if BX_CPU_LEVEL >= 4
141 BX_CPU_THIS_PTR cr4
.setRegister(0);
144 #if BX_SUPPORT_X86_64
145 BX_CPU_THIS_PTR efer
.setRegister(0);
148 parse_selector(BX_CPU_THIS_PTR smbase
>> 4,
149 &BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].selector
);
151 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.valid
= SegValidCache
| SegAccessROK
| SegAccessWOK
;
152 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.p
= 1;
153 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.dpl
= 0;
154 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.segment
= 1; /* data/code segment */
155 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.type
= BX_DATA_READ_WRITE_ACCESSED
;
157 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.base
= BX_CPU_THIS_PTR smbase
;
158 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.limit
= 0xffff;
159 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.limit_scaled
= 0xffffffff;
160 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.avl
= 0;
161 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.g
= 1; /* page granular */
162 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.d_b
= 0; /* 16bit default size */
163 #if BX_SUPPORT_X86_64
164 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.l
= 0; /* 16bit default size */
167 updateFetchModeMask();
168 handleCpuModeChange();
170 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
171 handleAlignmentCheck();
174 /* DS (Data Segment) and descriptor cache */
175 parse_selector(0x0000,
176 &BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].selector
);
178 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.valid
= SegValidCache
| SegAccessROK
| SegAccessWOK
;
179 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.p
= 1;
180 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.dpl
= 0;
181 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.segment
= 1; /* data/code segment */
182 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.type
= BX_DATA_READ_WRITE_ACCESSED
;
184 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.base
= 0x00000000;
185 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.limit
= 0xffff;
186 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.limit_scaled
= 0xffffffff;
187 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.avl
= 0;
188 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.g
= 1; /* byte granular */
189 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.d_b
= 0; /* 16bit default size */
190 #if BX_SUPPORT_X86_64
191 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.u
.segment
.l
= 0; /* 16bit default size */
194 // use DS segment as template for the others
195 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
] = BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
];
196 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
] = BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
];
197 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
] = BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
];
198 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
] = BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
];
201 #define SMRAM_TRANSLATE(addr) (((0x8000 - (addr)) >> 2) - 1)
202 #define SMRAM_FIELD(state, addr) (state[SMRAM_TRANSLATE(addr)])
204 #if BX_SUPPORT_X86_64
206 BX_CPP_INLINE Bit64u
SMRAM_FIELD64(const Bit32u
*saved_state
, unsigned hi
, unsigned lo
)
208 Bit64u tmp
= ((Bit64u
) SMRAM_FIELD(saved_state
, hi
)) << 32;
209 tmp
|= (Bit64u
) SMRAM_FIELD(saved_state
, lo
);
213 void BX_CPU_C::smram_save_state(Bit32u
*saved_state
)
215 // --- General Purpose Registers --- //
216 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RAX_HI32
) = (Bit32u
)(RAX
>> 32);
217 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RAX_LO32
) = EAX
;
218 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RCX_HI32
) = (Bit32u
)(RCX
>> 32);
219 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RCX_LO32
) = ECX
;
220 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RDX_HI32
) = (Bit32u
)(RDX
>> 32);
221 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RDX_LO32
) = EDX
;
222 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RBX_HI32
) = (Bit32u
)(RBX
>> 32);
223 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RBX_LO32
) = EBX
;
224 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RSP_HI32
) = (Bit32u
)(RSP
>> 32);
225 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RSP_LO32
) = ESP
;
226 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RBP_HI32
) = (Bit32u
)(RBP
>> 32);
227 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RBP_LO32
) = EBP
;
228 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RSI_HI32
) = (Bit32u
)(RSI
>> 32);
229 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RSI_LO32
) = ESI
;
230 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RDI_HI32
) = (Bit32u
)(RDI
>> 32);
231 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RDI_LO32
) = EDI
;
232 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R8_HI32
) = (Bit32u
)(R8
>> 32);
233 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R8_LO32
) = (Bit32u
)(R8
& 0xffffffff);
234 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R9_HI32
) = (Bit32u
)(R9
>> 32);
235 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R9_LO32
) = (Bit32u
)(R9
& 0xffffffff);
236 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R10_HI32
) = (Bit32u
)(R10
>> 32);
237 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R10_LO32
) = (Bit32u
)(R10
& 0xffffffff);
238 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R11_HI32
) = (Bit32u
)(R11
>> 32);
239 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R11_LO32
) = (Bit32u
)(R11
& 0xffffffff);
240 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R12_HI32
) = (Bit32u
)(R12
>> 32);
241 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R12_LO32
) = (Bit32u
)(R12
& 0xffffffff);
242 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R13_HI32
) = (Bit32u
)(R13
>> 32);
243 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R13_LO32
) = (Bit32u
)(R13
& 0xffffffff);
244 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R14_HI32
) = (Bit32u
)(R14
>> 32);
245 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R14_LO32
) = (Bit32u
)(R14
& 0xffffffff);
246 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R15_HI32
) = (Bit32u
)(R15
>> 32);
247 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_R15_LO32
) = (Bit32u
)(R15
& 0xffffffff);
248 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RIP_HI32
) = (Bit32u
)(RIP
>> 32);
249 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RIP_LO32
) = EIP
;
250 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RFLAGS32
) = read_eflags();
252 // --- Debug and Control Registers --- //
253 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR6
) = BX_CPU_THIS_PTR dr6
;
254 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR7
) = BX_CPU_THIS_PTR dr7
;
255 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR0
) = BX_CPU_THIS_PTR cr0
.getRegister();
256 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR3
) = BX_CPU_THIS_PTR cr3
;
257 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR4
) = BX_CPU_THIS_PTR cr4
.getRegister();
258 /* base+0x7f44 to base+0x7f04 is reserved */
259 SMRAM_FIELD(saved_state
, SMRAM_SMBASE_OFFSET
) = BX_CPU_THIS_PTR smbase
;
260 SMRAM_FIELD(saved_state
, SMRAM_SMM_REVISION_ID
) = SMM_REVISION_ID
;
261 /* base+0x7ef8 to base+0x7ed8 is reserved */
262 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EFER
) = BX_CPU_THIS_PTR efer
.getRegister();
263 /* base+0x7ecc is reserved */
264 /* base+0x7ec8 is I/O Instruction Restart, Auto-Halt Restart and NMI Mask */
265 /* base+0x7ec4 is reserved */
266 /* base+0x7ec0 is SMM I/O Trap */
267 /* base+0x7ebc to base+0x7ea0 is reserved */
269 // --- Task Register --- //
270 SMRAM_FIELD(saved_state
, SMRAM_TR_BASE_HI32
) = (Bit32u
)(BX_CPU_THIS_PTR tr
.cache
.u
.system
.base
>> 32);
271 SMRAM_FIELD(saved_state
, SMRAM_TR_BASE_LO32
) = (Bit32u
)(BX_CPU_THIS_PTR tr
.cache
.u
.system
.base
& 0xffffffff);
272 SMRAM_FIELD(saved_state
, SMRAM_TR_LIMIT
) = BX_CPU_THIS_PTR tr
.cache
.u
.system
.limit
;
273 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR_AR
) = BX_CPU_THIS_PTR tr
.selector
.value
|
274 (((Bit32u
) get_segment_ar_data(&BX_CPU_THIS_PTR tr
.cache
)) << 16);
277 SMRAM_FIELD(saved_state
, SMRAM_IDTR_BASE_HI32
) = (Bit32u
)(BX_CPU_THIS_PTR idtr
.base
>> 32);
278 SMRAM_FIELD(saved_state
, SMRAM_IDTR_BASE_LO32
) = (Bit32u
)(BX_CPU_THIS_PTR idtr
.base
& 0xffffffff);
279 SMRAM_FIELD(saved_state
, SMRAM_IDTR_LIMIT
) = BX_CPU_THIS_PTR idtr
.limit
;
281 SMRAM_FIELD(saved_state
, SMRAM_LDTR_BASE_HI32
) = (Bit32u
)(BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
>> 32);
282 SMRAM_FIELD(saved_state
, SMRAM_LDTR_BASE_LO32
) = (Bit32u
)(BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
& 0xffffffff);
283 SMRAM_FIELD(saved_state
, SMRAM_LDTR_LIMIT
) = BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit
;
284 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR_AR
) = BX_CPU_THIS_PTR ldtr
.selector
.value
|
285 (((Bit32u
) get_segment_ar_data(&BX_CPU_THIS_PTR ldtr
.cache
)) << 16);
287 SMRAM_FIELD(saved_state
, SMRAM_GDTR_BASE_HI32
) = (Bit32u
)(BX_CPU_THIS_PTR gdtr
.base
>> 32);
288 SMRAM_FIELD(saved_state
, SMRAM_GDTR_BASE_LO32
) = (Bit32u
)(BX_CPU_THIS_PTR gdtr
.base
& 0xffffffff);
289 SMRAM_FIELD(saved_state
, SMRAM_GDTR_LIMIT
) = BX_CPU_THIS_PTR gdtr
.limit
;
290 // --- GS selector --- //
291 bx_segment_reg_t
*seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
]);
292 SMRAM_FIELD(saved_state
, SMRAM_GS_BASE_HI32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
>> 32);
293 SMRAM_FIELD(saved_state
, SMRAM_GS_BASE_LO32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
& 0xffffffff);
294 SMRAM_FIELD(saved_state
, SMRAM_GS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
295 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR_AR
) = seg
->selector
.value
|
296 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
297 // --- FS selector --- //
298 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
]);
299 SMRAM_FIELD(saved_state
, SMRAM_FS_BASE_HI32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
>> 32);
300 SMRAM_FIELD(saved_state
, SMRAM_FS_BASE_LO32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
& 0xffffffff);
301 SMRAM_FIELD(saved_state
, SMRAM_FS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
302 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR_AR
) = seg
->selector
.value
|
303 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
304 // --- DS selector --- //
305 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
]);
306 SMRAM_FIELD(saved_state
, SMRAM_DS_BASE_HI32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
>> 32);
307 SMRAM_FIELD(saved_state
, SMRAM_DS_BASE_LO32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
& 0xffffffff);
308 SMRAM_FIELD(saved_state
, SMRAM_DS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
309 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR_AR
) = seg
->selector
.value
|
310 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
311 // --- SS selector --- //
312 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
]);
313 SMRAM_FIELD(saved_state
, SMRAM_SS_BASE_HI32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
>> 32);
314 SMRAM_FIELD(saved_state
, SMRAM_SS_BASE_LO32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
& 0xffffffff);
315 SMRAM_FIELD(saved_state
, SMRAM_SS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
316 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR_AR
) = seg
->selector
.value
|
317 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
318 // --- CS selector --- //
319 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
]);
320 SMRAM_FIELD(saved_state
, SMRAM_CS_BASE_HI32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
>> 32);
321 SMRAM_FIELD(saved_state
, SMRAM_CS_BASE_LO32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
& 0xffffffff);
322 SMRAM_FIELD(saved_state
, SMRAM_CS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
323 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR_AR
) = seg
->selector
.value
|
324 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
325 // --- ES selector --- //
326 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
]);
327 SMRAM_FIELD(saved_state
, SMRAM_ES_BASE_HI32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
>> 32);
328 SMRAM_FIELD(saved_state
, SMRAM_ES_BASE_LO32
) = (Bit32u
)(seg
->cache
.u
.segment
.base
& 0xffffffff);
329 SMRAM_FIELD(saved_state
, SMRAM_ES_LIMIT
) = seg
->cache
.u
.segment
.limit
;
330 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR_AR
) = seg
->selector
.value
|
331 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
334 bx_bool
BX_CPU_C::smram_restore_state(const Bit32u
*saved_state
)
336 Bit32u temp_cr0
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR0
);
337 Bit32u temp_eflags
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_RFLAGS32
);
338 Bit32u temp_efer
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EFER
);
340 bx_bool pe
= (temp_cr0
& 0x01);
341 bx_bool nw
= (temp_cr0
>> 29) & 0x01;
342 bx_bool cd
= (temp_cr0
>> 30) & 0x01;
343 bx_bool pg
= (temp_cr0
>> 31) & 0x01;
345 // check CR0 conditions for entering to shutdown state
347 BX_PANIC(("SMM restore: attempt to set CR0.PG with CR0.PE cleared !"));
352 BX_PANIC(("SMM restore: attempt to set CR0.NW with CR0.CD cleared !"));
356 // shutdown if write to reserved CR4 bits
357 if (! SetCR4(SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR4
))) {
358 BX_PANIC(("SMM restore: incorrect CR4 state !"));
362 if (temp_efer
& ~BX_EFER_SUPPORTED_BITS
) {
363 BX_PANIC(("SMM restore: Attemp to set EFER reserved bits: 0x%08x !", temp_efer
));
367 BX_CPU_THIS_PTR efer
.setRegister(temp_efer
& BX_EFER_SUPPORTED_BITS
);
369 if (BX_CPU_THIS_PTR efer
.get_LMA()) {
370 if (temp_eflags
& EFlagsVMMask
) {
371 BX_PANIC(("SMM restore: If EFER.LMA = 1 => RFLAGS.VM=0 !"));
375 if (!BX_CPU_THIS_PTR cr4
.get_PAE() || !pg
|| !pe
|| !BX_CPU_THIS_PTR efer
.get_LME()) {
376 BX_PANIC(("SMM restore: If EFER.LMA = 1 <=> CR4.PAE, CR0.PG, CR0.PE, EFER.LME=1 !"));
381 if (BX_CPU_THIS_PTR cr4
.get_PAE() && pg
&& pe
&& BX_CPU_THIS_PTR efer
.get_LME()) {
382 if (! BX_CPU_THIS_PTR efer
.get_LMA()) {
383 BX_PANIC(("SMM restore: If EFER.LMA = 1 <=> CR4.PAE, CR0.PG, CR0.PE, EFER.LME=1 !"));
388 // hack CR0 to be able to back to long mode correctly
389 BX_CPU_THIS_PTR cr0
.set_PE(0); // real mode (bit 0)
390 BX_CPU_THIS_PTR cr0
.set_PG(0); // paging disabled (bit 31)
392 setEFlags(temp_eflags
);
394 bx_phy_address temp_cr3
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR3
);
397 RAX
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RAX_HI32
, SMRAM_OFFSET_RAX_LO32
);
398 RBX
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RBX_HI32
, SMRAM_OFFSET_RBX_LO32
);
399 RCX
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RCX_HI32
, SMRAM_OFFSET_RCX_LO32
);
400 RDX
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RDX_HI32
, SMRAM_OFFSET_RDX_LO32
);
401 RSP
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RSP_HI32
, SMRAM_OFFSET_RSP_LO32
);
402 RBP
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RBP_HI32
, SMRAM_OFFSET_RBP_LO32
);
403 RSI
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RSI_HI32
, SMRAM_OFFSET_RSI_LO32
);
404 RDI
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RDI_HI32
, SMRAM_OFFSET_RDI_LO32
);
405 R8
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R8_HI32
, SMRAM_OFFSET_R8_LO32
);
406 R9
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R9_HI32
, SMRAM_OFFSET_R9_LO32
);
407 R10
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R10_HI32
, SMRAM_OFFSET_R10_LO32
);
408 R11
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R11_HI32
, SMRAM_OFFSET_R11_LO32
);
409 R12
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R12_HI32
, SMRAM_OFFSET_R12_LO32
);
410 R13
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R13_HI32
, SMRAM_OFFSET_R13_LO32
);
411 R14
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R14_HI32
, SMRAM_OFFSET_R14_LO32
);
412 R15
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_R15_HI32
, SMRAM_OFFSET_R15_LO32
);
413 RIP
= SMRAM_FIELD64(saved_state
, SMRAM_OFFSET_RIP_HI32
, SMRAM_OFFSET_RIP_LO32
);
415 BX_CPU_THIS_PTR dr6
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR6
);
416 BX_CPU_THIS_PTR dr7
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR7
);
418 BX_CPU_THIS_PTR gdtr
.base
= SMRAM_FIELD64(saved_state
, SMRAM_GDTR_BASE_HI32
, SMRAM_GDTR_BASE_LO32
);
419 BX_CPU_THIS_PTR gdtr
.limit
= SMRAM_FIELD(saved_state
, SMRAM_GDTR_LIMIT
);
420 BX_CPU_THIS_PTR idtr
.base
= SMRAM_FIELD64(saved_state
, SMRAM_IDTR_BASE_HI32
, SMRAM_IDTR_BASE_LO32
);
421 BX_CPU_THIS_PTR idtr
.limit
= SMRAM_FIELD(saved_state
, SMRAM_IDTR_LIMIT
);
423 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
],
424 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR_AR
) & 0xffff,
425 SMRAM_FIELD64(saved_state
, SMRAM_CS_BASE_HI32
, SMRAM_CS_BASE_LO32
),
426 SMRAM_FIELD(saved_state
, SMRAM_CS_LIMIT
),
427 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR_AR
) >> 16))
429 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.segment
) {
430 BX_PANIC(("SMM restore: restored valid non segment CS !"));
435 handleCpuModeChange();
437 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
],
438 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR_AR
) & 0xffff,
439 SMRAM_FIELD64(saved_state
, SMRAM_DS_BASE_HI32
, SMRAM_DS_BASE_LO32
),
440 SMRAM_FIELD(saved_state
, SMRAM_DS_LIMIT
),
441 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR_AR
) >> 16))
443 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.segment
) {
444 BX_PANIC(("SMM restore: restored valid non segment DS !"));
449 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
],
450 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR_AR
) & 0xffff,
451 SMRAM_FIELD64(saved_state
, SMRAM_SS_BASE_HI32
, SMRAM_SS_BASE_LO32
),
452 SMRAM_FIELD(saved_state
, SMRAM_SS_LIMIT
),
453 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR_AR
) >> 16))
455 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
.segment
) {
456 BX_PANIC(("SMM restore: restored valid non segment SS !"));
461 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
],
462 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR_AR
) & 0xffff,
463 SMRAM_FIELD64(saved_state
, SMRAM_ES_BASE_HI32
, SMRAM_ES_BASE_LO32
),
464 SMRAM_FIELD(saved_state
, SMRAM_ES_LIMIT
),
465 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR_AR
) >> 16))
467 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
].cache
.segment
) {
468 BX_PANIC(("SMM restore: restored valid non segment ES !"));
473 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
],
474 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR_AR
) & 0xffff,
475 SMRAM_FIELD64(saved_state
, SMRAM_FS_BASE_HI32
, SMRAM_FS_BASE_LO32
),
476 SMRAM_FIELD(saved_state
, SMRAM_FS_LIMIT
),
477 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR_AR
) >> 16))
479 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
].cache
.segment
) {
480 BX_PANIC(("SMM restore: restored valid non segment FS !"));
485 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
],
486 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR_AR
) & 0xffff,
487 SMRAM_FIELD64(saved_state
, SMRAM_GS_BASE_HI32
, SMRAM_GS_BASE_LO32
),
488 SMRAM_FIELD(saved_state
, SMRAM_GS_LIMIT
),
489 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR_AR
) >> 16))
491 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
].cache
.segment
) {
492 BX_PANIC(("SMM restore: restored valid non segment GS !"));
497 if (set_segment_ar_data(&BX_CPU_THIS_PTR ldtr
,
498 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR_AR
) & 0xffff,
499 SMRAM_FIELD64(saved_state
, SMRAM_LDTR_BASE_HI32
, SMRAM_LDTR_BASE_LO32
),
500 SMRAM_FIELD(saved_state
, SMRAM_LDTR_LIMIT
),
501 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR_AR
) >> 16))
503 if (BX_CPU_THIS_PTR ldtr
.cache
.type
!= BX_SYS_SEGMENT_LDT
) {
504 BX_PANIC(("SMM restore: LDTR is not LDT descriptor type !"));
509 if (set_segment_ar_data(&BX_CPU_THIS_PTR tr
,
510 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR_AR
) & 0xffff,
511 SMRAM_FIELD64(saved_state
, SMRAM_TR_BASE_HI32
, SMRAM_TR_BASE_LO32
),
512 SMRAM_FIELD(saved_state
, SMRAM_TR_LIMIT
),
513 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR_AR
) >> 16))
515 if (BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_AVAIL_286_TSS
&&
516 BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_BUSY_286_TSS
&&
517 BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_AVAIL_386_TSS
&&
518 BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_BUSY_386_TSS
)
520 BX_PANIC(("SMM restore: TR is not TSS descriptor type !"));
525 if (SMM_REVISION_ID
& SMM_SMBASE_RELOCATION
)
526 BX_CPU_THIS_PTR smbase
= SMRAM_FIELD(saved_state
, SMRAM_SMBASE_OFFSET
);
531 #else /* BX_SUPPORT_X86_64 == 0 */
533 void BX_CPU_C::smram_save_state(Bit32u
*saved_state
)
535 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR0
) = BX_CPU_THIS_PTR cr0
.getRegister();
536 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR3
) = BX_CPU_THIS_PTR cr3
;
537 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EFLAGS
) = read_eflags();
538 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EIP
) = EIP
;
539 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EDI
) = EDI
;
540 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_ESI
) = ESI
;
541 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EBP
) = EBP
;
542 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_ESP
) = ESP
;
543 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EBX
) = EBX
;
544 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EDX
) = EDX
;
545 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_ECX
) = ECX
;
546 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EAX
) = EAX
;
547 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR6
) = BX_CPU_THIS_PTR dr6
;
548 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR7
) = BX_CPU_THIS_PTR dr7
;
549 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR
) = BX_CPU_THIS_PTR tr
.selector
.value
;
550 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR
) = BX_CPU_THIS_PTR ldtr
.selector
.value
;
552 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR
) =
553 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
].selector
.value
;
554 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR
) =
555 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
].selector
.value
;
556 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR
) =
557 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].selector
.value
;
558 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR
) =
559 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].selector
.value
;
560 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR
) =
561 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].selector
.value
;
562 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR
) =
563 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
].selector
.value
;
565 // --- SS selector --- //
566 bx_segment_reg_t
*seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
]);
567 SMRAM_FIELD(saved_state
, SMRAM_SS_BASE
) = seg
->cache
.u
.segment
.base
;
568 SMRAM_FIELD(saved_state
, SMRAM_SS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
569 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR_AR
) = seg
->selector
.value
|
570 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
571 // --- CS selector --- //
572 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
]);
573 SMRAM_FIELD(saved_state
, SMRAM_CS_BASE
) = seg
->cache
.u
.segment
.base
;
574 SMRAM_FIELD(saved_state
, SMRAM_CS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
575 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR_AR
) = seg
->selector
.value
|
576 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
577 // --- ES selector --- //
578 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
]);
579 SMRAM_FIELD(saved_state
, SMRAM_ES_BASE
) = seg
->cache
.u
.segment
.base
;
580 SMRAM_FIELD(saved_state
, SMRAM_ES_LIMIT
) = seg
->cache
.u
.segment
.limit
;
581 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR_AR
) = seg
->selector
.value
|
582 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
584 SMRAM_FIELD(saved_state
, SMRAM_LDTR_BASE
) = BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
;
585 SMRAM_FIELD(saved_state
, SMRAM_LDTR_LIMIT
) = BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit
;
586 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR_AR
) = BX_CPU_THIS_PTR ldtr
.selector
.value
|
587 (((Bit32u
) get_segment_ar_data(&BX_CPU_THIS_PTR ldtr
.cache
)) << 16);
589 SMRAM_FIELD(saved_state
, SMRAM_GDTR_BASE
) = BX_CPU_THIS_PTR gdtr
.base
;
590 SMRAM_FIELD(saved_state
, SMRAM_GDTR_LIMIT
) = BX_CPU_THIS_PTR gdtr
.limit
;
591 /* base+0x7f6c is reserved */
592 /* base+0x7f68 is reserved */
594 // --- Task Register --- //
595 SMRAM_FIELD(saved_state
, SMRAM_TR_BASE
) = BX_CPU_THIS_PTR tr
.cache
.u
.system
.base
;
596 SMRAM_FIELD(saved_state
, SMRAM_TR_LIMIT
) = BX_CPU_THIS_PTR tr
.cache
.u
.system
.limit
;
597 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR_AR
) = BX_CPU_THIS_PTR tr
.selector
.value
|
598 (((Bit32u
) get_segment_ar_data(&BX_CPU_THIS_PTR tr
.cache
)) << 16);
601 SMRAM_FIELD(saved_state
, SMRAM_IDTR_BASE
) = BX_CPU_THIS_PTR idtr
.base
;
602 SMRAM_FIELD(saved_state
, SMRAM_IDTR_LIMIT
) = BX_CPU_THIS_PTR idtr
.limit
;
603 /* base+0x7f50 is reserved */
604 // --- GS selector --- //
605 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
]);
606 SMRAM_FIELD(saved_state
, SMRAM_GS_BASE
) = seg
->cache
.u
.segment
.base
;
607 SMRAM_FIELD(saved_state
, SMRAM_GS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
608 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR_AR
) = seg
->selector
.value
|
609 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
610 // --- FS selector --- //
611 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
]);
612 SMRAM_FIELD(saved_state
, SMRAM_FS_BASE
) = seg
->cache
.u
.segment
.base
;
613 SMRAM_FIELD(saved_state
, SMRAM_FS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
614 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR_AR
) = seg
->selector
.value
|
615 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
616 // --- DS selector --- //
617 seg
= &(BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
]);
618 SMRAM_FIELD(saved_state
, SMRAM_DS_BASE
) = seg
->cache
.u
.segment
.base
;
619 SMRAM_FIELD(saved_state
, SMRAM_DS_LIMIT
) = seg
->cache
.u
.segment
.limit
;
620 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR_AR
) = seg
->selector
.value
|
621 (((Bit32u
) get_segment_ar_data(&seg
->cache
)) << 16);
623 /* base+0x7f28 to base+7f18 is reserved */
624 #if BX_CPU_LEVEL >= 4
625 SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR4
) = BX_CPU_THIS_PTR cr4
.getRegister();
628 /* base+0x7f02 is Auto HALT restart field (2 byte) */
629 /* base+0x7f00 is I/O restart field (2 byte) */
630 SMRAM_FIELD(saved_state
, SMRAM_SMM_REVISION_ID
) = SMM_REVISION_ID
;
631 SMRAM_FIELD(saved_state
, SMRAM_SMBASE_OFFSET
) = BX_CPU_THIS_PTR smbase
;
632 /* base+0x7ef4 to base+0x7e00 is reserved */
635 bx_bool
BX_CPU_C::smram_restore_state(const Bit32u
*saved_state
)
637 Bit32u temp_cr0
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR0
);
638 Bit32u temp_eflags
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EFLAGS
);
639 Bit32u temp_cr3
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR3
);
641 bx_bool pe
= (temp_cr0
& 0x01);
642 bx_bool nw
= (temp_cr0
>> 29) & 0x01;
643 bx_bool cd
= (temp_cr0
>> 30) & 0x01;
644 bx_bool pg
= (temp_cr0
>> 31) & 0x01;
646 // check conditions for entering to shutdown state
648 BX_PANIC(("SMM restore: attempt to set CR0.PG with CR0.PE cleared !"));
653 BX_PANIC(("SMM restore: attempt to set CR0.NW with CR0.CD cleared !"));
659 setEFlags(temp_eflags
);
661 #if BX_CPU_LEVEL >= 4
662 if (! SetCR4(SMRAM_FIELD(saved_state
, SMRAM_OFFSET_CR4
))) {
663 BX_PANIC(("SMM restore: incorrect CR4 state !"));
668 EIP
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EIP
);
669 EDI
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EDI
);
670 ESI
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_ESI
);
671 EBP
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EBP
);
672 ESP
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_ESP
);
673 EBX
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EBX
);
674 EDX
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EDX
);
675 ECX
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_ECX
);
676 EAX
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_EAX
);
678 BX_CPU_THIS_PTR dr6
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR6
);
679 BX_CPU_THIS_PTR dr7
= SMRAM_FIELD(saved_state
, SMRAM_OFFSET_DR7
);
681 BX_CPU_THIS_PTR gdtr
.base
= SMRAM_FIELD(saved_state
, SMRAM_GDTR_BASE
);
682 BX_CPU_THIS_PTR gdtr
.limit
= SMRAM_FIELD(saved_state
, SMRAM_GDTR_LIMIT
);
683 BX_CPU_THIS_PTR idtr
.base
= SMRAM_FIELD(saved_state
, SMRAM_IDTR_BASE
);
684 BX_CPU_THIS_PTR idtr
.limit
= SMRAM_FIELD(saved_state
, SMRAM_IDTR_LIMIT
);
686 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
],
687 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR_AR
) & 0xffff,
688 SMRAM_FIELD(saved_state
, SMRAM_CS_BASE
),
689 SMRAM_FIELD(saved_state
, SMRAM_CS_LIMIT
),
690 SMRAM_FIELD(saved_state
, SMRAM_CS_SELECTOR_AR
) >> 16))
692 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.segment
) {
693 BX_PANIC(("SMM restore: restored valid non segment CS !"));
698 handleCpuModeChange();
700 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
],
701 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR_AR
) & 0xffff,
702 SMRAM_FIELD(saved_state
, SMRAM_DS_BASE
),
703 SMRAM_FIELD(saved_state
, SMRAM_DS_LIMIT
),
704 SMRAM_FIELD(saved_state
, SMRAM_DS_SELECTOR_AR
) >> 16))
706 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
].cache
.segment
) {
707 BX_PANIC(("SMM restore: restored valid non segment DS !"));
712 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
],
713 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR_AR
) & 0xffff,
714 SMRAM_FIELD(saved_state
, SMRAM_SS_BASE
),
715 SMRAM_FIELD(saved_state
, SMRAM_SS_LIMIT
),
716 SMRAM_FIELD(saved_state
, SMRAM_SS_SELECTOR_AR
) >> 16))
718 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
.segment
) {
719 BX_PANIC(("SMM restore: restored valid non segment SS !"));
724 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
],
725 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR_AR
) & 0xffff,
726 SMRAM_FIELD(saved_state
, SMRAM_ES_BASE
),
727 SMRAM_FIELD(saved_state
, SMRAM_ES_LIMIT
),
728 SMRAM_FIELD(saved_state
, SMRAM_ES_SELECTOR_AR
) >> 16))
730 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
].cache
.segment
) {
731 BX_PANIC(("SMM restore: restored valid non segment ES !"));
736 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
],
737 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR_AR
) & 0xffff,
738 SMRAM_FIELD(saved_state
, SMRAM_FS_BASE
),
739 SMRAM_FIELD(saved_state
, SMRAM_FS_LIMIT
),
740 SMRAM_FIELD(saved_state
, SMRAM_FS_SELECTOR_AR
) >> 16))
742 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
].cache
.segment
) {
743 BX_PANIC(("SMM restore: restored valid non segment FS !"));
748 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
],
749 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR_AR
) & 0xffff,
750 SMRAM_FIELD(saved_state
, SMRAM_GS_BASE
),
751 SMRAM_FIELD(saved_state
, SMRAM_GS_LIMIT
),
752 SMRAM_FIELD(saved_state
, SMRAM_GS_SELECTOR_AR
) >> 16))
754 if (! BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
].cache
.segment
) {
755 BX_PANIC(("SMM restore: restored valid non segment GS !"));
760 if (set_segment_ar_data(&BX_CPU_THIS_PTR ldtr
,
761 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR_AR
) & 0xffff,
762 SMRAM_FIELD(saved_state
, SMRAM_LDTR_BASE
),
763 SMRAM_FIELD(saved_state
, SMRAM_LDTR_LIMIT
),
764 SMRAM_FIELD(saved_state
, SMRAM_LDTR_SELECTOR_AR
) >> 16))
766 if (BX_CPU_THIS_PTR ldtr
.cache
.type
!= BX_SYS_SEGMENT_LDT
) {
767 BX_PANIC(("SMM restore: LDTR is not LDT descriptor type !"));
772 if (set_segment_ar_data(&BX_CPU_THIS_PTR tr
,
773 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR_AR
) & 0xffff,
774 SMRAM_FIELD(saved_state
, SMRAM_TR_BASE
),
775 SMRAM_FIELD(saved_state
, SMRAM_TR_LIMIT
),
776 SMRAM_FIELD(saved_state
, SMRAM_TR_SELECTOR_AR
) >> 16))
778 if (BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_AVAIL_286_TSS
&&
779 BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_BUSY_286_TSS
&&
780 BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_AVAIL_386_TSS
&&
781 BX_CPU_THIS_PTR tr
.cache
.type
!= BX_SYS_SEGMENT_BUSY_386_TSS
)
783 BX_PANIC(("SMM restore: TR is not TSS descriptor type !"));
788 if (SMM_REVISION_ID
& SMM_SMBASE_RELOCATION
) {
789 BX_CPU_THIS_PTR smbase
= SMRAM_FIELD(saved_state
, SMRAM_SMBASE_OFFSET
);
791 if (BX_CPU_THIS_PTR smbase
& 0x7fff) {
792 BX_PANIC(("SMM restore: SMBASE must be aligned to 32K !"));
801 #endif /* BX_SUPPORT_X86_64 */
803 #endif /* BX_CPU_LEVEL >= 3 */