- added instructions how to update the online documentation
[bochs-mirror.git] / cpu / smm.cc
blob02a55131c8f57dffbc15c683ed6dfb418417c76d
1 /////////////////////////////////////////////////////////////////////////
2 // $Id: smm.cc,v 1.49 2008/12/06 18:52:02 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (c) 2006 Stanislav Shwartsman
6 // Written by Stanislav Shwartsman [sshwarts at sourceforge net]
7 //
8 // This library is free software; you can redistribute it and/or
9 // modify it under the terms of the GNU Lesser General Public
10 // License as published by the Free Software Foundation; either
11 // version 2 of the License, or (at your option) any later version.
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
20 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 /////////////////////////////////////////////////////////////////////////
23 #define NEED_CPU_REG_SHORTCUTS 1
24 #include "bochs.h"
25 #include "cpu.h"
26 #include "smm.h"
27 #define LOG_THIS BX_CPU_THIS_PTR
29 #if BX_CPU_LEVEL >= 3
31 #if BX_SUPPORT_X86_64==0
32 #define RIP EIP
33 #endif
36 // Some of the CPU field must be saved and restored in order to continue the
37 // simulation correctly after the RSM instruction:
39 // ---------------------------------------------------------------
41 // 1. General purpose registers: EAX-EDI, R8-R15
42 // 2. EIP, RFLAGS
43 // 3. Segment registers CS, DS, SS, ES, FS, GS
44 // fields: valid - not required, initialized according to selector value
45 // p - must be saved/restored
46 // dpl - must be saved/restored
47 // segment - must be 1 for seg registers, not required to save
48 // type - must be saved/restored
49 // base - must be saved/restored
50 // limit - must be saved/restored
51 // g - must be saved/restored
52 // d_b - must be saved/restored
53 // l - must be saved/restored
54 // avl - must be saved/restored
55 // 4. GDTR, IDTR
56 // fields: base, limit
57 // 5. LDTR, TR
58 // fields: base, limit, anything else ?
59 // 6. Debug Registers DR0-DR7, only DR6 and DR7 are saved
60 // 7. Control Registers: CR0, CR1 is always 0, CR2 is NOT saved, CR3, CR4, EFER
61 // 8. SMBASE
62 // 9. MSR/FPU/XMM/APIC are NOT saved according to Intel docs
65 #define SMM_SAVE_STATE_MAP_SIZE 128
67 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RSM(bxInstruction_c *i)
69 /* If we are not in System Management Mode, then #UD should be generated */
70 if (! BX_CPU_THIS_PTR smm_mode()) {
71 BX_INFO(("RSM not in System Management Mode !"));
72 exception(BX_UD_EXCEPTION, 0, 0);
75 invalidate_prefetch_q();
77 BX_INFO(("RSM: Resuming from System Management Mode"));
79 BX_CPU_THIS_PTR disable_NMI = 0;
81 Bit32u saved_state[SMM_SAVE_STATE_MAP_SIZE], n;
82 // reset reserved bits
83 for(n=0;n<SMM_SAVE_STATE_MAP_SIZE;n++) saved_state[n] = 0;
85 bx_phy_address base = BX_CPU_THIS_PTR smbase + 0x10000;
86 // could be optimized with reading of only non-reserved bytes
87 for(n=0;n<SMM_SAVE_STATE_MAP_SIZE;n++) {
88 base -= 4;
89 BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, base, 4, &saved_state[n]);
90 BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, base, 4, BX_READ, (Bit8u*)(&saved_state[n]));
92 BX_CPU_THIS_PTR in_smm = 0;
94 // restore the CPU state from SMRAM
95 if (! smram_restore_state(saved_state)) {
96 BX_PANIC(("RSM: Incorrect state when restoring CPU state - shutdown !"));
97 shutdown();
100 // debug(RIP);
103 void BX_CPU_C::enter_system_management_mode(void)
105 invalidate_prefetch_q();
107 BX_INFO(("Enter to System Management Mode"));
109 // debug(BX_CPU_THIS_PTR prev_rip);
111 BX_CPU_THIS_PTR in_smm = 1;
113 Bit32u saved_state[SMM_SAVE_STATE_MAP_SIZE], n;
114 // reset reserved bits
115 for(n=0;n<SMM_SAVE_STATE_MAP_SIZE;n++) saved_state[n] = 0;
116 // prepare CPU state to be saved in the SMRAM
117 BX_CPU_THIS_PTR smram_save_state(saved_state);
119 bx_phy_address base = BX_CPU_THIS_PTR smbase + 0x10000;
120 // could be optimized with reading of only non-reserved bytes
121 for(n=0;n<SMM_SAVE_STATE_MAP_SIZE;n++) {
122 base -= 4;
123 BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, base, 4, &saved_state[n]);
124 BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, base, 4, BX_WRITE, (Bit8u*)(&saved_state[n]));
127 BX_CPU_THIS_PTR setEFlags(0x2); // Bit1 is always set
128 BX_CPU_THIS_PTR prev_rip = RIP = 0x00008000;
129 BX_CPU_THIS_PTR dr7 = 0x00000400;
131 // CR0 - PE, EM, TS, and PG flags set to 0; others unmodified
132 BX_CPU_THIS_PTR cr0.set_PE(0); // real mode (bit 0)
133 BX_CPU_THIS_PTR cr0.set_EM(0); // emulate math coprocessor (bit 2)
134 BX_CPU_THIS_PTR cr0.set_TS(0); // no task switch (bit 3)
135 BX_CPU_THIS_PTR cr0.set_PG(0); // paging disabled (bit 31)
137 // paging mode was changed - flush TLB
138 TLB_flush(); // Flush Global entries also
140 #if BX_CPU_LEVEL >= 4
141 BX_CPU_THIS_PTR cr4.set32(0);
142 #endif
144 #if BX_SUPPORT_X86_64
145 BX_CPU_THIS_PTR efer.set32(0);
146 #endif
148 parse_selector(BX_CPU_THIS_PTR smbase >> 4,
149 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
151 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
152 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
153 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
154 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
155 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
157 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = BX_CPU_THIS_PTR smbase;
158 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xffff;
159 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xffffffff;
160 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0;
161 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* page granular */
162 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0; /* 16bit default size */
163 #if BX_SUPPORT_X86_64
164 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0; /* 16bit default size */
165 #endif
167 updateFetchModeMask();
168 handleCpuModeChange();
170 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
171 handleAlignmentCheck();
172 #endif
174 /* DS (Data Segment) and descriptor cache */
175 parse_selector(0x0000,
176 &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector);
178 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
179 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.p = 1;
180 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.dpl = 0;
181 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment = 1; /* data/code segment */
182 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.type = BX_DATA_READ_WRITE_ACCESSED;
184 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.base = 0x00000000;
185 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit = 0xffff;
186 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit_scaled = 0xffffffff;
187 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.avl = 0;
188 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.g = 1; /* byte granular */
189 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.d_b = 0; /* 16bit default size */
190 #if BX_SUPPORT_X86_64
191 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.l = 0; /* 16bit default size */
192 #endif
194 // use DS segment as template for the others
195 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
196 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
197 BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
198 BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
201 #define SMRAM_TRANSLATE(addr) (((0x8000 - (addr)) >> 2) - 1)
202 #define SMRAM_FIELD(state, addr) (state[SMRAM_TRANSLATE(addr)])
204 #if BX_SUPPORT_X86_64
206 BX_CPP_INLINE Bit64u SMRAM_FIELD64(const Bit32u *saved_state, unsigned hi, unsigned lo)
208 Bit64u tmp = ((Bit64u) SMRAM_FIELD(saved_state, hi)) << 32;
209 tmp |= (Bit64u) SMRAM_FIELD(saved_state, lo);
210 return tmp;
213 void BX_CPU_C::smram_save_state(Bit32u *saved_state)
215 // --- General Purpose Registers --- //
216 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RAX_HI32) = (Bit32u)(RAX >> 32);
217 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RAX_LO32) = EAX;
218 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RCX_HI32) = (Bit32u)(RCX >> 32);
219 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RCX_LO32) = ECX;
220 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RDX_HI32) = (Bit32u)(RDX >> 32);
221 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RDX_LO32) = EDX;
222 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RBX_HI32) = (Bit32u)(RBX >> 32);
223 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RBX_LO32) = EBX;
224 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RSP_HI32) = (Bit32u)(RSP >> 32);
225 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RSP_LO32) = ESP;
226 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RBP_HI32) = (Bit32u)(RBP >> 32);
227 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RBP_LO32) = EBP;
228 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RSI_HI32) = (Bit32u)(RSI >> 32);
229 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RSI_LO32) = ESI;
230 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RDI_HI32) = (Bit32u)(RDI >> 32);
231 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RDI_LO32) = EDI;
232 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R8_HI32) = (Bit32u)(R8 >> 32);
233 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R8_LO32) = (Bit32u)(R8 & 0xffffffff);
234 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R9_HI32) = (Bit32u)(R9 >> 32);
235 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R9_LO32) = (Bit32u)(R9 & 0xffffffff);
236 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R10_HI32) = (Bit32u)(R10 >> 32);
237 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R10_LO32) = (Bit32u)(R10 & 0xffffffff);
238 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R11_HI32) = (Bit32u)(R11 >> 32);
239 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R11_LO32) = (Bit32u)(R11 & 0xffffffff);
240 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R12_HI32) = (Bit32u)(R12 >> 32);
241 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R12_LO32) = (Bit32u)(R12 & 0xffffffff);
242 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R13_HI32) = (Bit32u)(R13 >> 32);
243 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R13_LO32) = (Bit32u)(R13 & 0xffffffff);
244 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R14_HI32) = (Bit32u)(R14 >> 32);
245 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R14_LO32) = (Bit32u)(R14 & 0xffffffff);
246 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R15_HI32) = (Bit32u)(R15 >> 32);
247 SMRAM_FIELD(saved_state, SMRAM_OFFSET_R15_LO32) = (Bit32u)(R15 & 0xffffffff);
248 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RIP_HI32) = (Bit32u)(RIP >> 32);
249 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RIP_LO32) = EIP;
250 SMRAM_FIELD(saved_state, SMRAM_OFFSET_RFLAGS32) = read_eflags();
252 // --- Debug and Control Registers --- //
253 SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR6) = BX_CPU_THIS_PTR dr6;
254 SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR7) = BX_CPU_THIS_PTR dr7;
255 SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR0) = BX_CPU_THIS_PTR cr0.get32();
256 SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR3) = BX_CPU_THIS_PTR cr3;
257 SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR4) = BX_CPU_THIS_PTR cr4.get32();
258 /* base+0x7f44 to base+0x7f04 is reserved */
259 SMRAM_FIELD(saved_state, SMRAM_SMBASE_OFFSET) = BX_CPU_THIS_PTR smbase;
260 SMRAM_FIELD(saved_state, SMRAM_SMM_REVISION_ID) = SMM_REVISION_ID;
261 /* base+0x7ef8 to base+0x7ed8 is reserved */
262 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EFER) = BX_CPU_THIS_PTR efer.get32();
263 /* base+0x7ecc is reserved */
264 /* base+0x7ec8 is I/O Instruction Restart, Auto-Halt Restart and NMI Mask */
265 /* base+0x7ec4 is reserved */
266 /* base+0x7ec0 is SMM I/O Trap */
267 /* base+0x7ebc to base+0x7ea0 is reserved */
269 // --- Task Register --- //
270 SMRAM_FIELD(saved_state, SMRAM_TR_BASE_HI32) = (Bit32u)(BX_CPU_THIS_PTR tr.cache.u.system.base >> 32);
271 SMRAM_FIELD(saved_state, SMRAM_TR_BASE_LO32) = (Bit32u)(BX_CPU_THIS_PTR tr.cache.u.system.base & 0xffffffff);
272 SMRAM_FIELD(saved_state, SMRAM_TR_LIMIT) = BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled;
273 Bit32u tr_ar = get_segment_ar_data(&BX_CPU_THIS_PTR tr.cache) | (BX_CPU_THIS_PTR tr.cache.valid << 8);
274 SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR_AR) = BX_CPU_THIS_PTR tr.selector.value | (tr_ar << 16);
276 // --- IDTR --- //
277 SMRAM_FIELD(saved_state, SMRAM_IDTR_BASE_HI32) = (Bit32u)(BX_CPU_THIS_PTR idtr.base >> 32);
278 SMRAM_FIELD(saved_state, SMRAM_IDTR_BASE_LO32) = (Bit32u)(BX_CPU_THIS_PTR idtr.base & 0xffffffff);
279 SMRAM_FIELD(saved_state, SMRAM_IDTR_LIMIT) = BX_CPU_THIS_PTR idtr.limit;
280 // --- LDTR --- //
281 SMRAM_FIELD(saved_state, SMRAM_LDTR_BASE_HI32) = (Bit32u)(BX_CPU_THIS_PTR ldtr.cache.u.system.base >> 32);
282 SMRAM_FIELD(saved_state, SMRAM_LDTR_BASE_LO32) = (Bit32u)(BX_CPU_THIS_PTR ldtr.cache.u.system.base & 0xffffffff);
283 SMRAM_FIELD(saved_state, SMRAM_LDTR_LIMIT) = BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled;
284 Bit32u ldtr_ar = get_segment_ar_data(&BX_CPU_THIS_PTR ldtr.cache) | (BX_CPU_THIS_PTR ldtr.cache.valid << 8);
285 SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR_AR) = BX_CPU_THIS_PTR ldtr.selector.value | (ldtr_ar << 16);
286 // --- GDTR --- //
287 SMRAM_FIELD(saved_state, SMRAM_GDTR_BASE_HI32) = (Bit32u)(BX_CPU_THIS_PTR gdtr.base >> 32);
288 SMRAM_FIELD(saved_state, SMRAM_GDTR_BASE_LO32) = (Bit32u)(BX_CPU_THIS_PTR gdtr.base & 0xffffffff);
289 SMRAM_FIELD(saved_state, SMRAM_GDTR_LIMIT) = BX_CPU_THIS_PTR gdtr.limit;
290 // --- GS selector --- //
291 bx_segment_reg_t *seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS]);
292 SMRAM_FIELD(saved_state, SMRAM_GS_BASE_HI32) = (Bit32u)(seg->cache.u.segment.base >> 32);
293 SMRAM_FIELD(saved_state, SMRAM_GS_BASE_LO32) = (Bit32u)(seg->cache.u.segment.base & 0xffffffff);
294 SMRAM_FIELD(saved_state, SMRAM_GS_LIMIT) = seg->cache.u.segment.limit_scaled;
295 Bit32u seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
296 SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
297 // --- FS selector --- //
298 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]);
299 SMRAM_FIELD(saved_state, SMRAM_FS_BASE_HI32) = (Bit32u)(seg->cache.u.segment.base >> 32);
300 SMRAM_FIELD(saved_state, SMRAM_FS_BASE_LO32) = (Bit32u)(seg->cache.u.segment.base & 0xffffffff);
301 SMRAM_FIELD(saved_state, SMRAM_FS_LIMIT) = seg->cache.u.segment.limit_scaled;
302 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
303 SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
304 // --- DS selector --- //
305 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]);
306 SMRAM_FIELD(saved_state, SMRAM_DS_BASE_HI32) = (Bit32u)(seg->cache.u.segment.base >> 32);
307 SMRAM_FIELD(saved_state, SMRAM_DS_BASE_LO32) = (Bit32u)(seg->cache.u.segment.base & 0xffffffff);
308 SMRAM_FIELD(saved_state, SMRAM_DS_LIMIT) = seg->cache.u.segment.limit_scaled;
309 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
310 SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
311 // --- SS selector --- //
312 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS]);
313 SMRAM_FIELD(saved_state, SMRAM_SS_BASE_HI32) = (Bit32u)(seg->cache.u.segment.base >> 32);
314 SMRAM_FIELD(saved_state, SMRAM_SS_BASE_LO32) = (Bit32u)(seg->cache.u.segment.base & 0xffffffff);
315 SMRAM_FIELD(saved_state, SMRAM_SS_LIMIT) = seg->cache.u.segment.limit_scaled;
316 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
317 SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
318 // --- CS selector --- //
319 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]);
320 SMRAM_FIELD(saved_state, SMRAM_CS_BASE_HI32) = (Bit32u)(seg->cache.u.segment.base >> 32);
321 SMRAM_FIELD(saved_state, SMRAM_CS_BASE_LO32) = (Bit32u)(seg->cache.u.segment.base & 0xffffffff);
322 SMRAM_FIELD(saved_state, SMRAM_CS_LIMIT) = seg->cache.u.segment.limit_scaled;
323 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
324 SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
325 // --- ES selector --- //
326 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES]);
327 SMRAM_FIELD(saved_state, SMRAM_ES_BASE_HI32) = (Bit32u)(seg->cache.u.segment.base >> 32);
328 SMRAM_FIELD(saved_state, SMRAM_ES_BASE_LO32) = (Bit32u)(seg->cache.u.segment.base & 0xffffffff);
329 SMRAM_FIELD(saved_state, SMRAM_ES_LIMIT) = seg->cache.u.segment.limit_scaled;
330 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
331 SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
334 bx_bool BX_CPU_C::smram_restore_state(const Bit32u *saved_state)
336 Bit32u temp_cr0 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR0);
337 Bit32u temp_eflags = SMRAM_FIELD(saved_state, SMRAM_OFFSET_RFLAGS32);
338 Bit32u temp_efer = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EFER);
340 bx_bool pe = (temp_cr0 & 0x1);
341 bx_bool nw = (temp_cr0 >> 29) & 0x1;
342 bx_bool cd = (temp_cr0 >> 30) & 0x1;
343 bx_bool pg = (temp_cr0 >> 31) & 0x1;
345 // check CR0 conditions for entering to shutdown state
346 if (pg && !pe) {
347 BX_PANIC(("SMM restore: attempt to set CR0.PG with CR0.PE cleared !"));
348 return 0;
351 if (nw && !cd) {
352 BX_PANIC(("SMM restore: attempt to set CR0.NW with CR0.CD cleared !"));
353 return 0;
356 // shutdown if write to reserved CR4 bits
357 if (! SetCR4(SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR4))) {
358 BX_PANIC(("SMM restore: incorrect CR4 state !"));
359 return 0;
362 if (temp_efer & ~BX_EFER_SUPPORTED_BITS) {
363 BX_PANIC(("SMM restore: Attemp to set EFER reserved bits: 0x%08x !", temp_efer));
364 return 0;
367 BX_CPU_THIS_PTR efer.set32(temp_efer & BX_EFER_SUPPORTED_BITS);
369 if (BX_CPU_THIS_PTR efer.get_LMA()) {
370 if (temp_eflags & EFlagsVMMask) {
371 BX_PANIC(("SMM restore: If EFER.LMA = 1 => RFLAGS.VM=0 !"));
372 return 0;
375 if (!BX_CPU_THIS_PTR cr4.get_PAE() || !pg || !pe || !BX_CPU_THIS_PTR efer.get_LME()) {
376 BX_PANIC(("SMM restore: If EFER.LMA = 1 <=> CR4.PAE, CR0.PG, CR0.PE, EFER.LME=1 !"));
377 return 0;
381 if (BX_CPU_THIS_PTR cr4.get_PAE() && pg && pe && BX_CPU_THIS_PTR efer.get_LME()) {
382 if (! BX_CPU_THIS_PTR efer.get_LMA()) {
383 BX_PANIC(("SMM restore: If EFER.LMA = 1 <=> CR4.PAE, CR0.PG, CR0.PE, EFER.LME=1 !"));
384 return 0;
388 // hack CR0 to be able to back to long mode correctly
389 BX_CPU_THIS_PTR cr0.set_PE(0); // real mode (bit 0)
390 BX_CPU_THIS_PTR cr0.set_PG(0); // paging disabled (bit 31)
391 SetCR0(temp_cr0);
392 setEFlags(temp_eflags);
394 bx_phy_address temp_cr3 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR3);
395 SetCR3(temp_cr3);
397 RAX = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RAX_HI32, SMRAM_OFFSET_RAX_LO32);
398 RBX = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RBX_HI32, SMRAM_OFFSET_RBX_LO32);
399 RCX = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RCX_HI32, SMRAM_OFFSET_RCX_LO32);
400 RDX = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RDX_HI32, SMRAM_OFFSET_RDX_LO32);
401 RSP = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RSP_HI32, SMRAM_OFFSET_RSP_LO32);
402 RBP = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RBP_HI32, SMRAM_OFFSET_RBP_LO32);
403 RSI = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RSI_HI32, SMRAM_OFFSET_RSI_LO32);
404 RDI = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RDI_HI32, SMRAM_OFFSET_RDI_LO32);
405 R8 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R8_HI32, SMRAM_OFFSET_R8_LO32);
406 R9 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R9_HI32, SMRAM_OFFSET_R9_LO32);
407 R10 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R10_HI32, SMRAM_OFFSET_R10_LO32);
408 R11 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R11_HI32, SMRAM_OFFSET_R11_LO32);
409 R12 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R12_HI32, SMRAM_OFFSET_R12_LO32);
410 R13 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R13_HI32, SMRAM_OFFSET_R13_LO32);
411 R14 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R14_HI32, SMRAM_OFFSET_R14_LO32);
412 R15 = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_R15_HI32, SMRAM_OFFSET_R15_LO32);
413 RIP = SMRAM_FIELD64(saved_state, SMRAM_OFFSET_RIP_HI32, SMRAM_OFFSET_RIP_LO32);
415 BX_CPU_THIS_PTR dr6 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR6);
416 BX_CPU_THIS_PTR dr7 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR7);
418 BX_CPU_THIS_PTR gdtr.base = SMRAM_FIELD64(saved_state, SMRAM_GDTR_BASE_HI32, SMRAM_GDTR_BASE_LO32);
419 BX_CPU_THIS_PTR gdtr.limit = SMRAM_FIELD(saved_state, SMRAM_GDTR_LIMIT);
420 BX_CPU_THIS_PTR idtr.base = SMRAM_FIELD64(saved_state, SMRAM_IDTR_BASE_HI32, SMRAM_IDTR_BASE_LO32);
421 BX_CPU_THIS_PTR idtr.limit = SMRAM_FIELD(saved_state, SMRAM_IDTR_LIMIT);
423 Bit16u ar_data = SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR_AR) >> 16;
424 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS],
425 (ar_data >> 8) & 1,
426 SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR_AR) & 0xffff,
427 SMRAM_FIELD64(saved_state, SMRAM_CS_BASE_HI32, SMRAM_CS_BASE_LO32),
428 SMRAM_FIELD(saved_state, SMRAM_CS_LIMIT), ar_data))
430 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment) {
431 BX_PANIC(("SMM restore: restored valid non segment CS !"));
432 return 0;
436 handleCpuModeChange();
438 ar_data = SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR_AR) >> 16;
439 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS],
440 (ar_data >> 8) & 1,
441 SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR_AR) & 0xffff,
442 SMRAM_FIELD64(saved_state, SMRAM_DS_BASE_HI32, SMRAM_DS_BASE_LO32),
443 SMRAM_FIELD(saved_state, SMRAM_DS_LIMIT), ar_data))
445 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment) {
446 BX_PANIC(("SMM restore: restored valid non segment DS !"));
447 return 0;
451 ar_data = SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR_AR) >> 16;
452 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
453 (ar_data >> 8) & 1,
454 SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR_AR) & 0xffff,
455 SMRAM_FIELD64(saved_state, SMRAM_SS_BASE_HI32, SMRAM_SS_BASE_LO32),
456 SMRAM_FIELD(saved_state, SMRAM_SS_LIMIT), ar_data))
458 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment) {
459 BX_PANIC(("SMM restore: restored valid non segment SS !"));
460 return 0;
464 ar_data = SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR_AR) >> 16;
465 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES],
466 (ar_data >> 8) & 1,
467 SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR_AR) & 0xffff,
468 SMRAM_FIELD64(saved_state, SMRAM_ES_BASE_HI32, SMRAM_ES_BASE_LO32),
469 SMRAM_FIELD(saved_state, SMRAM_ES_LIMIT), ar_data))
471 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.segment) {
472 BX_PANIC(("SMM restore: restored valid non segment ES !"));
473 return 0;
477 ar_data = SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR_AR) >> 16;
478 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS],
479 (ar_data >> 8) & 1,
480 SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR_AR) & 0xffff,
481 SMRAM_FIELD64(saved_state, SMRAM_FS_BASE_HI32, SMRAM_FS_BASE_LO32),
482 SMRAM_FIELD(saved_state, SMRAM_FS_LIMIT), ar_data))
484 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.segment) {
485 BX_PANIC(("SMM restore: restored valid non segment FS !"));
486 return 0;
490 ar_data = SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR_AR) >> 16;
491 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS],
492 (ar_data >> 8) & 1,
493 SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR_AR) & 0xffff,
494 SMRAM_FIELD64(saved_state, SMRAM_GS_BASE_HI32, SMRAM_GS_BASE_LO32),
495 SMRAM_FIELD(saved_state, SMRAM_GS_LIMIT), ar_data))
497 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.segment) {
498 BX_PANIC(("SMM restore: restored valid non segment GS !"));
499 return 0;
503 ar_data = SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR_AR) >> 16;
504 if (set_segment_ar_data(&BX_CPU_THIS_PTR ldtr,
505 (ar_data >> 8) & 1,
506 SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR_AR) & 0xffff,
507 SMRAM_FIELD64(saved_state, SMRAM_LDTR_BASE_HI32, SMRAM_LDTR_BASE_LO32),
508 SMRAM_FIELD(saved_state, SMRAM_LDTR_LIMIT), ar_data))
510 if (BX_CPU_THIS_PTR ldtr.cache.type != BX_SYS_SEGMENT_LDT) {
511 BX_PANIC(("SMM restore: LDTR is not LDT descriptor type !"));
512 return 0;
516 ar_data = SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR_AR) >> 16;
517 if (set_segment_ar_data(&BX_CPU_THIS_PTR tr,
518 (ar_data >> 8) & 1,
519 SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR_AR) & 0xffff,
520 SMRAM_FIELD64(saved_state, SMRAM_TR_BASE_HI32, SMRAM_TR_BASE_LO32),
521 SMRAM_FIELD(saved_state, SMRAM_TR_LIMIT), ar_data))
523 if (BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_AVAIL_286_TSS &&
524 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_BUSY_286_TSS &&
525 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_AVAIL_386_TSS &&
526 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_BUSY_386_TSS)
528 BX_PANIC(("SMM restore: TR is not TSS descriptor type !"));
529 return 0;
533 if (SMM_REVISION_ID & SMM_SMBASE_RELOCATION)
534 BX_CPU_THIS_PTR smbase = SMRAM_FIELD(saved_state, SMRAM_SMBASE_OFFSET);
536 return 1;
539 #else /* BX_SUPPORT_X86_64 == 0 */
541 void BX_CPU_C::smram_save_state(Bit32u *saved_state)
543 SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR0) = BX_CPU_THIS_PTR cr0.get32();
544 SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR3) = BX_CPU_THIS_PTR cr3;
545 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EFLAGS) = read_eflags();
546 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EIP) = EIP;
547 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EDI) = EDI;
548 SMRAM_FIELD(saved_state, SMRAM_OFFSET_ESI) = ESI;
549 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EBP) = EBP;
550 SMRAM_FIELD(saved_state, SMRAM_OFFSET_ESP) = ESP;
551 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EBX) = EBX;
552 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EDX) = EDX;
553 SMRAM_FIELD(saved_state, SMRAM_OFFSET_ECX) = ECX;
554 SMRAM_FIELD(saved_state, SMRAM_OFFSET_EAX) = EAX;
555 SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR6) = BX_CPU_THIS_PTR dr6;
556 SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR7) = BX_CPU_THIS_PTR dr7;
557 SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR) = BX_CPU_THIS_PTR tr.selector.value;
558 SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR) = BX_CPU_THIS_PTR ldtr.selector.value;
560 SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR) =
561 BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value;
562 SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR) =
563 BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value;
564 SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR) =
565 BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value;
566 SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR) =
567 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
568 SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR) =
569 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
570 SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR) =
571 BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value;
573 // --- SS selector --- //
574 bx_segment_reg_t *seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS]);
575 SMRAM_FIELD(saved_state, SMRAM_SS_BASE) = seg->cache.u.segment.base;
576 SMRAM_FIELD(saved_state, SMRAM_SS_LIMIT) = seg->cache.u.segment.limit_scaled;
577 Bit32u seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
578 SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
579 // --- CS selector --- //
580 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]);
581 SMRAM_FIELD(saved_state, SMRAM_CS_BASE) = seg->cache.u.segment.base;
582 SMRAM_FIELD(saved_state, SMRAM_CS_LIMIT) = seg->cache.u.segment.limit_scaled;
583 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
584 SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
585 // --- ES selector --- //
586 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES]);
587 SMRAM_FIELD(saved_state, SMRAM_ES_BASE) = seg->cache.u.segment.base;
588 SMRAM_FIELD(saved_state, SMRAM_ES_LIMIT) = seg->cache.u.segment.limit_scaled;
589 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
590 SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
591 // --- LDTR --- //
592 SMRAM_FIELD(saved_state, SMRAM_LDTR_BASE) = BX_CPU_THIS_PTR ldtr.cache.u.system.base;
593 SMRAM_FIELD(saved_state, SMRAM_LDTR_LIMIT) = BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled;
594 Bit32u ldtr_ar = get_segment_ar_data(&BX_CPU_THIS_PTR ldtr.cache) | (BX_CPU_THIS_PTR ldtr.cache.valid << 8);
595 SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR_AR) = BX_CPU_THIS_PTR ldtr.selector.value | (ldtr_ar << 16);
596 // --- GDTR --- //
597 SMRAM_FIELD(saved_state, SMRAM_GDTR_BASE) = BX_CPU_THIS_PTR gdtr.base;
598 SMRAM_FIELD(saved_state, SMRAM_GDTR_LIMIT) = BX_CPU_THIS_PTR gdtr.limit;
599 /* base+0x7f6c is reserved */
600 /* base+0x7f68 is reserved */
602 // --- Task Register --- //
603 SMRAM_FIELD(saved_state, SMRAM_TR_BASE) = BX_CPU_THIS_PTR tr.cache.u.system.base;
604 SMRAM_FIELD(saved_state, SMRAM_TR_LIMIT) = BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled;
605 Bit32u tr_ar = get_segment_ar_data(&BX_CPU_THIS_PTR tr.cache) | (BX_CPU_THIS_PTR tr.cache.valid << 8);
606 SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR_AR) = BX_CPU_THIS_PTR tr.selector.value | (tr_ar << 16);
608 // --- IDTR --- //
609 SMRAM_FIELD(saved_state, SMRAM_IDTR_BASE) = BX_CPU_THIS_PTR idtr.base;
610 SMRAM_FIELD(saved_state, SMRAM_IDTR_LIMIT) = BX_CPU_THIS_PTR idtr.limit;
611 /* base+0x7f50 is reserved */
612 // --- GS selector --- //
613 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS]);
614 SMRAM_FIELD(saved_state, SMRAM_GS_BASE) = seg->cache.u.segment.base;
615 SMRAM_FIELD(saved_state, SMRAM_GS_LIMIT) = seg->cache.u.segment.limit_scaled;
616 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
617 SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
618 // --- FS selector --- //
619 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]);
620 SMRAM_FIELD(saved_state, SMRAM_FS_BASE) = seg->cache.u.segment.base;
621 SMRAM_FIELD(saved_state, SMRAM_FS_LIMIT) = seg->cache.u.segment.limit_scaled;
622 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
623 SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
624 // --- DS selector --- //
625 seg = &(BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]);
626 SMRAM_FIELD(saved_state, SMRAM_DS_BASE) = seg->cache.u.segment.base;
627 SMRAM_FIELD(saved_state, SMRAM_DS_LIMIT) = seg->cache.u.segment.limit_scaled;
628 seg_ar = get_segment_ar_data(&seg->cache) | (seg->cache.valid << 8);
629 SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR_AR) = seg->selector.value | (seg_ar << 16);
631 /* base+0x7f28 to base+7f18 is reserved */
632 #if BX_CPU_LEVEL >= 4
633 SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR4) = BX_CPU_THIS_PTR cr4.get32();
634 #endif
636 /* base+0x7f02 is Auto HALT restart field (2 byte) */
637 /* base+0x7f00 is I/O restart field (2 byte) */
638 SMRAM_FIELD(saved_state, SMRAM_SMM_REVISION_ID) = SMM_REVISION_ID;
639 SMRAM_FIELD(saved_state, SMRAM_SMBASE_OFFSET) = BX_CPU_THIS_PTR smbase;
640 /* base+0x7ef4 to base+0x7e00 is reserved */
643 bx_bool BX_CPU_C::smram_restore_state(const Bit32u *saved_state)
645 Bit32u temp_cr0 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR0);
646 Bit32u temp_eflags = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EFLAGS);
647 Bit32u temp_cr3 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR3);
649 bx_bool pe = (temp_cr0 & 0x01);
650 bx_bool nw = (temp_cr0 >> 29) & 0x01;
651 bx_bool cd = (temp_cr0 >> 30) & 0x01;
652 bx_bool pg = (temp_cr0 >> 31) & 0x01;
654 // check conditions for entering to shutdown state
655 if (pg && !pe) {
656 BX_PANIC(("SMM restore: attempt to set CR0.PG with CR0.PE cleared !"));
657 return 0;
660 if (nw && !cd) {
661 BX_PANIC(("SMM restore: attempt to set CR0.NW with CR0.CD cleared !"));
662 return 0;
665 SetCR0(temp_cr0);
666 SetCR3(temp_cr3);
667 setEFlags(temp_eflags);
669 #if BX_CPU_LEVEL >= 4
670 if (! SetCR4(SMRAM_FIELD(saved_state, SMRAM_OFFSET_CR4))) {
671 BX_PANIC(("SMM restore: incorrect CR4 state !"));
672 return 0;
674 #endif
676 EIP = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EIP);
677 EDI = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EDI);
678 ESI = SMRAM_FIELD(saved_state, SMRAM_OFFSET_ESI);
679 EBP = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EBP);
680 ESP = SMRAM_FIELD(saved_state, SMRAM_OFFSET_ESP);
681 EBX = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EBX);
682 EDX = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EDX);
683 ECX = SMRAM_FIELD(saved_state, SMRAM_OFFSET_ECX);
684 EAX = SMRAM_FIELD(saved_state, SMRAM_OFFSET_EAX);
686 BX_CPU_THIS_PTR dr6 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR6);
687 BX_CPU_THIS_PTR dr7 = SMRAM_FIELD(saved_state, SMRAM_OFFSET_DR7);
689 BX_CPU_THIS_PTR gdtr.base = SMRAM_FIELD(saved_state, SMRAM_GDTR_BASE);
690 BX_CPU_THIS_PTR gdtr.limit = SMRAM_FIELD(saved_state, SMRAM_GDTR_LIMIT);
691 BX_CPU_THIS_PTR idtr.base = SMRAM_FIELD(saved_state, SMRAM_IDTR_BASE);
692 BX_CPU_THIS_PTR idtr.limit = SMRAM_FIELD(saved_state, SMRAM_IDTR_LIMIT);
694 Bit16u ar_data = SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR_AR) >> 16;
695 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS],
696 (ar_data >> 8) & 1,
697 SMRAM_FIELD(saved_state, SMRAM_CS_SELECTOR_AR) & 0xffff,
698 SMRAM_FIELD(saved_state, SMRAM_CS_BASE),
699 SMRAM_FIELD(saved_state, SMRAM_CS_LIMIT), ar_data))
701 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment) {
702 BX_PANIC(("SMM restore: restored valid non segment CS !"));
703 return 0;
707 handleCpuModeChange();
709 ar_data = SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR_AR) >> 16;
710 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS],
711 (ar_data >> 8) & 1,
712 SMRAM_FIELD(saved_state, SMRAM_DS_SELECTOR_AR) & 0xffff,
713 SMRAM_FIELD(saved_state, SMRAM_DS_BASE),
714 SMRAM_FIELD(saved_state, SMRAM_DS_LIMIT), ar_data))
716 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment) {
717 BX_PANIC(("SMM restore: restored valid non segment DS !"));
718 return 0;
722 ar_data = SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR_AR) >> 16;
723 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
724 (ar_data >> 8) & 1,
725 SMRAM_FIELD(saved_state, SMRAM_SS_SELECTOR_AR) & 0xffff,
726 SMRAM_FIELD(saved_state, SMRAM_SS_BASE),
727 SMRAM_FIELD(saved_state, SMRAM_SS_LIMIT), ar_data))
729 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment) {
730 BX_PANIC(("SMM restore: restored valid non segment SS !"));
731 return 0;
735 ar_data = SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR_AR) >> 16;
736 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES],
737 (ar_data >> 8) & 1,
738 SMRAM_FIELD(saved_state, SMRAM_ES_SELECTOR_AR) & 0xffff,
739 SMRAM_FIELD(saved_state, SMRAM_ES_BASE),
740 SMRAM_FIELD(saved_state, SMRAM_ES_LIMIT), ar_data))
742 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.segment) {
743 BX_PANIC(("SMM restore: restored valid non segment ES !"));
744 return 0;
748 ar_data = SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR_AR) >> 16;
749 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS],
750 (ar_data >> 8) & 1,
751 SMRAM_FIELD(saved_state, SMRAM_FS_SELECTOR_AR) & 0xffff,
752 SMRAM_FIELD(saved_state, SMRAM_FS_BASE),
753 SMRAM_FIELD(saved_state, SMRAM_FS_LIMIT), ar_data))
755 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.segment) {
756 BX_PANIC(("SMM restore: restored valid non segment FS !"));
757 return 0;
761 ar_data = SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR_AR) >> 16;
762 if (set_segment_ar_data(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS],
763 (ar_data >> 8) & 1,
764 SMRAM_FIELD(saved_state, SMRAM_GS_SELECTOR_AR) & 0xffff,
765 SMRAM_FIELD(saved_state, SMRAM_GS_BASE),
766 SMRAM_FIELD(saved_state, SMRAM_GS_LIMIT), ar_data))
768 if (! BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.segment) {
769 BX_PANIC(("SMM restore: restored valid non segment GS !"));
770 return 0;
774 ar_data = SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR_AR) >> 16;
775 if (set_segment_ar_data(&BX_CPU_THIS_PTR ldtr,
776 (ar_data >> 8) & 1,
777 SMRAM_FIELD(saved_state, SMRAM_LDTR_SELECTOR_AR) & 0xffff,
778 SMRAM_FIELD(saved_state, SMRAM_LDTR_BASE),
779 SMRAM_FIELD(saved_state, SMRAM_LDTR_LIMIT), ar_data))
781 if (BX_CPU_THIS_PTR ldtr.cache.type != BX_SYS_SEGMENT_LDT) {
782 BX_PANIC(("SMM restore: LDTR is not LDT descriptor type !"));
783 return 0;
787 ar_data = SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR_AR) >> 16;
788 if (set_segment_ar_data(&BX_CPU_THIS_PTR tr,
789 (ar_data >> 8) & 1,
790 SMRAM_FIELD(saved_state, SMRAM_TR_SELECTOR_AR) & 0xffff,
791 SMRAM_FIELD(saved_state, SMRAM_TR_BASE),
792 SMRAM_FIELD(saved_state, SMRAM_TR_LIMIT), ar_data))
794 if (BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_AVAIL_286_TSS &&
795 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_BUSY_286_TSS &&
796 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_AVAIL_386_TSS &&
797 BX_CPU_THIS_PTR tr.cache.type != BX_SYS_SEGMENT_BUSY_386_TSS)
799 BX_PANIC(("SMM restore: TR is not TSS descriptor type !"));
800 return 0;
804 if (SMM_REVISION_ID & SMM_SMBASE_RELOCATION) {
805 BX_CPU_THIS_PTR smbase = SMRAM_FIELD(saved_state, SMRAM_SMBASE_OFFSET);
806 #if BX_CPU_LEVEL < 6
807 if (BX_CPU_THIS_PTR smbase & 0x7fff) {
808 BX_PANIC(("SMM restore: SMBASE must be aligned to 32K !"));
809 return 0;
811 #endif
814 return 1;
817 #endif /* BX_SUPPORT_X86_64 */
819 #endif /* BX_CPU_LEVEL >= 3 */