/////////////////////////////////////////////////////////////////////////
// $Id: init.cc,v 1.186 2008/12/07 19:47:34 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001 MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code.
#define RIP EIP
#endif
BX_CPU_C::BX_CPU_C(unsigned id): bx_cpuid(id)
#if BX_SUPPORT_APIC
  ,local_apic (this)
#endif
{
  // in case of SMF, you cannot reference any member data
  // in the constructor because the only access to it is via
  // global variables which aren't initialized quite yet.
  put("CPU");
  settype (CPU0LOG);
}
#if BX_WITH_WX

#define IF_SEG_REG_GET(x) \
  if (!strcmp(param->get_name(), #x)) { \
    return BX_CPU(cpu)->sregs[BX_SEG_REG_##x].selector.value; \
  }
#define IF_SEG_REG_SET(reg, val) \
  if (!strcmp(param->get_name(), #reg)) { \
    BX_CPU(cpu)->load_seg_reg(&BX_CPU(cpu)->sregs[BX_SEG_REG_##reg],val); \
  }
#define IF_LAZY_EFLAG_GET(flag) \
  if (!strcmp(param->get_name(), #flag)) { \
    return BX_CPU(cpu)->get_##flag(); \
  }
#define IF_LAZY_EFLAG_SET(flag, val) \
  if (!strcmp(param->get_name(), #flag)) { \
    BX_CPU(cpu)->set_##flag(val); \
  }
#define IF_EFLAG_GET(flag) \
  if (!strcmp(param->get_name(), #flag)) { \
    return BX_CPU(cpu)->get_##flag(); \
  }
#define IF_EFLAG_SET(flag, val) \
  if (!strcmp(param->get_name(), #flag)) { \
    BX_CPU(cpu)->set_##flag(val); \
  }
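
// For illustration, IF_SEG_REG_GET(CS) inside cpu_param_handler() below
// expands (roughly) to:
//
//   if (!strcmp(param->get_name(), "CS")) {
//     return BX_CPU(cpu)->sregs[BX_SEG_REG_CS].selector.value;
//   }
//
// so the handler body is simply a chain of name comparisons, one per
// register or flag parameter.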

// implement get/set handler for parameters that need unusual set/get
static Bit64s cpu_param_handler(bx_param_c *param, int set, Bit64s val)
{
#if BX_SUPPORT_SMP
  int cpu = atoi(param->get_parent()->get_name());
#endif
  if (set) {
    if (!strcmp(param->get_name(), "LDTR")) {
      BX_CPU(cpu)->panic("setting LDTR not implemented");
    }
    if (!strcmp(param->get_name(), "TR")) {
      BX_CPU(cpu)->panic("setting TR not implemented");
    }
    IF_SEG_REG_SET(CS, val);
    IF_SEG_REG_SET(DS, val);
    IF_SEG_REG_SET(SS, val);
    IF_SEG_REG_SET(ES, val);
    IF_SEG_REG_SET(FS, val);
    IF_SEG_REG_SET(GS, val);
    IF_LAZY_EFLAG_SET(OF, val);
    IF_LAZY_EFLAG_SET(SF, val);
    IF_LAZY_EFLAG_SET(ZF, val);
    IF_LAZY_EFLAG_SET(AF, val);
    IF_LAZY_EFLAG_SET(PF, val);
    IF_LAZY_EFLAG_SET(CF, val);
    IF_EFLAG_SET(ID, val);
    IF_EFLAG_SET(VIP, val);
    IF_EFLAG_SET(VIF, val);
    IF_EFLAG_SET(AC, val);
    IF_EFLAG_SET(VM, val);
    IF_EFLAG_SET(RF, val);
    IF_EFLAG_SET(NT, val);
    IF_EFLAG_SET(IOPL, val);
    IF_EFLAG_SET(DF, val);
    IF_EFLAG_SET(IF, val);
    IF_EFLAG_SET(TF, val);
  } else {
    if (!strcmp(param->get_name(), "LDTR")) {
      return BX_CPU(cpu)->ldtr.selector.value;
    }
    if (!strcmp(param->get_name(), "TR")) {
      return BX_CPU(cpu)->tr.selector.value;
    }
    IF_SEG_REG_GET (CS);
    IF_SEG_REG_GET (DS);
    IF_SEG_REG_GET (SS);
    IF_SEG_REG_GET (ES);
    IF_SEG_REG_GET (FS);
    IF_SEG_REG_GET (GS);
    IF_LAZY_EFLAG_GET(OF);
    IF_LAZY_EFLAG_GET(SF);
    IF_LAZY_EFLAG_GET(ZF);
    IF_LAZY_EFLAG_GET(AF);
    IF_LAZY_EFLAG_GET(PF);
    IF_LAZY_EFLAG_GET(CF);
    IF_EFLAG_GET(ID);
    IF_EFLAG_GET(VIP);
    IF_EFLAG_GET(VIF);
    IF_EFLAG_GET(AC);
    IF_EFLAG_GET(VM);
    IF_EFLAG_GET(RF);
    IF_EFLAG_GET(NT);
    IF_EFLAG_GET(IOPL);
    IF_EFLAG_GET(DF);
    IF_EFLAG_GET(IF);
    IF_EFLAG_GET(TF);
  }
  return val;
}
#undef IF_SEG_REG_GET
#undef IF_SEG_REG_SET

#endif
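
// initialize() performs the per-CPU setup that cannot be done in the
// constructor (see the SMF note above): it deasserts the INTR line,
// assigns and initializes the local APIC when APIC support is compiled in,
// sets the per-CPU log prefix, and registers the wxWidgets shadow state.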
void BX_CPU_C::initialize(void)
{
  // BX_CPU_C constructor
  BX_CPU_THIS_PTR set_INTR (0);

#if BX_SUPPORT_APIC
  BX_CPU_THIS_PTR local_apic.set_id(BX_CPU_ID);
  BX_CPU_THIS_PTR local_apic.init();
#endif

  // in SMP mode, the prefix of the CPU will be changed to [CPUn] in
  // bx_local_apic_c::set_id as soon as the apic ID is assigned.
  sprintf(name, "CPU %d", BX_CPU_ID);

#if BX_WITH_WX
  register_wx_state();
#endif
}

#if BX_WITH_WX
void BX_CPU_C::register_wx_state(void)
{
  if (SIM->get_param(BXPN_WX_CPU_STATE) != NULL) {
    // Register some of the CPUs variables as shadow parameters so that
    // they can be visible in the config interface.
    // (Experimental, obviously not a complete list)
    bx_param_num_c *param;
    char cpu_name[10], cpu_title[10], cpu_pname[16];
    const char *fmt16 = "%04X";
    const char *fmt32 = "%08X";
    Bit32u oldbase = bx_param_num_c::set_default_base(16);
    const char *oldfmt = bx_param_num_c::set_default_format(fmt32);
    sprintf(cpu_name, "%d", BX_CPU_ID);
    sprintf(cpu_title, "CPU %d", BX_CPU_ID);
    sprintf(cpu_pname, "%s.%d", BXPN_WX_CPU_STATE, BX_CPU_ID);
    if (SIM->get_param(cpu_pname) == NULL) {
      bx_list_c *list = new bx_list_c(SIM->get_param(BXPN_WX_CPU_STATE),
          cpu_name, cpu_title, 60);

#define DEFPARAM_NORMAL(name,field) \
    new bx_shadow_num_c(list, #name, &(field))

      DEFPARAM_NORMAL(EAX, EAX);
      DEFPARAM_NORMAL(EBX, EBX);
      DEFPARAM_NORMAL(ECX, ECX);
      DEFPARAM_NORMAL(EDX, EDX);
      DEFPARAM_NORMAL(ESP, ESP);
      DEFPARAM_NORMAL(EBP, EBP);
      DEFPARAM_NORMAL(ESI, ESI);
      DEFPARAM_NORMAL(EDI, EDI);
      DEFPARAM_NORMAL(EIP, EIP);
      DEFPARAM_NORMAL(DR0, dr[0]);
      DEFPARAM_NORMAL(DR1, dr[1]);
      DEFPARAM_NORMAL(DR2, dr[2]);
      DEFPARAM_NORMAL(DR3, dr[3]);
      DEFPARAM_NORMAL(DR6, dr6);
      DEFPARAM_NORMAL(DR7, dr7);
      DEFPARAM_NORMAL(CR0, cr0.val32);
      DEFPARAM_NORMAL(CR1, cr1);
      DEFPARAM_NORMAL(CR2, cr2);
      DEFPARAM_NORMAL(CR3, cr3);
#if BX_CPU_LEVEL >= 4
      DEFPARAM_NORMAL(CR4, cr4.val32);
#endif

      // segment registers require a handler function because they have
      // special get/set requirements.
#define DEFPARAM_SEG_REG(x) \
      param = new bx_param_num_c(list, \
        #x, #x, "", 0, 0xffff, 0); \
      param->set_handler(cpu_param_handler); \
      param->set_format(fmt16);
#define DEFPARAM_GLOBAL_SEG_REG(name,field) \
      param = new bx_shadow_num_c(list, \
        #name"_base", &(field.base)); \
      param = new bx_shadow_num_c(list, \
        #name"_limit", &(field.limit));
      DEFPARAM_SEG_REG(CS);
      DEFPARAM_SEG_REG(DS);
      DEFPARAM_SEG_REG(SS);
      DEFPARAM_SEG_REG(ES);
      DEFPARAM_SEG_REG(FS);
      DEFPARAM_SEG_REG(GS);
      DEFPARAM_SEG_REG(LDTR);
      DEFPARAM_SEG_REG(TR);
      DEFPARAM_GLOBAL_SEG_REG(GDTR, BX_CPU_THIS_PTR gdtr);
      DEFPARAM_GLOBAL_SEG_REG(IDTR, BX_CPU_THIS_PTR idtr);
#undef DEFPARAM_NORMAL
#undef DEFPARAM_SEG_REG
#undef DEFPARAM_GLOBAL_SEG_REG

      param = new bx_shadow_num_c(list, "EFLAGS",
          &BX_CPU_THIS_PTR eflags);

      // flags implemented in lazy_flags.cc must be done with a handler
      // that calls their get function, to force them to be computed.
#define DEFPARAM_EFLAG(name) \
      param = new bx_param_bool_c(list, \
          #name, #name, "", get_##name()); \
      param->set_handler(cpu_param_handler);
#define DEFPARAM_LAZY_EFLAG(name) \
      param = new bx_param_bool_c(list, \
          #name, #name, "", get_##name()); \
      param->set_handler(cpu_param_handler);
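
      // Note: DEFPARAM_EFLAG and DEFPARAM_LAZY_EFLAG currently expand to
      // identical code; both create a boolean param routed through
      // cpu_param_handler(), which calls get_<flag>()/set_<flag>() and thus
      // forces the lazy flags to be evaluated on access.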

#if BX_CPU_LEVEL >= 4
      DEFPARAM_EFLAG(ID);
      DEFPARAM_EFLAG(VIP);
      DEFPARAM_EFLAG(VIF);
      DEFPARAM_EFLAG(AC);
#endif
#if BX_CPU_LEVEL >= 3
      DEFPARAM_EFLAG(VM);
      DEFPARAM_EFLAG(RF);
#endif
#if BX_CPU_LEVEL >= 2
      DEFPARAM_EFLAG(NT);
      // IOPL is a special case because it is 2 bits wide.
      param = new bx_shadow_num_c(
          list,
          "IOPL",
          &BX_CPU_THIS_PTR eflags, 10,
          12, 13);
      param->set_range(0, 3);
      param->set_format("%d");
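      // (IOPL occupies EFLAGS bits 13:12, so it is exposed as a 2-bit
      // shadow of the eflags word with range 0..3 rather than as a bool.)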
#endif
      DEFPARAM_LAZY_EFLAG(OF);
      DEFPARAM_EFLAG(DF);
      DEFPARAM_EFLAG(IF);
      DEFPARAM_EFLAG(TF);
      DEFPARAM_LAZY_EFLAG(SF);
      DEFPARAM_LAZY_EFLAG(ZF);
      DEFPARAM_LAZY_EFLAG(AF);
      DEFPARAM_LAZY_EFLAG(PF);
      DEFPARAM_LAZY_EFLAG(CF);

      // restore defaults
      bx_param_num_c::set_default_base(oldbase);
      bx_param_num_c::set_default_format(oldfmt);
    }
  }
}
#endif

// save/restore functionality
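// register_state() builds the parameter tree used for save/restore: each
// BXRS_* macro below attaches a named parameter under the per-CPU "cpuN"
// list, either shadowing a member field directly or going through
// param_save_handler()/param_restore_handler() for values that must be
// recomputed or re-validated (CPUID signature, EFLAGS, segment selectors
// and access-rights bytes).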
void BX_CPU_C::register_state(void)
{
  unsigned n;
  char name[10];

  sprintf(name, "cpu%d", BX_CPU_ID);

  bx_list_c *cpu = new bx_list_c(SIM->get_bochs_root(), name, name, 50 + BX_GENERAL_REGISTERS);

  BXRS_PARAM_SPECIAL32(cpu, cpu_version, param_save_handler, param_restore_handler);
  BXRS_PARAM_SPECIAL32(cpu, cpuid_std, param_save_handler, param_restore_handler);
  BXRS_PARAM_SPECIAL32(cpu, cpuid_ext, param_save_handler, param_restore_handler);
  BXRS_DEC_PARAM_SIMPLE(cpu, cpu_mode);
  BXRS_HEX_PARAM_SIMPLE(cpu, inhibit_mask);
  BXRS_HEX_PARAM_SIMPLE(cpu, debug_trap);
#if BX_SUPPORT_X86_64
  BXRS_HEX_PARAM_SIMPLE(cpu, RAX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RBX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RCX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RDX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RSP);
  BXRS_HEX_PARAM_SIMPLE(cpu, RBP);
  BXRS_HEX_PARAM_SIMPLE(cpu, RSI);
  BXRS_HEX_PARAM_SIMPLE(cpu, RDI);
  BXRS_HEX_PARAM_SIMPLE(cpu, R8);
  BXRS_HEX_PARAM_SIMPLE(cpu, R9);
  BXRS_HEX_PARAM_SIMPLE(cpu, R10);
  BXRS_HEX_PARAM_SIMPLE(cpu, R11);
  BXRS_HEX_PARAM_SIMPLE(cpu, R12);
  BXRS_HEX_PARAM_SIMPLE(cpu, R13);
  BXRS_HEX_PARAM_SIMPLE(cpu, R14);
  BXRS_HEX_PARAM_SIMPLE(cpu, R15);
  BXRS_HEX_PARAM_SIMPLE(cpu, RIP);
#else
  BXRS_HEX_PARAM_SIMPLE(cpu, EAX);
  BXRS_HEX_PARAM_SIMPLE(cpu, EBX);
  BXRS_HEX_PARAM_SIMPLE(cpu, ECX);
  BXRS_HEX_PARAM_SIMPLE(cpu, EDX);
  BXRS_HEX_PARAM_SIMPLE(cpu, ESP);
  BXRS_HEX_PARAM_SIMPLE(cpu, EBP);
  BXRS_HEX_PARAM_SIMPLE(cpu, ESI);
  BXRS_HEX_PARAM_SIMPLE(cpu, EDI);
  BXRS_HEX_PARAM_SIMPLE(cpu, EIP);
#endif
  BXRS_PARAM_SPECIAL32(cpu, EFLAGS,
         param_save_handler, param_restore_handler);
#if BX_CPU_LEVEL >= 3
  BXRS_HEX_PARAM_FIELD(cpu, DR0, dr[0]);
  BXRS_HEX_PARAM_FIELD(cpu, DR1, dr[1]);
  BXRS_HEX_PARAM_FIELD(cpu, DR2, dr[2]);
  BXRS_HEX_PARAM_FIELD(cpu, DR3, dr[3]);
  BXRS_HEX_PARAM_FIELD(cpu, DR6, dr6);
  BXRS_HEX_PARAM_FIELD(cpu, DR7, dr7);
#endif
  BXRS_HEX_PARAM_FIELD(cpu, CR0, cr0.val32);
  BXRS_HEX_PARAM_FIELD(cpu, CR2, cr2);
  BXRS_HEX_PARAM_FIELD(cpu, CR3, cr3);
#if BX_CPU_LEVEL >= 4
  BXRS_HEX_PARAM_FIELD(cpu, CR4, cr4.val32);
#endif
#if BX_SUPPORT_XSAVE
  BXRS_HEX_PARAM_FIELD(cpu, XCR0, xcr0.val32);
#endif

  for(n=0; n<6; n++) {
    bx_segment_reg_t *segment = &BX_CPU_THIS_PTR sregs[n];
    bx_list_c *sreg = new bx_list_c(cpu, strseg(segment), 9);
    BXRS_PARAM_SPECIAL16(sreg, selector,
           param_save_handler, param_restore_handler);
    BXRS_HEX_PARAM_FIELD(sreg, base, segment->cache.u.segment.base);
    BXRS_HEX_PARAM_FIELD(sreg, limit, segment->cache.u.segment.limit);
    BXRS_HEX_PARAM_FIELD(sreg, limit_scaled, segment->cache.u.segment.limit_scaled);
    BXRS_PARAM_SPECIAL8 (sreg, ar_byte,
           param_save_handler, param_restore_handler);
    BXRS_PARAM_BOOL(sreg, granularity, segment->cache.u.segment.g);
    BXRS_PARAM_BOOL(sreg, d_b, segment->cache.u.segment.d_b);
#if BX_SUPPORT_X86_64
    BXRS_PARAM_BOOL(sreg, l, segment->cache.u.segment.l);
#endif
    BXRS_PARAM_BOOL(sreg, avl, segment->cache.u.segment.avl);
  }

  bx_list_c *GDTR = new bx_list_c(cpu, "GDTR", 2);
  BXRS_HEX_PARAM_FIELD(GDTR, base, gdtr.base);
  BXRS_HEX_PARAM_FIELD(GDTR, limit, gdtr.limit);

  bx_list_c *IDTR = new bx_list_c(cpu, "IDTR", 2);
  BXRS_HEX_PARAM_FIELD(IDTR, base, idtr.base);
  BXRS_HEX_PARAM_FIELD(IDTR, limit, idtr.limit);

  bx_list_c *LDTR = new bx_list_c(cpu, "LDTR", 7);
  BXRS_PARAM_SPECIAL16(LDTR, selector, param_save_handler, param_restore_handler);
  BXRS_HEX_PARAM_FIELD(LDTR, base, ldtr.cache.u.system.base);
  BXRS_HEX_PARAM_FIELD(LDTR, limit, ldtr.cache.u.system.limit);
  BXRS_HEX_PARAM_FIELD(LDTR, limit_scaled, ldtr.cache.u.system.limit_scaled);
  BXRS_PARAM_SPECIAL8 (LDTR, ar_byte, param_save_handler, param_restore_handler);
  BXRS_PARAM_BOOL(LDTR, granularity, ldtr.cache.u.system.g);
  BXRS_PARAM_BOOL(LDTR, avl, ldtr.cache.u.system.avl);

  bx_list_c *TR = new bx_list_c(cpu, "TR", 7);
  BXRS_PARAM_SPECIAL16(TR, selector, param_save_handler, param_restore_handler);
  BXRS_HEX_PARAM_FIELD(TR, base, tr.cache.u.system.base);
  BXRS_HEX_PARAM_FIELD(TR, limit, tr.cache.u.system.limit);
  BXRS_HEX_PARAM_FIELD(TR, limit_scaled, tr.cache.u.system.limit_scaled);
  BXRS_PARAM_SPECIAL8 (TR, ar_byte, param_save_handler, param_restore_handler);
  BXRS_PARAM_BOOL(TR, granularity, tr.cache.u.system.g);
  BXRS_PARAM_BOOL(TR, avl, tr.cache.u.system.avl);

  BXRS_HEX_PARAM_SIMPLE(cpu, smbase);

#if BX_CPU_LEVEL >= 5
  bx_list_c *MSR = new bx_list_c(cpu, "MSR", 45);

#if BX_SUPPORT_APIC
  BXRS_HEX_PARAM_FIELD(MSR, apicbase, msr.apicbase);
#endif
#if BX_SUPPORT_X86_64
  BXRS_HEX_PARAM_FIELD(MSR, EFER, efer.val32);
  BXRS_HEX_PARAM_FIELD(MSR, star, msr.star);
  BXRS_HEX_PARAM_FIELD(MSR, lstar, msr.lstar);
  BXRS_HEX_PARAM_FIELD(MSR, cstar, msr.cstar);
  BXRS_HEX_PARAM_FIELD(MSR, fmask, msr.fmask);
  BXRS_HEX_PARAM_FIELD(MSR, kernelgsbase, msr.kernelgsbase);
  BXRS_HEX_PARAM_FIELD(MSR, tsc_aux, msr.tsc_aux);
#endif
  BXRS_HEX_PARAM_FIELD(MSR, tsc_last_reset, msr.tsc_last_reset);
#if BX_SUPPORT_SEP
  BXRS_HEX_PARAM_FIELD(MSR, sysenter_cs_msr, msr.sysenter_cs_msr);
  BXRS_HEX_PARAM_FIELD(MSR, sysenter_esp_msr, msr.sysenter_esp_msr);
  BXRS_HEX_PARAM_FIELD(MSR, sysenter_eip_msr, msr.sysenter_eip_msr);
#endif
#if BX_SUPPORT_MTRR
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase0, msr.mtrrphys[0]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask0, msr.mtrrphys[1]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase1, msr.mtrrphys[2]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask1, msr.mtrrphys[3]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase2, msr.mtrrphys[4]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask2, msr.mtrrphys[5]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase3, msr.mtrrphys[6]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask3, msr.mtrrphys[7]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase4, msr.mtrrphys[8]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask4, msr.mtrrphys[9]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase5, msr.mtrrphys[10]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask5, msr.mtrrphys[11]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase6, msr.mtrrphys[12]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask6, msr.mtrrphys[13]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase7, msr.mtrrphys[14]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask7, msr.mtrrphys[15]);

  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix64k_00000, msr.mtrrfix64k_00000);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix16k_80000, msr.mtrrfix16k_80000);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix16k_a0000, msr.mtrrfix16k_a0000);

  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_c0000, msr.mtrrfix4k[0]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_c8000, msr.mtrrfix4k[1]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_d0000, msr.mtrrfix4k[2]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_d8000, msr.mtrrfix4k[3]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_e0000, msr.mtrrfix4k[4]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_e8000, msr.mtrrfix4k[5]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_f0000, msr.mtrrfix4k[6]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_f8000, msr.mtrrfix4k[7]);

  BXRS_HEX_PARAM_FIELD(MSR, pat, msr.pat);
  BXRS_HEX_PARAM_FIELD(MSR, mtrr_deftype, msr.mtrr_deftype);
#endif
#endif

#if BX_SUPPORT_FPU || BX_SUPPORT_MMX
  bx_list_c *fpu = new bx_list_c(cpu, "FPU", 17);
  BXRS_HEX_PARAM_FIELD(fpu, cwd, the_i387.cwd);
  BXRS_HEX_PARAM_FIELD(fpu, swd, the_i387.swd);
  BXRS_HEX_PARAM_FIELD(fpu, twd, the_i387.twd);
  BXRS_HEX_PARAM_FIELD(fpu, foo, the_i387.foo);
  BXRS_HEX_PARAM_FIELD(fpu, fcs, the_i387.fcs);
  BXRS_HEX_PARAM_FIELD(fpu, fip, the_i387.fip);
  BXRS_HEX_PARAM_FIELD(fpu, fds, the_i387.fds);
  BXRS_HEX_PARAM_FIELD(fpu, fdp, the_i387.fdp);
  for (n=0; n<8; n++) {
    sprintf(name, "st%d", n);
    bx_list_c *STx = new bx_list_c(fpu, name, 8);
    BXRS_HEX_PARAM_FIELD(STx, exp, the_i387.st_space[n].exp);
    BXRS_HEX_PARAM_FIELD(STx, fraction, the_i387.st_space[n].fraction);
  }
  BXRS_DEC_PARAM_FIELD(fpu, tos, the_i387.tos);
#endif

#if BX_SUPPORT_SSE
  bx_list_c *sse = new bx_list_c(cpu, "SSE", 2*BX_XMM_REGISTERS+1);
  BXRS_HEX_PARAM_FIELD(sse, mxcsr, mxcsr.mxcsr);
  for (n=0; n<BX_XMM_REGISTERS; n++) {
    sprintf(name, "xmm%02d_hi", n);
    new bx_shadow_num_c(sse, name, &xmm[n].xmm64u(1), BASE_HEX);
    sprintf(name, "xmm%02d_lo", n);
    new bx_shadow_num_c(sse, name, &xmm[n].xmm64u(0), BASE_HEX);
  }
#endif

#if BX_SUPPORT_MONITOR_MWAIT
  bx_list_c *monitor_list = new bx_list_c(cpu, "MONITOR", 2);
  BXRS_HEX_PARAM_FIELD(monitor_list, begin_addr, monitor.monitor_begin);
  BXRS_HEX_PARAM_FIELD(monitor_list, end_addr, monitor.monitor_end);
#endif

#if BX_SUPPORT_APIC
  local_apic.register_state(cpu);
#endif

  BXRS_HEX_PARAM_SIMPLE32(cpu, async_event);

  BXRS_PARAM_BOOL(cpu, EXT, EXT);
  BXRS_PARAM_BOOL(cpu, INTR, INTR);
  BXRS_PARAM_BOOL(cpu, in_smm, in_smm);
  BXRS_PARAM_BOOL(cpu, disable_SMI, disable_SMI);
  BXRS_PARAM_BOOL(cpu, pending_SMI, pending_SMI);
  BXRS_PARAM_BOOL(cpu, disable_NMI, disable_NMI);
  BXRS_PARAM_BOOL(cpu, pending_NMI, pending_NMI);
  BXRS_PARAM_BOOL(cpu, disable_INIT, disable_INIT);
  BXRS_PARAM_BOOL(cpu, pending_INIT, pending_INIT);
  BXRS_PARAM_BOOL(cpu, trace, trace);
}
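
// param_save_handler()/param_restore_handler() are the callbacks installed
// above. Without BX_USE_CPU_SMF they act as trampolines: the devptr is cast
// back to the BX_CPU_C instance and the call is forwarded to param_save()/
// param_restore(). With BX_USE_CPU_SMF the handler body itself follows and
// devptr is unused.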
Bit64s BX_CPU_C::param_save_handler(void *devptr, bx_param_c *param, Bit64s val)
{
#if !BX_USE_CPU_SMF
  BX_CPU_C *class_ptr = (BX_CPU_C *) devptr;
  return class_ptr->param_save(param, val);
}

Bit64s BX_CPU_C::param_save(bx_param_c *param, Bit64s val)
{
#else
  UNUSED(devptr);
#endif // !BX_USE_CPU_SMF
  const char *pname, *segname;
  bx_segment_reg_t *segment = NULL;

  pname = param->get_name();
  if (!strcmp(pname, "cpu_version")) {
    val = get_cpu_version_information();
  } else if (!strcmp(pname, "cpuid_std")) {
    val = get_std_cpuid_features();
  } else if (!strcmp(pname, "cpuid_ext")) {
    val = get_extended_cpuid_features();
  } else if (!strcmp(pname, "EFLAGS")) {
    val = BX_CPU_THIS_PTR read_eflags();
  } else if (!strcmp(pname, "ar_byte") || !strcmp(pname, "selector")) {
    segname = param->get_parent()->get_name();
    if (!strcmp(segname, "CS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
    } else if (!strcmp(segname, "DS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
    } else if (!strcmp(segname, "SS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
    } else if (!strcmp(segname, "ES")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
    } else if (!strcmp(segname, "FS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS];
    } else if (!strcmp(segname, "GS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS];
    } else if (!strcmp(segname, "LDTR")) {
      segment = &BX_CPU_THIS_PTR ldtr;
    } else if (!strcmp(segname, "TR")) {
      segment = &BX_CPU_THIS_PTR tr;
    }
    if (segment != NULL) {
      if (!strcmp(pname, "ar_byte")) {
        val = ar_byte(&(segment->cache));
      }
      else if (!strcmp(pname, "selector")) {
        val = segment->selector.value;
      }
    }
  }
  else {
    BX_PANIC(("Unknown param %s in param_save handler !", pname));
  }
  return val;
}

Bit64s BX_CPU_C::param_restore_handler(void *devptr, bx_param_c *param, Bit64s val)
{
#if !BX_USE_CPU_SMF
  BX_CPU_C *class_ptr = (BX_CPU_C *) devptr;
  return class_ptr->param_restore(param, val);
}

Bit64s BX_CPU_C::param_restore(bx_param_c *param, Bit64s val)
{
#else
  UNUSED(devptr);
#endif // !BX_USE_CPU_SMF
  const char *pname, *segname;
  bx_segment_reg_t *segment = NULL;

  pname = param->get_name();
  if (!strcmp(pname, "cpu_version")) {
    if (val != get_cpu_version_information()) {
      BX_PANIC(("save/restore: CPU version mismatch"));
    }
  } else if (!strcmp(pname, "cpuid_std")) {
    if (val != get_std_cpuid_features()) {
      BX_PANIC(("save/restore: CPUID mismatch"));
    }
  } else if (!strcmp(pname, "cpuid_ext")) {
    if (val != get_extended_cpuid_features()) {
      BX_PANIC(("save/restore: CPUID mismatch"));
    }
  } else if (!strcmp(pname, "EFLAGS")) {
    BX_CPU_THIS_PTR setEFlags((Bit32u)val);
  } else if (!strcmp(pname, "ar_byte") || !strcmp(pname, "selector")) {
    segname = param->get_parent()->get_name();
    if (!strcmp(segname, "CS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
    } else if (!strcmp(segname, "DS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
    } else if (!strcmp(segname, "SS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
    } else if (!strcmp(segname, "ES")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
    } else if (!strcmp(segname, "FS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS];
    } else if (!strcmp(segname, "GS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS];
    } else if (!strcmp(segname, "LDTR")) {
      segment = &BX_CPU_THIS_PTR ldtr;
    } else if (!strcmp(segname, "TR")) {
      segment = &BX_CPU_THIS_PTR tr;
    }
    if (segment != NULL) {
      bx_descriptor_t *d = &(segment->cache);
      bx_selector_t *selector = &(segment->selector);
      if (!strcmp(pname, "ar_byte")) {
        set_ar_byte(d, (Bit8u)val);
      }
      else if (!strcmp(pname, "selector")) {
        parse_selector((Bit16u)val, selector);
        // validate the selector
        if ((selector->value & 0xfffc) != 0) d->valid = 1;
        else d->valid = 0;
      }
    }
  }
  else {
    BX_PANIC(("Unknown param %s in param_restore handler !", pname));
  }
  return val;
}
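
// after_restore_state() re-derives state that is not stored explicitly in
// the save file: CPL for v8086 mode, the shadows derived from CR0/CR3, the
// TLB and prefetch queue, and finally runs assert_checks() on the result.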
void BX_CPU_C::after_restore_state(void)
{
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_IA32_V8086) CPL = 3;

  SetCR0(cr0.val32);
  SetCR3(cr3);
  TLB_flush();
  assert_checks();
  invalidate_prefetch_q();
  debug(RIP);
}
// end of save/restore functionality

BX_CPU_C::~BX_CPU_C()
{
  BX_INSTR_EXIT(BX_CPU_ID);
  BX_DEBUG(("Exit."));
}
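
// reset() puts the CPU into its after-RESET/after-INIT state. The source
// argument distinguishes a hardware reset (which also reinitializes SMBASE,
// the TSC, the MTRRs and the FPU/XMM state) from a software reset or INIT,
// which leaves those blocks untouched, as noted inline below.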
void BX_CPU_C::reset(unsigned source)
{
  unsigned n;

  if (source == BX_RESET_HARDWARE)
    BX_INFO(("cpu hardware reset"));
  else if (source == BX_RESET_SOFTWARE)
    BX_INFO(("cpu software reset"));
  else
    BX_INFO(("cpu reset"));

#if BX_SUPPORT_X86_64
  RAX = 0; // processor passed test :-)
  RBX = 0;
  RCX = 0;
  RDX = get_cpu_version_information();
  RBP = 0;
  RSI = 0;
  RDI = 0;
  RSP = 0;
  R8  = 0;
  R9  = 0;
  R10 = 0;
  R11 = 0;
  R12 = 0;
  R13 = 0;
  R14 = 0;
  R15 = 0;
#else
  // general registers
  EAX = 0; // processor passed test :-)
  EBX = 0;
  ECX = 0;
  EDX = get_cpu_version_information();
  EBP = 0;
  ESI = 0;
  EDI = 0;
  ESP = 0;
#endif

  // initialize NIL register
  BX_WRITE_32BIT_REGZ(BX_NIL_REGISTER, 0);

  // status and control flags register set
  BX_CPU_THIS_PTR setEFlags(0x2); // Bit1 is always set

  BX_CPU_THIS_PTR inhibit_mask = 0;
  BX_CPU_THIS_PTR debug_trap = 0;

  /* instruction pointer */
#if BX_CPU_LEVEL < 2
  BX_CPU_THIS_PTR prev_rip = EIP = 0x00000000;
#else /* from 286 up */
  BX_CPU_THIS_PTR prev_rip = RIP = 0x0000FFF0;
#endif

  /* CS (Code Segment) and descriptor cache */
  /* Note: on a real cpu, CS initially points to upper memory. After
   * the 1st jump, the descriptor base is zero'd out. Since I'm just
   * going to jump to my BIOS, I don't need to do this.
   * For future reference:
   *   processor  cs.selector   cs.base    cs.limit    EIP
   *        8086    FFFF          FFFF0       FFFF     0000
   *         286    F000         FF0000       FFFF     FFF0
   *        386+    F000       FFFF0000       FFFF     FFF0
   */
  parse_selector(0xf000,
          &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0xFFFF0000;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFF;

#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 0; /* byte granular */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0; /* 16bit default size */
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0; /* 16bit default size */
#endif
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0;
#endif

  updateFetchModeMask();
#if BX_SUPPORT_ICACHE
  flushICaches();
#endif

  /* DS (Data Segment) and descriptor cache */
  parse_selector(0x0000,
          &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.p = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.dpl = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.type = BX_DATA_READ_WRITE_ACCESSED;

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.base = 0x00000000;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit = 0xFFFF;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit_scaled = 0xFFFF;
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.avl = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.g = 0; /* byte granular */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.d_b = 0; /* 16bit default size */
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.l = 0; /* 16bit default size */
#endif
#endif

  // use DS segment as template for the others
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
#endif

  /* GDTR (Global Descriptor Table Register) */
  BX_CPU_THIS_PTR gdtr.base = 0x00000000;
  BX_CPU_THIS_PTR gdtr.limit = 0xFFFF;

  /* IDTR (Interrupt Descriptor Table Register) */
  BX_CPU_THIS_PTR idtr.base = 0x00000000;
  BX_CPU_THIS_PTR idtr.limit = 0xFFFF; /* always byte granular */

  /* LDTR (Local Descriptor Table Register) */
  BX_CPU_THIS_PTR ldtr.selector.value = 0x0000;
  BX_CPU_THIS_PTR ldtr.selector.index = 0x0000;
  BX_CPU_THIS_PTR ldtr.selector.ti = 0;
  BX_CPU_THIS_PTR ldtr.selector.rpl = 0;

  BX_CPU_THIS_PTR ldtr.cache.valid = 1; /* valid */
  BX_CPU_THIS_PTR ldtr.cache.p = 1; /* present */
  BX_CPU_THIS_PTR ldtr.cache.dpl = 0; /* field not used */
  BX_CPU_THIS_PTR ldtr.cache.segment = 0; /* system segment */
  BX_CPU_THIS_PTR ldtr.cache.type = BX_SYS_SEGMENT_LDT;
  BX_CPU_THIS_PTR ldtr.cache.u.system.base = 0x00000000;
  BX_CPU_THIS_PTR ldtr.cache.u.system.limit = 0xFFFF;
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled = 0xFFFF;
  BX_CPU_THIS_PTR ldtr.cache.u.system.avl = 0;
  BX_CPU_THIS_PTR ldtr.cache.u.system.g = 0; /* byte granular */
#endif

  /* TR (Task Register) */
  BX_CPU_THIS_PTR tr.selector.value = 0x0000;
  BX_CPU_THIS_PTR tr.selector.index = 0x0000; /* undefined */
  BX_CPU_THIS_PTR tr.selector.ti = 0;
  BX_CPU_THIS_PTR tr.selector.rpl = 0;

  BX_CPU_THIS_PTR tr.cache.valid = 1; /* valid */
  BX_CPU_THIS_PTR tr.cache.p = 1; /* present */
  BX_CPU_THIS_PTR tr.cache.dpl = 0; /* field not used */
  BX_CPU_THIS_PTR tr.cache.segment = 0; /* system segment */
  BX_CPU_THIS_PTR tr.cache.type = BX_SYS_SEGMENT_BUSY_386_TSS;
  BX_CPU_THIS_PTR tr.cache.u.system.base = 0x00000000;
  BX_CPU_THIS_PTR tr.cache.u.system.limit = 0xFFFF;
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled = 0xFFFF;
  BX_CPU_THIS_PTR tr.cache.u.system.avl = 0;
  BX_CPU_THIS_PTR tr.cache.u.system.g = 0; /* byte granular */
#endif

  // DR0 - DR7 (Debug Registers)
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR dr[0] = 0; /* undefined */
  BX_CPU_THIS_PTR dr[1] = 0; /* undefined */
  BX_CPU_THIS_PTR dr[2] = 0; /* undefined */
  BX_CPU_THIS_PTR dr[3] = 0; /* undefined */
#endif

  BX_CPU_THIS_PTR dr7 = 0x00000400;
#if   BX_CPU_LEVEL == 3
  BX_CPU_THIS_PTR dr6 = 0xFFFF1FF0;
#elif BX_CPU_LEVEL == 4
  BX_CPU_THIS_PTR dr6 = 0xFFFF1FF0;
#elif BX_CPU_LEVEL == 5
  BX_CPU_THIS_PTR dr6 = 0xFFFF0FF0;
#elif BX_CPU_LEVEL == 6
  BX_CPU_THIS_PTR dr6 = 0xFFFF0FF0;
#else
#  error "DR6: CPU > 6"
#endif

  BX_CPU_THIS_PTR in_smm = 0;
  BX_CPU_THIS_PTR disable_SMI = 0;
  BX_CPU_THIS_PTR pending_SMI = 0;
  BX_CPU_THIS_PTR disable_NMI = 0;
  BX_CPU_THIS_PTR pending_NMI = 0;
  BX_CPU_THIS_PTR disable_INIT = 0;
  BX_CPU_THIS_PTR pending_INIT = 0;
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  BX_CPU_THIS_PTR alignment_check_mask = 0;
#endif

  if (source == BX_RESET_HARDWARE) {
    BX_CPU_THIS_PTR smbase = 0x30000; // do not change SMBASE on INIT
  }

  BX_CPU_THIS_PTR cr0.set32(0x60000010);
  // handle reserved bits
#if BX_CPU_LEVEL == 3
  // reserved bits all set to 1 on 386
  BX_CPU_THIS_PTR cr0.val32 |= 0x7ffffff0;
#endif

#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR cr1 = 0;
  BX_CPU_THIS_PTR cr2 = 0;
  BX_CPU_THIS_PTR cr3 = 0;
  BX_CPU_THIS_PTR cr3_masked = 0;
#endif

#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR cr4.set32(0);
#endif

#if BX_SUPPORT_XSAVE
  BX_CPU_THIS_PTR xcr0.set32(0x1);
#endif

  /* initialise MSR registers to defaults */
#if BX_CPU_LEVEL >= 5
#if BX_SUPPORT_APIC
  /* APIC Address, APIC enabled and BSP is default, we'll fill in the rest later */
  BX_CPU_THIS_PTR msr.apicbase = BX_LAPIC_BASE_ADDR;
  BX_CPU_THIS_PTR local_apic.init();
  BX_CPU_THIS_PTR msr.apicbase |= 0x900;
#endif
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR efer.set32(0);

  BX_CPU_THIS_PTR msr.star = 0;
  BX_CPU_THIS_PTR msr.lstar = 0;
  BX_CPU_THIS_PTR msr.cstar = 0;
  BX_CPU_THIS_PTR msr.fmask = 0x00020200;
  BX_CPU_THIS_PTR msr.kernelgsbase = 0;
  BX_CPU_THIS_PTR msr.tsc_aux = 0;
#endif
  if (source == BX_RESET_HARDWARE) {
    BX_CPU_THIS_PTR set_TSC(0); // do not change TSC on INIT
  }
#endif

#if BX_SUPPORT_SEP
  BX_CPU_THIS_PTR msr.sysenter_cs_msr = 0;
  BX_CPU_THIS_PTR msr.sysenter_esp_msr = 0;
  BX_CPU_THIS_PTR msr.sysenter_eip_msr = 0;
#endif

  // Do not change MTRR on INIT
#if BX_SUPPORT_MTRR
  if (source == BX_RESET_HARDWARE) {
    for (n=0; n<16; n++)
      BX_CPU_THIS_PTR msr.mtrrphys[n] = 0;

    BX_CPU_THIS_PTR msr.mtrrfix64k_00000 = 0; // all fix range MTRRs undefined according to manual
    BX_CPU_THIS_PTR msr.mtrrfix16k_80000 = 0;
    BX_CPU_THIS_PTR msr.mtrrfix16k_a0000 = 0;

    for (n=0; n<8; n++)
      BX_CPU_THIS_PTR msr.mtrrfix4k[n] = 0;

    BX_CPU_THIS_PTR msr.pat = BX_CONST64(0x0007040600070406);
    BX_CPU_THIS_PTR msr.mtrr_deftype = 0;
  }
#endif

  BX_CPU_THIS_PTR EXT = 0;

  TLB_init();

  // invalidate the prefetch queue
  BX_CPU_THIS_PTR eipPageBias = 0;
  BX_CPU_THIS_PTR eipPageWindowSize = 0;
  BX_CPU_THIS_PTR eipFetchPtr = NULL;

  handleCpuModeChange();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR trace_reg = 0;
  BX_CPU_THIS_PTR trace_mem = 0;
#endif

  BX_CPU_THIS_PTR trace = 0;

  // Reset the Floating Point Unit
#if BX_SUPPORT_FPU
  if (source == BX_RESET_HARDWARE) {
    BX_CPU_THIS_PTR the_i387.reset();
  }
#endif

  // Reset XMM state
#if BX_SUPPORT_SSE >= 1  // unchanged on #INIT
  if (source == BX_RESET_HARDWARE) {
    for(n=0; n<BX_XMM_REGISTERS; n++)
    {
      BX_CPU_THIS_PTR xmm[n].xmm64u(0) = 0;
      BX_CPU_THIS_PTR xmm[n].xmm64u(1) = 0;
    }

    BX_CPU_THIS_PTR mxcsr.mxcsr = MXCSR_RESET;
  }
#endif

#if BX_SUPPORT_SMP
  // notice if I'm the bootstrap processor. If not, do the equivalent of
  // a HALT instruction.
  int apic_id = local_apic.get_id();
  if (BX_BOOTSTRAP_PROCESSOR == apic_id) {
    // boot normally
    BX_CPU_THIS_PTR msr.apicbase |= 0x0100; /* set bit 8 BSP */
    BX_INFO(("CPU[%d] is the bootstrap processor", apic_id));
  } else {
    // it's an application processor, halt until IPI is heard.
    BX_CPU_THIS_PTR msr.apicbase &= ~0x0100; /* clear bit 8 BSP */
    BX_INFO(("CPU[%d] is an application processor. Halting until IPI.", apic_id));
    debug_trap |= BX_DEBUG_TRAP_WAIT_FOR_SIPI;
    async_event = 1;
  }
#endif

  // initialize CPUID values - make sure apicbase already initialized
  set_cpuid_defaults();

  BX_INSTR_RESET(BX_CPU_ID, source);
}
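
// sanity_checks() is a startup self-test: it loads known bit patterns into
// the general registers and verifies that the 8-bit and 16-bit register
// accessor macros and the fixed-width integer typedefs behave as expected.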
void BX_CPU_C::sanity_checks(void)
{
  Bit8u al, cl, dl, bl, ah, ch, dh, bh;
  Bit16u ax, cx, dx, bx, sp, bp, si, di;
  Bit32u eax, ecx, edx, ebx, esp, ebp, esi, edi;

  EAX = 0xFFEEDDCC;
  ECX = 0xBBAA9988;
  EDX = 0x77665544;
  EBX = 0x332211FF;
  ESP = 0xEEDDCCBB;
  EBP = 0xAA998877;
  ESI = 0x66554433;
  EDI = 0x2211FFEE;

  al = AL;
  cl = CL;
  dl = DL;
  bl = BL;
  ah = AH;
  ch = CH;
  dh = DH;
  bh = BH;

  if ( al != (EAX & 0xFF) ||
       cl != (ECX & 0xFF) ||
       dl != (EDX & 0xFF) ||
       bl != (EBX & 0xFF) ||
       ah != ((EAX >> 8) & 0xFF) ||
       ch != ((ECX >> 8) & 0xFF) ||
       dh != ((EDX >> 8) & 0xFF) ||
       bh != ((EBX >> 8) & 0xFF) )
  {
    BX_PANIC(("problems using BX_READ_8BIT_REGx()!"));
  }

  ax = AX;
  cx = CX;
  dx = DX;
  bx = BX;
  sp = SP;
  bp = BP;
  si = SI;
  di = DI;

  if ( ax != (EAX & 0xFFFF) ||
       cx != (ECX & 0xFFFF) ||
       dx != (EDX & 0xFFFF) ||
       bx != (EBX & 0xFFFF) ||
       sp != (ESP & 0xFFFF) ||
       bp != (EBP & 0xFFFF) ||
       si != (ESI & 0xFFFF) ||
       di != (EDI & 0xFFFF) )
  {
    BX_PANIC(("problems using BX_READ_16BIT_REG()!"));
  }

  eax = EAX;
  ecx = ECX;
  edx = EDX;
  ebx = EBX;
  esp = ESP;
  ebp = EBP;
  esi = ESI;
  edi = EDI;

  if (sizeof(Bit8u) != 1 || sizeof(Bit8s) != 1)
    BX_PANIC(("data type Bit8u or Bit8s is not of length 1 byte!"));
  if (sizeof(Bit16u) != 2 || sizeof(Bit16s) != 2)
    BX_PANIC(("data type Bit16u or Bit16s is not of length 2 bytes!"));
  if (sizeof(Bit32u) != 4 || sizeof(Bit32s) != 4)
    BX_PANIC(("data type Bit32u or Bit32s is not of length 4 bytes!"));
  if (sizeof(Bit64u) != 8 || sizeof(Bit64s) != 8)
    BX_PANIC(("data type Bit64u or Bit64s is not of length 8 bytes!"));

  BX_DEBUG(("#(%u)all sanity checks passed!", BX_CPU_ID));
}
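
// assert_checks() verifies internal consistency invariants (cpu_mode versus
// CR0/EFER, CR0 bit combinations, LDTR/TR descriptor cache types and the
// MONITOR address range) and panics if the emulated state is inconsistent.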
void BX_CPU_C::assert_checks(void)
{
  // check CPU mode consistency
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR efer.get_LMA()) {
    if (! BX_CPU_THIS_PTR cr0.get_PE()) {
      BX_PANIC(("assert_checks: EFER.LMA is set when CR0.PE=0 !"));
    }
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
      if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64)
        BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_LONG_64 !"));
    }
    else {
      if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_COMPAT)
        BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_LONG_COMPAT !"));
    }
  }
  else
#endif
  {
    if (BX_CPU_THIS_PTR cr0.get_PE()) {
      if (BX_CPU_THIS_PTR get_VM()) {
        if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_IA32_V8086)
          BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_IA32_V8086 !"));
      }
      else {
        if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_IA32_PROTECTED)
          BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_IA32_PROTECTED !"));
      }
    }
    else {
      if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_IA32_REAL)
        BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_IA32_REAL !"));
    }
  }

  // check CR0 consistency
  if (BX_CPU_THIS_PTR cr0.get_PG() && ! BX_CPU_THIS_PTR cr0.get_PE())
    BX_PANIC(("assert_checks: CR0.PG=1 with CR0.PE=0 !"));
#if BX_CPU_LEVEL >= 4
  if (BX_CPU_THIS_PTR cr0.get_NW() && ! BX_CPU_THIS_PTR cr0.get_CD())
    BX_PANIC(("assert_checks: CR0.NW=1 with CR0.CD=0 !"));
#endif

#if BX_SUPPORT_X86_64
  // VM should be OFF in long mode
  if (long_mode()) {
    if (BX_CPU_THIS_PTR get_VM()) BX_PANIC(("assert_checks: VM is set in long mode !"));
  }

  // CS.L and CS.D_B are mutually exclusive
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l &&
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
  {
    BX_PANIC(("assert_checks: CS.l and CS.d_b set together !"));
  }
#endif

  // check LDTR type
  if (BX_CPU_THIS_PTR ldtr.cache.valid)
  {
    if (BX_CPU_THIS_PTR ldtr.cache.type != BX_SYS_SEGMENT_LDT)
    {
      BX_PANIC(("assert_checks: LDTR is not LDT type !"));
    }
  }

  // check Task Register type
  if(BX_CPU_THIS_PTR tr.cache.valid)
  {
    switch(BX_CPU_THIS_PTR tr.cache.type)
    {
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
#if BX_CPU_LEVEL >= 3
        if (BX_CPU_THIS_PTR tr.cache.u.system.g != 0)
          BX_PANIC(("assert_checks: tss286.g != 0 !"));
        if (BX_CPU_THIS_PTR tr.cache.u.system.avl != 0)
          BX_PANIC(("assert_checks: tss286.avl != 0 !"));
#endif
        break;
      case BX_SYS_SEGMENT_BUSY_386_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
        break;
      default:
        BX_PANIC(("assert_checks: TR is not TSS type !"));
    }
  }

#if BX_SUPPORT_MONITOR_MWAIT
  if (BX_CPU_THIS_PTR monitor.monitor_end < BX_CPU_THIS_PTR monitor.monitor_begin)
    BX_PANIC(("assert_checks: MONITOR range is not set correctly !"));
#endif
}