1 /////////////////////////////////////////////////////////////////////////
2 // $Id: segment_ctrl_pro.cc,v 1.101 2008/09/11 21:54:57 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (C) 2001 MandrakeSoft S.A.
6 //
7 // MandrakeSoft S.A.
8 // 43, rue d'Aboukir
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 /////////////////////////////////////////////////////////////////////////
28 #define NEED_CPU_REG_SHORTCUTS 1
29 #include "bochs.h"
30 #include "cpu.h"
31 #define LOG_THIS BX_CPU_THIS_PTR
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value)
{
  if (protected_mode())
  {
    if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS])
    {
      bx_selector_t ss_selector;
      bx_descriptor_t descriptor;
      Bit32u dword1, dword2;

      parse_selector(new_value, &ss_selector);

      if ((new_value & 0xfffc) == 0) { /* null selector */
#if BX_SUPPORT_X86_64
        // allow SS = 0 in 64 bit mode only with cpl != 3 and rpl = cpl
        if (Is64BitMode() && CPL != 3 && ss_selector.rpl == CPL) {
          load_null_selector(seg);
          return;
        }
#endif
        BX_ERROR(("load_seg_reg(SS): loading null selector"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

      /* selector's RPL must = CPL, else #GP(selector) */
      if (ss_selector.rpl != CPL) {
        BX_ERROR(("load_seg_reg(SS): rpl != CPL"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      parse_descriptor(dword1, dword2, &descriptor);

      if (descriptor.valid==0) {
        BX_ERROR(("load_seg_reg(SS): valid bit cleared"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* AR byte must indicate a writable data segment else #GP(selector) */
      if (descriptor.segment==0 || IS_CODE_SEGMENT(descriptor.type) ||
          IS_DATA_SEGMENT_WRITEABLE(descriptor.type) == 0)
      {
        BX_ERROR(("load_seg_reg(SS): not writable data segment"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* DPL in the AR byte must equal CPL else #GP(selector) */
      if (descriptor.dpl != CPL) {
        BX_ERROR(("load_seg_reg(SS): dpl != CPL"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* segment must be marked PRESENT else #SS(selector) */
      if (! IS_PRESENT(descriptor)) {
        BX_ERROR(("load_seg_reg(SS): not present"));
        exception(BX_SS_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* load SS with selector, load SS cache with descriptor */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector    = ss_selector;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache       = descriptor;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 1;

      /* now set accessed bit in descriptor */
      if (!(dword2 & 0x0100)) {
        dword2 |= 0x0100;
        if (ss_selector.ti == 0) { /* GDT */
          access_write_linear(BX_CPU_THIS_PTR gdtr.base + ss_selector.index*8 + 4, 4, 0, &dword2);
        }
        else { /* LDT */
          access_write_linear(BX_CPU_THIS_PTR ldtr.cache.u.system.base + ss_selector.index*8 + 4, 4, 0, &dword2);
        }
      }

      return;
    }
    else if ((seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES])
#if BX_CPU_LEVEL >= 3
          || (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS])
#endif
            )
    {
      bx_descriptor_t descriptor;
      bx_selector_t selector;
      Bit32u dword1, dword2;

      if ((new_value & 0xfffc) == 0) { /* null selector */
        load_null_selector(seg);
        return;
      }

      parse_selector(new_value, &selector);
      fetch_raw_descriptor(&selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &descriptor);

      if (descriptor.valid==0) {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): invalid segment", strseg(seg), new_value));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* AR byte must indicate data or readable code segment else #GP(selector) */
      if (descriptor.segment==0 || (IS_CODE_SEGMENT(descriptor.type) &&
          IS_CODE_SEGMENT_READABLE(descriptor.type) == 0))
      {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): not data or readable code", strseg(seg), new_value));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* If data or non-conforming code, then both the RPL and the CPL
       * must be less than or equal to DPL in AR byte else #GP(selector) */
      if (IS_DATA_SEGMENT(descriptor.type) ||
          IS_CODE_SEGMENT_NON_CONFORMING(descriptor.type))
      {
        if ((selector.rpl > descriptor.dpl) || (CPL > descriptor.dpl)) {
          BX_ERROR(("load_seg_reg(%s, 0x%04x): RPL & CPL must be <= DPL", strseg(seg), new_value));
          exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
        }
      }

      /* segment must be marked PRESENT else #NP(selector) */
      if (! IS_PRESENT(descriptor)) {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): segment not present", strseg(seg), new_value));
        exception(BX_NP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* load segment register with selector */
      /* load segment register-cache with descriptor */
      seg->selector    = selector;
      seg->cache       = descriptor;
      seg->cache.valid = 1;

      /* now set accessed bit in descriptor */
      /* wmr: don't bother if it's already set (thus allowing */
      /* GDT to be in read-only pages like real hdwe does)    */
      if (!(dword2 & 0x0100)) {
        dword2 |= 0x0100;
        if (selector.ti == 0) { /* GDT */
          access_write_linear(BX_CPU_THIS_PTR gdtr.base + selector.index*8 + 4, 4, 0, &dword2);
        }
        else { /* LDT */
          access_write_linear(BX_CPU_THIS_PTR ldtr.cache.u.system.base + selector.index*8 + 4, 4, 0, &dword2);
        }
      }
      return;
    }
    else {
      BX_PANIC(("load_seg_reg(): invalid segment register passed!"));
      return;
    }
  }
  /* real or v8086 mode */

  /* www.x86.org:
     According to Intel, each time any segment register is loaded in real
     mode, the base address is calculated as 16 times the segment value,
     while the access rights and size limit attributes are given fixed,
     "real-mode compatible" values.  This is not true.  In fact, only the CS
     descriptor caches for the 286, 386, and 486 get loaded with fixed
     values each time the segment register is loaded.  Loading CS, or any
     other segment register in real mode, on later Intel processors doesn't
     change the access rights or the segment size limit attributes stored
     in the descriptor cache registers.  For these segments, the access
     rights and segment size limit attributes from any previous setting are
     honored. */
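  /* Worked example (illustrative, not part of the original source): a
   * real-mode "MOV DS, AX" with AX = 0x1234 takes the path below and sets
   *   selector.value       = 0x1234
   *   cache.u.segment.base = 0x1234 << 4 = 0x12340
   * while the cached limit and attributes are left untouched when
   * real_mode() is true, which is what makes "big real mode" work. */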
  seg->selector.value = new_value;
  seg->selector.rpl = real_mode() ? 0 : 3;
  seg->cache.valid = 1;
  seg->cache.u.segment.base = new_value << 4;
  seg->cache.segment = 1; /* regular segment */
  seg->cache.p = 1; /* present */
  seg->cache.type = BX_DATA_READ_WRITE_ACCESSED;

  if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]) {
    invalidate_prefetch_q();
    updateFetchModeMask();
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    handleAlignmentCheck(); // CPL was modified
#endif
  }

  /* Do not modify segment limit and AR bytes when in real mode */
  /* Support for big real mode */
  if (real_mode()) return;

  seg->cache.dpl = 3; /* we are in v8086 mode */
  seg->cache.u.segment.limit        = 0xffff;
  seg->cache.u.segment.limit_scaled = 0xffff;
#if BX_CPU_LEVEL >= 3
  seg->cache.u.segment.g   = 0; /* byte granular */
  seg->cache.u.segment.d_b = 0; /* default 16bit size */
#if BX_SUPPORT_X86_64
  seg->cache.u.segment.l   = 0; /* default 16bit size */
#endif
  seg->cache.u.segment.avl = 0;
#endif
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::load_null_selector(bx_segment_reg_t *seg)
{
  seg->selector.index = 0;
  seg->selector.ti    = 0;
  seg->selector.rpl   = 0;
  seg->selector.value = 0;

  seg->cache.valid   = 0; /* invalidate null selector */
  seg->cache.p       = 0;
  seg->cache.dpl     = 0;
  seg->cache.segment = 1; /* data/code segment */
  seg->cache.type    = 0;

  seg->cache.u.segment.base         = 0;
  seg->cache.u.segment.limit        = 0;
  seg->cache.u.segment.limit_scaled = 0;
  seg->cache.u.segment.g            = 0;
  seg->cache.u.segment.d_b          = 0;
  seg->cache.u.segment.avl          = 0;
#if BX_SUPPORT_X86_64
  seg->cache.u.segment.l            = 0;
#endif
}
#if BX_SUPPORT_X86_64
void BX_CPU_C::loadSRegLMNominal(unsigned segI, unsigned selector, unsigned dpl)
{
  bx_segment_reg_t *seg = & BX_CPU_THIS_PTR sregs[segI];

  // Load a segment register in long-mode with nominal values,
  // so descriptor cache values are compatible with existing checks.
  seg->cache.u.segment.base = 0;
  seg->cache.valid = 1;
  seg->cache.dpl = dpl;

  seg->selector.value = selector;
}
#endif
BX_CPP_INLINE void BX_CPU_C::validate_seg_reg(unsigned seg)
{
  /*
     FOR (seg = ES, DS, FS, GS)
     {
        IF ((seg.attr.dpl < CPL) && ((seg.attr.type == 'data')
                 || (seg.attr.type == 'non-conforming-code')))
        {
           seg = NULL // can't use lower dpl data segment at higher cpl
        }
     }
  */

  bx_segment_reg_t *segment = &BX_CPU_THIS_PTR sregs[seg];

  if (segment->cache.dpl < CPL)
  {
    // invalidate if data or non-conforming code segment
    if (segment->cache.valid==0 || segment->cache.segment==0 ||
        IS_DATA_SEGMENT(segment->cache.type) ||
        IS_CODE_SEGMENT_NON_CONFORMING(segment->cache.type))
    {
      segment->selector.value = 0;
      segment->cache.valid = 0;
    }
  }
}
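// Illustrative scenario (not part of the original source): after a RETF or
// IRET that drops privilege from CPL 0 to CPL 3, a DPL-0 data segment left
// in DS satisfies (cache.dpl < CPL) and the data-segment test above, so its
// selector is nulled and its cache entry invalidated, mirroring how real
// hardware makes such segments unusable on a privilege-lowering return.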
void BX_CPU_C::validate_seg_regs(void)
{
  validate_seg_reg(BX_SEG_REG_ES);
  validate_seg_reg(BX_SEG_REG_DS);
  validate_seg_reg(BX_SEG_REG_FS);
  validate_seg_reg(BX_SEG_REG_GS);
}
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::parse_selector(Bit16u raw_selector, bx_selector_t *selector)
{
  selector->value = raw_selector;
  selector->index = raw_selector >> 3;
  selector->ti    = (raw_selector >> 2) & 0x01;
  selector->rpl   = raw_selector & 0x03;
}
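// Worked example (illustrative, not part of the original source): the
// common user-mode data selector 0x002B splits into
//   index = 0x002B >> 3       = 5  (sixth 8-byte descriptor slot)
//   ti    = (0x002B >> 2) & 1 = 0  (table indicator: GDT)
//   rpl   = 0x002B & 3        = 3  (requested privilege level 3)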
Bit8u BX_CPP_AttrRegparmN(1)
BX_CPU_C::ar_byte(const bx_descriptor_t *d)
{
  if (d->valid == 0) {
    return(0);
  }

  return (d->type) |
         (d->segment << 4) |
         (d->dpl << 5) |
         (d->p << 7);
}

void BX_CPP_AttrRegparmN(2)
BX_CPU_C::set_ar_byte(bx_descriptor_t *d, Bit8u ar_byte)
{
  d->p       = (ar_byte >> 7) & 0x01;
  d->dpl     = (ar_byte >> 5) & 0x03;
  d->segment = (ar_byte >> 4) & 0x01;
  d->type    = (ar_byte & 0x0f);
}
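// AR-byte layout recap with a worked example (illustrative, not part of
// the original source):
//   bit  7   : P (present)
//   bits 6-5 : DPL
//   bit  4   : S (1 = code/data segment, 0 = system segment)
//   bits 3-0 : type
// For instance, ar_byte() on a present (p=1), DPL-0, code/data (segment=1)
// descriptor of type 0xA (execute/read code) yields
//   0xA | (1 << 4) | (0 << 5) | (1 << 7) = 0x9A
// and set_ar_byte() with 0x9A restores exactly those four fields.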
Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_descriptor_l(const bx_descriptor_t *d)
{
  Bit32u val;

  if (d->valid == 0) {
    return(0);
  }

  if (d->segment) {
    val = ((d->u.segment.base & 0xffff) << 16) | (d->u.segment.limit & 0xffff);
    return(val);
  }
  else {
    switch (d->type) {
      case 0: // Reserved (not defined)
        BX_ERROR(("#get_descriptor_l(): type %d not finished", d->type));
        return(0);

      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        val = ((d->u.system.base & 0xffff) << 16) | (d->u.system.limit & 0xffff);
        return(val);

      default:
        BX_PANIC(("#get_descriptor_l(): type %d not finished", d->type));
        return(0);
    }
  }
}

Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_descriptor_h(const bx_descriptor_t *d)
{
  Bit32u val;

  if (d->valid == 0) {
    return(0);
  }

  if (d->segment) {
    val = (d->u.segment.base & 0xff000000) |
          ((d->u.segment.base >> 16) & 0x000000ff) |
          (d->type << 8) |
          (d->segment << 12) |
          (d->dpl << 13) |
          (d->p << 15) |
          (d->u.segment.limit & 0xf0000) |
          (d->u.segment.avl << 20) |
#if BX_SUPPORT_X86_64
          (d->u.segment.l << 21) |
#endif
          (d->u.segment.d_b << 22) |
          (d->u.segment.g << 23);
    return(val);
  }
  else {
    switch (d->type) {
      case 0: // Reserved (not yet defined)
        BX_ERROR(("#get_descriptor_h(): type %d not finished", d->type));
        return(0);

      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
        BX_ASSERT(d->u.system.g   == 0);
        BX_ASSERT(d->u.system.avl == 0);
        // fall through
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        val = ((d->u.system.base >> 16) & 0xff) |
              (d->type << 8) |
              (d->dpl << 13) |
              (d->p << 15) |
              (d->u.system.limit & 0xf0000) |
              (d->u.system.avl << 20) |
              (d->u.system.g << 23) |
              (d->u.system.base & 0xff000000);
        return(val);

      default:
        BX_PANIC(("#get_descriptor_h(): type %d not finished", d->type));
        return(0);
    }
  }
}
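// Worked example (illustrative, not part of the original source): a flat
// execute/read code segment with base=0x00010000, limit=0xFFFFF, g=1,
// d_b=1, dpl=0, p=1, type=0xA packs back into the raw descriptor dwords as
//   get_descriptor_l() = (0x0000 << 16) | 0xFFFF = 0x0000FFFF
//   get_descriptor_h() = 0x01 | (0xA << 8) | (1 << 12) | (1 << 15)
//                      | 0xF0000 | (1 << 22) | (1 << 23) = 0x00CF9A01
// i.e. these two routines are the inverse of parse_descriptor() below for
// code/data descriptors.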
#if BX_CPU_LEVEL >= 3
Bit16u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_segment_ar_data(const bx_descriptor_t *d) // for SMM ONLY
{
  Bit16u val = 0;

  if (d->segment) { /* data/code segment descriptors */
    val = (d->type) |
          (d->segment << 4) |
          (d->dpl << 5) |
          (d->p << 7) |
          (d->u.segment.avl << 12) |
#if BX_SUPPORT_X86_64
          (d->u.segment.l << 13) |
#endif
          (d->u.segment.d_b << 14) |
          (d->u.segment.g << 15);

    // for SMM map the segment cache valid bit stored in AR[8]
    if (d->valid)
      val |= (1<<8);

    return val;
  }

  switch (d->type) {
    case BX_SYS_SEGMENT_AVAIL_286_TSS:
    case BX_SYS_SEGMENT_BUSY_286_TSS:
      BX_ASSERT(d->u.system.g   == 0);
      BX_ASSERT(d->u.system.avl == 0);
      // fall through
    case BX_SYS_SEGMENT_LDT:
    case BX_SYS_SEGMENT_AVAIL_386_TSS:
    case BX_SYS_SEGMENT_BUSY_386_TSS:
      val = (d->type) |
            (d->dpl << 5) |
            (d->p << 7) |
            (d->u.system.avl << 12) |
            (d->u.system.g << 15);
      break;
    default:
      BX_PANIC(("get_segment_ar_data(): case %u unsupported", (unsigned) d->type));
  }

  // for SMM map the segment cache valid bit stored in AR[8]
  if (d->valid)
    val |= (1<<8);

  return val;
}
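// Worked example of the 16-bit SMM AR format (illustrative, not part of
// the original source): a valid flat read/write data segment with
// type=0x3 (RW, accessed), segment=1, dpl=0, p=1, d_b=1, g=1 encodes as
//   0x3 | (1 << 4) | (1 << 7) | (1 << 8) | (1 << 14) | (1 << 15) = 0xC193
// where bit 8 is the cache-valid flag that set_segment_ar_data() reads
// back when the segment state is restored from the SMM save area.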
// for SMM ONLY
bx_bool BX_CPU_C::set_segment_ar_data(bx_segment_reg_t *seg,
     Bit16u raw_selector, bx_address base, Bit32u limit, Bit16u ar_data)
{
  parse_selector(raw_selector, &seg->selector);

  bx_descriptor_t *d = &seg->cache;

  d->p       = (ar_data >> 7) & 0x1;
  d->dpl     = (ar_data >> 5) & 0x3;
  d->segment = (ar_data >> 4) & 0x1;
  d->type    = (ar_data & 0x0f);

  // for SMM map the segment cache valid bit stored in AR[8]
  d->valid   = (ar_data >> 8) & 0x1;

  if (d->segment) { /* data/code segment descriptors */
    d->u.segment.g     = (ar_data >> 15) & 0x1;
    d->u.segment.d_b   = (ar_data >> 14) & 0x1;
#if BX_SUPPORT_X86_64
    d->u.segment.l     = (ar_data >> 13) & 0x1;
#endif
    d->u.segment.avl   = (ar_data >> 12) & 0x1;

    d->u.segment.base  = base;
    d->u.segment.limit = limit;

    if (d->u.segment.g)
      d->u.segment.limit_scaled = (d->u.segment.limit << 12) | 0xfff;
    else
      d->u.segment.limit_scaled = (d->u.segment.limit);
  }
  else {
    switch(d->type) {
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        d->u.system.avl   = (ar_data >> 12) & 0x1;
        d->u.system.g     = (ar_data >> 15) & 0x1;
        d->u.system.base  = base;
        d->u.system.limit = limit;
        if (d->u.system.g)
          d->u.system.limit_scaled = (d->u.system.limit << 12) | 0xfff;
        else
          d->u.system.limit_scaled = (d->u.system.limit);
        break;

      default:
        BX_PANIC(("set_segment_ar_data(): case %u unsupported", (unsigned) d->type));
    }
  }

  return d->valid;
}
#endif
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::parse_descriptor(Bit32u dword1, Bit32u dword2, bx_descriptor_t *temp)
{
  Bit8u AR_byte;

  AR_byte = dword2 >> 8;
  temp->p       = (AR_byte >> 7) & 0x1;
  temp->dpl     = (AR_byte >> 5) & 0x3;
  temp->segment = (AR_byte >> 4) & 0x1;
  temp->type    = (AR_byte & 0xf);
  temp->valid   = 0; /* start out invalid */

  if (temp->segment) { /* data/code segment descriptors */
    temp->u.segment.limit = (dword1 & 0xffff);
    temp->u.segment.base  = (dword1 >> 16) | ((dword2 & 0xFF) << 16);

    temp->u.segment.limit |= (dword2 & 0x000F0000);
    temp->u.segment.g      = (dword2 & 0x00800000) > 0;
    temp->u.segment.d_b    = (dword2 & 0x00400000) > 0;
#if BX_SUPPORT_X86_64
    temp->u.segment.l      = (dword2 & 0x00200000) > 0;
#endif
    temp->u.segment.avl    = (dword2 & 0x00100000) > 0;
    temp->u.segment.base  |= (dword2 & 0xFF000000);

    if (temp->u.segment.g)
      temp->u.segment.limit_scaled = (temp->u.segment.limit << 12) | 0xfff;
    else
      temp->u.segment.limit_scaled = (temp->u.segment.limit);

    temp->valid = 1;
  }
  else { // system & gate segment descriptors
    switch (temp->type) {
      case 0: // reserved
      case 8: // reserved
      case 10: // reserved
      case 13: // reserved
        temp->valid = 0;
        break;

      case BX_286_CALL_GATE:
      case BX_286_INTERRUPT_GATE:
      case BX_286_TRAP_GATE:
        // param count only used for call gate
        temp->u.gate.param_count   = dword2 & 0x1f;
        temp->u.gate.dest_selector = dword1 >> 16;
        temp->u.gate.dest_offset   = dword1 & 0xffff;
        temp->valid = 1;
        break;

      case BX_386_CALL_GATE:
      case BX_386_INTERRUPT_GATE:
      case BX_386_TRAP_GATE:
        // param count only used for call gate
        temp->u.gate.param_count   = dword2 & 0x1f;
        temp->u.gate.dest_selector = dword1 >> 16;
        temp->u.gate.dest_offset   = (dword2 & 0xffff0000) |
                                     (dword1 & 0x0000ffff);
        temp->valid = 1;
        break;

      case BX_TASK_GATE:
        temp->u.taskgate.tss_selector = dword1 >> 16;
        temp->valid = 1;
        break;

      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        temp->u.system.base  = (dword1 >> 16) |
                               ((dword2 & 0xff) << 16) | (dword2 & 0xff000000);
        temp->u.system.limit = (dword1 & 0x0000ffff) | (dword2 & 0x000f0000);
        temp->u.system.g     = (dword2 & 0x00800000) > 0;
        temp->u.system.avl   = (dword2 & 0x00100000) > 0;
        if (temp->u.system.g)
          temp->u.system.limit_scaled = (temp->u.system.limit << 12) | 0xfff;
        else
          temp->u.system.limit_scaled = (temp->u.system.limit);
        temp->valid = 1;
        break;

      default:
        BX_PANIC(("parse_descriptor(): case %u unfinished", (unsigned) temp->type));
        temp->valid = 0;
    }
  }
}
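// Worked example (illustrative, not part of the original source): the
// classic flat 4GB code descriptor dword1=0x0000FFFF, dword2=0x00CF9A00
// parses as AR_byte = 0x9A (p=1, dpl=0, segment=1, type=0xA), base = 0,
// limit = 0xFFFF | 0xF0000 = 0xFFFFF, g=1, d_b=1, and with page
// granularity the limit scales to
//   limit_scaled = (0xFFFFF << 12) | 0xFFF = 0xFFFFFFFF.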
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::load_ss(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl)
{
  // Add cpl to the selector value.
  selector->value = (0xfffc & selector->value) | cpl;

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector = *selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache = *descriptor;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl = cpl;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    loadSRegLMNominal(BX_SEG_REG_SS, selector->value, cpl);
    return;
  }
#endif
  if ((BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value & 0xfffc) == 0)
    BX_PANIC(("load_ss(): null selector passed"));

  if (!BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid)
    BX_PANIC(("load_ss(): invalid selector/descriptor passed."));
}
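// Worked example of the RPL merge above (illustrative, not part of the
// original source): a selector value of 0x0010 loaded with cpl=3 becomes
//   (0x0010 & 0xfffc) | 3 = 0x0013
// so the stored SS selector always carries the new CPL in its RPL field.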
void BX_CPU_C::fetch_raw_descriptor(const bx_selector_t *selector,
    Bit32u *dword1, Bit32u *dword2, unsigned exception_no)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 7) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor: GDT: index (%x)%x > limit (%x)",
         index*8 + 7, index, BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor: LDTR.valid=0"));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    if ((index*8 + 7) > BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor: LDT: index (%x)%x > limit (%x)",
         index*8 + 7, index, BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.system.base + index*8;
  }

  raw_descriptor = system_read_qword(offset);

  *dword1 = GET32L(raw_descriptor);
  *dword2 = GET32H(raw_descriptor);
}
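// Worked example of the limit check above (illustrative, not part of the
// original source): each descriptor occupies 8 bytes, so selector index 5
// needs table bytes 40..47.  The last byte is index*8 + 7 = 47 = 0x2F, and
// a gdtr.limit below 0x2F raises the requested exception with the selector
// as the error code.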
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::fetch_raw_descriptor2(const bx_selector_t *selector, Bit32u *dword1, Bit32u *dword2)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 7) > BX_CPU_THIS_PTR gdtr.limit)
      return 0;
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor2: LDTR.valid=0"));
      return 0;
    }
    if ((index*8 + 7) > BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled)
      return 0;
    offset = BX_CPU_THIS_PTR ldtr.cache.u.system.base + index*8;
  }

  raw_descriptor = system_read_qword(offset);

  *dword1 = GET32L(raw_descriptor);
  *dword2 = GET32H(raw_descriptor);

  return 1;
}
#if BX_SUPPORT_X86_64
void BX_CPU_C::fetch_raw_descriptor_64(const bx_selector_t *selector,
    Bit32u *dword1, Bit32u *dword2, Bit32u *dword3, unsigned exception_no)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor1, raw_descriptor2;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 15) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor64: GDT: index (%x)%x > limit (%x)",
         index*8 + 15, index, BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor64: LDTR.valid=0"));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    if ((index*8 + 15) > BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor64: LDT: index (%x)%x > limit (%x)",
         index*8 + 15, index, BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.system.base + index*8;
  }

  raw_descriptor1 = system_read_qword(offset);
  raw_descriptor2 = system_read_qword(offset + 8);

  if (raw_descriptor2 & BX_CONST64(0x00001F0000000000)) {
    BX_ERROR(("fetch_raw_descriptor64: extended attributes DWORD4 TYPE != 0"));
    exception(BX_GP_EXCEPTION, selector->value & 0xfffc, 0);
  }

  *dword1 = GET32L(raw_descriptor1);
  *dword2 = GET32H(raw_descriptor1);
  *dword3 = GET32L(raw_descriptor2);
}
#endif
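// Note on the 64-bit fetch above (illustrative, not part of the original
// source): in long mode, system descriptors (LDT and TSS) grow to 16 bytes,
// so a selector with index n covers table bytes n*8 .. n*8+15 and the limit
// check uses index*8 + 15.  The BX_CONST64(0x00001F0000000000) mask tests
// bits 40..44 of the high qword, i.e. the type/S field of what would be a
// descriptor occupying the upper half; it must be zero, otherwise the fetch
// raises #GP(selector).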