/////////////////////////////////////////////////////////////////////////
// $Id: segment_ctrl_pro.cc,v 1.101 2008/09/11 21:54:57 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

void BX_CPP_AttrRegparmN(2)
BX_CPU_C::load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value)
{
  if (protected_mode())
  {
    if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS])
    {
      bx_selector_t ss_selector;
      bx_descriptor_t descriptor;
      Bit32u dword1, dword2;

      parse_selector(new_value, &ss_selector);

      if ((new_value & 0xfffc) == 0) { /* null selector */
#if BX_SUPPORT_X86_64
        // allow SS = 0 in 64 bit mode only with cpl != 3 and rpl = cpl
        if (Is64BitMode() && CPL != 3 && ss_selector.rpl == CPL) {
          load_null_selector(seg);
          return;
        }
#endif
        BX_ERROR(("load_seg_reg(SS): loading null selector"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

      /* selector's RPL must = CPL, else #GP(selector) */
      if (ss_selector.rpl != CPL) {
        BX_ERROR(("load_seg_reg(SS): rpl != CPL"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      parse_descriptor(dword1, dword2, &descriptor);

      if (descriptor.valid==0) {
        BX_ERROR(("load_seg_reg(SS): valid bit cleared"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* AR byte must indicate a writable data segment else #GP(selector) */
      if (descriptor.segment==0 || IS_CODE_SEGMENT(descriptor.type) ||
          IS_DATA_SEGMENT_WRITEABLE(descriptor.type) == 0)
      {
        BX_ERROR(("load_seg_reg(SS): not writable data segment"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* DPL in the AR byte must equal CPL else #GP(selector) */
      if (descriptor.dpl != CPL) {
        BX_ERROR(("load_seg_reg(SS): dpl != CPL"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* segment must be marked PRESENT else #SS(selector) */
      if (! IS_PRESENT(descriptor)) {
        BX_ERROR(("load_seg_reg(SS): not present"));
        exception(BX_SS_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* load SS with selector, load SS cache with descriptor */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector    = ss_selector;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache       = descriptor;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 1;

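      /* The accessed bit is bit 8 of the descriptor's second dword (byte 5
         of the 8-byte entry), so only that dword, at table offset
         index*8 + 4, needs to be written back. */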
      /* now set accessed bit in descriptor */
      if (!(dword2 & 0x0100)) {
        dword2 |= 0x0100; /* set the accessed bit */
        if (ss_selector.ti == 0) { /* GDT */
          access_write_linear(BX_CPU_THIS_PTR gdtr.base + ss_selector.index*8 + 4, 4, 0, &dword2);
        }
        else { /* LDT */
          access_write_linear(BX_CPU_THIS_PTR ldtr.cache.u.system.base + ss_selector.index*8 + 4, 4, 0, &dword2);
        }
      }

      return;
    }
    else if ((seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES])
#if BX_CPU_LEVEL >= 3
          || (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS])
#endif
            )
    {
      bx_descriptor_t descriptor;
      bx_selector_t selector;
      Bit32u dword1, dword2;

      if ((new_value & 0xfffc) == 0) { /* null selector */
        load_null_selector(seg);
        return;
      }

      parse_selector(new_value, &selector);
      fetch_raw_descriptor(&selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &descriptor);

      if (descriptor.valid==0) {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): invalid segment", strseg(seg), new_value));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* AR byte must indicate data or readable code segment else #GP(selector) */
      if (descriptor.segment==0 || (IS_CODE_SEGMENT(descriptor.type) &&
          IS_CODE_SEGMENT_READABLE(descriptor.type) == 0))
      {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): not data or readable code", strseg(seg), new_value));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* If data or non-conforming code, then both the RPL and the CPL
       * must be less than or equal to DPL in AR byte else #GP(selector) */
      if (IS_DATA_SEGMENT(descriptor.type) ||
          IS_CODE_SEGMENT_NON_CONFORMING(descriptor.type))
      {
        if ((selector.rpl > descriptor.dpl) || (CPL > descriptor.dpl)) {
          BX_ERROR(("load_seg_reg(%s, 0x%04x): RPL & CPL must be <= DPL", strseg(seg), new_value));
          exception(BX_GP_EXCEPTION, new_value & 0xfffc, 0);
        }
      }

      /* segment must be marked PRESENT else #NP(selector) */
      if (! IS_PRESENT(descriptor)) {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): segment not present", strseg(seg), new_value));
        exception(BX_NP_EXCEPTION, new_value & 0xfffc, 0);
      }

      /* load segment register with selector */
      /* load segment register-cache with descriptor */
      seg->selector    = selector;
      seg->cache       = descriptor;
      seg->cache.valid = 1;

      /* now set accessed bit in descriptor */
      /* wmr: don't bother if it's already set (thus allowing */
      /* GDT to be in read-only pages like real hdwe does) */
      if (!(dword2 & 0x0100)) {
        dword2 |= 0x0100; /* set the accessed bit */
        if (selector.ti == 0) { /* GDT */
          access_write_linear(BX_CPU_THIS_PTR gdtr.base + selector.index*8 + 4, 4, 0, &dword2);
        }
        else { /* LDT */
          access_write_linear(BX_CPU_THIS_PTR ldtr.cache.u.system.base + selector.index*8 + 4, 4, 0, &dword2);
        }
      }

      return;
    }
    else {
      BX_PANIC(("load_seg_reg(): invalid segment register passed!"));
      return;
    }
  }

  /* real or v8086 mode */

  /*
    According to Intel, each time any segment register is loaded in real
    mode, the base address is calculated as 16 times the segment value,
    while the access rights and size limit attributes are given fixed,
    "real-mode compatible" values. This is not true. In fact, only the CS
    descriptor caches for the 286, 386, and 486 get loaded with fixed
    values each time the segment register is loaded. Loading CS, or any
    other segment register in real mode, on later Intel processors doesn't
    change the access rights or the segment size limit attributes stored
    in the descriptor cache registers. For these segments, the access
    rights and segment size limit attributes from any previous setting are
    honored.
  */

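  /* Example: "mov ds, ax" with AX = 0x1234 in real mode yields
     DS.base = 0x1234 << 4 = 0x12340; the cached limit is deliberately
     left untouched below so that "big real mode" limits survive. */
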
  seg->selector.value = new_value;
  seg->selector.rpl = real_mode() ? 0 : 3;
  seg->cache.valid = 1;
  seg->cache.u.segment.base = new_value << 4;
  seg->cache.segment = 1; /* regular segment */
  seg->cache.p = 1; /* present */
  seg->cache.type = BX_DATA_READ_WRITE_ACCESSED;

  if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]) {
    invalidate_prefetch_q();
    updateFetchModeMask();
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    handleAlignmentCheck(); // CPL was modified
#endif
  }

  /* Do not modify segment limit and AR bytes when in real mode */
  /* Support for big real mode */
  if (real_mode()) return;

  seg->cache.dpl = 3; /* we are in v8086 mode */
  seg->cache.u.segment.limit        = 0xffff;
  seg->cache.u.segment.limit_scaled = 0xffff;
#if BX_CPU_LEVEL >= 3
  seg->cache.u.segment.g   = 0; /* byte granular */
  seg->cache.u.segment.d_b = 0; /* default 16bit size */
#if BX_SUPPORT_X86_64
  seg->cache.u.segment.l   = 0; /* default 16bit size */
#endif
  seg->cache.u.segment.avl = 0;
#endif
}

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::load_null_selector(bx_segment_reg_t *seg)
{
  seg->selector.index = 0;
  seg->selector.ti    = 0;
  seg->selector.rpl   = 0;
  seg->selector.value = 0;

  seg->cache.valid   = 0; /* invalidate null selector */
  seg->cache.segment = 1; /* data/code segment */

  seg->cache.u.segment.base         = 0;
  seg->cache.u.segment.limit        = 0;
  seg->cache.u.segment.limit_scaled = 0;
  seg->cache.u.segment.g            = 0;
  seg->cache.u.segment.d_b          = 0;
  seg->cache.u.segment.avl          = 0;
#if BX_SUPPORT_X86_64
  seg->cache.u.segment.l            = 0;
#endif
}

#if BX_SUPPORT_X86_64
void BX_CPU_C::loadSRegLMNominal(unsigned segI, unsigned selector, unsigned dpl)
{
  bx_segment_reg_t *seg = & BX_CPU_THIS_PTR sregs[segI];

  // Load a segment register in long-mode with nominal values,
  // so descriptor cache values are compatible with existing checks.
  seg->cache.u.segment.base = 0;
  seg->cache.valid = 1;
  seg->cache.dpl = dpl;

  seg->selector.value = selector;
}
#endif

BX_CPP_INLINE void BX_CPU_C::validate_seg_reg(unsigned seg)
{
  /*
     FOR (seg = ES, DS, FS, GS)
     {
        IF ((seg.attr.dpl < CPL) && ((seg.attr.type == 'data')
                 || (seg.attr.type == 'non-conforming-code')))
        {
           seg = NULL  // can't use lower dpl data segment at higher cpl
        }
     }
  */

  bx_segment_reg_t *segment = &BX_CPU_THIS_PTR sregs[seg];

  if (segment->cache.dpl < CPL)
  {
    // invalidate if data or non-conforming code segment
    if (segment->cache.valid==0 || segment->cache.segment==0 ||
        IS_DATA_SEGMENT(segment->cache.type) ||
        IS_CODE_SEGMENT_NON_CONFORMING(segment->cache.type))
    {
      segment->selector.value = 0;
      segment->cache.valid = 0;
    }
  }
}

void BX_CPU_C::validate_seg_regs(void)
{
  validate_seg_reg(BX_SEG_REG_ES);
  validate_seg_reg(BX_SEG_REG_DS);
  validate_seg_reg(BX_SEG_REG_FS);
  validate_seg_reg(BX_SEG_REG_GS);
}

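// A 16-bit selector decomposes as: bits 15..3 = descriptor table index,
// bit 2 = TI (table indicator, 0 = GDT, 1 = LDT), bits 1..0 = RPL
// (requested privilege level).  parse_selector() below unpacks these fields.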
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::parse_selector(Bit16u raw_selector, bx_selector_t *selector)
{
  selector->value = raw_selector;
  selector->index = raw_selector >> 3;
  selector->ti    = (raw_selector >> 2) & 0x01;
  selector->rpl   = raw_selector & 0x03;
}

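// The access-rights (AR) byte is byte 5 of a descriptor: P (bit 7),
// DPL (bits 6..5), S (bit 4, 1 = code/data, 0 = system), TYPE (bits 3..0).
// ar_byte()/set_ar_byte() convert between this packed form and the cache.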
Bit8u BX_CPP_AttrRegparmN(1)
BX_CPU_C::ar_byte(const bx_descriptor_t *d)
{
  if (d->valid == 0) return(0);

  // exact inverse of set_ar_byte() below
  return (d->type) |
         (d->segment << 4) |
         (d->dpl << 5) |
         (d->p << 7);
}

void BX_CPP_AttrRegparmN(2)
BX_CPU_C::set_ar_byte(bx_descriptor_t *d, Bit8u ar_byte)
{
  d->p       = (ar_byte >> 7) & 0x01;
  d->dpl     = (ar_byte >> 5) & 0x03;
  d->segment = (ar_byte >> 4) & 0x01;
  d->type    = (ar_byte & 0x0f);
}

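// get_descriptor_l()/get_descriptor_h() re-pack a cached descriptor into its
// raw table image.  For code/data segments the two dwords hold:
//   dword1: base[15:0] (bits 31..16), limit[15:0] (bits 15..0)
//   dword2: base[31:24] | G | D/B | L | AVL | limit[19:16] |
//           P | DPL | S | TYPE | base[23:16] (bits 7..0)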
Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_descriptor_l(const bx_descriptor_t *d)
{
  Bit32u val;

  if (d->segment) {
    val = ((d->u.segment.base & 0xffff) << 16) | (d->u.segment.limit & 0xffff);
    return(val);
  }

  switch (d->type) {
    case 0: // Reserved (not defined)
      BX_ERROR(("#get_descriptor_l(): type %d not finished", d->type));
      return(0);

    case BX_SYS_SEGMENT_LDT:
    case BX_SYS_SEGMENT_AVAIL_286_TSS:
    case BX_SYS_SEGMENT_BUSY_286_TSS:
    case BX_SYS_SEGMENT_AVAIL_386_TSS:
    case BX_SYS_SEGMENT_BUSY_386_TSS:
      val = ((d->u.system.base & 0xffff) << 16) | (d->u.system.limit & 0xffff);
      return(val);

    default:
      BX_PANIC(("#get_descriptor_l(): type %d not finished", d->type));
      return(0);
  }
}

Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_descriptor_h(const bx_descriptor_t *d)
{
  Bit32u val;

  if (d->segment) {
    val = (d->u.segment.base & 0xff000000) |
          ((d->u.segment.base >> 16) & 0x000000ff) |
          (d->type << 8) |
          (d->segment << 12) |
          (d->dpl << 13) |
          (d->p << 15) |
          (d->u.segment.limit & 0xf0000) |
          (d->u.segment.avl << 20) |
#if BX_SUPPORT_X86_64
          (d->u.segment.l << 21) |
#endif
          (d->u.segment.d_b << 22) |
          (d->u.segment.g << 23);
    return(val);
  }

  switch (d->type) {
    case 0: // Reserved (not yet defined)
      BX_ERROR(("#get_descriptor_h(): type %d not finished", d->type));
      return(0);

    case BX_SYS_SEGMENT_AVAIL_286_TSS:
    case BX_SYS_SEGMENT_BUSY_286_TSS:
      BX_ASSERT(d->u.system.g   == 0);
      BX_ASSERT(d->u.system.avl == 0);
      // fall through
    case BX_SYS_SEGMENT_LDT:
    case BX_SYS_SEGMENT_AVAIL_386_TSS:
    case BX_SYS_SEGMENT_BUSY_386_TSS:
      val = ((d->u.system.base >> 16) & 0xff) |
            (d->type << 8) |
            (d->dpl << 13) |
            (d->p << 15) |
            (d->u.system.limit & 0xf0000) |
            (d->u.system.avl << 20) |
            (d->u.system.g << 23) |
            (d->u.system.base & 0xff000000);
      return(val);

    default:
      BX_PANIC(("#get_descriptor_h(): type %d not finished", d->type));
      return(0);
  }
}

#if BX_CPU_LEVEL >= 3
Bit16u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_segment_ar_data(const bx_descriptor_t *d) // for SMM ONLY
{
  Bit16u ar_data = 0;

  if (d->segment) { /* data/code segment descriptors */
    ar_data = (d->type) |
              (d->segment << 4) |
              (d->dpl << 5) |
              (d->p << 7) |
              (d->u.segment.avl << 12) |
#if BX_SUPPORT_X86_64
              (d->u.segment.l << 13) |
#endif
              (d->u.segment.d_b << 14) |
              (d->u.segment.g << 15);

    // for SMM map the segment cache valid bit stored in AR[8]
    if (d->valid) ar_data |= (1 << 8);

    return ar_data;
  }

  switch (d->type) {
    case BX_SYS_SEGMENT_AVAIL_286_TSS:
    case BX_SYS_SEGMENT_BUSY_286_TSS:
      BX_ASSERT(d->u.system.g   == 0);
      BX_ASSERT(d->u.system.avl == 0);
      // fall through
    case BX_SYS_SEGMENT_LDT:
    case BX_SYS_SEGMENT_AVAIL_386_TSS:
    case BX_SYS_SEGMENT_BUSY_386_TSS:
      ar_data = (d->type) |
                (d->dpl << 5) |
                (d->p << 7) |
                (d->u.system.avl << 12) |
                (d->u.system.g << 15);
      break;

    default:
      BX_PANIC(("get_segment_ar_data(): case %u unsupported", (unsigned) d->type));
  }

  // for SMM map the segment cache valid bit stored in AR[8]
  if (d->valid) ar_data |= (1 << 8);

  return ar_data;
}
#endif

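// set_segment_ar_data() is the inverse of get_segment_ar_data(): it rebuilds
// a segment register from the raw selector/base/limit/AR image kept in the
// SMM state save area, including the cache valid bit mapped at AR[8].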
bx_bool BX_CPU_C::set_segment_ar_data(bx_segment_reg_t *seg,
     Bit16u raw_selector, bx_address base, Bit32u limit, Bit16u ar_data)
{
  parse_selector(raw_selector, &seg->selector);

  bx_descriptor_t *d = &seg->cache;

  d->p       = (ar_data >> 7) & 0x1;
  d->dpl     = (ar_data >> 5) & 0x3;
  d->segment = (ar_data >> 4) & 0x1;
  d->type    = (ar_data & 0x0f);

  // for SMM map the segment cache valid bit stored in AR[8]
  d->valid   = (ar_data >> 8) & 0x1;

  if (d->segment) { /* data/code segment descriptors */
    d->u.segment.g     = (ar_data >> 15) & 0x1;
    d->u.segment.d_b   = (ar_data >> 14) & 0x1;
#if BX_SUPPORT_X86_64
    d->u.segment.l     = (ar_data >> 13) & 0x1;
#endif
    d->u.segment.avl   = (ar_data >> 12) & 0x1;

    d->u.segment.base  = base;
    d->u.segment.limit = limit;

    if (d->u.segment.g)
      d->u.segment.limit_scaled = (d->u.segment.limit << 12) | 0xfff;
    else
      d->u.segment.limit_scaled = (d->u.segment.limit);
  }
  else {
    switch (d->type) {
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        d->u.system.avl   = (ar_data >> 12) & 0x1;
        d->u.system.g     = (ar_data >> 15) & 0x1;
        d->u.system.base  = base;
        d->u.system.limit = limit;
        if (d->u.system.g)
          d->u.system.limit_scaled = (d->u.system.limit << 12) | 0xfff;
        else
          d->u.system.limit_scaled = (d->u.system.limit);
        break;

      default:
        BX_PANIC(("set_segment_ar_data(): case %u unsupported", (unsigned) d->type));
    }
  }

  return d->valid;
}

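// parse_descriptor() unpacks the two raw dwords of a descriptor into the
// cache format.  Note the limit scaling: with the granularity bit set the
// 20-bit limit counts 4K pages, e.g. limit = 0xfffff becomes
// (0xfffff << 12) | 0xfff = 0xffffffff.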
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::parse_descriptor(Bit32u dword1, Bit32u dword2, bx_descriptor_t *temp)
{
  Bit8u AR_byte;

  AR_byte        = dword2 >> 8;
  temp->p        = (AR_byte >> 7) & 0x1;
  temp->dpl      = (AR_byte >> 5) & 0x3;
  temp->segment  = (AR_byte >> 4) & 0x1;
  temp->type     = (AR_byte & 0xf);
  temp->valid    = 0; /* start out invalid */

  if (temp->segment) { /* data/code segment descriptors */
    temp->u.segment.limit  = (dword1 & 0xffff);
    temp->u.segment.base   = (dword1 >> 16) | ((dword2 & 0xFF) << 16);

    temp->u.segment.limit |= (dword2 & 0x000F0000);
    temp->u.segment.g      = (dword2 & 0x00800000) > 0;
    temp->u.segment.d_b    = (dword2 & 0x00400000) > 0;
#if BX_SUPPORT_X86_64
    temp->u.segment.l      = (dword2 & 0x00200000) > 0;
#endif
    temp->u.segment.avl    = (dword2 & 0x00100000) > 0;
    temp->u.segment.base  |= (dword2 & 0xFF000000);

    if (temp->u.segment.g)
      temp->u.segment.limit_scaled = (temp->u.segment.limit << 12) | 0xfff;
    else
      temp->u.segment.limit_scaled = (temp->u.segment.limit);

    temp->valid = 1;
  }
  else { // system & gate segment descriptors
    switch (temp->type) {
      case 0: // reserved
      case 8: // reserved
      case 10: // reserved
      case 13: // reserved
        temp->valid = 0;
        break;

      case BX_286_CALL_GATE:
      case BX_286_INTERRUPT_GATE:
      case BX_286_TRAP_GATE:
        // param count only used for call gate
        temp->u.gate.param_count   = dword2 & 0x1f;
        temp->u.gate.dest_selector = dword1 >> 16;
        temp->u.gate.dest_offset   = dword1 & 0xffff;
        temp->valid = 1;
        break;

      case BX_386_CALL_GATE:
      case BX_386_INTERRUPT_GATE:
      case BX_386_TRAP_GATE:
        // param count only used for call gate
        temp->u.gate.param_count   = dword2 & 0x1f;
        temp->u.gate.dest_selector = dword1 >> 16;
        temp->u.gate.dest_offset   = (dword2 & 0xffff0000) |
                                     (dword1 & 0x0000ffff);
        temp->valid = 1;
        break;

      case BX_TASK_GATE:
        temp->u.taskgate.tss_selector = dword1 >> 16;
        temp->valid = 1;
        break;

      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        temp->u.system.base  = (dword1 >> 16) |
                               ((dword2 & 0xff) << 16) | (dword2 & 0xff000000);
        temp->u.system.limit = (dword1 & 0x0000ffff) | (dword2 & 0x000f0000);
        temp->u.system.g     = (dword2 & 0x00800000) > 0;
        temp->u.system.avl   = (dword2 & 0x00100000) > 0;
        if (temp->u.system.g)
          temp->u.system.limit_scaled = (temp->u.system.limit << 12) | 0xfff;
        else
          temp->u.system.limit_scaled = (temp->u.system.limit);
        temp->valid = 1;
        break;

      default:
        BX_PANIC(("parse_descriptor(): case %u unfinished", (unsigned) temp->type));
        temp->valid = 0;
    }
  }
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::load_ss(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl)
{
  // Add cpl to the selector value.
  selector->value = (0xfffc & selector->value) | cpl;

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector = *selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache = *descriptor;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl = cpl;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    loadSRegLMNominal(BX_SEG_REG_SS, selector->value, cpl);
    return;
  }
#endif

  if ((BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value & 0xfffc) == 0)
    BX_PANIC(("load_ss(): null selector passed"));

  if (!BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid)
    BX_PANIC(("load_ss(): invalid selector/descriptor passed."));
}

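// Descriptor table entries are 8 bytes, so a selector with a given index
// covers bytes index*8 .. index*8+7; the limit checks below verify that the
// last byte of the entry lies inside the table.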
void BX_CPU_C::fetch_raw_descriptor(const bx_selector_t *selector,
                      Bit32u *dword1, Bit32u *dword2, unsigned exception_no)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 7) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor: GDT: index (%x)%x > limit (%x)",
          index*8 + 7, index, BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor: LDTR.valid=0"));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    if ((index*8 + 7) > BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor: LDT: index (%x)%x > limit (%x)",
          index*8 + 7, index, BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.system.base + index*8;
  }

  raw_descriptor = system_read_qword(offset);

  *dword1 = GET32L(raw_descriptor);
  *dword2 = GET32H(raw_descriptor);
}

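// fetch_raw_descriptor2() is the non-faulting variant: it returns 0 instead
// of raising an exception when the selector points outside its table, so
// callers can probe a descriptor slot without generating a fault.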
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::fetch_raw_descriptor2(const bx_selector_t *selector, Bit32u *dword1, Bit32u *dword2)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 7) > BX_CPU_THIS_PTR gdtr.limit)
      return 0;
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor2: LDTR.valid=0"));
      return 0;
    }
    if ((index*8 + 7) > BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled)
      return 0;
    offset = BX_CPU_THIS_PTR ldtr.cache.u.system.base + index*8;
  }

  raw_descriptor = system_read_qword(offset);

  *dword1 = GET32L(raw_descriptor);
  *dword2 = GET32H(raw_descriptor);

  return 1;
}

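// In long mode, system descriptors (LDT/TSS and gates) expand to 16 bytes;
// fetch_raw_descriptor_64() therefore reads two quadwords and requires the
// TYPE field of the extended dword to be zero.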
#if BX_SUPPORT_X86_64
void BX_CPU_C::fetch_raw_descriptor_64(const bx_selector_t *selector,
         Bit32u *dword1, Bit32u *dword2, Bit32u *dword3, unsigned exception_no)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor1, raw_descriptor2;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 15) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor64: GDT: index (%x)%x > limit (%x)",
          index*8 + 15, index, BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor64: LDTR.valid=0"));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    if ((index*8 + 15) > BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor64: LDT: index (%x)%x > limit (%x)",
          index*8 + 15, index, BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.system.base + index*8;
  }

  raw_descriptor1 = system_read_qword(offset);
  raw_descriptor2 = system_read_qword(offset + 8);

  if (raw_descriptor2 & BX_CONST64(0x00001F0000000000)) {
    BX_ERROR(("fetch_raw_descriptor64: extended attributes DWORD4 TYPE != 0"));
    exception(BX_GP_EXCEPTION, selector->value & 0xfffc, 0);
  }

  *dword1 = GET32L(raw_descriptor1);
  *dword2 = GET32H(raw_descriptor1);
  *dword3 = GET32L(raw_descriptor2);
}
#endif