1 /////////////////////////////////////////////////////////////////////////
2 // $Id: segment_ctrl_pro.cc,v 1.103 2008/12/06 18:52:02 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
5 // Copyright (C) 2001 MandrakeSoft S.A.
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 /////////////////////////////////////////////////////////////////////////
28 #define NEED_CPU_REG_SHORTCUTS 1
31 #define LOG_THIS BX_CPU_THIS_PTR
33 void BX_CPP_AttrRegparmN(2)
34 BX_CPU_C::load_seg_reg(bx_segment_reg_t
*seg
, Bit16u new_value
)
38 if (seg
== &BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
])
40 bx_selector_t ss_selector
;
41 bx_descriptor_t descriptor
;
42 Bit32u dword1
, dword2
;
44 parse_selector(new_value
, &ss_selector
);
46 if ((new_value
& 0xfffc) == 0) { /* null selector */
48 // allow SS = 0 in 64 bit mode only with cpl != 3 and rpl=cpl
49 if (Is64BitMode() && CPL
!= 3 && ss_selector
.rpl
== CPL
) {
50 load_null_selector(seg
);
54 BX_ERROR(("load_seg_reg(SS): loading null selector"));
55 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
58 fetch_raw_descriptor(&ss_selector
, &dword1
, &dword2
, BX_GP_EXCEPTION
);
60 /* selector's RPL must = CPL, else #GP(selector) */
61 if (ss_selector
.rpl
!= CPL
) {
62 BX_ERROR(("load_seg_reg(SS): rpl != CPL"));
63 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
66 parse_descriptor(dword1
, dword2
, &descriptor
);
68 if (descriptor
.valid
==0) {
69 BX_ERROR(("load_seg_reg(SS): valid bit cleared"));
70 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
73 /* AR byte must indicate a writable data segment else #GP(selector) */
74 if (descriptor
.segment
==0 || IS_CODE_SEGMENT(descriptor
.type
) ||
75 IS_DATA_SEGMENT_WRITEABLE(descriptor
.type
) == 0)
77 BX_ERROR(("load_seg_reg(SS): not writable data segment"));
78 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
81 /* DPL in the AR byte must equal CPL else #GP(selector) */
82 if (descriptor
.dpl
!= CPL
) {
83 BX_ERROR(("load_seg_reg(SS): dpl != CPL"));
84 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
87 /* segment must be marked PRESENT else #SS(selector) */
88 if (! IS_PRESENT(descriptor
)) {
89 BX_ERROR(("load_seg_reg(SS): not present"));
90 exception(BX_SS_EXCEPTION
, new_value
& 0xfffc, 0);
93 /* load SS with selector, load SS cache with descriptor */
94 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].selector
= ss_selector
;
95 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
= descriptor
;
96 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
.valid
= 1;
98 /* now set accessed bit in descriptor */
99 if (!(dword2
& 0x0100)) {
101 if (ss_selector
.ti
== 0) { /* GDT */
102 access_write_linear(BX_CPU_THIS_PTR gdtr
.base
+ ss_selector
.index
*8 + 4, 4, 0, &dword2
);
105 access_write_linear(BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
+ ss_selector
.index
*8 + 4, 4, 0, &dword2
);
111 else if ((seg
==&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_DS
]) ||
112 (seg
==&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_ES
])
113 #if BX_CPU_LEVEL >= 3
114 || (seg
==&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_FS
]) ||
115 (seg
==&BX_CPU_THIS_PTR sregs
[BX_SEG_REG_GS
])
119 bx_descriptor_t descriptor
;
120 bx_selector_t selector
;
121 Bit32u dword1
, dword2
;
123 if ((new_value
& 0xfffc) == 0) { /* null selector */
124 load_null_selector(seg
);
128 parse_selector(new_value
, &selector
);
129 fetch_raw_descriptor(&selector
, &dword1
, &dword2
, BX_GP_EXCEPTION
);
130 parse_descriptor(dword1
, dword2
, &descriptor
);
132 if (descriptor
.valid
==0) {
133 BX_ERROR(("load_seg_reg(%s, 0x%04x): invalid segment", strseg(seg
), new_value
));
134 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
137 /* AR byte must indicate data or readable code segment else #GP(selector) */
138 if (descriptor
.segment
==0 || (IS_CODE_SEGMENT(descriptor
.type
) &&
139 IS_CODE_SEGMENT_READABLE(descriptor
.type
) == 0))
141 BX_ERROR(("load_seg_reg(%s, 0x%04x): not data or readable code", strseg(seg
), new_value
));
142 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
145 /* If data or non-conforming code, then both the RPL and the CPL
146 * must be less than or equal to DPL in AR byte else #GP(selector) */
147 if (IS_DATA_SEGMENT(descriptor
.type
) ||
148 IS_CODE_SEGMENT_NON_CONFORMING(descriptor
.type
))
150 if ((selector
.rpl
> descriptor
.dpl
) || (CPL
> descriptor
.dpl
)) {
151 BX_ERROR(("load_seg_reg(%s, 0x%04x): RPL & CPL must be <= DPL", strseg(seg
), new_value
));
152 exception(BX_GP_EXCEPTION
, new_value
& 0xfffc, 0);
156 /* segment must be marked PRESENT else #NP(selector) */
157 if (! IS_PRESENT(descriptor
)) {
158 BX_ERROR(("load_seg_reg(%s, 0x%04x): segment not present", strseg(seg
), new_value
));
159 exception(BX_NP_EXCEPTION
, new_value
& 0xfffc, 0);
162 /* load segment register with selector */
163 /* load segment register-cache with descriptor */
164 seg
->selector
= selector
;
165 seg
->cache
= descriptor
;
166 seg
->cache
.valid
= 1;
168 /* now set accessed bit in descriptor */
169 /* wmr: don't bother if it's already set (thus allowing */
170 /* GDT to be in read-only pages like real hdwe does) */
172 if (!(dword2
& 0x0100)) {
174 if (selector
.ti
== 0) { /* GDT */
175 access_write_linear(BX_CPU_THIS_PTR gdtr
.base
+ selector
.index
*8 + 4, 4, 0, &dword2
);
178 access_write_linear(BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
+ selector
.index
*8 + 4, 4, 0, &dword2
);
184 BX_PANIC(("load_seg_reg(): invalid segment register passed!"));
189 /* real or v8086 mode */
192 According to Intel, each time any segment register is loaded in real
193 mode, the base address is calculated as 16 times the segment value,
194 while the access rights and size limit attributes are given fixed,
195 "real-mode compatible" values. This is not true. In fact, only the CS
196 descriptor caches for the 286, 386, and 486 get loaded with fixed
197 values each time the segment register is loaded. Loading CS, or any
198 other segment register in real mode, on later Intel processors doesn't
199 change the access rights or the segment size limit attributes stored
200 in the descriptor cache registers. For these segments, the access
201 rights and segment size limit attributes from any previous setting are
204 seg
->selector
.value
= new_value
;
205 seg
->selector
.rpl
= real_mode() ? 0 : 3;
206 seg
->cache
.valid
= 1;
207 seg
->cache
.u
.segment
.base
= new_value
<< 4;
208 seg
->cache
.segment
= 1; /* regular segment */
209 seg
->cache
.p
= 1; /* present */
210 seg
->cache
.type
= BX_DATA_READ_WRITE_ACCESSED
;
212 if (seg
== &BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
]) {
213 invalidate_prefetch_q();
214 updateFetchModeMask();
215 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
216 handleAlignmentCheck(); // CPL was modified
220 /* Do not modify segment limit and AR bytes when in real mode */
221 /* Support for big real mode */
222 if (real_mode()) return;
224 seg
->cache
.dpl
= 3; /* we are in v8086 mode */
225 seg
->cache
.u
.segment
.limit
= 0xffff;
226 seg
->cache
.u
.segment
.limit_scaled
= 0xffff;
227 #if BX_CPU_LEVEL >= 3
228 seg
->cache
.u
.segment
.g
= 0; /* byte granular */
229 seg
->cache
.u
.segment
.d_b
= 0; /* default 16bit size */
230 #if BX_SUPPORT_X86_64
231 seg
->cache
.u
.segment
.l
= 0; /* default 16bit size */
233 seg
->cache
.u
.segment
.avl
= 0;
237 void BX_CPP_AttrRegparmN(1)
238 BX_CPU_C::load_null_selector(bx_segment_reg_t
*seg
)
240 seg
->selector
.index
= 0;
241 seg
->selector
.ti
= 0;
242 seg
->selector
.rpl
= 0;
243 seg
->selector
.value
= 0;
245 seg
->cache
.valid
= 0; /* invalidate null selector */
248 seg
->cache
.segment
= 1; /* data/code segment */
251 seg
->cache
.u
.segment
.base
= 0;
252 seg
->cache
.u
.segment
.limit
= 0;
253 seg
->cache
.u
.segment
.limit_scaled
= 0;
254 seg
->cache
.u
.segment
.g
= 0;
255 seg
->cache
.u
.segment
.d_b
= 0;
256 seg
->cache
.u
.segment
.avl
= 0;
257 #if BX_SUPPORT_X86_64
258 seg
->cache
.u
.segment
.l
= 0;
#if BX_SUPPORT_X86_64
void BX_CPU_C::loadSRegLMNominal(unsigned segI, unsigned selector, unsigned dpl)
{
  bx_segment_reg_t *seg = & BX_CPU_THIS_PTR sregs[segI];

  // Load a segment register in long-mode with nominal values,
  // so descriptor cache values are compatible with existing checks.
  seg->cache.u.segment.base = 0;
  seg->cache.valid = 1;
  seg->cache.dpl = dpl;

  seg->selector.value = selector;
}
#endif
277 BX_CPP_INLINE
void BX_CPU_C::validate_seg_reg(unsigned seg
)
280 FOR (seg = ES, DS, FS, GS)
282 IF ((seg.attr.dpl < CPL) && ((seg.attr.type = 'data')
283 || (seg.attr.type = 'non-conforming-code')))
285 seg = NULL // can't use lower dpl data segment at higher cpl
290 bx_segment_reg_t
*segment
= &BX_CPU_THIS_PTR sregs
[seg
];
292 if (segment
->cache
.dpl
< CPL
)
294 // invalidate if data or non-conforming code segment
295 if (segment
->cache
.valid
==0 || segment
->cache
.segment
==0 ||
296 IS_DATA_SEGMENT(segment
->cache
.type
) ||
297 IS_CODE_SEGMENT_NON_CONFORMING(segment
->cache
.type
))
299 segment
->selector
.value
= 0;
300 segment
->cache
.valid
= 0;
305 void BX_CPU_C::validate_seg_regs(void)
307 validate_seg_reg(BX_SEG_REG_ES
);
308 validate_seg_reg(BX_SEG_REG_DS
);
309 validate_seg_reg(BX_SEG_REG_FS
);
310 validate_seg_reg(BX_SEG_REG_GS
);
313 void BX_CPP_AttrRegparmN(2)
314 BX_CPU_C::parse_selector(Bit16u raw_selector
, bx_selector_t
*selector
)
316 selector
->value
= raw_selector
;
317 selector
->index
= raw_selector
>> 3;
318 selector
->ti
= (raw_selector
>> 2) & 0x01;
319 selector
->rpl
= raw_selector
& 0x03;
322 Bit8u
BX_CPP_AttrRegparmN(1)
323 BX_CPU_C::ar_byte(const bx_descriptor_t
*d
)
335 void BX_CPP_AttrRegparmN(2)
336 BX_CPU_C::set_ar_byte(bx_descriptor_t
*d
, Bit8u ar_byte
)
338 d
->p
= (ar_byte
>> 7) & 0x01;
339 d
->dpl
= (ar_byte
>> 5) & 0x03;
340 d
->segment
= (ar_byte
>> 4) & 0x01;
341 d
->type
= (ar_byte
& 0x0f);
344 Bit32u
BX_CPP_AttrRegparmN(1)
345 BX_CPU_C::get_descriptor_l(const bx_descriptor_t
*d
)
354 val
= ((d
->u
.segment
.base
& 0xffff) << 16) | (d
->u
.segment
.limit
& 0xffff);
359 case 0: // Reserved (not defined)
360 BX_ERROR(("#get_descriptor_l(): type %d not finished", d
->type
));
363 case BX_SYS_SEGMENT_LDT
:
364 case BX_SYS_SEGMENT_AVAIL_286_TSS
:
365 case BX_SYS_SEGMENT_BUSY_286_TSS
:
366 case BX_SYS_SEGMENT_AVAIL_386_TSS
:
367 case BX_SYS_SEGMENT_BUSY_386_TSS
:
368 val
= ((d
->u
.system
.base
& 0xffff) << 16) | (d
->u
.system
.limit
& 0xffff);
372 BX_PANIC(("#get_descriptor_l(): type %d not finished", d
->type
));
378 Bit32u
BX_CPP_AttrRegparmN(1)
379 BX_CPU_C::get_descriptor_h(const bx_descriptor_t
*d
)
388 val
= (d
->u
.segment
.base
& 0xff000000) |
389 ((d
->u
.segment
.base
>> 16) & 0x000000ff) |
394 (d
->u
.segment
.limit
& 0xf0000) |
395 (d
->u
.segment
.avl
<< 20) |
396 #if BX_SUPPORT_X86_64
397 (d
->u
.segment
.l
<< 21) |
399 (d
->u
.segment
.d_b
<< 22) |
400 (d
->u
.segment
.g
<< 23);
405 case 0: // Reserved (not yet defined)
406 BX_ERROR(("#get_descriptor_h(): type %d not finished", d
->type
));
409 case BX_SYS_SEGMENT_AVAIL_286_TSS
:
410 case BX_SYS_SEGMENT_BUSY_286_TSS
:
411 BX_ASSERT(d
->u
.system
.g
== 0);
412 BX_ASSERT(d
->u
.system
.avl
== 0);
414 case BX_SYS_SEGMENT_LDT
:
415 case BX_SYS_SEGMENT_AVAIL_386_TSS
:
416 case BX_SYS_SEGMENT_BUSY_386_TSS
:
417 val
= ((d
->u
.system
.base
>> 16) & 0xff) |
421 (d
->u
.system
.limit
& 0xf0000) |
422 (d
->u
.system
.avl
<< 20) |
423 (d
->u
.system
.g
<< 23) |
424 (d
->u
.system
.base
& 0xff000000);
428 BX_PANIC(("#get_descriptor_h(): type %d not finished", d
->type
));
434 #if BX_CPU_LEVEL >= 3
435 Bit16u
BX_CPP_AttrRegparmN(1)
436 BX_CPU_C::get_segment_ar_data(const bx_descriptor_t
*d
)
440 if (d
->segment
) { /* data/code segment descriptors */
445 (d
->u
.segment
.avl
<< 12) |
446 #if BX_SUPPORT_X86_64
447 (d
->u
.segment
.l
<< 13) |
449 (d
->u
.segment
.d_b
<< 14) |
450 (d
->u
.segment
.g
<< 15);
456 case BX_SYS_SEGMENT_AVAIL_286_TSS
:
457 case BX_SYS_SEGMENT_BUSY_286_TSS
:
458 BX_ASSERT(d
->u
.system
.g
== 0);
459 BX_ASSERT(d
->u
.system
.avl
== 0);
461 case BX_SYS_SEGMENT_LDT
:
462 case BX_SYS_SEGMENT_AVAIL_386_TSS
:
463 case BX_SYS_SEGMENT_BUSY_386_TSS
:
467 (d
->u
.system
.avl
<< 12) |
468 (d
->u
.system
.g
<< 15);
471 BX_PANIC(("get_segment_ar_data(): case %u unsupported", (unsigned) d
->type
));
477 bx_bool
BX_CPU_C::set_segment_ar_data(bx_segment_reg_t
*seg
, bx_bool valid
,
478 Bit16u raw_selector
, bx_address base
, Bit32u limit_scaled
, Bit16u ar_data
)
480 parse_selector(raw_selector
, &seg
->selector
);
482 bx_descriptor_t
*d
= &seg
->cache
;
484 d
->p
= (ar_data
>> 7) & 0x1;
485 d
->dpl
= (ar_data
>> 5) & 0x3;
486 d
->segment
= (ar_data
>> 4) & 0x1;
487 d
->type
= (ar_data
& 0x0f);
491 if (d
->segment
) { /* data/code segment descriptors */
492 d
->u
.segment
.g
= (ar_data
>> 15) & 0x1;
493 d
->u
.segment
.d_b
= (ar_data
>> 14) & 0x1;
494 #if BX_SUPPORT_X86_64
495 d
->u
.segment
.l
= (ar_data
>> 13) & 0x1;
497 d
->u
.segment
.avl
= (ar_data
>> 12) & 0x1;
499 d
->u
.segment
.base
= base
;
500 d
->u
.segment
.limit_scaled
= limit_scaled
;
503 d
->u
.segment
.limit
= (d
->u
.segment
.limit_scaled
>> 12);
505 d
->u
.segment
.limit
= (d
->u
.segment
.limit_scaled
);
509 case BX_SYS_SEGMENT_LDT
:
510 case BX_SYS_SEGMENT_AVAIL_286_TSS
:
511 case BX_SYS_SEGMENT_BUSY_286_TSS
:
512 case BX_SYS_SEGMENT_AVAIL_386_TSS
:
513 case BX_SYS_SEGMENT_BUSY_386_TSS
:
514 d
->u
.system
.avl
= (ar_data
>> 12) & 0x1;
515 d
->u
.system
.g
= (ar_data
>> 15) & 0x1;
516 d
->u
.system
.base
= base
;
517 d
->u
.system
.limit_scaled
= limit_scaled
;
519 d
->u
.system
.limit
= (d
->u
.system
.limit_scaled
>> 12);
521 d
->u
.system
.limit
= (d
->u
.system
.limit_scaled
);
525 BX_PANIC(("set_segment_ar_data(): case %u unsupported", (unsigned) d
->type
));
533 void BX_CPP_AttrRegparmN(3)
534 BX_CPU_C::parse_descriptor(Bit32u dword1
, Bit32u dword2
, bx_descriptor_t
*temp
)
538 AR_byte
= dword2
>> 8;
539 temp
->p
= (AR_byte
>> 7) & 0x1;
540 temp
->dpl
= (AR_byte
>> 5) & 0x3;
541 temp
->segment
= (AR_byte
>> 4) & 0x1;
542 temp
->type
= (AR_byte
& 0xf);
543 temp
->valid
= 0; /* start out invalid */
545 if (temp
->segment
) { /* data/code segment descriptors */
546 temp
->u
.segment
.limit
= (dword1
& 0xffff);
547 temp
->u
.segment
.base
= (dword1
>> 16) | ((dword2
& 0xFF) << 16);
549 temp
->u
.segment
.limit
|= (dword2
& 0x000F0000);
550 temp
->u
.segment
.g
= (dword2
& 0x00800000) > 0;
551 temp
->u
.segment
.d_b
= (dword2
& 0x00400000) > 0;
552 #if BX_SUPPORT_X86_64
553 temp
->u
.segment
.l
= (dword2
& 0x00200000) > 0;
555 temp
->u
.segment
.avl
= (dword2
& 0x00100000) > 0;
556 temp
->u
.segment
.base
|= (dword2
& 0xFF000000);
558 if (temp
->u
.segment
.g
)
559 temp
->u
.segment
.limit_scaled
= (temp
->u
.segment
.limit
<< 12) | 0xfff;
561 temp
->u
.segment
.limit_scaled
= (temp
->u
.segment
.limit
);
565 else { // system & gate segment descriptors
566 switch (temp
->type
) {
574 case BX_286_CALL_GATE
:
575 case BX_286_INTERRUPT_GATE
:
576 case BX_286_TRAP_GATE
:
577 // param count only used for call gate
578 temp
->u
.gate
.param_count
= dword2
& 0x1f;
579 temp
->u
.gate
.dest_selector
= dword1
>> 16;
580 temp
->u
.gate
.dest_offset
= dword1
& 0xffff;
584 case BX_386_CALL_GATE
:
585 case BX_386_INTERRUPT_GATE
:
586 case BX_386_TRAP_GATE
:
587 // param count only used for call gate
588 temp
->u
.gate
.param_count
= dword2
& 0x1f;
589 temp
->u
.gate
.dest_selector
= dword1
>> 16;
590 temp
->u
.gate
.dest_offset
= (dword2
& 0xffff0000) |
591 (dword1
& 0x0000ffff);
596 temp
->u
.taskgate
.tss_selector
= dword1
>> 16;
600 case BX_SYS_SEGMENT_LDT
:
601 case BX_SYS_SEGMENT_AVAIL_286_TSS
:
602 case BX_SYS_SEGMENT_BUSY_286_TSS
:
603 case BX_SYS_SEGMENT_AVAIL_386_TSS
:
604 case BX_SYS_SEGMENT_BUSY_386_TSS
:
605 temp
->u
.system
.base
= (dword1
>> 16) |
606 ((dword2
& 0xff) << 16) | (dword2
& 0xff000000);
607 temp
->u
.system
.limit
= (dword1
& 0x0000ffff) | (dword2
& 0x000f0000);
608 temp
->u
.system
.g
= (dword2
& 0x00800000) > 0;
609 temp
->u
.system
.avl
= (dword2
& 0x00100000) > 0;
610 if (temp
->u
.system
.g
)
611 temp
->u
.system
.limit_scaled
= (temp
->u
.system
.limit
<< 12) | 0xfff;
613 temp
->u
.system
.limit_scaled
= (temp
->u
.system
.limit
);
618 BX_PANIC(("parse_descriptor(): case %u unfinished", (unsigned) temp
->type
));
624 void BX_CPP_AttrRegparmN(3)
625 BX_CPU_C::load_ss(bx_selector_t
*selector
, bx_descriptor_t
*descriptor
, Bit8u cpl
)
627 // Add cpl to the selector value.
628 selector
->value
= (0xfffc & selector
->value
) | cpl
;
630 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].selector
= *selector
;
631 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
= *descriptor
;
632 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].selector
.rpl
= cpl
;
634 #if BX_SUPPORT_X86_64
635 if (BX_CPU_THIS_PTR cpu_mode
== BX_MODE_LONG_64
) {
636 loadSRegLMNominal(BX_SEG_REG_SS
, selector
->value
, cpl
);
640 if ((BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].selector
.value
& 0xfffc) == 0)
641 BX_PANIC(("load_ss(): null selector passed"));
643 if (!BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
.valid
)
644 BX_PANIC(("load_ss(): invalid selector/descriptor passed."));
647 void BX_CPU_C::fetch_raw_descriptor(const bx_selector_t
*selector
,
648 Bit32u
*dword1
, Bit32u
*dword2
, unsigned exception_no
)
650 Bit32u index
= selector
->index
;
652 Bit64u raw_descriptor
;
654 if (selector
->ti
== 0) { /* GDT */
655 if ((index
*8 + 7) > BX_CPU_THIS_PTR gdtr
.limit
) {
656 BX_ERROR(("fetch_raw_descriptor: GDT: index (%x)%x > limit (%x)",
657 index
*8 + 7, index
, BX_CPU_THIS_PTR gdtr
.limit
));
658 exception(exception_no
, selector
->value
& 0xfffc, 0);
660 offset
= BX_CPU_THIS_PTR gdtr
.base
+ index
*8;
663 if (BX_CPU_THIS_PTR ldtr
.cache
.valid
==0) {
664 BX_ERROR(("fetch_raw_descriptor: LDTR.valid=0"));
665 exception(exception_no
, selector
->value
& 0xfffc, 0);
667 if ((index
*8 + 7) > BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit_scaled
) {
668 BX_ERROR(("fetch_raw_descriptor: LDT: index (%x)%x > limit (%x)",
669 index
*8 + 7, index
, BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit_scaled
));
670 exception(exception_no
, selector
->value
& 0xfffc, 0);
672 offset
= BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
+ index
*8;
675 raw_descriptor
= system_read_qword(offset
);
677 *dword1
= GET32L(raw_descriptor
);
678 *dword2
= GET32H(raw_descriptor
);
681 bx_bool
BX_CPP_AttrRegparmN(3)
682 BX_CPU_C::fetch_raw_descriptor2(const bx_selector_t
*selector
, Bit32u
*dword1
, Bit32u
*dword2
)
684 Bit32u index
= selector
->index
;
686 Bit64u raw_descriptor
;
688 if (selector
->ti
== 0) { /* GDT */
689 if ((index
*8 + 7) > BX_CPU_THIS_PTR gdtr
.limit
)
691 offset
= BX_CPU_THIS_PTR gdtr
.base
+ index
*8;
694 if (BX_CPU_THIS_PTR ldtr
.cache
.valid
==0) {
695 BX_ERROR(("fetch_raw_descriptor2: LDTR.valid=0"));
698 if ((index
*8 + 7) > BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit_scaled
)
700 offset
= BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
+ index
*8;
703 raw_descriptor
= system_read_qword(offset
);
705 *dword1
= GET32L(raw_descriptor
);
706 *dword2
= GET32H(raw_descriptor
);
711 #if BX_SUPPORT_X86_64
712 void BX_CPU_C::fetch_raw_descriptor_64(const bx_selector_t
*selector
,
713 Bit32u
*dword1
, Bit32u
*dword2
, Bit32u
*dword3
, unsigned exception_no
)
715 Bit32u index
= selector
->index
;
717 Bit64u raw_descriptor1
, raw_descriptor2
;
719 if (selector
->ti
== 0) { /* GDT */
720 if ((index
*8 + 15) > BX_CPU_THIS_PTR gdtr
.limit
) {
721 BX_ERROR(("fetch_raw_descriptor64: GDT: index (%x)%x > limit (%x)",
722 index
*8 + 15, index
, BX_CPU_THIS_PTR gdtr
.limit
));
723 exception(exception_no
, selector
->value
& 0xfffc, 0);
725 offset
= BX_CPU_THIS_PTR gdtr
.base
+ index
*8;
728 if (BX_CPU_THIS_PTR ldtr
.cache
.valid
==0) {
729 BX_ERROR(("fetch_raw_descriptor64: LDTR.valid=0"));
730 exception(exception_no
, selector
->value
& 0xfffc, 0);
732 if ((index
*8 + 15) > BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit_scaled
) {
733 BX_ERROR(("fetch_raw_descriptor64: LDT: index (%x)%x > limit (%x)",
734 index
*8 + 15, index
, BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.limit_scaled
));
735 exception(exception_no
, selector
->value
& 0xfffc, 0);
737 offset
= BX_CPU_THIS_PTR ldtr
.cache
.u
.system
.base
+ index
*8;
740 raw_descriptor1
= system_read_qword(offset
);
741 raw_descriptor2
= system_read_qword(offset
+ 8);
743 if (raw_descriptor2
& BX_CONST64(0x00001F0000000000)) {
744 BX_ERROR(("fetch_raw_descriptor64: extended attributes DWORD4 TYPE != 0"));
745 exception(BX_GP_EXCEPTION
, selector
->value
& 0xfffc, 0);
748 *dword1
= GET32L(raw_descriptor1
);
749 *dword2
= GET32H(raw_descriptor1
);
750 *dword3
= GET32L(raw_descriptor2
);