/////////////////////////////////////////////////////////////////////////
// $Id: tasking.cc,v 1.39 2007/07/09 15:16:14 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
// ======================
// 286 Task State Segment
// ======================
//       dynamic item                | hex  dec offset
// 0     task LDT selector           | 2a   42
// 1     DS selector                 | 28   40
// 1     SS selector                 | 26   38
// 1     CS selector                 | 24   36
// 1     ES selector                 | 22   34
// 1     DI                          | 20   32
// 1     SI                          | 1e   30
// 1     BP                          | 1c   28
// 1     SP                          | 1a   26
// 1     BX                          | 18   24
// 1     DX                          | 16   22
// 1     CX                          | 14   20
// 1     AX                          | 12   18
// 1     flag word                   | 10   16
// 1     IP (entry point)            | 0e   14
// 0     SS for CPL 2                | 0c   12
// 0     SP for CPL 2                | 0a   10
// 0     SS for CPL 1                | 08   08
// 0     SP for CPL 1                | 06   06
// 0     SS for CPL 0                | 04   04
// 0     SP for CPL 0                | 02   02
//       back link selector to TSS   | 00   00
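
// For illustration only: the 16-bit TSS above viewed as a C struct. This
// is our own sketch, not a Bochs data structure; field names are ours,
// and offsets match the map above (Bit16u comes from bochs.h).
struct bx_tss286_layout_sketch {
  Bit16u back_link;           // 0x00  back link selector to TSS
  Bit16u sp0, ss0;            // 0x02, 0x04  stack for CPL 0
  Bit16u sp1, ss1;            // 0x06, 0x08  stack for CPL 1
  Bit16u sp2, ss2;            // 0x0a, 0x0c  stack for CPL 2
  Bit16u ip, flags;           // 0x0e, 0x10  entry point and flag word
  Bit16u ax, cx, dx, bx;      // 0x12 .. 0x18  general registers
  Bit16u sp, bp, si, di;      // 0x1a .. 0x20
  Bit16u es, cs, ss, ds;      // 0x22 .. 0x28  segment selectors
  Bit16u ldt;                 // 0x2a  task LDT selector
};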
// ======================
// 386 Task State Segment
// ======================
// |I/O Map Base |000000000000000000000|T| 64  static
// |0000000000000000|     LDT          | 60  static
// |0000000000000000|   GS selector    | 5c  dynamic
// |0000000000000000|   FS selector    | 58  dynamic
// |0000000000000000|   DS selector    | 54  dynamic
// |0000000000000000|   SS selector    | 50  dynamic
// |0000000000000000|   CS selector    | 4c  dynamic
// |0000000000000000|   ES selector    | 48  dynamic
// |              EDI                  | 44  dynamic
// |              ESI                  | 40  dynamic
// |              EBP                  | 3c  dynamic
// |              ESP                  | 38  dynamic
// |              EBX                  | 34  dynamic
// |              EDX                  | 30  dynamic
// |              ECX                  | 2c  dynamic
// |              EAX                  | 28  dynamic
// |             EFLAGS                | 24  dynamic
// |         EIP (entry point)         | 20  dynamic
// |           CR3 (PDPR)              | 1c  static
// |000000000000000 |  SS for CPL 2    | 18  static
// |          ESP for CPL 2            | 14  static
// |000000000000000 |  SS for CPL 1    | 10  static
// |          ESP for CPL 1            | 0c  static
// |000000000000000 |  SS for CPL 0    | 08  static
// |          ESP for CPL 0            | 04  static
// |000000000000000 | back link to prev TSS | 00  dynamic (updated only when return expected)
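
// Likewise for illustration only: the 32-bit TSS as a C struct (again our
// sketch, not Bochs API). Selectors occupy the low 16 bits of their
// dwords; the upper bits are zero, as drawn above.
struct bx_tss386_layout_sketch {
  Bit32u back_link;                  // 0x00
  Bit32u esp0, ss0;                  // 0x04, 0x08
  Bit32u esp1, ss1;                  // 0x0c, 0x10
  Bit32u esp2, ss2;                  // 0x14, 0x18
  Bit32u cr3;                        // 0x1c  (PDPR)
  Bit32u eip, eflags;                // 0x20, 0x24
  Bit32u eax, ecx, edx, ebx;         // 0x28 .. 0x34
  Bit32u esp, ebp, esi, edi;         // 0x38 .. 0x44
  Bit32u es, cs, ss, ds, fs, gs;     // 0x48 .. 0x5c
  Bit32u ldt;                        // 0x60
  Bit16u trap;                       // 0x64  bit 0 is the T (debug trap) bit
  Bit16u io_map_base;                // 0x66
};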
// ==================================================
// Effect of task switch on Busy, NT, and Link Fields
// ==================================================
//
//   Field         jump        call/interrupt     iret
//   ------------------------------------------------------
//   new busy bit  Set         Set                No change
//   old busy bit  Cleared     No change          Cleared
//   new NT flag   No change   Set                No change
//   old NT flag   No change   No change          Cleared
//   new link      No change   old TSS selector   No change
//   old link      No change   No change          No change
//   CR0.TS        Set         Set                Set
//
// Note: I checked 386, 486, and Pentium, and they all exhibited
//       exactly the same behaviour as above. There seem to
//       be some misprints in the Intel docs.
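
// A minimal sketch of what the Busy rows above mean at the byte level
// (our helper, not a Bochs API): the TSS busy bit is bit 9 of the
// descriptor's second dword, which is exactly the bit that steps 5 and 6
// of task_switch() below flip on the in-memory GDT entry.
static inline Bit32u bx_tss_set_busy_sketch(Bit32u dword2, bx_bool busy)
{
  if (busy) return dword2 |  0x00000200; // JMP/CALL/INT: mark new TSS busy
  else      return dword2 & ~0x00000200; // JMP/IRET: clear old TSS busy bit
}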
void BX_CPU_C::task_switch(bx_selector_t *tss_selector,
                 bx_descriptor_t *tss_descriptor, unsigned source,
                 Bit32u dword1, Bit32u dword2)
{
  Bit32u obase32; // base address of old TSS
  Bit32u nbase32; // base address of new TSS
  Bit32u temp32, newCR3;
  Bit16u raw_cs_selector, raw_ss_selector, raw_ds_selector, raw_es_selector,
         raw_fs_selector, raw_gs_selector, raw_ldt_selector;
  Bit16u temp16, trap_word;
  bx_selector_t cs_selector, ss_selector, ds_selector, es_selector,
                fs_selector, gs_selector, ldt_selector;
  bx_descriptor_t cs_descriptor, ss_descriptor, ldt_descriptor;
  Bit32u old_TSS_max, new_TSS_max, old_TSS_limit, new_TSS_limit;
  Bit32u newEAX, newECX, newEDX, newEBX;
  Bit32u newESP, newEBP, newESI, newEDI;
  Bit32u newEFLAGS, newEIP;
  unsigned exception_no = 256; // no exception
  Bit16u error_code = 0;

  BX_DEBUG(("TASKING: ENTER"));

  invalidate_prefetch_q();

  // Discard any traps and inhibits for new context; traps will
  // resume upon return.
  BX_CPU_THIS_PTR debug_trap = 0;
  BX_CPU_THIS_PTR inhibit_mask = 0;

  // STEP 1: The following checks are made before calling task_switch(),
  //         for JMP & CALL only. These checks are NOT made for exceptions,
  //         interrupts & IRET.
  //
  //   1) TSS DPL must be >= CPL
  //   2) TSS DPL must be >= TSS selector RPL
  //   3) TSS descriptor is not busy.
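  //
  // For reference, a caller-side sketch of those three checks (hedged:
  // the real checks live in the far JMP/CALL code paths, not in this file):
  //
  //   if (tss_descriptor.dpl < CPL || tss_descriptor.dpl < tss_selector.rpl)
  //     exception(BX_GP_EXCEPTION, tss_selector.value & 0xfffc, 0);
  //   if (tss_descriptor.type & 2)  // busy TSS cannot be the switch target
  //     exception(BX_GP_EXCEPTION, tss_selector.value & 0xfffc, 0);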

  // TSS must be present, else #NP(TSS selector)
  if (tss_descriptor->p==0) {
    BX_ERROR(("task_switch: TSS descriptor is not present !"));
    exception(BX_NP_EXCEPTION, tss_selector->value & 0xfffc, 0);
  }

  // STEP 2: The processor performs limit-checking on the target TSS
  //         to verify that the TSS limit is greater than or equal
  //         to 67h (2Bh for 16-bit TSS).

  // Gather info about old TSS
  if (BX_CPU_THIS_PTR tr.cache.type <= 3) {
    // sanity check type: cannot have busy bit
    BX_ASSERT((BX_CPU_THIS_PTR tr.cache.type & 2) == 0);
    old_TSS_max = 0x29;
  }
  else {
    old_TSS_max = 0x5F;
  }

  // Gather info about new TSS
  if (tss_descriptor->type <= 3) { // {1,3}
    new_TSS_max = 0x2B;
  }
  else { // tss_descriptor->type = {9,11}
    new_TSS_max = 0x67;
  }

  obase32       = BX_CPU_THIS_PTR tr.cache.u.system.base;          // old TSS.base
  old_TSS_limit = BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled;
  nbase32       = tss_descriptor->u.system.base;                   // new TSS.base
  new_TSS_limit = tss_descriptor->u.system.limit_scaled;

  // TSS must have valid limit, else #TS(TSS selector)
  if (tss_selector->ti || tss_descriptor->valid==0 ||
      new_TSS_limit < new_TSS_max)
  {
    BX_ERROR(("task_switch(): new TSS limit < %d", new_TSS_max));
    exception(BX_TS_EXCEPTION, tss_selector->value & 0xfffc, 0);
  }

  if (obase32 == nbase32) {
    BX_INFO(("TASK SWITCH: switching to the same TSS !"));
  }

  // Check that old TSS, new TSS, and all segment descriptors
  // used in the task switch are paged in.
  if (BX_CPU_THIS_PTR cr0.get_PG())
  {
    dtranslate_linear(obase32, 0, BX_WRITE);                // old TSS
    dtranslate_linear(obase32 + old_TSS_max, 0, BX_WRITE);
    dtranslate_linear(nbase32, 0, BX_READ);                 // new TSS
    dtranslate_linear(nbase32 + new_TSS_max, 0, BX_READ);

    // ??? Humm, we check the new TSS region with READ above,
    // but sometimes we need to write the link field in that
    // region.  We also sometimes update other fields, perhaps
    // we need to WRITE check them here also, so that we keep
    // the written state consistent (ie, we don't encounter a
    // page fault in the middle).

    if (source == BX_TASK_FROM_CALL_OR_INT)
    {
      dtranslate_linear(nbase32,     0, BX_WRITE);
      dtranslate_linear(nbase32 + 2, 0, BX_WRITE);
    }
  }

  // Privilege and busy checks done in CALL, JUMP, INT, IRET

  // STEP 3: Save the current task state in the TSS. Up to this point,
  //         any exception that occurs aborts the task switch without
  //         changing the processor state.

  /* save current machine state in old task's TSS */

  Bit32u oldEFLAGS = read_eflags();

  /* if moving to busy task, clear NT bit */
  if (tss_descriptor->type == BX_SYS_SEGMENT_BUSY_286_TSS ||
      tss_descriptor->type == BX_SYS_SEGMENT_BUSY_386_TSS)
  {
    oldEFLAGS &= ~EFlagsNTMask;
  }

  if (BX_CPU_THIS_PTR tr.cache.type <= 3) {
    temp16 = IP; access_linear(obase32 + 14, 2, 0, BX_WRITE, &temp16);
    temp16 = oldEFLAGS; access_linear(obase32 + 16, 2, 0, BX_WRITE, &temp16);
    temp16 = AX; access_linear(obase32 + 18, 2, 0, BX_WRITE, &temp16);
    temp16 = CX; access_linear(obase32 + 20, 2, 0, BX_WRITE, &temp16);
    temp16 = DX; access_linear(obase32 + 22, 2, 0, BX_WRITE, &temp16);
    temp16 = BX; access_linear(obase32 + 24, 2, 0, BX_WRITE, &temp16);
    temp16 = SP; access_linear(obase32 + 26, 2, 0, BX_WRITE, &temp16);
    temp16 = BP; access_linear(obase32 + 28, 2, 0, BX_WRITE, &temp16);
    temp16 = SI; access_linear(obase32 + 30, 2, 0, BX_WRITE, &temp16);
    temp16 = DI; access_linear(obase32 + 32, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value;
    access_linear(obase32 + 34, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
    access_linear(obase32 + 36, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    access_linear(obase32 + 38, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value;
    access_linear(obase32 + 40, 2, 0, BX_WRITE, &temp16);
  }
  else {
    temp32 = EIP; access_linear(obase32 + 0x20, 4, 0, BX_WRITE, &temp32);
    temp32 = oldEFLAGS; access_linear(obase32 + 0x24, 4, 0, BX_WRITE, &temp32);
    temp32 = EAX; access_linear(obase32 + 0x28, 4, 0, BX_WRITE, &temp32);
    temp32 = ECX; access_linear(obase32 + 0x2c, 4, 0, BX_WRITE, &temp32);
    temp32 = EDX; access_linear(obase32 + 0x30, 4, 0, BX_WRITE, &temp32);
    temp32 = EBX; access_linear(obase32 + 0x34, 4, 0, BX_WRITE, &temp32);
    temp32 = ESP; access_linear(obase32 + 0x38, 4, 0, BX_WRITE, &temp32);
    temp32 = EBP; access_linear(obase32 + 0x3c, 4, 0, BX_WRITE, &temp32);
    temp32 = ESI; access_linear(obase32 + 0x40, 4, 0, BX_WRITE, &temp32);
    temp32 = EDI; access_linear(obase32 + 0x44, 4, 0, BX_WRITE, &temp32);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value;
    access_linear(obase32 + 0x48, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
    access_linear(obase32 + 0x4c, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    access_linear(obase32 + 0x50, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value;
    access_linear(obase32 + 0x54, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value;
    access_linear(obase32 + 0x58, 2, 0, BX_WRITE, &temp16);
    temp16 = BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value;
    access_linear(obase32 + 0x5c, 2, 0, BX_WRITE, &temp16);
  }

  // effect on link field of new task
  if (source == BX_TASK_FROM_CALL_OR_INT)
  {
    // set to selector of old task's TSS
    temp16 = BX_CPU_THIS_PTR tr.selector.value;
    access_linear(nbase32, 2, 0, BX_WRITE, &temp16);
  }

  // STEP 4: The new-task state is loaded from the TSS

  if (tss_descriptor->type <= 3) {
    access_linear(nbase32 + 14, 2, 0, BX_READ, &temp16);
    newEIP = temp16; // zero out upper word
    access_linear(nbase32 + 16, 2, 0, BX_READ, &temp16);
    newEFLAGS = temp16;

    // incoming TSS is 16bit:
    //   - upper word of general registers is set to 0xFFFF
    //   - upper word of eflags is zero'd
    //   - FS, GS are zero'd
    //   - upper word of eIP is zero'd
    access_linear(nbase32 + 18, 2, 0, BX_READ, &temp16);
    newEAX = 0xffff0000 | temp16;
    access_linear(nbase32 + 20, 2, 0, BX_READ, &temp16);
    newECX = 0xffff0000 | temp16;
    access_linear(nbase32 + 22, 2, 0, BX_READ, &temp16);
    newEDX = 0xffff0000 | temp16;
    access_linear(nbase32 + 24, 2, 0, BX_READ, &temp16);
    newEBX = 0xffff0000 | temp16;
    access_linear(nbase32 + 26, 2, 0, BX_READ, &temp16);
    newESP = 0xffff0000 | temp16;
    access_linear(nbase32 + 28, 2, 0, BX_READ, &temp16);
    newEBP = 0xffff0000 | temp16;
    access_linear(nbase32 + 30, 2, 0, BX_READ, &temp16);
    newESI = 0xffff0000 | temp16;
    access_linear(nbase32 + 32, 2, 0, BX_READ, &temp16);
    newEDI = 0xffff0000 | temp16;

    access_linear(nbase32 + 34, 2, 0, BX_READ, &raw_es_selector);
    access_linear(nbase32 + 36, 2, 0, BX_READ, &raw_cs_selector);
    access_linear(nbase32 + 38, 2, 0, BX_READ, &raw_ss_selector);
    access_linear(nbase32 + 40, 2, 0, BX_READ, &raw_ds_selector);
    access_linear(nbase32 + 42, 2, 0, BX_READ, &raw_ldt_selector);

    raw_fs_selector = 0; // use a NULL selector
    raw_gs_selector = 0; // use a NULL selector
    // No CR3 change for 286 task switch
    newCR3    = 0; // keep compiler happy (not used)
    trap_word = 0; // keep compiler happy (not used)
  }
  else {
    if (BX_CPU_THIS_PTR cr0.get_PG())
      access_linear(nbase32 + 0x1c, 4, 0, BX_READ, &newCR3);
    else
      newCR3 = 0; // keep compiler happy (not used)

    access_linear(nbase32 + 0x20, 4, 0, BX_READ, &newEIP);
    access_linear(nbase32 + 0x24, 4, 0, BX_READ, &newEFLAGS);
    access_linear(nbase32 + 0x28, 4, 0, BX_READ, &newEAX);
    access_linear(nbase32 + 0x2c, 4, 0, BX_READ, &newECX);
    access_linear(nbase32 + 0x30, 4, 0, BX_READ, &newEDX);
    access_linear(nbase32 + 0x34, 4, 0, BX_READ, &newEBX);
    access_linear(nbase32 + 0x38, 4, 0, BX_READ, &newESP);
    access_linear(nbase32 + 0x3c, 4, 0, BX_READ, &newEBP);
    access_linear(nbase32 + 0x40, 4, 0, BX_READ, &newESI);
    access_linear(nbase32 + 0x44, 4, 0, BX_READ, &newEDI);
    access_linear(nbase32 + 0x48, 2, 0, BX_READ, &raw_es_selector);
    access_linear(nbase32 + 0x4c, 2, 0, BX_READ, &raw_cs_selector);
    access_linear(nbase32 + 0x50, 2, 0, BX_READ, &raw_ss_selector);
    access_linear(nbase32 + 0x54, 2, 0, BX_READ, &raw_ds_selector);
    access_linear(nbase32 + 0x58, 2, 0, BX_READ, &raw_fs_selector);
    access_linear(nbase32 + 0x5c, 2, 0, BX_READ, &raw_gs_selector);
    access_linear(nbase32 + 0x60, 2, 0, BX_READ, &raw_ldt_selector);
    access_linear(nbase32 + 0x64, 2, 0, BX_READ, &trap_word);
  }

  // Step 5: If CALL, interrupt, or JMP, set busy flag in new task's
  //         TSS descriptor. If IRET, leave set.

  if (source == BX_TASK_FROM_JUMP || source == BX_TASK_FROM_CALL_OR_INT)
  {
    // set the new task's busy bit
    Bit32u laddr = BX_CPU_THIS_PTR gdtr.base + (tss_selector->index<<3) + 4;
    access_linear(laddr, 4, 0, BX_READ, &dword2);
    dword2 |= 0x00000200;
    access_linear(laddr, 4, 0, BX_WRITE, &dword2);
  }

  // Step 6: If JMP or IRET, clear busy bit in old task TSS descriptor,
  //         otherwise leave set.

  // effect on Busy bit of old task
  if (source == BX_TASK_FROM_JUMP || source == BX_TASK_FROM_IRET) {
    Bit32u laddr = BX_CPU_THIS_PTR gdtr.base +
                   (BX_CPU_THIS_PTR tr.selector.index<<3) + 4;
    access_linear(laddr, 4, 0, BX_READ, &temp32);
    temp32 &= ~0x00000200;
    access_linear(laddr, 4, 0, BX_WRITE, &temp32);
  }

  //
  // Commit point. At this point, we commit to the new
  // context. If an unrecoverable error occurs in further
  // processing, we complete the task switch without performing
  // additional access and segment availability checks and
  // generate the appropriate exception prior to beginning
  // execution of the new task.
  //

  // Step 7: Load the task register with the segment selector and
  //         descriptor for the new task TSS.

  BX_CPU_THIS_PTR tr.selector = *tss_selector;
  BX_CPU_THIS_PTR tr.cache    = *tss_descriptor;
  // Reset the busy-flag, because all functions expect non-busy types in
  // tr.cache. From Peter Lammich <peterl@sourceforge.net>.
  BX_CPU_THIS_PTR tr.cache.type &= ~2;

  // Step 8: Set TS flag in the CR0 image stored in the new task TSS.
  BX_CPU_THIS_PTR cr0.set_TS(1);

  // Task switch clears LE/L3/L2/L1/L0 in DR7
  BX_CPU_THIS_PTR dr7 &= ~0x00000155;

  // Step 9: If call or interrupt, set the NT flag in the eflags
  //         image stored in new task's TSS. If IRET or JMP,
  //         NT is restored from new TSS eflags image. (no change)

  // effect on NT flag of new task
  if (source == BX_TASK_FROM_CALL_OR_INT) {
    newEFLAGS |= EFlagsNTMask; // NT flag is set
  }

  // Step 10: Load the new task (dynamic) state from new TSS.
  //          Any errors associated with loading and qualification of
  //          segment descriptors in this step occur in the new task's
  //          context. State loaded here includes LDTR, CR3,
  //          EFLAGS, EIP, general purpose registers, and segment
  //          descriptor parts of the segment registers.

  if ((tss_descriptor->type >= 9) && BX_CPU_THIS_PTR cr0.get_PG()) {
    // change CR3 only if it was actually modified
    if (newCR3 != BX_CPU_THIS_PTR cr3) {
      CR3_change(newCR3); // Tell paging unit about new cr3 value
      BX_DEBUG(("task_switch changing CR3 to 0x%08x", newCR3));
      BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_TASKSWITCH, newCR3);
    }
  }

  BX_CPU_THIS_PTR prev_eip = EIP = newEIP;

  EAX = newEAX;
  ECX = newECX;
  EDX = newEDX;
  EBX = newEBX;
  ESP = newESP;
  EBP = newEBP;
  ESI = newESI;
  EDI = newEDI;

  writeEFlags(newEFLAGS, EFlagsValidMask);

  // Fill in selectors for all segment registers. If errors
  // occur later, the selectors will at least be loaded.
  parse_selector(raw_cs_selector, &cs_selector);
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector = cs_selector;
  parse_selector(raw_ds_selector, &ds_selector);
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector = ds_selector;
  parse_selector(raw_es_selector, &es_selector);
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector = es_selector;
  parse_selector(raw_ss_selector, &ss_selector);
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector = ss_selector;
  parse_selector(raw_fs_selector, &fs_selector);
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector = fs_selector;
  parse_selector(raw_gs_selector, &gs_selector);
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector = gs_selector;

  parse_selector(raw_ldt_selector, &ldt_selector);
  BX_CPU_THIS_PTR ldtr.selector = ldt_selector;

  // Start out with invalid descriptor caches, fill in
  // with values only as they are validated.
  BX_CPU_THIS_PTR ldtr.cache.valid = 0;
  BX_CPU_THIS_PTR ldtr.cache.u.system.limit = 0;
  BX_CPU_THIS_PTR ldtr.cache.u.system.limit_scaled = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.valid = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.valid = 0;

  if (ldt_selector.ti) {
    // LDT selector must be in GDT
    BX_INFO(("task_switch: bad LDT selector TI=1"));
    exception_no = BX_TS_EXCEPTION;
    error_code   = raw_ldt_selector & 0xfffc;
    goto post_exception;
  }

  if ((raw_ldt_selector & 0xfffc) != 0) {
    bx_bool good = fetch_raw_descriptor2(&ldt_selector, &dword1, &dword2);
    if (!good) {
      BX_INFO(("task_switch: bad LDT fetch"));
      exception_no = BX_TS_EXCEPTION;
      error_code   = raw_ldt_selector & 0xfffc;
      goto post_exception;
    }

    parse_descriptor(dword1, dword2, &ldt_descriptor);

    // LDT selector of new task is valid, else #TS(new task's LDT)
    if (ldt_descriptor.valid==0 ||
        ldt_descriptor.type!=BX_SYS_SEGMENT_LDT ||
        ldt_descriptor.segment)
    {
      BX_INFO(("task_switch: bad LDT segment"));
      exception_no = BX_TS_EXCEPTION;
      error_code   = raw_ldt_selector & 0xfffc;
      goto post_exception;
    }

    // LDT of new task is present in memory, else #TS(new task's LDT)
    if (! IS_PRESENT(ldt_descriptor)) {
      exception_no = BX_TS_EXCEPTION;
      error_code   = raw_ldt_selector & 0xfffc;
      goto post_exception;
    }

    // All checks pass, fill in LDTR shadow cache
    BX_CPU_THIS_PTR ldtr.cache = ldt_descriptor;
  }
  else {
    // NULL LDT selector is OK, leave cache invalid
  }

  if (v8086_mode()) {
    // load seg regs as 8086 registers
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], raw_cs_selector);
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], raw_ss_selector);
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS], raw_ds_selector);
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES], raw_es_selector);
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS], raw_fs_selector);
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS], raw_gs_selector);
  }
  else {
    // if new selector is not null then perform following checks:
    //    index must be within its descriptor table limits else #TS(selector)
    //    AR byte must indicate data or readable code else #TS(selector)
    //    if data or non-conforming code then:
    //      DPL must be >= CPL else #TS(selector)
    //      DPL must be >= RPL else #TS(selector)
    //    AR byte must indicate PRESENT else #NP(selector)
    //    load cache with new segment descriptor and set valid bit

    if ((raw_cs_selector & 0xfffc) != 0) {
      bx_bool good = fetch_raw_descriptor2(&cs_selector, &dword1, &dword2);
      if (!good) {
        BX_INFO(("task_switch: bad CS fetch"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_cs_selector & 0xfffc;
        goto post_exception;
      }

      parse_descriptor(dword1, dword2, &cs_descriptor);

      // CS descriptor AR byte must indicate code segment else #TS(CS)
      if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
          IS_DATA_SEGMENT(cs_descriptor.type))
      {
        BX_PANIC(("task_switch: CS not valid executable seg"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_cs_selector & 0xfffc;
        goto post_exception;
      }

      // if non-conforming then DPL must equal selector RPL else #TS(CS)
      if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) &&
          cs_descriptor.dpl != cs_selector.rpl)
      {
        BX_INFO(("task_switch: non-conforming: CS.dpl!=CS.RPL"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_cs_selector & 0xfffc;
        goto post_exception;
      }

      // if conforming then DPL must be <= selector RPL else #TS(CS)
      if (IS_CODE_SEGMENT_CONFORMING(cs_descriptor.type) &&
          cs_descriptor.dpl > cs_selector.rpl)
      {
        BX_INFO(("task_switch: conforming: CS.dpl>RPL"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_cs_selector & 0xfffc;
        goto post_exception;
      }

      // Code segment is present in memory, else #NP(new code segment)
      if (! IS_PRESENT(cs_descriptor)) {
        BX_PANIC(("task_switch: CS.p==0"));
        exception_no = BX_NP_EXCEPTION;
        error_code   = raw_cs_selector & 0xfffc;
        goto post_exception;
      }

      // All checks pass, fill in shadow cache
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache = cs_descriptor;
    }
    else {
      // If new cs selector is null #TS(CS)
      BX_PANIC(("task_switch: CS NULL"));
      exception_no = BX_TS_EXCEPTION;
      error_code   = raw_cs_selector & 0xfffc;
      goto post_exception;
    }

#if BX_SUPPORT_ICACHE
    BX_CPU_THIS_PTR updateFetchModeMask();
#endif

    if ((raw_ss_selector & 0xfffc) != 0)
    {
      bx_bool good = fetch_raw_descriptor2(&ss_selector, &dword1, &dword2);
      if (!good) {
        BX_INFO(("task_switch: bad SS fetch"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_ss_selector & 0xfffc;
        goto post_exception;
      }

      parse_descriptor(dword1, dword2, &ss_descriptor);

      // SS selector must be within its descriptor table limits else #TS(SS)
      // SS descriptor AR byte must indicate writable data segment,
      // else #TS(SS)
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
          IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_INFO(("task_switch: SS not valid"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_ss_selector & 0xfffc;
        goto post_exception;
      }

      // Stack segment is present in memory, else #SS(new stack segment)
      if (! IS_PRESENT(ss_descriptor)) {
        BX_PANIC(("task_switch: SS not present"));
        exception_no = BX_SS_EXCEPTION;
        error_code   = raw_ss_selector & 0xfffc;
        goto post_exception;
      }

      // Stack segment DPL matches CS.RPL, else #TS(new stack segment)
      if (ss_descriptor.dpl != cs_selector.rpl) {
        BX_PANIC(("task_switch: SS.dpl != CS.RPL"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_ss_selector & 0xfffc;
        goto post_exception;
      }

      // Stack segment DPL matches selector RPL, else #TS(new stack segment)
      if (ss_descriptor.dpl != ss_selector.rpl) {
        BX_PANIC(("task_switch: SS.dpl != SS.rpl"));
        exception_no = BX_TS_EXCEPTION;
        error_code   = raw_ss_selector & 0xfffc;
        goto post_exception;
      }

      // All checks pass, fill in shadow cache
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache = ss_descriptor;
    }
    else {
      // SS selector is valid, else #TS(new stack segment)
      BX_PANIC(("task_switch: SS NULL"));
      exception_no = BX_TS_EXCEPTION;
      error_code   = raw_ss_selector & 0xfffc;
      goto post_exception;
    }

    task_switch_load_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS],
        &ds_selector, raw_ds_selector, cs_selector.rpl);
    task_switch_load_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES],
        &es_selector, raw_es_selector, cs_selector.rpl);
    task_switch_load_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS],
        &fs_selector, raw_fs_selector, cs_selector.rpl);
    task_switch_load_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS],
        &gs_selector, raw_gs_selector, cs_selector.rpl);
  }

  if ((tss_descriptor->type>=9) && (trap_word & 0x0001)) {
    BX_CPU_THIS_PTR debug_trap |= 0x00008000; // BT flag in DR6
    BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
    BX_INFO(("task_switch: T bit set in new TSS"));
  }

  //
  // Step 14: Begin execution of new task.
  //
  BX_DEBUG(("TASKING: LEAVE"));
  return;

post_exception:
  BX_CPU_THIS_PTR debug_trap = 0;
  BX_CPU_THIS_PTR inhibit_mask = 0;
  BX_INFO(("task switch: posting exception %u after commit point", exception_no));
  exception(exception_no, error_code, 0);
}

void BX_CPU_C::task_switch_load_selector(bx_segment_reg_t *seg,
                 bx_selector_t *selector, Bit16u raw_selector, Bit8u cs_rpl)
{
  bx_descriptor_t descriptor;
  Bit32u dword1, dword2;

  // NULL selector is OK, will leave cache invalid
  if ((raw_selector & 0xfffc) != 0)
  {
    bx_bool good = fetch_raw_descriptor2(selector, &dword1, &dword2);
    if (!good) {
      BX_ERROR(("task_switch(%s): bad selector fetch !", strseg(seg)));
      exception(BX_TS_EXCEPTION, raw_selector & 0xfffc, 0);
    }

    parse_descriptor(dword1, dword2, &descriptor);

    /* AR byte must indicate data or readable code segment else #TS(selector) */
    if (descriptor.segment==0 || (IS_CODE_SEGMENT(descriptor.type) &&
        IS_CODE_SEGMENT_READABLE(descriptor.type) == 0))
    {
      BX_ERROR(("task_switch(%s): not data or readable code !", strseg(seg)));
      exception(BX_TS_EXCEPTION, raw_selector & 0xfffc, 0);
    }

    /* If data or non-conforming code, then both the RPL and the CPL
     * must be less than or equal to DPL in AR byte else #TS(selector) */
    if (IS_DATA_SEGMENT(descriptor.type) ||
        IS_CODE_SEGMENT_NON_CONFORMING(descriptor.type))
    {
      if ((selector->rpl > descriptor.dpl) || (cs_rpl > descriptor.dpl)) {
        BX_ERROR(("task_switch(%s): RPL & CPL must be <= DPL", strseg(seg)));
        exception(BX_TS_EXCEPTION, raw_selector & 0xfffc, 0);
      }
    }

    /* descriptor AR byte must indicate PRESENT else #NP(selector) */
    if (! IS_PRESENT(descriptor)) {
      BX_ERROR(("task_switch(%s): descriptor not present !", strseg(seg)));
      exception(BX_NP_EXCEPTION, raw_selector & 0xfffc, 0);
    }

    // All checks pass, fill in shadow cache
    seg->cache = descriptor;
  }
}

void BX_CPU_C::get_SS_ESP_from_TSS(unsigned pl, Bit16u *ss, Bit32u *esp)
{
  if (BX_CPU_THIS_PTR tr.cache.valid==0)
    BX_PANIC(("get_SS_ESP_from_TSS: TR.cache invalid"));

  if (BX_CPU_THIS_PTR tr.cache.type==BX_SYS_SEGMENT_AVAIL_386_TSS) {
    // 32-bit TSS
    Bit32u TSSstackaddr = 8*pl + 4;
    if ((TSSstackaddr+7) > BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled) {
      BX_DEBUG(("get_SS_ESP_from_TSS(386): TSSstackaddr > TSS.LIMIT"));
      exception(BX_TS_EXCEPTION, BX_CPU_THIS_PTR tr.selector.value & 0xfffc, 0);
    }
    access_linear(BX_CPU_THIS_PTR tr.cache.u.system.base +
        TSSstackaddr+4, 2, 0, BX_READ, ss);
    access_linear(BX_CPU_THIS_PTR tr.cache.u.system.base +
        TSSstackaddr,   4, 0, BX_READ, esp);
  }
  else if (BX_CPU_THIS_PTR tr.cache.type==BX_SYS_SEGMENT_AVAIL_286_TSS) {
    // 16-bit TSS
    Bit16u temp16;
    Bit32u TSSstackaddr = 4*pl + 2;
    if ((TSSstackaddr+4) > BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled) {
      BX_DEBUG(("get_SS_ESP_from_TSS(286): TSSstackaddr > TSS.LIMIT"));
      exception(BX_TS_EXCEPTION, BX_CPU_THIS_PTR tr.selector.value & 0xfffc, 0);
    }
    access_linear(BX_CPU_THIS_PTR tr.cache.u.system.base +
        TSSstackaddr+2, 2, 0, BX_READ, ss);
    access_linear(BX_CPU_THIS_PTR tr.cache.u.system.base +
        TSSstackaddr,   2, 0, BX_READ, &temp16);
    *esp = temp16; // truncate
  }
  else {
    BX_PANIC(("get_SS_ESP_from_TSS: TR is bogus type (%u)",
             (unsigned) BX_CPU_THIS_PTR tr.cache.type));
  }
}
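
// Worked example of the offset arithmetic above: for a 386 TSS and pl=1,
// TSSstackaddr = 8*1 + 4 = 0x0c, so ESP1 is read from TSS+0x0c and SS1
// from TSS+0x10; for a 286 TSS, 4*1 + 2 = 0x06 gives SP1, with SS1 at
// TSS+0x08. Both match the layout maps at the top of this file.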

#if BX_SUPPORT_X86_64
void BX_CPU_C::get_RSP_from_TSS(unsigned pl, Bit64u *rsp)
{
  if (BX_CPU_THIS_PTR tr.cache.valid==0)
    BX_PANIC(("get_RSP_from_TSS: TR.cache invalid"));

  // 64-bit TSS: RSPn is 8 bytes at offset 8*pl + 4
  Bit32u TSSstackaddr = 8*pl + 4;
  if ((TSSstackaddr+7) > BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled) {
    BX_DEBUG(("get_RSP_from_TSS(): TSSstackaddr > TSS.LIMIT"));
    exception(BX_TS_EXCEPTION, BX_CPU_THIS_PTR tr.selector.value & 0xfffc, 0);
  }

  access_linear(BX_CPU_THIS_PTR tr.cache.u.system.base +
      TSSstackaddr, 8, 0, BX_READ, rsp);
}
#endif // #if BX_SUPPORT_X86_64