////////////////////////////////////////////////////////////////////////
// $Id: iret.cc,v 1.39 2008/09/06 17:44:02 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2005 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code.
#define RIP EIP
#define RSP ESP
#endif

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::iret_protected(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    long_iret(i);
    return;
  }
#endif

  if (BX_CPU_THIS_PTR get_NT()) /* NT = 1: RETURN FROM NESTED TASK */
  {
    /* what's the deal with NT & VM ? */
    Bit16u raw_link_selector;
    bx_selector_t link_selector;
    bx_descriptor_t tss_descriptor;

    if (BX_CPU_THIS_PTR get_VM())
      BX_PANIC(("iret_protected: VM shouldn't be set here !"));

    BX_DEBUG(("IRET: nested task return"));

    if (BX_CPU_THIS_PTR tr.cache.valid==0)
      BX_PANIC(("IRET: TR not valid"));

    // examine back link selector in TSS addressed by current TR
    raw_link_selector = system_read_word(BX_CPU_THIS_PTR tr.cache.u.system.base);

    // must specify global, else #TS(new TSS selector)
    parse_selector(raw_link_selector, &link_selector);

    if (link_selector.ti) {
      BX_ERROR(("iret: link selector.ti=1"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }

    // index must be within GDT limits, else #TS(new TSS selector)
    fetch_raw_descriptor(&link_selector, &dword1, &dword2, BX_TS_EXCEPTION);

    // AR byte must specify TSS, else #TS(new TSS selector)
    // new TSS must be busy, else #TS(new TSS selector)
    parse_descriptor(dword1, dword2, &tss_descriptor);
    if (tss_descriptor.valid==0 || tss_descriptor.segment) {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }
    if ((tss_descriptor.type!=11) && (tss_descriptor.type!=3)) {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }

    // TSS must be present, else #NP(new TSS selector)
    if (! IS_PRESENT(tss_descriptor)) {
      BX_ERROR(("iret: task descriptor.p == 0"));
      exception(BX_NP_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }

    // switch tasks (without nesting) to TSS specified by back link selector
    task_switch(&link_selector, &tss_descriptor,
        BX_TASK_FROM_IRET, dword1, dword2);

    // mark the task just abandoned as not busy

    // EIP must be within code seg limit, else #GP(0)
    if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("iret: EIP > CS.limit"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
    return;
  }

  /* NT = 0: INTERRUPT RETURN ON STACK -or STACK_RETURN_TO_V86 */
  unsigned top_nbytes_same;
  Bit32u new_eip = 0, new_esp, temp_ESP, new_eflags = 0;
  Bit16u new_ip = 0, new_flags = 0;

  /* 16bit opsize  |   32bit opsize
   * ==============================
   * SS     eSP+8  |   SS     eSP+16
   * SP     eSP+6  |   ESP    eSP+12
   * -------------------------------
   * FLAGS  eSP+4  |   EFLAGS eSP+8
   * CS     eSP+2  |   CS     eSP+4
   * IP     eSP+0  |   EIP    eSP+0
   */

  if (i->os32L()) {
    top_nbytes_same = 12;
  }
  else {
    top_nbytes_same = 6;
  }
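
  // Restating the table above: the same-level frame is three slots, so it
  // occupies 3 * 4 = 12 bytes with a 32-bit operand size and 3 * 2 = 6 bytes
  // with a 16-bit operand size; the SS:eSP slots are only present when
  // returning to an outer privilege level.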

  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  if (i->os32L()) {
    new_eflags      =          read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 8);
    raw_cs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 4);
    new_eip         =          read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 0);

    // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
    if (new_eflags & EFlagsVMMask) {
      if (CPL == 0) {
        BX_CPU_THIS_PTR stack_return_to_v86(new_eip, raw_cs_selector, new_eflags);
        return;
      }
      else BX_INFO(("iret: VM set on stack, CPL!=0"));
    }
  }
  else {
    new_flags       = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 4);
    raw_cs_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 2);
    new_ip          = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 0);
  }

  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);
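
  // Because the return RPL must be >= CPL, IRET can only stay at the current
  // privilege level or drop to a less privileged (numerically greater) one;
  // it can never be used to gain privilege.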

  if (cs_selector.rpl == CPL) { /* INTERRUPT RETURN TO SAME LEVEL */
    /* top 6/12 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */

    if (i->os32L()) {
      /* load CS-cache with new code segment descriptor */
      branch_far32(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

      // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
      Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                              EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
      changeMask |= (EFlagsIDMask | EFlagsACMask);  // ID/AC
#endif
      if (CPL <= BX_CPU_THIS_PTR get_IOPL())
        changeMask |= EFlagsIFMask;
      if (CPL == 0)
        changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

      // IF only changed if (CPL <= EFLAGS.IOPL)
      // VIF, VIP, IOPL only changed if CPL == 0
      writeEFlags(new_eflags, changeMask);
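
      // Example of the masking: at CPL 3 with IOPL 0, EFlagsIFMask stays out
      // of changeMask, so an IF=1 bit in the popped image is silently ignored
      // and user code cannot re-enable interrupts via IRET.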
    }
    else {
      /* load CS-cache with new code segment descriptor */
      branch_far32(&cs_selector, &cs_descriptor, (Bit32u) new_ip, cs_selector.rpl);

      /* load flags with third word on stack */
      write_flags(new_flags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL());
    }

    /* increment stack by 6/12 */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
       SP += top_nbytes_same;
    return;
  }
  else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL */
    /* 16bit opsize  |   32bit opsize
     * ==============================
     * SS     eSP+8  |   SS     eSP+16
     * SP     eSP+6  |   ESP    eSP+12
     * FLAGS  eSP+4  |   EFLAGS eSP+8
     * CS     eSP+2  |   CS     eSP+4
     * IP     eSP+0  |   EIP    eSP+0
     */

    /* examine return SS selector and associated descriptor */
    if (i->os32L()) {
      raw_ss_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 16);
    }
    else {
      raw_ss_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 8);
    }

    /* selector must be non-null, else #GP(0) */
    if ((raw_ss_selector & 0xfffc) == 0) {
      BX_ERROR(("iret: SS selector null"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    parse_selector(raw_ss_selector, &ss_selector);

    /* selector RPL must = RPL of return CS selector,
     * else #GP(SS selector) */
    if (ss_selector.rpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.rpl != CS.rpl"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* selector index must be within its descriptor table limits,
     * else #GP(SS selector) */
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    parse_descriptor(dword1, dword2, &ss_descriptor);

    /* AR byte must indicate a writable data segment,
     * else #GP(SS selector) */
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
         IS_CODE_SEGMENT(ss_descriptor.type) ||
        !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
    {
      BX_ERROR(("iret: SS AR byte not writable or code segment"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* stack segment DPL must equal the RPL of the return CS selector,
     * else #GP(SS selector) */
    if (ss_descriptor.dpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.dpl != CS selector RPL"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* SS must be present, else #NP(SS selector) */
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("iret: SS not present!"));
      exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    if (i->os32L()) {
      new_esp    = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 12);
      new_eflags = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP +  8);
      new_eip    = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP +  0);
    }
    else {
      new_esp    = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 6);
      new_eflags = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 4);
      new_eip    = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 0);
    }

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                            EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
    changeMask |= (EFlagsIDMask | EFlagsACMask);  // ID/AC
#endif
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    /* load CS:EIP from stack */
    /* load the CS-cache with CS descriptor */
    /* set CPL to the RPL of the return CS selector */
    branch_far32(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    writeEFlags(new_eflags, changeMask);

    // load SS:eSP from stack
    // load the SS-cache with SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    if (ss_descriptor.u.segment.d_b)
      ESP = new_esp;
    else
      SP = (Bit16u) new_esp;

    validate_seg_regs();
  }
}

#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::long_iret(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;
  Bit32u new_eflags;
  Bit64u new_rip, new_rsp, temp_RSP;

  BX_DEBUG (("LONG MODE IRET"));

  if (BX_CPU_THIS_PTR get_NT()) {
    BX_ERROR(("iret64: return from nested task in x86-64 mode !"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
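
  /* The 64-bit frame, as implied by the offsets read below:
   *   SS      RSP+32
   *   RSP     RSP+24
   *   RFLAGS  RSP+16
   *   CS      RSP+8
   *   RIP     RSP+0
   */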

  if (StackAddrSize64()) temp_RSP = RSP;
  else {
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) temp_RSP = ESP;
    else temp_RSP = SP;
  }

  unsigned top_nbytes_same = 0; /* stop compiler warnings */

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    new_eflags      = (Bit32u) read_virtual_qword_64(BX_SEG_REG_SS, temp_RSP + 16);
    raw_cs_selector = (Bit16u) read_virtual_qword_64(BX_SEG_REG_SS, temp_RSP +  8);
    new_rip         =          read_virtual_qword_64(BX_SEG_REG_SS, temp_RSP +  0);
    top_nbytes_same = 24;
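    // 24 bytes = three 8-byte slots (RIP, CS, RFLAGS); in 64-bit mode the CS
    // selector occupies a full zero-extended quadword on the stack.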
  }
  else
#endif
  if (i->os32L()) {
    new_eflags      =          read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 8);
    raw_cs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 4);
    new_rip         = (Bit64u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 0);
    top_nbytes_same = 12;
  }
  else {
    new_eflags      =          read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 4);
    raw_cs_selector =          read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 2);
    new_rip         = (Bit64u) read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 0);
    top_nbytes_same = 6;
  }

  // ignore VM flag in long mode
  new_eflags &= ~EFlagsVMMask;
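  // Virtual-8086 mode does not exist in long mode: hardware keeps RFLAGS.VM
  // clear there, so any VM bit in the popped flags image is discarded.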

  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret64: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret64: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);

  /* INTERRUPT RETURN TO SAME PRIVILEGE LEVEL */
  if ((cs_selector.rpl == CPL) && (BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64))
  {
    /* top 24 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */

    /* load CS:EIP from stack */
    /* load CS-cache with new code segment descriptor */
    branch_far32(&cs_selector, &cs_descriptor, (Bit32u) new_rip, CPL);

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    // IF only changed if (CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if CPL == 0
    writeEFlags(new_eflags, changeMask);

    /* we are NOT in 64-bit mode */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
       SP += top_nbytes_same;
  }
  else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL or 64 BIT MODE */
    /* examine return SS selector and associated descriptor */
#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      raw_ss_selector = (Bit16u) read_virtual_qword_64(BX_SEG_REG_SS, temp_RSP + 32);
      new_rsp         =          read_virtual_qword_64(BX_SEG_REG_SS, temp_RSP + 24);
    }
    else
#endif
    if (i->os32L()) {
      raw_ss_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 16);
      new_rsp         = (Bit64u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 12);
    }
    else {
      raw_ss_selector =          read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 8);
      new_rsp         = (Bit64u) read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 6);
    }

    if ((raw_ss_selector & 0xfffc) == 0) {
      if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_selector.rpl == 3) {
        BX_ERROR(("iret64: SS selector null"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
    }
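    // A null SS is tolerated only when returning to 64-bit code below CPL 3;
    // flat 64-bit mode ignores the SS base/limit, so only ring 3 (and any
    // compatibility-mode target) still needs a real stack segment.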
    else {
      parse_selector(raw_ss_selector, &ss_selector);

      /* selector RPL must = RPL of return CS selector,
       * else #GP(SS selector) */
      if (ss_selector.rpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.rpl != CS.rpl"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }

      /* selector index must be within its descriptor table limits,
       * else #GP(SS selector) */
      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &ss_descriptor);

      /* AR byte must indicate a writable data segment,
       * else #GP(SS selector) */
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
           IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_ERROR(("iret64: SS AR byte not writable or code segment"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }

      /* stack segment DPL must equal the RPL of the return CS selector,
       * else #GP(SS selector) */
      if (ss_descriptor.dpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.dpl != CS selector RPL"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }

      /* SS must be present, else #NP(SS selector) */
      if (! IS_PRESENT(ss_descriptor)) {
        BX_ERROR(("iret64: SS not present!"));
        exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }
    }

    Bit8u prev_cpl = CPL; /* previous CPL */

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (prev_cpl <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (prev_cpl == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    /* set CPL to the RPL of the return CS selector */
    branch_far64(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl);

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    writeEFlags(new_eflags, changeMask);

    if ((raw_ss_selector & 0xfffc) != 0) {
      // load SS:RSP from stack
      // load the SS-cache with SS descriptor
      load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    }
    else {
      // we are in 64-bit mode !
      load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS]);
      loadSRegLMNominal(BX_SEG_REG_SS, raw_ss_selector, cs_selector.rpl);
    }
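
    // Either way the new SS.RPL is set from cs_selector.rpl, so the stack
    // segment's recorded privilege stays consistent with the new CPL even
    // when the selector itself is null in 64-bit mode.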

    if (StackAddrSize64()) RSP = new_rsp;
    else {
      if (ss_descriptor.u.segment.d_b) ESP = (Bit32u) new_rsp;
      else                              SP = (Bit16u) new_rsp;
    }

    if (prev_cpl != CPL) validate_seg_regs();
  }
}
#endif