/////////////////////////////////////////////////////////////////////////
// $Id: iret.cc,v 1.18 2007/03/14 21:15:15 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code.
#define RIP EIP
#define RSP ESP
#endif
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::iret_protected(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR msr.lma) {  // IA-32e mode: handled by long_iret() below
    long_iret(i);
    return;
  }
#endif
  if (BX_CPU_THIS_PTR get_NT()) /* NT = 1: RETURN FROM NESTED TASK */
  {
    /* what's the deal with NT & VM ? */
    Bit16u raw_link_selector;
    bx_selector_t link_selector;
    bx_descriptor_t tss_descriptor;

    if (BX_CPU_THIS_PTR get_VM())
      BX_PANIC(("iret_protected: VM shouldn't be set here !"));
    //BX_INFO(("IRET: nested task return"));

    if (BX_CPU_THIS_PTR tr.cache.valid==0)
      BX_PANIC(("IRET: TR not valid"));
    Bit32u base32 = BX_CPU_THIS_PTR tr.cache.u.system.base;

    // examine back link selector in TSS addressed by current TR:
    access_linear(base32, 2, 0, BX_READ, &raw_link_selector);
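
    // The back link occupies the first 16-bit field (offset 0) of both the
    // 16-bit and 32-bit TSS layouts, hence the 2-byte read at base32 + 0.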
    // must specify global, else #TS(new TSS selector)
    parse_selector(raw_link_selector, &link_selector);

    if (link_selector.ti) {
      BX_ERROR(("iret: link selector.ti=1"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }

    // index must be within GDT limits, else #TS(new TSS selector)
    fetch_raw_descriptor(&link_selector, &dword1, &dword2, BX_TS_EXCEPTION);
    // AR byte must specify TSS, else #TS(new TSS selector)
    // new TSS must be busy, else #TS(new TSS selector)
    parse_descriptor(dword1, dword2, &tss_descriptor);
    if (tss_descriptor.valid==0 || tss_descriptor.segment) {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }
    if ((tss_descriptor.type!=11) && (tss_descriptor.type!=3)) {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }

    // TSS must be present, else #NP(new TSS selector)
    if (! IS_PRESENT(tss_descriptor)) {
      BX_ERROR(("iret: task descriptor.p == 0"));
      exception(BX_NP_EXCEPTION, raw_link_selector & 0xfffc, 0);
    }
    // switch tasks (without nesting) to TSS specified by back link selector
    task_switch(&link_selector, &tss_descriptor,
                BX_TASK_FROM_IRET, dword1, dword2);

    // mark the task just abandoned as not busy

    // EIP must be within code seg limit, else #GP(0)
    if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("iret: EIP > CS.limit"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
    return;
  }

  /* NT = 0: INTERRUPT RETURN ON STACK -or STACK_RETURN_TO_V86 */
  unsigned top_nbytes_same, top_nbytes_outer, ss_offset;
  Bit32u new_eip, new_esp, temp_ESP, new_eflags;
  Bit16u new_ip, new_sp, new_flags;
  /* 16bit opsize  |   32bit opsize
   * ==============================
   * SS     eSP+8  |   SS     eSP+16
   * SP     eSP+6  |   ESP    eSP+12
   * -------------------------------
   * FLAGS  eSP+4  |   EFLAGS eSP+8
   * CS     eSP+2  |   CS     eSP+4
   * IP     eSP+0  |   EIP    eSP+0
   */

  if (i->os32L()) {
    top_nbytes_same  = 12;
    top_nbytes_outer = 20;
    ss_offset = 16;
  }
  else {
    top_nbytes_same  = 6;
    top_nbytes_outer = 10;
    ss_offset = 8;
  }
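
  // For example: with a 32-bit operand size a same-level IRET pops EIP, CS
  // and EFLAGS (three dwords, 12 bytes), while a privilege-changing return
  // additionally pops ESP and SS (20 bytes). The 16-bit frames are half the
  // size: 6 and 10 bytes.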
  /* CS on stack must be within stack limits, else #SS(0) */
  if ( !can_pop(top_nbytes_same) ) {
    BX_ERROR(("iret: CS not within stack limits"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;
  if (i->os32L()) {
    read_virtual_word (BX_SEG_REG_SS, temp_ESP + 4, &raw_cs_selector);
    read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 0, &new_eip);
    read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 8, &new_eflags);

    // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
    if (new_eflags & EFlagsVMMask) {
      if (CPL == 0) {
        BX_CPU_THIS_PTR stack_return_to_v86(new_eip, raw_cs_selector, new_eflags);
        return;
      }
      else BX_INFO(("iret: VM set on stack, CPL!=0"));
    }
  }
  else {
    read_virtual_word(BX_SEG_REG_SS, temp_ESP + 2, &raw_cs_selector);
    read_virtual_word(BX_SEG_REG_SS, temp_ESP + 0, &new_ip);
    read_virtual_word(BX_SEG_REG_SS, temp_ESP + 4, &new_flags);
  }
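
  // Only CPL=0 code may restore a flags image with VM=1; this is the normal
  // path a v8086 monitor uses to re-enter virtual-8086 mode, handled by
  // stack_return_to_v86() above.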
  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ( (raw_cs_selector & 0xfffc) == 0 ) {
    BX_ERROR(("iret: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);
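
  // IRET may only transfer to the same or a numerically greater (less
  // privileged) level: a CPL=0 handler may return to RPL 0..3, while a
  // CPL=3 handler may only return to RPL 3.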
  if (cs_selector.rpl == CPL) { /* INTERRUPT RETURN TO SAME LEVEL */
    /* top 6/12 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */

    if (i->os32L()) {
      /* load CS-cache with new code segment descriptor */
      branch_far32(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

      // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
      Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                          EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
      changeMask |= (EFlagsIDMask | EFlagsACMask); // ID/AC
#endif
      if (CPL <= BX_CPU_THIS_PTR get_IOPL())
        changeMask |= EFlagsIFMask;
      if (CPL == 0)
        changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

      // IF only changed if (CPL <= EFLAGS.IOPL)
      // VIF, VIP, IOPL only changed if CPL == 0
      writeEFlags(new_eflags, changeMask);
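
      // Conceptually writeEFlags() performs a masked merge, roughly:
      //   EFLAGS = (EFLAGS & ~changeMask) | (new_eflags & changeMask);
      // so only the bits selected by changeMask come from the stack image.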
    }
    else {
      /* load CS-cache with new code segment descriptor */
      branch_far32(&cs_selector, &cs_descriptor, (Bit32u) new_ip, cs_selector.rpl);

      /* load flags with third word on stack */
      write_flags(new_flags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL());
    }

    /* increment stack by 6/12 */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
      SP += top_nbytes_same;
  }
  else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL */
    /* 16bit opsize  |   32bit opsize
     * ==============================
     * SS     eSP+8  |   SS     eSP+16
     * SP     eSP+6  |   ESP    eSP+12
     * FLAGS  eSP+4  |   EFLAGS eSP+8
     * CS     eSP+2  |   CS     eSP+4
     * IP     eSP+0  |   EIP    eSP+0
     */

    /* top 10/20 bytes on stack must be within limits else #SS(0) */
    if ( !can_pop(top_nbytes_outer) ) {
      BX_ERROR(("iret: top 10/20 bytes not within stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }
    /* examine return SS selector and associated descriptor */
    read_virtual_word(BX_SEG_REG_SS, temp_ESP + ss_offset, &raw_ss_selector);

    /* selector must be non-null, else #GP(0) */
    if ( (raw_ss_selector & 0xfffc) == 0 ) {
      BX_ERROR(("iret: SS selector null"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    parse_selector(raw_ss_selector, &ss_selector);
    /* selector RPL must = RPL of return CS selector,
     * else #GP(SS selector) */
    if (ss_selector.rpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.rpl != CS.rpl"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* selector index must be within its descriptor table limits,
     * else #GP(SS selector) */
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    parse_descriptor(dword1, dword2, &ss_descriptor);

    /* AR byte must indicate a writable data segment,
     * else #GP(SS selector) */
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
        IS_CODE_SEGMENT(ss_descriptor.type) ||
        !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
    {
      BX_ERROR(("iret: SS AR byte not writable data segment"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* stack segment DPL must equal the RPL of the return CS selector,
     * else #GP(SS selector) */
    if (ss_descriptor.dpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.dpl != CS selector RPL"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* SS must be present, else #NP(SS selector) */
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("iret: SS not present!"));
      exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }
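
    // Each failing check above reports the faulting selector (with the RPL
    // bits masked off by & 0xfffc) in the exception error code, so the OS
    // can tell which descriptor was at fault.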
    if (i->os32L()) {
      read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 0, &new_eip);
      read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 8, &new_eflags);
      read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 12, &new_esp);
    }
    else {
      read_virtual_word(BX_SEG_REG_SS, temp_ESP + 0, &new_ip);
      read_virtual_word(BX_SEG_REG_SS, temp_ESP + 4, &new_flags);
      read_virtual_word(BX_SEG_REG_SS, temp_ESP + 6, &new_sp);

      new_eip = new_ip;
      new_esp = new_sp;
      new_eflags = new_flags;
    }

    Bit8u prev_cpl = CPL; /* previous CPL */
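
    /* branch_far32() below changes CPL to cs_selector.rpl, so the IOPL/IF
     * decisions that follow must be made against the CPL in effect before
     * the return -- hence it is saved here. */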
    /* load CS:EIP from stack */
    /* load the CS-cache with CS descriptor */
    /* set CPL to the RPL of the return CS selector */
    branch_far32(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                        EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
    changeMask |= (EFlagsIDMask | EFlagsACMask); // ID/AC
#endif
    if (prev_cpl <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (prev_cpl == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (cs_descriptor.u.segment.d_b)
      changeMask &= 0xffff;

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    writeEFlags(new_eflags, changeMask);
    // load SS:eSP from stack
    // load the SS-cache with SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    if (ss_descriptor.u.segment.d_b)
      ESP = new_esp;
    else
      SP  = (Bit16u) new_esp;

    validate_seg_regs();
  }
}

#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::long_iret(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;
  Bit32u new_eflags;
  Bit64u new_rip, new_rsp, temp_RSP;
  unsigned top_nbytes_outer, ss_offset;

  BX_DEBUG(("LONG MODE IRET"));
  if (BX_CPU_THIS_PTR get_NT()) {
    BX_ERROR(("iret64: return from nested task in x86-64 mode !"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
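
  // Hardware task switching is not available in IA-32e mode, so an IRET
  // executed with NT=1 in long mode faults instead of attempting the nested
  // task return that iret_protected() performs above.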
  if (StackAddrSize64()) temp_RSP = RSP;
  else {
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) temp_RSP = ESP;
    else temp_RSP = SP;
  }
  unsigned top_nbytes_same = 0; /* stop compiler warnings */

  if (i->os64L()) {
    Bit64u new_rflags = 0;

    read_virtual_word (BX_SEG_REG_SS, temp_RSP +  8, &raw_cs_selector);
    read_virtual_qword(BX_SEG_REG_SS, temp_RSP +  0, &new_rip);
    read_virtual_qword(BX_SEG_REG_SS, temp_RSP + 16, &new_rflags);

    new_eflags = (Bit32u) new_rflags;
    top_nbytes_outer = 40;
    ss_offset = 32;
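
    // The 64-bit interrupt stack frame is five quadwords: RIP at RSP+0,
    // CS at RSP+8, RFLAGS at RSP+16, RSP at RSP+24 and SS at RSP+32,
    // which is why the outer frame is 40 bytes and SS sits at offset 32.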
  }
  else if (i->os32L()) {
    /* CS on stack must be within stack limits, else #SS(0) */
    if ( !can_pop(12) ) {
      BX_ERROR(("iret64: CS not within stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }

    Bit32u return_EIP = 0;

    read_virtual_word (BX_SEG_REG_SS, temp_RSP + 4, &raw_cs_selector);
    read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 0, &return_EIP);
    read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 8, &new_eflags);

    new_rip = return_EIP;
    top_nbytes_outer = 20;
    top_nbytes_same  = 12;
    ss_offset = 16;
  }
  else {
    /* CS on stack must be within stack limits, else #SS(0) */
    if ( !can_pop(6) ) {
      BX_ERROR(("iret64: CS not within stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }

    Bit16u return_IP = 0, new_flags = 0;

    read_virtual_word(BX_SEG_REG_SS, temp_RSP + 2, &raw_cs_selector);
    read_virtual_word(BX_SEG_REG_SS, temp_RSP + 0, &return_IP);
    read_virtual_word(BX_SEG_REG_SS, temp_RSP + 4, &new_flags);

    new_rip = return_IP;
    new_eflags = (Bit32u) new_flags;
    top_nbytes_outer = 10;
    top_nbytes_same  = 6;
    ss_offset = 8;
  }
  // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
  if (new_eflags & EFlagsVMMask) {
    BX_PANIC(("iret64: no V86 mode in x86-64 LONG mode"));
    new_eflags &= ~EFlagsVMMask;
  }
  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret64: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret64: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);
  /* INTERRUPT RETURN TO SAME PRIVILEGE LEVEL */
  if ((cs_selector.rpl == CPL) && (BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64))
  {
    /* top 24 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */

    /* load CS:EIP from stack */
    /* load CS-cache with new code segment descriptor */
    branch_far32(&cs_selector, &cs_descriptor, (Bit32u) new_rip, CPL);

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    // IF only changed if (CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if CPL == 0
    writeEFlags(new_eflags, changeMask);
    /* we are NOT in 64-bit mode */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
      SP += top_nbytes_same;
  }
  else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL or 64 BIT MODE */
    /* top 10/20 bytes on stack must be within limits else #SS(0) */
    if (! can_pop(top_nbytes_outer)) {
      BX_PANIC(("iret64: top bytes not within stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }
    /* examine return SS selector and associated descriptor */
    read_virtual_word(BX_SEG_REG_SS, temp_RSP + ss_offset, &raw_ss_selector);

    if ((raw_ss_selector & 0xfffc) == 0) {
      if (! IS_LONG64_SEGMENT(cs_descriptor) || (cs_selector.rpl == 3)) {
        BX_ERROR(("iret64: SS selector null"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
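
      // A null SS is legal here only when returning to a 64-bit code segment
      // at CPL < 3; in 64-bit mode ordinary stack accesses do not consult the
      // SS descriptor, so no usable descriptor is required.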
    }
    else {
      parse_selector(raw_ss_selector, &ss_selector);

      /* selector RPL must = RPL of return CS selector,
       * else #GP(SS selector) */
      if (ss_selector.rpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.rpl != CS.rpl"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }

      /* selector index must be within its descriptor table limits,
       * else #GP(SS selector) */
      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &ss_descriptor);

      /* AR byte must indicate a writable data segment,
       * else #GP(SS selector) */
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
          IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_ERROR(("iret64: SS AR byte not writable data segment"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }

      /* stack segment DPL must equal the RPL of the return CS selector,
       * else #GP(SS selector) */
      if (ss_descriptor.dpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.dpl != CS selector RPL"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }

      /* SS must be present, else #NP(SS selector) */
      if (! IS_PRESENT(ss_descriptor)) {
        BX_ERROR(("iret64: SS not present!"));
        exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
      }
    }
    if (i->os64L()) {
      read_virtual_qword(BX_SEG_REG_SS, temp_RSP + 24, &new_rsp);
    }
    else if (i->os32L()) {
      Bit32u return_ESP = 0;
      read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 12, &return_ESP);
      new_rsp = return_ESP;
    }
    else {
      Bit16u return_SP = 0;
      read_virtual_word(BX_SEG_REG_SS, temp_RSP + 6, &return_SP);
      new_rsp = return_SP;
    }
    Bit8u prev_cpl = CPL; /* previous CPL */

    /* set CPL to the RPL of the return CS selector */
    branch_far64(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl);
    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (prev_cpl <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (prev_cpl == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (cs_descriptor.u.segment.d_b)
      changeMask &= 0xffff;

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    writeEFlags(new_eflags, changeMask);
    if ((raw_ss_selector & 0xfffc) != 0) {
      // load SS:RSP from stack
      // load the SS-cache with SS descriptor
      load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    }
    else {
      // we are in 64-bit mode !
      loadSRegLMNominal(BX_SEG_REG_SS, raw_ss_selector, cs_selector.rpl);
    }
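
    // With a null selector the SS cache is given nominal (flat) attributes,
    // which suffices in 64-bit mode where SS base and limit are ignored for
    // stack references.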
    if (StackAddrSize64()) RSP = new_rsp;
    else {
      if (ss_descriptor.u.segment.d_b) ESP = (Bit32u) new_rsp;
      else SP = (Bit16u) new_rsp;
    }

    if (prev_cpl != CPL) validate_seg_regs();
  }
}
#endif