1 ////////////////////////////////////////////////////////////////////////
2 // $Id: ret_far.cc,v 1.20 2008/06/13 08:02:22 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
5 // Copyright (c) 2005 Stanislav Shwartsman
6 // Written by Stanislav Shwartsman [sshwarts at sourceforge net]
8 // This library is free software; you can redistribute it and/or
9 // modify it under the terms of the GNU Lesser General Public
10 // License as published by the Free Software Foundation; either
11 // version 2 of the License, or (at your option) any later version.
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
20 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 ////////////////////////////////////////////////////////////////////////
24 #define NEED_CPU_REG_SHORTCUTS 1
27 #define LOG_THIS BX_CPU_THIS_PTR
29 #if BX_SUPPORT_X86_64==0
30 // Make life easier merging cpu64 & cpu code.
// NOTE(review): this chunk is an extraction-damaged, truncated view of
// BX_CPU_C::return_protected — the protected/long-mode far-RET handler.
// Original file line numbers are fused into the text, and structural
// lines ({, }, else, #if/#else/#endif, the operand-size conditions)
// were dropped between the surviving fragments.  Code is left
// byte-identical; only review comments are added.  The function also
// continues past the end of this chunk.
35 void BX_CPP_AttrRegparmN(2)
36 BX_CPU_C::return_protected(bxInstruction_c
*i
, Bit16u pop_bytes
)
// Locals: raw selector words and parsed selector/descriptor pairs for
// the return CS (and, on an outer-privilege return, SS); the return
// RIP/RSP popped from the stack; the two raw descriptor dwords fetched
// from the GDT/LDT; and the byte size of the RIP+CS stack image.
38 Bit16u raw_cs_selector
, raw_ss_selector
;
39 bx_selector_t cs_selector
, ss_selector
;
40 bx_descriptor_t cs_descriptor
, ss_descriptor
;
41 Bit32u stack_param_offset
;
42 bx_address return_RIP
, return_RSP
, temp_RSP
;
43 Bit32u dword1
, dword2
;
// Stack image at entry for 16/32/64-bit operand sizes (N = pop_bytes):
45 /* + 6+N*2: SS | +12+N*4: SS | +24+N*8 SS */
46 /* + 4+N*2: SP | + 8+N*4: ESP | +16+N*8 RSP */
47 /* parm N | + parm N | + parm N */
48 /* parm 3 | + parm 3 | + parm 3 */
49 /* parm 2 | + parm 2 | + parm 2 */
50 /* + 4: parm 1 | + 8: parm 1 | +16: parm 1 */
51 /* + 2: CS | + 4: CS | + 8: CS */
52 /* + 0: IP | + 0: EIP | + 0: RIP */
// Pick the working stack pointer: full RSP when the stack address size
// is 64-bit, otherwise ESP or SP depending on SS.B (d_b).  (The
// else/#endif glue and the SP fallback line are missing from this
// extraction — TODO confirm against the original ret_far.cc.)
55 if (StackAddrSize64()) temp_RSP
= RSP
;
59 if (BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
.u
.segment
.d_b
) temp_RSP
= ESP
;
// Read the return CS selector and RIP from the stack.  64-bit operand
// size reads qwords at temp_RSP+8 / +0; 32-bit reads dwords at +4 / +0;
// 16-bit reads words at +2 / +0.  stack_param_offset records how many
// bytes the RIP+CS image occupies.  (The surrounding operand-size
// if/else lines — presumably i->os64L()/os32L() — were lost in
// extraction; TODO confirm.)
65 raw_cs_selector
= (Bit16u
) read_virtual_qword_64(BX_SEG_REG_SS
, temp_RSP
+ 8);
66 return_RIP
= read_virtual_qword_64(BX_SEG_REG_SS
, temp_RSP
);
67 stack_param_offset
= 16;
72 raw_cs_selector
= (Bit16u
) read_virtual_dword(BX_SEG_REG_SS
, temp_RSP
+ 4);
73 return_RIP
= read_virtual_dword(BX_SEG_REG_SS
, temp_RSP
);
74 stack_param_offset
= 8;
77 raw_cs_selector
= read_virtual_word(BX_SEG_REG_SS
, temp_RSP
+ 2);
78 return_RIP
= read_virtual_word(BX_SEG_REG_SS
, temp_RSP
);
79 stack_param_offset
= 4;
82 // selector must be non-null else #GP(0)
83 if ((raw_cs_selector
& 0xfffc) == 0) {
84 BX_ERROR(("return_protected: CS selector null"));
85 exception(BX_GP_EXCEPTION
, 0, 0);
// Decode the CS selector and fetch/parse its descriptor; a bad table
// index or a non-code descriptor raises #GP(selector) via the helpers.
88 parse_selector(raw_cs_selector
, &cs_selector
);
90 // selector index must be within its descriptor table limits,
92 fetch_raw_descriptor(&cs_selector
, &dword1
, &dword2
, BX_GP_EXCEPTION
);
94 // descriptor AR byte must indicate code segment, else #GP(selector)
95 parse_descriptor(dword1
, dword2
, &cs_descriptor
);
97 // return selector RPL must be >= CPL, else #GP(return selector)
98 if (cs_selector
.rpl
< CPL
) {
99 BX_ERROR(("return_protected: CS.rpl < CPL"));
100 exception(BX_GP_EXCEPTION
, raw_cs_selector
& 0xfffc, 0);
103 // check code-segment descriptor
104 check_cs(&cs_descriptor
, raw_cs_selector
, 0, cs_selector
.rpl
);
106 // if return selector RPL == CPL then
107 // RETURN TO SAME PRIVILEGE LEVEL
108 if (cs_selector
.rpl
== CPL
)
110 BX_DEBUG(("return_protected: return to SAME PRIVILEGE LEVEL"));
// Same-privilege return: transfer to the return CS:RIP, then discard
// the RIP+CS image plus the callee's pop_bytes from the stack, using
// the width selected by the stack address size / SS.B.
112 branch_far64(&cs_selector
, &cs_descriptor
, return_RIP
, CPL
);
114 #if BX_SUPPORT_X86_64
115 if (StackAddrSize64())
116 RSP
+= stack_param_offset
+ pop_bytes
;
120 if (BX_CPU_THIS_PTR sregs
[BX_SEG_REG_SS
].cache
.u
.segment
.d_b
)
121 RSP
= ESP
+ stack_param_offset
+ pop_bytes
;
123 SP
+= stack_param_offset
+ pop_bytes
;
127 /* RETURN TO OUTER PRIVILEGE LEVEL */
// Same stack-image diagram as above, repeated for the outer-privilege
// path, where SS:RSP must also be popped from beyond the parameters.
129 /* + 6+N*2: SS | +12+N*4: SS | +24+N*8 SS */
130 /* + 4+N*2: SP | + 8+N*4: ESP | +16+N*8 RSP */
131 /* parm N | + parm N | + parm N */
132 /* parm 3 | + parm 3 | + parm 3 */
133 /* parm 2 | + parm 2 | + parm 2 */
134 /* + 4: parm 1 | + 8: parm 1 | +16: parm 1 */
135 /* + 2: CS | + 4: CS | + 8: CS */
136 /* + 0: IP | + 0: EIP | + 0: RIP */
138 BX_DEBUG(("return_protected: return to OUTER PRIVILEGE LEVEL"));
// Read the outer SS selector and stack pointer from beyond the RIP+CS
// image and the pop_bytes parameter area; offsets match the diagram
// above for each operand size (again, the operand-size if/else glue is
// missing from this extraction).
140 #if BX_SUPPORT_X86_64
142 raw_ss_selector
= read_virtual_word_64(BX_SEG_REG_SS
, temp_RSP
+ 24 + pop_bytes
);
143 return_RSP
= read_virtual_qword_64(BX_SEG_REG_SS
, temp_RSP
+ 16 + pop_bytes
);
148 raw_ss_selector
= read_virtual_word(BX_SEG_REG_SS
, temp_RSP
+ 12 + pop_bytes
);
149 return_RSP
= read_virtual_dword(BX_SEG_REG_SS
, temp_RSP
+ 8 + pop_bytes
);
152 raw_ss_selector
= read_virtual_word(BX_SEG_REG_SS
, temp_RSP
+ 6 + pop_bytes
);
153 return_RSP
= read_virtual_word(BX_SEG_REG_SS
, temp_RSP
+ 4 + pop_bytes
);
156 /* selector index must be within its descriptor table limits,
157 * else #GP(selector) */
158 parse_selector(raw_ss_selector
, &ss_selector
);
// Null-SS handling.  Both branches visible here raise #GP(0); the
// permitted null-SS case (return to a 64-bit code segment with
// RPL < 3) appears to have lost its fall-through lines in extraction —
// TODO confirm against the original source.
160 if ((raw_ss_selector
& 0xfffc) == 0) {
162 if (! IS_LONG64_SEGMENT(cs_descriptor
) || (cs_selector
.rpl
== 3)) {
163 BX_ERROR(("return_protected: SS selector null"));
164 exception(BX_GP_EXCEPTION
, 0, 0);
167 else // not in long or compatibility mode
169 BX_ERROR(("return_protected: SS selector null"));
170 exception(BX_GP_EXCEPTION
, 0, 0);
// Fetch and parse the outer SS descriptor, then apply the stack-segment
// checks mandated for a far return to outer privilege.
174 fetch_raw_descriptor(&ss_selector
, &dword1
, &dword2
, BX_GP_EXCEPTION
);
175 parse_descriptor(dword1
, dword2
, &ss_descriptor
);
177 /* selector RPL must = RPL of the return CS selector,
178 * else #GP(selector) */
179 if (ss_selector
.rpl
!= cs_selector
.rpl
) {
180 BX_ERROR(("return_protected: ss.rpl != cs.rpl"));
181 exception(BX_GP_EXCEPTION
, raw_ss_selector
& 0xfffc, 0);
184 /* descriptor AR byte must indicate a writable data segment,
185 * else #GP(selector) */
186 if (ss_descriptor
.valid
==0 || ss_descriptor
.segment
==0 ||
187 IS_CODE_SEGMENT(ss_descriptor
.type
) ||
188 !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor
.type
))
190 BX_ERROR(("return_protected: SS.AR byte not writable data"));
191 exception(BX_GP_EXCEPTION
, raw_ss_selector
& 0xfffc, 0);
194 /* descriptor dpl must = RPL of the return CS selector,
195 * else #GP(selector) */
196 if (ss_descriptor
.dpl
!= cs_selector
.rpl
) {
197 BX_ERROR(("return_protected: SS.dpl != cs.rpl"));
198 exception(BX_GP_EXCEPTION
, raw_ss_selector
& 0xfffc, 0);
201 /* segment must be present else #SS(selector) */
202 if (! IS_PRESENT(ss_descriptor
)) {
203 BX_ERROR(("return_protected: ss.present == 0"));
204 exception(BX_SS_EXCEPTION
, raw_ss_selector
& 0xfffc, 0);
// All checks passed: transfer to CS:RIP at the new (outer) privilege
// level = return CS RPL, then install the outer SS.
207 branch_far64(&cs_selector
, &cs_descriptor
, return_RIP
, cs_selector
.rpl
);
209 /* load SS:SP from stack */
210 /* load SS-cache with return SS descriptor */
211 load_ss(&ss_selector
, &ss_descriptor
, cs_selector
.rpl
);
// Install the outer stack pointer (popped value plus pop_bytes), using
// full RSP for a 64-bit stack, ESP when the new SS.B is set, SP
// otherwise.  Chunk ends here; the function continues (segment-register
// validation) past this view.
213 #if BX_SUPPORT_X86_64
214 if (StackAddrSize64())
215 RSP
= return_RSP
+ pop_bytes
;
218 if (ss_descriptor
.u
.segment
.d_b
)
219 RSP
= (Bit32u
) return_RSP
+ pop_bytes
;
221 SP
= (Bit16u
) return_RSP
+ pop_bytes
;
223 /* check ES, DS, FS, GS for validity */