/////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc,v 1.41 2008/09/06 17:44:02 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2005 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code.
#define RIP EIP
#define RSP ESP
#endif
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;
  /* new cs selector must not be null, else #GP(0) */
  if ((cs_raw & 0xfffc) == 0) {
    BX_ERROR(("call_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(cs_raw, &cs_selector);
  // check new CS selector index within its descriptor limits,
  // else #GP(new CS selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);
  // examine AR byte of selected descriptor for various legal values
  if (cs_descriptor.valid==0) {
    BX_ERROR(("call_protected: invalid CS descriptor"));
    exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
  }
  if (cs_descriptor.segment)   // normal segment
  {
    check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw), CPL);

#if BX_SUPPORT_X86_64
    if (cs_descriptor.u.segment.l) {
      // moving to long mode, push return address onto 64-bit stack
      if (i->os64L()) {
        write_new_stack_qword_64(RSP -  8, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_qword_64(RSP - 16, cs_descriptor.dpl, RIP);
        RSP -= 16;
      }
      else if (i->os32L()) {
        write_new_stack_dword_64(RSP - 4, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_dword_64(RSP - 8, cs_descriptor.dpl, EIP);
        RSP -= 8;
      }
      else {
        write_new_stack_word_64(RSP - 2, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_word_64(RSP - 4, cs_descriptor.dpl, IP);
        RSP -= 4;
      }
    }
    else
#endif
    {
      // moving to legacy mode, push return address onto 32-bit stack
      Bit64u temp_RSP;

      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        temp_RSP = ESP;
      else
        temp_RSP = SP;

#if BX_SUPPORT_X86_64
      if (i->os64L()) {
        write_new_stack_qword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 8, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_qword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 16, cs_descriptor.dpl, RIP);
        temp_RSP -= 16;
      }
      else
#endif
      if (i->os32L()) {
        write_new_stack_dword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 4, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_dword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 8, cs_descriptor.dpl, EIP);
        temp_RSP -= 8;
      }
      else {
        write_new_stack_word_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 2, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_word_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 4, cs_descriptor.dpl, IP);
        temp_RSP -= 4;
      }

      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        ESP = (Bit32u) temp_RSP;
      else
        SP = (Bit16u) temp_RSP;
    }
    // load code segment descriptor into CS cache
    // load CS with new code segment selector
    // set RPL of CS to CPL
    branch_far64(&cs_selector, &cs_descriptor, disp, CPL);

    return;
  }
  else { // gate & special segment
    bx_descriptor_t  gate_descriptor = cs_descriptor;
    bx_selector_t    gate_selector = cs_selector;
    Bit32u new_EIP;
    Bit16u dest_selector;
    Bit16u          raw_tss_selector;
    bx_selector_t   tss_selector;
    bx_descriptor_t tss_descriptor;
    Bit32u temp_eIP;
    // descriptor DPL must be >= CPL else #GP(gate selector)
    if (gate_descriptor.dpl < CPL) {
      BX_ERROR(("call_protected: descriptor.dpl < CPL"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
    if (gate_descriptor.dpl < gate_selector.rpl) {
      BX_ERROR(("call_protected: descriptor.dpl < selector.rpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
#if BX_SUPPORT_X86_64
    if (long_mode()) {
      // call gate type is higher priority than non-present bit check
      if (gate_descriptor.type != BX_386_CALL_GATE) {
        BX_ERROR(("call_protected: gate type %u unsupported in long mode", (unsigned) gate_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
      }
    }
    else
#endif
    {
      switch (gate_descriptor.type) {
        case BX_SYS_SEGMENT_AVAIL_286_TSS:
        case BX_SYS_SEGMENT_AVAIL_386_TSS:
        case BX_TASK_GATE:
        case BX_286_CALL_GATE:
        case BX_386_CALL_GATE:
          break;
        default:
          BX_ERROR(("call_protected(): gate.type(%u) unsupported", (unsigned) gate_descriptor.type));
          exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
      }
    }
    // gate descriptor must be present else #NP(gate selector)
    if (! IS_PRESENT(gate_descriptor)) {
      BX_ERROR(("call_protected: gate not present"));
      exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

#if BX_SUPPORT_X86_64
    if (long_mode()) {
      call_gate64(&gate_selector);
      return;
    }
#endif
    switch (gate_descriptor.type) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:

        if (gate_descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS)
          BX_DEBUG(("call_protected: 16bit available TSS"));
        else
          BX_DEBUG(("call_protected: 32bit available TSS"));

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(&gate_selector, &gate_descriptor,
          BX_TASK_FROM_CALL_OR_INT, dword1, dword2);

        // EIP must be in code seg limit, else #GP(0)
        if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
          BX_ERROR(("call_protected: EIP not within CS limits"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;

      case BX_TASK_GATE:
        // examine selector to TSS, given in Task Gate descriptor
        // must specify global in the local/global bit else #TS(TSS selector)
        raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
        parse_selector(raw_tss_selector, &tss_selector);

        if (tss_selector.ti) {
          BX_ERROR(("call_protected: tss_selector.ti=1"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // index must be within GDT limits else #TS(TSS selector)
        fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
        parse_descriptor(dword1, dword2, &tss_descriptor);
        // descriptor AR byte must specify available TSS
        //   else #GP(TSS selector)
        if (tss_descriptor.valid==0 || tss_descriptor.segment) {
          BX_ERROR(("call_protected: TSS selector points to bad TSS"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        if (tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_286_TSS &&
            tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_386_TSS)
        {
          BX_ERROR(("call_protected: TSS selector points to bad TSS"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // task state segment must be present, else #NP(tss selector)
        if (! IS_PRESENT(tss_descriptor)) {
          BX_ERROR(("call_protected: task descriptor.p == 0"));
          exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }
        // SWITCH_TASKS without nesting to TSS
        task_switch(&tss_selector, &tss_descriptor,
          BX_TASK_FROM_CALL_OR_INT, dword1, dword2);

        // EIP must be within code segment limit, else #TS(0)
        if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
          temp_eIP = EIP;
        else
          temp_eIP = IP;

        if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
        {
          BX_ERROR(("call_protected: EIP > CS.limit"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;
      case BX_286_CALL_GATE:
      case BX_386_CALL_GATE:
        // examine code segment selector in call gate descriptor
        BX_DEBUG(("call_protected: call gate"));
        dest_selector = gate_descriptor.u.gate.dest_selector;
        new_EIP       = gate_descriptor.u.gate.dest_offset;

        // selector must not be null else #GP(0)
        if ((dest_selector & 0xfffc) == 0) {
          BX_ERROR(("call_protected: selector in gate null"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        parse_selector(dest_selector, &cs_selector);

        // selector must be within its descriptor table limits,
        //   else #GP(code segment selector)
        fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
        parse_descriptor(dword1, dword2, &cs_descriptor);

        // AR byte of selected descriptor must indicate code segment,
        //   else #GP(code segment selector)
        // DPL of selected descriptor must be <= CPL,
        //   else #GP(code segment selector)
        if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
            IS_DATA_SEGMENT(cs_descriptor.type) ||
            cs_descriptor.dpl > CPL)
        {
          BX_ERROR(("call_protected: selected descriptor is not code"));
          exception(BX_GP_EXCEPTION, dest_selector & 0xfffc, 0);
        }

        // code segment must be present else #NP(selector)
        if (! IS_PRESENT(cs_descriptor)) {
          BX_ERROR(("call_protected: code segment not present !"));
          exception(BX_NP_EXCEPTION, dest_selector & 0xfffc, 0);
        }
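        // The stack-switch sequence below follows the IA-32 far-CALL
        // semantics: the inner-level SS:eSP comes from the TSS, the old
        // SS:eSP plus up to 31 gate parameters are copied to the new
        // stack, and the old CS:eIP is pushed last as the return address.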
        // CALL GATE TO MORE PRIVILEGE
        // if non-conforming code segment and DPL < CPL then
        if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
        {
          Bit16u SS_for_cpl_x;
          Bit32u ESP_for_cpl_x;
          bx_selector_t   ss_selector;
          bx_descriptor_t ss_descriptor;
          Bit16u return_SS, return_CS;
          Bit32u return_ESP, return_EIP;
          Bit16u parameter_word[32];
          Bit32u parameter_dword[32];
          BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

          // get new SS selector for new privilege level from TSS
          get_SS_ESP_from_TSS(cs_descriptor.dpl, &SS_for_cpl_x, &ESP_for_cpl_x);

          // check selector & descriptor for new SS:
          // selector must not be null, else #TS(0)
          if ((SS_for_cpl_x & 0xfffc) == 0) {
            BX_ERROR(("call_protected: new SS null"));
            exception(BX_TS_EXCEPTION, 0, 0);
          }
          // selector index must be within its descriptor table limits,
          //   else #TS(SS selector)
          parse_selector(SS_for_cpl_x, &ss_selector);
          fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
          parse_descriptor(dword1, dword2, &ss_descriptor);

          // selector's RPL must equal DPL of code segment,
          //   else #TS(SS selector)
          if (ss_selector.rpl != cs_descriptor.dpl) {
            BX_ERROR(("call_protected: SS selector.rpl != CS descr.dpl"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // stack segment DPL must equal DPL of code segment,
          //   else #TS(SS selector)
          if (ss_descriptor.dpl != cs_descriptor.dpl) {
            BX_ERROR(("call_protected: SS descr.dpl != CS descr.dpl"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // descriptor must indicate writable data segment,
          //   else #TS(SS selector)
          if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
               IS_CODE_SEGMENT(ss_descriptor.type) ||
              !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
          {
            BX_ERROR(("call_protected: ss descriptor is not writable data seg"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // segment must be present, else #SS(SS selector)
          if (! IS_PRESENT(ss_descriptor)) {
            BX_ERROR(("call_protected: ss descriptor not present"));
            exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }
          // get word count from call gate, mask to 5 bits
          unsigned param_count = gate_descriptor.u.gate.param_count & 0x1f;

          // save return SS:eSP to be pushed on new stack
          return_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
          if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
            return_ESP = ESP;
          else
            return_ESP = SP;

          // save return CS:eIP to be pushed on new stack
          return_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
          if (cs_descriptor.u.segment.d_b)
            return_EIP = EIP;
          else
            return_EIP = IP;

          if (gate_descriptor.type==BX_286_CALL_GATE) {
            for (unsigned n=0; n<param_count; n++) {
              parameter_word[n] = read_virtual_word_32(BX_SEG_REG_SS, return_ESP + n*2);
            }
          }
          else {
            for (unsigned n=0; n<param_count; n++) {
              parameter_dword[n] = read_virtual_dword_32(BX_SEG_REG_SS, return_ESP + n*4);
            }
          }
          // Prepare new stack segment
          bx_segment_reg_t new_stack;
          new_stack.selector = ss_selector;
          new_stack.cache = ss_descriptor;
          new_stack.selector.rpl = cs_descriptor.dpl;
          // add cpl to the selector value
          new_stack.selector.value = (0xfffc & new_stack.selector.value) |
            new_stack.selector.rpl;
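          // Note: the pushes below go through this local bx_segment_reg_t
          // rather than the live SS register, so any fault raised while
          // writing to the inner stack occurs before SS:eSP is loaded.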
          /* load new SS:SP value from TSS */
          if (ss_descriptor.u.segment.d_b) {
            Bit32u temp_ESP = ESP_for_cpl_x;

            // push pointer of old stack onto new stack
            if (gate_descriptor.type==BX_386_CALL_GATE) {
              write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_SS);
              write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_ESP);
              temp_ESP -= 8;

              for (unsigned n=param_count; n>0; n--) {
                temp_ESP -= 4;
                write_new_stack_dword_32(&new_stack, temp_ESP, cs_descriptor.dpl, parameter_dword[n-1]);
              }
              // push return address onto new stack
              write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_CS);
              write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_EIP);
              temp_ESP -= 8;
            }
            else {
              write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_SS);
              write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_ESP);
              temp_ESP -= 4;

              for (unsigned n=param_count; n>0; n--) {
                temp_ESP -= 2;
                write_new_stack_word_32(&new_stack, temp_ESP, cs_descriptor.dpl, parameter_word[n-1]);
              }
              // push return address onto new stack
              write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_CS);
              write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_EIP);
              temp_ESP -= 4;
            }

            ESP = temp_ESP;
          }
          else {
            Bit16u temp_SP = (Bit16u) ESP_for_cpl_x;

            // push pointer of old stack onto new stack
            if (gate_descriptor.type==BX_386_CALL_GATE) {
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_SS);
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_ESP);
              temp_SP -= 8;

              for (unsigned n=param_count; n>0; n--) {
                temp_SP -= 4;
                write_new_stack_dword_32(&new_stack, temp_SP, cs_descriptor.dpl, parameter_dword[n-1]);
              }
              // push return address onto new stack
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_CS);
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_EIP);
              temp_SP -= 8;
            }
            else {
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_SS);
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_ESP);
              temp_SP -= 4;

              for (unsigned n=param_count; n>0; n--) {
                temp_SP -= 2;
                write_new_stack_word_32(&new_stack, temp_SP, cs_descriptor.dpl, parameter_word[n-1]);
              }
              // push return address onto new stack
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_CS);
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_EIP);
              temp_SP -= 4;
            }

            SP = temp_SP;
          }
          // new eIP must be in code segment limit else #GP(0)
          if (new_EIP > cs_descriptor.u.segment.limit_scaled) {
            BX_ERROR(("call_protected: EIP not within CS limits"));
            exception(BX_GP_EXCEPTION, 0, 0);
          }

          /* load SS descriptor */
          load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

          /* load new CS:IP value from gate */
          /* load CS descriptor */
          /* set CPL to stack segment DPL */
          /* set RPL of CS to CPL */
          load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);
          EIP = new_EIP;
        }
        else   // CALL GATE TO SAME PRIVILEGE
        {
          BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

          if (gate_descriptor.type == BX_386_CALL_GATE) {
            // call gate 32bit, push return address onto stack
            push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_32(EIP);
          }
          else {
            // call gate 16bit, push return address onto stack
            push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_16(IP);
          }

          // load CS:EIP from gate
          // load code segment descriptor into CS register
          // set RPL of CS to CPL
          branch_far32(&cs_selector, &cs_descriptor, new_EIP, CPL);
        }
        return;
      default: // can't get here
        BX_PANIC(("call_protected: gate type %u unsupported", (unsigned) cs_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
  }
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2, dword3;
  bx_descriptor_t cs_descriptor;
  bx_descriptor_t gate_descriptor;
  // examine code segment selector in call gate descriptor
  BX_DEBUG(("call_gate64: CALL 64bit call gate"));

  fetch_raw_descriptor_64(gate_selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &gate_descriptor);

  Bit16u dest_selector = gate_descriptor.u.gate.dest_selector;
  // selector must not be null else #GP(0)
  if ((dest_selector & 0xfffc) == 0) {
    BX_ERROR(("call_gate64: selector in gate null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(dest_selector, &cs_selector);

  // selector must be within its descriptor table limits,
  //   else #GP(code segment selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);
  // find the RIP in the gate_descriptor
  Bit64u new_RIP = gate_descriptor.u.gate.dest_offset;
  new_RIP |= ((Bit64u)dword3 << 32);
  // AR byte of selected descriptor must indicate code segment,
  //   else #GP(code segment selector)
  // DPL of selected descriptor must be <= CPL,
  //   else #GP(code segment selector)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) ||
      cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("call_gate64: selected descriptor is not code"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc, 0);
  }

  // In long mode, only 64-bit call gates are allowed, and they must point
  // to 64-bit code segments, else #GP(selector)
  if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_descriptor.u.segment.d_b)
  {
    BX_ERROR(("call_gate64: not 64-bit code segment in call gate 64"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc, 0);
  }

  // code segment must be present else #NP(selector)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("call_gate64: code segment not present !"));
    exception(BX_NP_EXCEPTION, dest_selector & 0xfffc, 0);
  }
  Bit64u old_CS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  Bit64u old_RIP = RIP;
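  // Unlike legacy call gates, a 64-bit call gate copies no parameters;
  // on a privilege change it pushes only a 32-byte frame (old SS:RSP,
  // then old CS:RIP) onto the new 64-bit stack.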
  // CALL GATE TO MORE PRIVILEGE
  // if non-conforming code segment and DPL < CPL then
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
  {
    Bit64u RSP_for_cpl_x;

    BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

    // get new RSP for new privilege level from TSS
    RSP_for_cpl_x = get_RSP_from_TSS(cs_descriptor.dpl);
    Bit64u old_SS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    Bit64u old_RSP = RSP;

    if (! IsCanonical(RSP_for_cpl_x)) {
      // #SS(selector) when changing privilege level
      BX_ERROR(("call_gate64: canonical address failure %08x%08x",
        GET32H(RSP_for_cpl_x), GET32L(RSP_for_cpl_x)));
      exception(BX_SS_EXCEPTION, old_SS & 0xfffc, 0);
    }

    // push old stack long pointer onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x -  8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword_64(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    // push long pointer to return address onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 24, cs_descriptor.dpl, old_CS);
    write_new_stack_qword_64(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 32;

    // prepare new stack null SS selector
    bx_selector_t ss_selector;
    bx_descriptor_t ss_descriptor;

    // set up a null descriptor
    parse_selector(0, &ss_selector);
    parse_descriptor(0, 0, &ss_descriptor);

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, new_RIP, cs_descriptor.dpl);

    // set up null SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);
    RSP = RSP_for_cpl_x;
  }
  else
  {
    BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

    // push to 64-bit stack, switch to long64 guaranteed
    write_new_stack_qword_64(RSP -  8, CPL, old_CS);
    write_new_stack_qword_64(RSP - 16, CPL, old_RIP);
    RSP -= 16;

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, new_RIP, CPL);
  }
}
#endif