/////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer64.cc,v 1.75 2008/12/01 19:06:14 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64

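// branch_near64() is the common helper for the near branches in this file:
// it adds the instruction's signed displacement to RIP and raises #GP(0)
// when the resulting address is not canonical, so no handler can commit a
// bad RIP.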
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("branch_near64: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  RIP = new_RIP;

#if BX_SUPPORT_TRACE_CACHE && !defined(BX_TRACE_CACHE_NO_SPECULATIVE_TRACING)
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64_Iw(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit64u return_RIP = read_virtual_qword_64(BX_SEG_REG_SS, RSP);

  if (! IsCanonical(return_RIP)) {
    BX_ERROR(("RETnear64_Iw: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // pop the return address, then release the imm16 extra bytes of stack
  RIP = return_RIP;
  RSP += 8 + i->Iw();

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, RIP);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit64u return_RIP = read_virtual_qword_64(BX_SEG_REG_SS, RSP);

  if (! IsCanonical(return_RIP)) {
    BX_ERROR(("RETnear64: canonical RIP violation %08x%08x", GET32H(return_RIP), GET32L(return_RIP)));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // pop the return address
  RIP = return_RIP;
  RSP += 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, RIP);
}

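// The far-control-transfer handlers below (far RET/CALL and IRET) mark their
// stack updates as speculative: speculative_rsp is set and the current RSP
// saved in prev_rsp before calling a helper that is "not RSP safe",
// presumably so a fault raised inside the helper can restore the
// pre-instruction stack pointer.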
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_ASSERT(protected_mode());

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  // return_protected is not RSP safe
  return_protected(i, i->Iw());

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar64(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_ASSERT(protected_mode());

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  // return_protected is not RSP safe
  return_protected(i, 0);

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jq(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  /* push 64 bit EA of next instruction */
  write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, RIP);

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("CALL_Jq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  RIP = new_RIP;
  RSP -= 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, RIP);
}

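// Note the ordering in CALL_Jq above and CALL_EqR below: the return address
// is written at RSP-8 and the target validated before RSP itself is
// decremented, so a faulting push or a non-canonical target leaves the
// stack pointer unchanged.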
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EqR(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit64u new_RIP = BX_READ_64BIT_REG(i->rm());

  /* push 64 bit EA of next instruction */
  write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, RIP);

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("CALL_Eq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  RIP = new_RIP;
  RSP -= 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, RIP);
}

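// CALL64_Ep loads a far pointer from memory: a 64-bit offset at eaddr and a
// 16-bit code-segment selector at eaddr+8, then performs the descriptor and
// privilege checks through call_protected().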
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL64_Ep(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word_64(i->seg(), eaddr+8);

  BX_ASSERT(protected_mode());

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  // call_protected is not RSP safe
  call_protected(i, cs_raw, op1_64);

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jq(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("JMP_Jq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  RIP = new_RIP;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);
}

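// Jcc_Jq handlers: each tests its condition from the arithmetic flags, takes
// the near branch through branch_near64() when the condition holds, and
// reports taken/not-taken to the instrumentation hooks.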
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jq(bxInstruction_c *i)
{
  if (get_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jq(bxInstruction_c *i)
{
  if (! get_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jq(bxInstruction_c *i)
{
  if (get_CF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jq(bxInstruction_c *i)
{
  if (! get_CF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jq(bxInstruction_c *i)
{
  if (get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jq(bxInstruction_c *i)
{
  if (! get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jq(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jq(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jq(bxInstruction_c *i)
{
  if (get_SF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jq(bxInstruction_c *i)
{
  if (! get_SF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jq(bxInstruction_c *i)
{
  if (get_PF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jq(bxInstruction_c *i)
{
  if (! get_PF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jq(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jq(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jq(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jq(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EqR(bxInstruction_c *i)
{
  Bit64u op1_64 = BX_READ_64BIT_REG(i->rm());

  if (! IsCanonical(op1_64)) {
    BX_ERROR(("JMP_Eq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  RIP = op1_64;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);
}

/* Far indirect jump */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
{
  invalidate_prefetch_q();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word_64(i->seg(), eaddr+8);

  BX_ASSERT(protected_mode());

  jump_protected(i, cs_raw, op1_64);

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
}

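// IRET64 clears disable_NMI: NMI delivery blocks further NMIs until the
// handler executes IRET, so the block is lifted here before the actual
// return is carried out by long_iret().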
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET64(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  BX_CPU_THIS_PTR disable_NMI = 0;

  BX_ASSERT(protected_mode());

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  // long_iret is not RSP safe
  long_iret(i);

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
}

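// JRCXZ branches when the count register is zero; with a 67h address-size
// prefix only ECX is tested, otherwise the full RCX.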
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JRCXZ_Jb(bxInstruction_c *i)
{
  Bit64u temp_RCX = i->as64L() ? RCX : ECX;

  if (temp_RCX == 0) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}

//
// There is some weirdness in the LOOP instructions' definition. If an
// exception is generated during instruction execution (for example a #GP
// fault because EIP went beyond the CS segment limits), the CPU must restore
// the state it had prior to the instruction's execution.
//
// The important point is that we are not allowed to decrement the RCX
// register before it is known that no exception can happen.
//
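// A minimal sketch of that ordering (illustrative only, not the literal
// code below): the decrement is done on a local copy of the count and
// written back to RCX only after the branch target has been accepted.
//
//   Bit64u count = RCX;                 // work on a copy, RCX untouched
//   if (((--count) != 0) && condition_met) {
//     branch_near64(i);                 // may raise #GP and abort here
//   }
//   RCX = count;                        // commit only once no fault occurred
//
// "condition_met" stands for the ZF test that distinguishes LOOPNE/LOOPE
// from plain LOOP.
//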
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if (((--count) != 0) && (get_ZF()==0)) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
  else {
    Bit32u count = ECX;

    if (((--count) != 0) && (get_ZF()==0)) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if (((--count) != 0) && get_ZF()) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
  else {
    Bit32u count = ECX;

    if (((--count) != 0) && get_ZF()) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if ((--count) != 0) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
  else {
    Bit32u count = ECX;

    if ((--count) != 0) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
}

#endif /* if BX_SUPPORT_X86_64 */