- added instructions how to update the online documentation
[bochs-mirror.git] / cpu / ctrl_xfer64.cc
blob543118d1cfffb484182d54154bc0a48f88af7b41
1 /////////////////////////////////////////////////////////////////////////
2 // $Id: ctrl_xfer64.cc,v 1.75 2008/12/01 19:06:14 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (C) 2001 MandrakeSoft S.A.
6 //
7 // MandrakeSoft S.A.
8 // 43, rue d'Aboukir
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 /////////////////////////////////////////////////////////////////////////
28 #define NEED_CPU_REG_SHORTCUTS 1
29 #include "bochs.h"
30 #include "cpu.h"
31 #define LOG_THIS BX_CPU_THIS_PTR
33 #if BX_SUPPORT_X86_64
35 BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
37 Bit64u new_RIP = RIP + (Bit32s) i->Id();
39 if (! IsCanonical(new_RIP)) {
40 BX_ERROR(("branch_near64: canonical RIP violation"));
41 exception(BX_GP_EXCEPTION, 0, 0);
44 RIP = new_RIP;
46 #if BX_SUPPORT_TRACE_CACHE && !defined(BX_TRACE_CACHE_NO_SPECULATIVE_TRACING)
47 // assert magic async_event to stop trace execution
48 BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
49 #endif
52 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64_Iw(bxInstruction_c *i)
54 #if BX_DEBUGGER
55 BX_CPU_THIS_PTR show_flag |= Flag_ret;
56 #endif
58 Bit64u return_RIP = read_virtual_qword_64(BX_SEG_REG_SS, RSP);
60 if (! IsCanonical(return_RIP)) {
61 BX_ERROR(("RETnear64_Iw: canonical RIP violation"));
62 exception(BX_GP_EXCEPTION, 0, 0);
65 RIP = return_RIP;
66 RSP += 8 + i->Iw();
68 BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, RIP);
71 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64(bxInstruction_c *i)
73 #if BX_DEBUGGER
74 BX_CPU_THIS_PTR show_flag |= Flag_ret;
75 #endif
77 Bit64u return_RIP = read_virtual_qword_64(BX_SEG_REG_SS, RSP);
79 if (! IsCanonical(return_RIP)) {
80 BX_ERROR(("RETnear64: canonical RIP violation %08x%08x", GET32H(return_RIP), GET32L(return_RIP)));
81 exception(BX_GP_EXCEPTION, 0, 0);
84 RIP = return_RIP;
85 RSP += 8;
87 BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, RIP);
90 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
92 invalidate_prefetch_q();
94 #if BX_DEBUGGER
95 BX_CPU_THIS_PTR show_flag |= Flag_ret;
96 #endif
98 BX_ASSERT(protected_mode());
100 BX_CPU_THIS_PTR speculative_rsp = 1;
101 BX_CPU_THIS_PTR prev_rsp = RSP;
103 // return_protected is not RSP safe
104 return_protected(i, i->Iw());
106 BX_CPU_THIS_PTR speculative_rsp = 0;
108 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
109 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
112 void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar64(bxInstruction_c *i)
114 invalidate_prefetch_q();
116 #if BX_DEBUGGER
117 BX_CPU_THIS_PTR show_flag |= Flag_ret;
118 #endif
120 BX_ASSERT(protected_mode());
122 BX_CPU_THIS_PTR speculative_rsp = 1;
123 BX_CPU_THIS_PTR prev_rsp = RSP;
125 // return_protected is not RSP safe
126 return_protected(i, 0);
128 BX_CPU_THIS_PTR speculative_rsp = 0;
130 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
131 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
134 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jq(bxInstruction_c *i)
136 Bit64u new_RIP = RIP + (Bit32s) i->Id();
138 #if BX_DEBUGGER
139 BX_CPU_THIS_PTR show_flag |= Flag_call;
140 #endif
142 /* push 64 bit EA of next instruction */
143 write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, RIP);
145 if (! IsCanonical(new_RIP)) {
146 BX_ERROR(("CALL_Jq: canonical RIP violation"));
147 exception(BX_GP_EXCEPTION, 0, 0);
150 RIP = new_RIP;
151 RSP -= 8;
153 BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, RIP);
156 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EqR(bxInstruction_c *i)
158 #if BX_DEBUGGER
159 BX_CPU_THIS_PTR show_flag |= Flag_call;
160 #endif
162 Bit64u new_RIP = BX_READ_64BIT_REG(i->rm());
164 /* push 64 bit EA of next instruction */
165 write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, RIP);
167 if (! IsCanonical(new_RIP))
169 BX_ERROR(("CALL_Eq: canonical RIP violation"));
170 exception(BX_GP_EXCEPTION, 0, 0);
173 RIP = new_RIP;
174 RSP -= 8;
176 BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, RIP);
179 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL64_Ep(bxInstruction_c *i)
181 invalidate_prefetch_q();
183 #if BX_DEBUGGER
184 BX_CPU_THIS_PTR show_flag |= Flag_call;
185 #endif
187 bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
189 /* pointer, segment address pair */
190 Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
191 Bit16u cs_raw = read_virtual_word_64(i->seg(), eaddr+8);
193 BX_ASSERT(protected_mode());
195 BX_CPU_THIS_PTR speculative_rsp = 1;
196 BX_CPU_THIS_PTR prev_rsp = RSP;
198 // call_protected is not RSP safe
199 call_protected(i, cs_raw, op1_64);
201 BX_CPU_THIS_PTR speculative_rsp = 0;
203 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
204 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
207 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jq(bxInstruction_c *i)
209 Bit64u new_RIP = RIP + (Bit32s) i->Id();
211 if (! IsCanonical(new_RIP)) {
212 BX_ERROR(("JMP_Jq: canonical RIP violation"));
213 exception(BX_GP_EXCEPTION, 0, 0);
216 RIP = new_RIP;
218 BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);
221 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jq(bxInstruction_c *i)
223 if (get_OF()) {
224 branch_near64(i);
225 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
227 #if BX_INSTRUMENTATION
228 else {
229 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
231 #endif
234 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jq(bxInstruction_c *i)
236 if (! get_OF()) {
237 branch_near64(i);
238 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
240 #if BX_INSTRUMENTATION
241 else {
242 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
244 #endif
247 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jq(bxInstruction_c *i)
249 if (get_CF()) {
250 branch_near64(i);
251 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
253 #if BX_INSTRUMENTATION
254 else {
255 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
257 #endif
260 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jq(bxInstruction_c *i)
262 if (! get_CF()) {
263 branch_near64(i);
264 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
266 #if BX_INSTRUMENTATION
267 else {
268 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
270 #endif
273 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jq(bxInstruction_c *i)
275 if (get_ZF()) {
276 branch_near64(i);
277 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
279 #if BX_INSTRUMENTATION
280 else {
281 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
283 #endif
286 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jq(bxInstruction_c *i)
288 if (! get_ZF()) {
289 branch_near64(i);
290 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
292 #if BX_INSTRUMENTATION
293 else {
294 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
296 #endif
299 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jq(bxInstruction_c *i)
301 if (get_CF() || get_ZF()) {
302 branch_near64(i);
303 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
305 #if BX_INSTRUMENTATION
306 else {
307 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
309 #endif
312 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jq(bxInstruction_c *i)
314 if (! (get_CF() || get_ZF())) {
315 branch_near64(i);
316 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
318 #if BX_INSTRUMENTATION
319 else {
320 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
322 #endif
325 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jq(bxInstruction_c *i)
327 if (get_SF()) {
328 branch_near64(i);
329 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
331 #if BX_INSTRUMENTATION
332 else {
333 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
335 #endif
338 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jq(bxInstruction_c *i)
340 if (! get_SF()) {
341 branch_near64(i);
342 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
344 #if BX_INSTRUMENTATION
345 else {
346 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
348 #endif
351 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jq(bxInstruction_c *i)
353 if (get_PF()) {
354 branch_near64(i);
355 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
357 #if BX_INSTRUMENTATION
358 else {
359 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
361 #endif
364 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jq(bxInstruction_c *i)
366 if (! get_PF()) {
367 branch_near64(i);
368 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
370 #if BX_INSTRUMENTATION
371 else {
372 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
374 #endif
377 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jq(bxInstruction_c *i)
379 if (getB_SF() != getB_OF()) {
380 branch_near64(i);
381 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
383 #if BX_INSTRUMENTATION
384 else {
385 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
387 #endif
390 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jq(bxInstruction_c *i)
392 if (getB_SF() == getB_OF()) {
393 branch_near64(i);
394 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
396 #if BX_INSTRUMENTATION
397 else {
398 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
400 #endif
403 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jq(bxInstruction_c *i)
405 if (get_ZF() || (getB_SF() != getB_OF())) {
406 branch_near64(i);
407 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
409 #if BX_INSTRUMENTATION
410 else {
411 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
413 #endif
416 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jq(bxInstruction_c *i)
418 if (! get_ZF() && (getB_SF() == getB_OF())) {
419 branch_near64(i);
420 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
422 #if BX_INSTRUMENTATION
423 else {
424 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
426 #endif
429 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EqR(bxInstruction_c *i)
431 Bit64u op1_64 = BX_READ_64BIT_REG(i->rm());
433 if (! IsCanonical(op1_64)) {
434 BX_ERROR(("JMP_Eq: canonical RIP violation"));
435 exception(BX_GP_EXCEPTION, 0, 0);
438 RIP = op1_64;
440 BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);
443 /* Far indirect jump */
444 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
446 invalidate_prefetch_q();
448 bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
450 Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
451 Bit16u cs_raw = read_virtual_word_64(i->seg(), eaddr+8);
453 BX_ASSERT(protected_mode());
455 jump_protected(i, cs_raw, op1_64);
457 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
458 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
461 void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET64(bxInstruction_c *i)
463 invalidate_prefetch_q();
465 #if BX_DEBUGGER
466 BX_CPU_THIS_PTR show_flag |= Flag_iret;
467 #endif
469 BX_CPU_THIS_PTR disable_NMI = 0;
471 BX_ASSERT(protected_mode());
473 BX_CPU_THIS_PTR speculative_rsp = 1;
474 BX_CPU_THIS_PTR prev_rsp = RSP;
476 // long_iret is not RSP safe
477 long_iret(i);
479 BX_CPU_THIS_PTR speculative_rsp = 0;
481 BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
482 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
485 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JRCXZ_Jb(bxInstruction_c *i)
487 Bit64u temp_RCX;
489 if (i->as64L())
490 temp_RCX = RCX;
491 else
492 temp_RCX = ECX;
494 if (temp_RCX == 0) {
495 branch_near64(i);
496 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
498 #if BX_INSTRUMENTATION
499 else {
500 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
502 #endif
506 // There is some weirdness in LOOP instructions definition. If an exception
507 // was generated during the instruction execution (for example #GP fault
508 // because EIP was beyond CS segment limits) CPU state should restore the
509 // state prior to instruction execution.
511 // The final point is that we are not allowed to decrement the RCX register
512 // before it is known that no exceptions can happen.
515 void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE64_Jb(bxInstruction_c *i)
517 if (i->as64L()) {
518 Bit64u count = RCX;
520 if (((--count) != 0) && (get_ZF()==0)) {
521 branch_near64(i);
522 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
524 #if BX_INSTRUMENTATION
525 else {
526 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
528 #endif
530 RCX = count;
532 else {
533 Bit32u count = ECX;
535 if (((--count) != 0) && (get_ZF()==0)) {
536 branch_near64(i);
537 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
539 #if BX_INSTRUMENTATION
540 else {
541 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
543 #endif
545 RCX = count;
549 void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE64_Jb(bxInstruction_c *i)
551 if (i->as64L()) {
552 Bit64u count = RCX;
554 if (((--count) != 0) && get_ZF()) {
555 branch_near64(i);
556 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
558 #if BX_INSTRUMENTATION
559 else {
560 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
562 #endif
564 RCX = count;
566 else {
567 Bit32u count = ECX;
569 if (((--count) != 0) && get_ZF()) {
570 branch_near64(i);
571 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
573 #if BX_INSTRUMENTATION
574 else {
575 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
577 #endif
579 RCX = count;
583 void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP64_Jb(bxInstruction_c *i)
585 if (i->as64L()) {
586 Bit64u count = RCX;
588 if ((--count) != 0) {
589 branch_near64(i);
590 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
592 #if BX_INSTRUMENTATION
593 else {
594 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
596 #endif
598 RCX = count;
600 else {
601 Bit32u count = ECX;
603 if ((--count) != 0) {
604 branch_near64(i);
605 BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
607 #if BX_INSTRUMENTATION
608 else {
609 BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
611 #endif
613 RCX = count;
617 #endif /* if BX_SUPPORT_X86_64 */