/////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer32.cc,v 1.80 2008/10/06 20:41:28 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RSP ESP
#endif
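// With BX_SUPPORT_X86_64 disabled, RSP is simply an alias for ESP, so the
// prev_rsp / speculative_rsp bookkeeping used throughout this file compiles
// unchanged in 32-bit-only builds.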
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near32(Bit32u new_EIP)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // check always, not only in protected mode
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("branch_near32: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  EIP = new_EIP;

#if BX_SUPPORT_TRACE_CACHE && !defined(BX_TRACE_CACHE_NO_SPECULATIVE_TRACING)
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
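// The RET/CALL/IRET handlers below record the stack pointer before popping
// anything (speculative_rsp / prev_rsp). If a fault is raised part-way
// through the instruction (e.g. #SS on a stack access or the #GP limit
// checks), the exception path can restore ESP to its pre-instruction value.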
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear32_Iw(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = ESP;

  Bit16u imm16 = i->Iw();
  Bit32u return_EIP = pop_32();

  if (return_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("RETnear32_Iw: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  EIP = return_EIP;

  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += imm16;
  else
     SP += imm16;

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear32(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = ESP;

  Bit32u return_EIP = pop_32();

  if (return_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("RETnear32: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  EIP = return_EIP;

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar32_Iw(bxInstruction_c *i)
{
  Bit32u eip, cs_raw;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit16u imm16 = i->Iw();

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  if (protected_mode()) {
    BX_CPU_THIS_PTR return_protected(i, imm16);
    goto done;
  }

  eip    =          pop_32();
  cs_raw = (Bit16u) pop_32(); /* 32bit pop, MSW discarded */

  // CS.LIMIT can't change when in real/v8086 mode
  if (eip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("RETfar32_Iw: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = eip;

  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += imm16;
  else
     SP += imm16;

done:

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar32(bxInstruction_c *i)
{
  Bit32u eip, cs_raw;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  if (protected_mode()) {
    BX_CPU_THIS_PTR return_protected(i, 0);
    goto done;
  }

  eip = pop_32();

  // CS.LIMIT can't change when in real/v8086 mode
  if (eip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("RETfar32: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  cs_raw = (Bit16u) pop_32(); /* 32bit pop, MSW discarded */

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = eip;

done:

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jd(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit32u new_EIP = EIP + i->Id();

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = ESP;

  /* push 32 bit EA of next instruction */
  push_32(EIP);

  branch_near32(new_EIP);

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL32_Ap(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit16u cs_raw;
  Bit32u disp32;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  disp32 = i->Id();
  cs_raw = i->Iw2();

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = ESP;

  if (protected_mode()) {
    BX_CPU_THIS_PTR call_protected(i, cs_raw, disp32);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (disp32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("CALL32_Ap: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_32(EIP);

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = disp32;

done:

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EdR(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit32u new_EIP = BX_READ_32BIT_REG(i->rm());

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = ESP;

  /* push 32 bit EA of next instruction */
  push_32(EIP);

  branch_near32(new_EIP);

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL32_Ep(bxInstruction_c *i)
{
  Bit16u cs_raw;
  Bit32u op1_32;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  op1_32 = read_virtual_dword(i->seg(), eaddr);
  cs_raw = read_virtual_word (i->seg(), eaddr+4);

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  if (protected_mode()) {
    BX_CPU_THIS_PTR call_protected(i, cs_raw, op1_32);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (op1_32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("CALL32_Ep: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_32(EIP);

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = op1_32;

done:

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jd(bxInstruction_c *i)
{
  Bit32u new_EIP = EIP + (Bit32s) i->Id();
  branch_near32(new_EIP);
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, new_EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jd(bxInstruction_c *i)
{
  if (get_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jd(bxInstruction_c *i)
{
  if (! get_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jd(bxInstruction_c *i)
{
  if (get_CF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jd(bxInstruction_c *i)
{
  if (! get_CF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jd(bxInstruction_c *i)
{
  if (get_ZF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jd(bxInstruction_c *i)
{
  if (! get_ZF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jd(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jd(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jd(bxInstruction_c *i)
{
  if (get_SF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jd(bxInstruction_c *i)
{
  if (! get_SF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jd(bxInstruction_c *i)
{
  if (get_PF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jd(bxInstruction_c *i)
{
  if (! get_PF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jd(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jd(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jd(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jd(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Ap(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit32u disp32;
  Bit16u cs_raw;

  invalidate_prefetch_q();

  if (i->os32L()) {
    disp32 = i->Id();
  }
  else {
    disp32 = i->Iw();
  }
  cs_raw = i->Iw2();

  // jump_protected doesn't affect ESP so it is ESP safe
  if (protected_mode()) {
    BX_CPU_THIS_PTR jump_protected(i, cs_raw, disp32);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (disp32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("JMP_Ap: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = disp32;

done:

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EdR(bxInstruction_c *i)
{
  Bit32u new_EIP = BX_READ_32BIT_REG(i->rm());
  branch_near32(new_EIP);
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, new_EIP);
}
/* Far indirect jump */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP32_Ep(bxInstruction_c *i)
{
  Bit16u cs_raw;
  Bit32u op1_32;

  invalidate_prefetch_q();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  op1_32 = read_virtual_dword(i->seg(), eaddr);
  cs_raw = read_virtual_word (i->seg(), eaddr+4);

  // jump_protected doesn't affect RSP so it is RSP safe
  if (protected_mode()) {
    BX_CPU_THIS_PTR jump_protected(i, cs_raw, op1_32);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (op1_32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("JMP32_Ep: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = op1_32;

done:

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET32(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit32u eip, eflags32;
  Bit16u cs_raw;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  BX_CPU_THIS_PTR nmi_disable = 0;

  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = ESP;

  if (v8086_mode()) {
    // IOPL check in stack_return_from_v86()
    iret32_stack_return_from_v86(i);
    goto done;
  }

  if (protected_mode()) {
    iret_protected(i);
    goto done;
  }

  eip      =          pop_32();
  cs_raw   = (Bit16u) pop_32(); // #SS has higher priority
  eflags32 =          pop_32();

  // CS.LIMIT can't change when in real/v8086 mode
  if (eip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("IRET32: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = eip;
  writeEFlags(eflags32, 0x00257fd5); // VIF, VIP, VM unchanged

done:

  BX_CPU_THIS_PTR speculative_rsp = 0;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JECXZ_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  Bit32u temp_ECX;

  if (i->as32L())
    temp_ECX = ECX;
  else
    temp_ECX = CX;

  if (temp_ECX == 0) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif
}
//
// There is some weirdness in the LOOP instructions' definition: if an
// exception is generated during instruction execution (for example a #GP
// fault because EIP went beyond the CS segment limit), the CPU state must be
// restored to what it was before the instruction started executing.
//
// The bottom line is that we are not allowed to decrement the ECX register
// until it is known that no exception can occur.
//
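// Accordingly, each handler below decrements a local copy of the count,
// performs the (possibly faulting) branch, and only then writes the
// decremented value back to ECX/CX, so a faulting LOOP leaves ECX unchanged.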
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    ECX = count;
  }
  else {
    Bit32u count = CX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    CX = count;
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    ECX = count;
  }
  else {
    Bit32u count = CX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    CX = count;
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    ECX = count;
  }
  else {
    Bit32u count = CX;

    count--;
    if (count != 0) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    CX = count;
  }
}