1 ////////////////////////////////////////////////////////////////////////
2 // $Id: ctrl_xfer_pro.cc,v 1.56 2007/03/14 21:15:15 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
5 // Copyright (C) 2001 MandrakeSoft S.A.
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code.
#define RIP EIP
#endif
39 /* pass zero in check_rpl if no needed selector RPL checking for
40 non-conforming segments */
41 void BX_CPU_C::check_cs(bx_descriptor_t
*descriptor
, Bit16u cs_raw
, Bit8u check_rpl
, Bit8u check_cpl
)
43 // descriptor AR byte must indicate code segment else #GP(selector)
44 if (descriptor
->valid
==0 || descriptor
->segment
==0 ||
45 IS_DATA_SEGMENT(descriptor
->type
))
47 BX_ERROR(("check_cs: not a valid code segment !"));
48 exception(BX_GP_EXCEPTION
, cs_raw
& 0xfffc, 0);
52 if (descriptor
->u
.segment
.l
)
54 if (! BX_CPU_THIS_PTR msr
.lma
) {
55 BX_PANIC(("check_cs: attempt to jump to long mode without enabling EFER.LMA !"));
58 if (descriptor
->u
.segment
.d_b
) {
59 BX_ERROR(("check_cs: Both L and D bits enabled for segment descriptor !"));
60 exception(BX_GP_EXCEPTION
, cs_raw
& 0xfffc, 0);
65 // if non-conforming, code segment descriptor DPL must = CPL else #GP(selector)
66 if (IS_CODE_SEGMENT_NON_CONFORMING(descriptor
->type
)) {
67 if (descriptor
->dpl
!= check_cpl
) {
68 BX_ERROR(("check_cs: non-conforming code seg descriptor dpl != cpl"));
69 exception(BX_GP_EXCEPTION
, cs_raw
& 0xfffc, 0);
72 /* RPL of destination selector must be <= CPL else #GP(selector) */
73 if (check_rpl
> check_cpl
) {
74 BX_ERROR(("check_cs: non-conforming code seg selector rpl > cpl"));
75 exception(BX_GP_EXCEPTION
, cs_raw
& 0xfffc, 0);
78 // if conforming, then code segment descriptor DPL must <= CPL else #GP(selector)
80 if (descriptor
->dpl
> check_cpl
) {
81 BX_ERROR(("check_cs: conforming code seg descriptor dpl > cpl"));
82 exception(BX_GP_EXCEPTION
, cs_raw
& 0xfffc, 0);
86 // code segment must be present else #NP(selector)
87 if (! descriptor
->p
) {
88 BX_ERROR(("check_cs: code segment not present !"));
89 exception(BX_NP_EXCEPTION
, cs_raw
& 0xfffc, 0);
93 void BX_CPP_AttrRegparmN(3)
94 BX_CPU_C::load_cs(bx_selector_t
*selector
, bx_descriptor_t
*descriptor
, Bit8u cpl
)
96 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].selector
= *selector
;
97 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
= *descriptor
;
99 /* caller may request different CPL then in selector */
100 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].selector
.rpl
= cpl
;
101 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.valid
= 1;
102 // Added cpl to the selector value.
103 BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].selector
.value
=
104 (0xfffc & BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].selector
.value
) | cpl
;
106 #if BX_SUPPORT_X86_64
107 if (BX_CPU_THIS_PTR msr
.lma
) {
108 if (descriptor
->u
.segment
.l
) {
109 BX_CPU_THIS_PTR cpu_mode
= BX_MODE_LONG_64
;
110 BX_DEBUG(("Long Mode Activated"));
111 loadSRegLMNominal(BX_SEG_REG_CS
, selector
->value
, cpl
);
114 BX_DEBUG(("Compatibility Mode Activated"));
115 BX_CPU_THIS_PTR cpu_mode
= BX_MODE_LONG_COMPAT
;
120 #if BX_SUPPORT_ICACHE
121 BX_CPU_THIS_PTR
updateFetchModeMask();
124 // Loading CS will invalidate the EIP fetch window.
125 invalidate_prefetch_q();
128 void BX_CPP_AttrRegparmN(1)
129 BX_CPU_C::branch_near32(Bit32u new_EIP
)
131 // check always, not only in protected mode
132 if (new_EIP
> BX_CPU_THIS_PTR sregs
[BX_SEG_REG_CS
].cache
.u
.segment
.limit_scaled
)
134 BX_ERROR(("branch_near: offset outside of CS limits"));
135 exception(BX_GP_EXCEPTION
, 0, 0);
140 void BX_CPU_C::branch_far32(bx_selector_t
*selector
,
141 bx_descriptor_t
*descriptor
, Bit32u eip
, Bit8u cpl
)
143 /* instruction pointer must be in code segment limit else #GP(0) */
144 if (eip
> descriptor
->u
.segment
.limit_scaled
) {
145 BX_ERROR(("branch_far: EIP > limit"));
146 exception(BX_GP_EXCEPTION
, 0, 0);
149 /* Load CS:IP from destination pointer */
150 /* Load CS-cache with new segment descriptor */
151 load_cs(selector
, descriptor
, cpl
);
153 /* Change the EIP value */
#if BX_SUPPORT_X86_64
// Near relative branch in 64-bit mode: RIP-relative displacement with
// canonical-address check (#GP(0) on violation).  With 16-bit operand
// size the result is truncated to 16 bits instead.
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::branch_near64(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  // NOTE(review): reconstructed condition — os32L()==0 means 16-bit
  // operand size here; confirm against bxInstruction_c.
  if (i->os32L() == 0) {
    new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
  }
  else {
    if (! IsCanonical(new_RIP)) {
      BX_ERROR(("branch_near64: canonical RIP violation"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }

  RIP = new_RIP;
}
#endif
177 void BX_CPU_C::branch_far64(bx_selector_t
*selector
,
178 bx_descriptor_t
*descriptor
, bx_address rip
, Bit8u cpl
)
180 #if BX_SUPPORT_X86_64
181 if (descriptor
->u
.segment
.l
)
183 if (! IsCanonical(rip
)) {
184 BX_ERROR(("branch_far: canonical RIP violation"));
185 exception(BX_GP_EXCEPTION
, 0, 0);
191 /* instruction pointer must be in code segment limit else #GP(0) */
192 if (rip
> descriptor
->u
.segment
.limit_scaled
) {
193 BX_ERROR(("branch_far: RIP > limit"));
194 exception(BX_GP_EXCEPTION
, 0, 0);
198 /* Load CS:IP from destination pointer */
199 /* Load CS-cache with new segment descriptor */
200 load_cs(selector
, descriptor
, cpl
);
202 /* Change the RIP value */