Commit-log line from the repository viewer (unrelated to this file — it refers to a disk-emulation patch): "update sector count before calling write completion function (SF patch #2144692)"
File: cpu/ctrl_xfer_pro.cc in bochs-mirror.git
Blob: 1683e212ca59bc66d5766bd8b1803396e9e0f732
1 ////////////////////////////////////////////////////////////////////////
2 // $Id: ctrl_xfer_pro.cc,v 1.76 2008/08/03 19:53:08 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (C) 2001 MandrakeSoft S.A.
6 //
7 // MandrakeSoft S.A.
8 // 43, rue d'Aboukir
9 // 75002 Paris - France
10 // http://www.linux-mandrake.com/
11 // http://www.mandrakesoft.com/
13 // This library is free software; you can redistribute it and/or
14 // modify it under the terms of the GNU Lesser General Public
15 // License as published by the Free Software Foundation; either
16 // version 2 of the License, or (at your option) any later version.
18 // This library is distributed in the hope that it will be useful,
19 // but WITHOUT ANY WARRANTY; without even the implied warranty of
20 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 // Lesser General Public License for more details.
23 // You should have received a copy of the GNU Lesser General Public
24 // License along with this library; if not, write to the Free Software
25 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 /////////////////////////////////////////////////////////////////////////
28 #define NEED_CPU_REG_SHORTCUTS 1
29 #include "bochs.h"
30 #include "cpu.h"
31 #define LOG_THIS BX_CPU_THIS_PTR
33 #if BX_SUPPORT_X86_64==0
34 // Make life easier merging cpu64 & cpu code.
35 #define RIP EIP
36 #endif
38 /* pass zero in check_rpl if no needed selector RPL checking for
39 non-conforming segments */
40 void BX_CPU_C::check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl, Bit8u check_cpl)
42 // descriptor AR byte must indicate code segment else #GP(selector)
43 if (descriptor->valid==0 || descriptor->segment==0 ||
44 IS_DATA_SEGMENT(descriptor->type))
46 BX_ERROR(("check_cs(0x%04x): not a valid code segment !", cs_raw));
47 exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
50 #if BX_SUPPORT_X86_64
51 if (descriptor->u.segment.l) {
52 if (! BX_CPU_THIS_PTR efer.get_LMA()) {
53 BX_ERROR(("check_cs(0x%04x): attempt to jump to long mode without enabling EFER.LMA !", cs_raw));
55 else if (descriptor->u.segment.d_b) {
56 BX_ERROR(("check_cs(0x%04x): Both L and D bits enabled for segment descriptor !", cs_raw));
57 exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
60 #endif
62 // if non-conforming, code segment descriptor DPL must = CPL else #GP(selector)
63 if (IS_CODE_SEGMENT_NON_CONFORMING(descriptor->type)) {
64 if (descriptor->dpl != check_cpl) {
65 BX_ERROR(("check_cs(0x%04x): non-conforming code seg descriptor dpl != cpl, dpl=%d, cpl=%d",
66 cs_raw, descriptor->dpl, check_cpl));
67 exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
70 /* RPL of destination selector must be <= CPL else #GP(selector) */
71 if (check_rpl > check_cpl) {
72 BX_ERROR(("check_cs(0x%04x): non-conforming code seg selector rpl > cpl, rpl=%d, cpl=%d",
73 cs_raw, check_rpl, check_cpl));
74 exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
77 // if conforming, then code segment descriptor DPL must <= CPL else #GP(selector)
78 else {
79 if (descriptor->dpl > check_cpl) {
80 BX_ERROR(("check_cs(0x%04x): conforming code seg descriptor dpl > cpl, dpl=%d, cpl=%d",
81 cs_raw, descriptor->dpl, check_cpl));
82 exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
86 // code segment must be present else #NP(selector)
87 if (! descriptor->p) {
88 BX_ERROR(("check_cs(0x%04x): code segment not present !", cs_raw));
89 exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
93 void BX_CPP_AttrRegparmN(3)
94 BX_CPU_C::load_cs(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl)
96 // Add cpl to the selector value.
97 selector->value = (0xfffc & selector->value) | cpl;
99 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector = *selector;
100 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache = *descriptor;
101 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl = cpl;
102 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 1;
104 #if BX_SUPPORT_X86_64
105 if (long_mode()) {
106 if (descriptor->u.segment.l) {
107 loadSRegLMNominal(BX_SEG_REG_CS, selector->value, cpl);
109 handleCpuModeChange();
111 #endif
113 updateFetchModeMask();
115 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
116 handleAlignmentCheck(); // CPL was modified
117 #endif
119 // Loading CS will invalidate the EIP fetch window.
120 invalidate_prefetch_q();
123 void BX_CPU_C::branch_far32(bx_selector_t *selector,
124 bx_descriptor_t *descriptor, Bit32u eip, Bit8u cpl)
126 /* instruction pointer must be in code segment limit else #GP(0) */
127 if (eip > descriptor->u.segment.limit_scaled) {
128 BX_ERROR(("branch_far32: EIP > limit"));
129 exception(BX_GP_EXCEPTION, 0, 0);
132 /* Load CS:IP from destination pointer */
133 /* Load CS-cache with new segment descriptor */
134 load_cs(selector, descriptor, cpl);
136 /* Change the EIP value */
137 EIP = eip;
140 void BX_CPU_C::branch_far64(bx_selector_t *selector,
141 bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl)
143 #if BX_SUPPORT_X86_64
144 if (long_mode() && descriptor->u.segment.l) {
145 if (! IsCanonical(rip)) {
146 BX_ERROR(("branch_far64: canonical RIP violation"));
147 exception(BX_GP_EXCEPTION, 0, 0);
150 else
151 #endif
153 /* instruction pointer must be in code segment limit else #GP(0) */
154 if (rip > descriptor->u.segment.limit_scaled) {
155 BX_ERROR(("branch_far64: RIP > limit"));
156 exception(BX_GP_EXCEPTION, 0, 0);
160 /* Load CS:IP from destination pointer */
161 /* Load CS-cache with new segment descriptor */
162 load_cs(selector, descriptor, cpl);
164 /* Change the RIP value */
165 RIP = rip;