/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.121 2008/12/11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Mark cache as being OK type for succeeding reads/writes
    seg->cache.valid |= SegAccessROK | SegAccessWOK;
    return 1;
  }
#endif

  if (seg->cache.valid==0) {
    BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
    return 0;
  }

  if (seg->cache.p == 0) { /* not present */
    BX_ERROR(("write_virtual_checks(): segment not present"));
    return 0;
  }

  switch (seg->cache.type) {
    case 0: case 1:   // read only
    case 4: case 5:   // read only, expand down
    case 8: case 9:   // execute only
    case 10: case 11: // execute/read
    case 12: case 13: // execute only, conforming
    case 14: case 15: // execute/read-only, conforming
      BX_ERROR(("write_virtual_checks(): no write access to seg"));
      return 0;

    case 2: case 3: /* read/write */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
        return 0;
      }

      if (seg->cache.u.segment.limit_scaled >= 15) {
        // Mark cache as being OK type for succeeding reads/writes. The limit
        // checks still need to be done, though they become simpler. We could
        // probably also optimize that out with a flag for the case when the
        // limit is the maximum 32-bit value. The limit should accommodate at
        // least a dword, since we subtract from it in the simple limit check
        // in other functions, and we don't want the value to wrap.
        // Only normal segments (not expand down) are handled this way.
        seg->cache.valid |= SegAccessROK | SegAccessWOK;
      }
      break;

    case 6: case 7: /* read/write, expand down */
      if (seg->cache.u.segment.d_b)
        upper_limit = 0xffffffff;
      else
        upper_limit = 0x0000ffff;
      if ((offset <= seg->cache.u.segment.limit_scaled) ||
          (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
      {
        BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
        return 0;
      }
      break;

    default:
      BX_PANIC(("write_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }

  return 1;
}
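// Worked example for the case 2/3 limit check above (illustrative numbers):
// with limit_scaled = 0x0000FFFF, a 4-byte write at offset 0xFFFC is the last
// one accepted, since 0xFFFC == 0xFFFF - 4 + 1; offset 0xFFFD would spill
// past the limit and is rejected. The second clause, (length-1 > limit_scaled),
// guards the unsigned subtraction itself: for a tiny segment where length
// exceeds limit_scaled+1, limit_scaled - length + 1 wraps around and the
// first compare alone would falsely pass.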
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Mark cache as being OK type for succeeding reads/writes
    seg->cache.valid |= SegAccessROK | SegAccessWOK;
    return 1;
  }
#endif

  if (seg->cache.valid==0) {
    BX_DEBUG(("read_virtual_checks(): segment descriptor not valid"));
    return 0;
  }

  if (seg->cache.p == 0) { /* not present */
    BX_ERROR(("read_virtual_checks(): segment not present"));
    return 0;
  }

  switch (seg->cache.type) {
    case 0: case 1:   /* read only */
    case 2: case 3:   /* read/write */
    case 10: case 11: /* execute/read */
    case 14: case 15: /* execute/read-only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("read_virtual_checks(): read beyond limit"));
        return 0;
      }
      if (seg->cache.u.segment.limit_scaled >= 15) {
        // Mark cache as being OK type for succeeding reads. See notes for
        // write checks; similar code.
        seg->cache.valid |= SegAccessROK;
      }
      break;

    case 4: case 5: /* read only, expand down */
    case 6: case 7: /* read/write, expand down */
      if (seg->cache.u.segment.d_b)
        upper_limit = 0xffffffff;
      else
        upper_limit = 0x0000ffff;
      if ((offset <= seg->cache.u.segment.limit_scaled) ||
          (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
      {
        BX_ERROR(("read_virtual_checks(): read beyond limit ED"));
        return 0;
      }
      break;

    case 8: case 9:   /* execute only */
    case 12: case 13: /* execute only, conforming */
      /* can't read or write an execute-only segment */
      BX_ERROR(("read_virtual_checks(): execute only"));
      return 0;

    default:
      BX_PANIC(("read_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }

  return 1;
}
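// Note on the expand-down cases above: for expand-down segments the valid
// offsets are (limit_scaled, upper_limit], i.e. strictly above the limit and
// no higher than 0xFFFF (16-bit, d_b=0) or 0xFFFFFFFF (32-bit, d_b=1).
// Illustrative numbers: with limit_scaled = 0x0FFF and d_b = 0, a 2-byte read
// at offset 0x1000 is accepted, while one at offset 0xFFFF fails the
// (upper_limit - offset) < (length - 1) test because its second byte would
// wrap past the top of the segment.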
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Mark cache as being OK type for succeeding reads/writes
    seg->cache.valid |= SegAccessROK | SegAccessWOK;
    return 1;
  }
#endif

  if (seg->cache.valid==0) {
    BX_DEBUG(("execute_virtual_checks(): segment descriptor not valid"));
    return 0;
  }

  if (seg->cache.p == 0) { /* not present */
    BX_ERROR(("execute_virtual_checks(): segment not present"));
    return 0;
  }

  switch (seg->cache.type) {
    case 0: case 1:   /* read only */
    case 2: case 3:   /* read/write */
    case 10: case 11: /* execute/read */
    case 14: case 15: /* execute/read-only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit"));
        return 0;
      }
      if (seg->cache.u.segment.limit_scaled >= 15) {
        // Mark cache as being OK type for succeeding reads. See notes for
        // write checks; similar code.
        seg->cache.valid |= SegAccessROK;
      }
      break;

    case 8: case 9:   /* execute only */
    case 12: case 13: /* execute only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit execute only"));
        return 0;
      }
      break;

    case 4: case 5: /* read only, expand down */
    case 6: case 7: /* read/write, expand down */
      if (seg->cache.u.segment.d_b)
        upper_limit = 0xffffffff;
      else
        upper_limit = 0x0000ffff;
      if ((offset <= seg->cache.u.segment.limit_scaled) ||
          (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit ED"));
        return 0;
      }
      break;

    default:
      BX_PANIC(("execute_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }

  return 1;
}
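// How the cached SegAccessROK/SegAccessWOK bits pay off (a sketch, not code
// from this file): once set, fast-path accessors can skip the descriptor
// type/present checks and perform only a simplified limit compare, roughly
//
//   if (seg->cache.valid & SegAccessWOK) {
//     if (offset <= seg->cache.u.segment.limit_scaled - length + 1)
//       /* go straight to linear-address translation */;
//   }
//
// The limit_scaled >= 15 precondition above keeps that subtraction from
// wrapping for any access of up to 16 bytes.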
const char *BX_CPU_C::strseg(bx_segment_reg_t *seg)
{
  if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES]) return("ES");
  else if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]) return("CS");
  else if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS]) return("SS");
  else if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]) return("DS");
  else if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]) return("FS");
  else if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS]) return("GS");
  else {
    BX_PANIC(("undefined segment passed to strseg()!"));
    return("??");
  }
}
int BX_CPU_C::int_number(unsigned s)
{
  if (s == BX_SEG_REG_SS)
    return BX_SS_EXCEPTION;
  else
    return BX_GP_EXCEPTION;
}
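// A limit or access violation through SS must raise #SS rather than #GP
// (per the IA-32 fault classification), which is why callers of the access
// routines look the vector up here instead of hard-coding BX_GP_EXCEPTION.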
Bit8u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_byte(bx_address laddr)
{
  Bit8u data;

  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
    Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
    data = *hostAddr;
    BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
        tlbEntry->ppf | pageOffset, 1, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

  access_read_linear(laddr, 1, 0, BX_READ, (void *) &data);
  return data;
}
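// The same two-tier pattern repeats in the word/dword/qword readers below:
// on a TLB hit the value is read straight from host memory at
// (hostPageAddr | pageOffset); on a miss, access_read_linear() performs the
// full translation, raising page faults as needed, and fills in &data.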
Bit16u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_word(bx_address laddr)
{
  Bit16u data;

  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
    Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
    ReadHostWordFromLittleEndian(hostAddr, data);
    BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
        tlbEntry->ppf | pageOffset, 2, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

  access_read_linear(laddr, 2, 0, BX_READ, (void *) &data);
  return data;
}
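// Why BX_TLB_INDEX_OF() is passed (length - 1): the intent is that the TLB
// slot be selected from the address of the access's last byte. An access
// that straddles a page boundary therefore indexes the slot of the second
// page, the lpf compare against the first page fails, and the access drops
// to the access_read_linear() slow path, which handles the split correctly.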
Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_dword(bx_address laddr)
{
  Bit32u data;

  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
    Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
    ReadHostDWordFromLittleEndian(hostAddr, data);
    BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
        tlbEntry->ppf | pageOffset, 4, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

  access_read_linear(laddr, 4, 0, BX_READ, (void *) &data);
  return data;
}
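// ReadHostDWordFromLittleEndian() and its word/qword siblings compile to
// plain loads on little-endian hosts and to byte swaps on big-endian ones,
// so guest memory always keeps x86 (little-endian) layout regardless of the
// host CPU.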
Bit64u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_qword(bx_address laddr)
{
  Bit64u data;

  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
    Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
    ReadHostQWordFromLittleEndian(hostAddr, data);
    BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
        tlbEntry->ppf | pageOffset, 8, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

  access_read_linear(laddr, 8, 0, BX_READ, (void *) &data);
  return data;
}
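// BX_INSTR_LIN_ACCESS() and BX_DBG_LIN_MEMORY_ACCESS() are instrumentation
// and debugger hooks; in builds configured without those features they
// expand to nothing, leaving the TLB-hit path only a few instructions long.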
Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_byte(bx_address laddr, bx_bool user)
{
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    if (! (tlbEntry->accessBits & user)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      return hostAddr;
    }
  }

  return 0;
}
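// accessBits is a small deny mask, judging from how it is tested here and in
// v2h_write_byte() below: bit 0 set means "no user-mode access" and bit 1 set
// means "no write". Thus (accessBits & user) rejects a CPL3 read of a
// supervisor-only page, and (accessBits & (0x2 | user)) additionally rejects
// any write to a read-only page; 'user' is expected to be 1 at CPL 3 and 0
// otherwise.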
Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_write_byte(bx_address laddr, bx_bool user)
{
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf)
  {
    // See if the TLB entry privilege level allows us write access
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
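      // decWriteStamp() ages the write stamp of the target physical page so
      // that any instruction-cache entries built from it are detected as
      // stale; this keeps translated traces coherent with self-modifying code.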
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);