/////////////////////////////////////////////////////////////////////////
// $Id: access32.cc,v 1.20 2008/12/11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2008 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

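// The accessors below share one shape: validate against the cached
// segment descriptor (SegAccessROK/SegAccessWOK plus the scaled limit),
// form the 32-bit linear address, then probe exactly one TLB entry.
// On a hit with sufficient privilege the access goes straight through
// the cached host page pointer; anything else falls back to
// access_read_linear()/access_write_linear() for a full page walk that
// may raise #PF.  A typical caller (illustrative only, not taken from
// this file) would be an instruction handler pushing a word:
//
//   write_virtual_word_32(BX_SEG_REG_SS, ESP - 2, val16);
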
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          *hostAddr = data;
          return;
        }
      }
      access_write_linear(laddr, 1, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

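// Reading of the accessBits test above: a write takes the fast path only
// when (accessBits & (0x2 | USER_PL)) == 0, i.e. the page is known
// writable without another page walk and, when running at user
// privilege (USER_PL == 1), user access is known OK as well.  The read
// accessors below test only USER_PL, so supervisor reads always pass a
// matching TLB entry.  Any set bit routes the access through
// access_*_linear() to re-validate or fault.
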
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("write_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 2, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

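// How the #AC machinery above works: with alignment checking active
// (CR0.AM and EFLAGS.AC, tested by alignment_check()),
// alignment_check_mask is non-zero and AlignedAccessLPFOf() folds the
// low address bits into the lpf value compared against the TLB entry.
// A misaligned access can then never match the fast path; it falls
// through to the explicit (laddr & 1)/(laddr & 3)/(laddr & 7) test and
// raises #AC there.
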
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostDWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 4, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("write_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 8, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#if BX_CPU_LEVEL >= 6

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_32(unsigned s, Bit32u offset, const BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
          return;
        }
      }
      access_write_linear(laddr, 16, CPL, (void *) data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dqword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

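// The _aligned variant below serves SSE accesses that architecturally
// require 16-byte alignment (MOVAPS/MOVDQA and friends, presumably on
// the caller's side): AlignedAccessLPFOf(laddr, 15) keeps any
// misaligned address out of the TLB fast path, and the slow path raises
// #GP(0) before the write is attempted.
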
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_aligned_32(unsigned s, Bit32u offset, const BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
          return;
        }
      }
      if (laddr & 15) {
        BX_ERROR(("write_virtual_dqword_aligned_32(): #GP misaligned access"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
      access_write_linear(laddr, 16, CPL, (void *) data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dqword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#endif

Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
          data = *hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }
      access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

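// Unlike the write fast paths, the read fast path leaves
// pageWriteStampTable alone: decWriteStamp() exists to invalidate
// translated code cached from a physical page, so it only needs to run
// when guest memory is actually modified.
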
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit16u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
          ReadHostWordFromLittleEndian(hostAddr, data);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
          ReadHostDWordFromLittleEndian(hostAddr, data);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("read_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr, data);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("read_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#if BX_CPU_LEVEL >= 6

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_32(unsigned s, Bit32u offset, BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_dqword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      if (laddr & 15) {
        BX_ERROR(("read_virtual_dqword_aligned_32(): #GP misaligned access"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_dqword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#endif

//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls  //
//////////////////////////////////////////////////////////////

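// Protocol between the read_RMW_* and write_RMW_* halves: the read half
// translates the address once and records the result in BX_CPU_THIS_PTR
// address_xlation.  If the TLB fast path wins, .pages holds the host
// pointer itself (any value > 2); otherwise access_read_linear() sets
// .pages to 1 or 2 and fills paddress1 (plus paddress2/len1/len2 for an
// access split across two pages).  The write half dispatches on .pages,
// so no second translation, and no second chance to fault, happens
// between the read and the write.
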
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          data = *hostAddr;
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }
      access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

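// Note that every read_RMW_* helper performs *write* checks
// (SegAccessWOK, write_virtual_checks, the 0x2 bit of accessBits) even
// though it only reads: the paired write_RMW_* call must be unable to
// fault once the instruction has consumed the old value.
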
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit16u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          ReadHostWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("read_RMW_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          ReadHostDWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("read_RMW_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          ReadHostQWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_byte(Bit8u val8)
{
  BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
      BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, (Bit8u*) &val8);

  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit8u *hostAddr = (Bit8u*) BX_CPU_THIS_PTR address_xlation.pages;
    *hostAddr = val8;
  }
  else {
    // address_xlation.pages must be 1
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val8);
  }
}

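// A single byte can never straddle a page boundary, so .pages here is
// either a host pointer (> 2) or exactly 1; the two-page case handled
// by the wider variants below cannot arise.
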
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_word(Bit16u val16)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit16u *hostAddr = (Bit16u*) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostWordToLittleEndian(hostAddr, val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, (Bit8u*) &val16);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, (Bit8u*) &val16);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, (Bit8u*) &val16);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, ((Bit8u *) &val16)+1);
#else
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, ((Bit8u *) &val16)+1);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, (Bit8u*) &val16);
#endif
  }
}

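// For a split write the value must reach guest memory in little-endian
// order whatever the host byte order is.  On a little-endian host the
// low byte of val16 sits first in memory, so it goes to paddress1; on a
// big-endian host the in-memory layout is reversed, hence the swapped
// source offsets (and, in the dword/qword variants below, the
// 4 - len1 / 8 - len1 bias).
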
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_dword(Bit32u val32)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit32u *hostAddr = (Bit32u*) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostDWordToLittleEndian(hostAddr, val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, (Bit8u*) &val32);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, (Bit8u*) &val32);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, (Bit8u*) &val32);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, (Bit8u*) &val32);
#endif
  }
}

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_qword(Bit64u val64)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit64u *hostAddr = (Bit64u*) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostQWordToLittleEndian(hostAddr, val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, (Bit8u*) &val64);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, (Bit8u*) &val64);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, (Bit8u*) &val64);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, (Bit8u*) &val64);
#endif
  }
}

// Write data to new stack.  These methods are required for emulation
// correctness but are not performance critical.

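// These helpers take the target stack segment descriptor and the new
// privilege level explicitly because they run in the middle of a stack
// switch, before the new SS is loaded.  That is also why a failure
// raises #SS with the new selector's error code (selector & 0xfffc when
// its RPL differs from CPL) rather than the usual int_number(s) fault.
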
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit16u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this privilege level.
        if (! (tlbEntry->accessBits & (0x2 | user))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 1) {
          BX_ERROR(("write_new_stack_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 2, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_word_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 2))
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
  goto accessOK;
}

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit32u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this privilege level.
        if (! (tlbEntry->accessBits & (0x2 | user))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostDWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 3) {
          BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 4, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_dword_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 4))
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
  goto accessOK;
}

1217 // assuming the write happens in legacy mode
1218 void BX_CPU_C::write_new_stack_qword_32(bx_segment_reg_t
*seg
, Bit32u offset
, unsigned curr_pl
, Bit64u data
)
1222 if (seg
->cache
.valid
& SegAccessWOK
) {
1223 if (offset
<= (seg
->cache
.u
.segment
.limit_scaled
-7)) {
1225 laddr
= (Bit32u
)(seg
->cache
.u
.segment
.base
) + offset
;
1226 bx_bool user
= (curr_pl
== 3);
1227 unsigned tlbIndex
= BX_TLB_INDEX_OF(laddr
, 7);
1228 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
1229 Bit32u lpf
= AlignedAccessLPFOf(laddr
, (7 & BX_CPU_THIS_PTR alignment_check_mask
));
1231 Bit32u lpf
= LPFOf(laddr
);
1233 bx_TLB_entry
*tlbEntry
= &BX_CPU_THIS_PTR TLB
.entry
[tlbIndex
];
1234 if (tlbEntry
->lpf
== lpf
) {
1235 // See if the TLB entry privilege level allows us write access
1237 if (! (tlbEntry
->accessBits
& (0x2 | user
))) {
1238 bx_hostpageaddr_t hostPageAddr
= tlbEntry
->hostPageAddr
;
1239 Bit32u pageOffset
= PAGE_OFFSET(laddr
);
1240 BX_INSTR_LIN_ACCESS(BX_CPU_ID
, laddr
, tlbEntry
->ppf
| pageOffset
, 8, BX_WRITE
);
1241 BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID
, laddr
,
1242 tlbEntry
->ppf
| pageOffset
, 8, curr_pl
, BX_WRITE
, (Bit8u
*) &data
);
1243 Bit64u
*hostAddr
= (Bit64u
*) (hostPageAddr
| pageOffset
);
1244 #if BX_SUPPORT_ICACHE
1245 pageWriteStampTable
.decWriteStamp(tlbEntry
->ppf
);
1247 WriteHostQWordToLittleEndian(hostAddr
, data
);
1252 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
1253 if (BX_CPU_THIS_PTR
alignment_check() && user
) {
1255 BX_ERROR(("write_new_stack_qword_32(): #AC misaligned access"));
1256 exception(BX_AC_EXCEPTION
, 0, 0);
1261 access_write_linear(laddr
, 8, curr_pl
, (void *) &data
);
1265 BX_ERROR(("write_new_stack_qword_32(): segment limit violation"));
1266 exception(BX_SS_EXCEPTION
,
1267 seg
->selector
.rpl
!= CPL
? (seg
->selector
.value
& 0xfffc) : 0, 0);
1271 // add error code when segment violation occurs when pushing into new stack
1272 if (!write_virtual_checks(seg
, offset
, 8))
1273 exception(BX_SS_EXCEPTION
,
1274 seg
->selector
.rpl
!= CPL
? (seg
->selector
.value
& 0xfffc) : 0, 0);