/////////////////////////////////////////////////////////////////////////
// $Id: access32.cc,v 1.20 2008/12/11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2008 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

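// This file implements the 32-bit (non long-mode) guest memory accessors:
// plain reads and writes of 1/2/4/8/16 bytes, the read-modify-write (RMW)
// pairs, and the "new stack" writes used while switching stacks. Every
// accessor has the same shape: validate the offset against the cached
// segment limit, probe a single TLB entry for a direct host pointer, and
// otherwise fall back to access_read_linear()/access_write_linear(), which
// handle page translation and page-crossing accesses.
//
// A hypothetical caller (sketch only; eaddr is an illustrative name for a
// previously computed effective address):
//
//   Bit32u op = read_virtual_dword_32(BX_SEG_REG_DS, eaddr); // may raise #GP/#PF/#AC
//   write_virtual_dword_32(BX_SEG_REG_DS, eaddr, op + 1);
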
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          *hostAddr = data;
          return;
        }
      }
      access_write_linear(laddr, 1, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

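// Note on the fast path above, which repeats in every accessor below: the
// TLB entry caches a host pointer for the guest page, and accessBits says
// what may bypass a fresh page walk. As used in this file, bit 1 (0x2) set
// denies writes and the low bit denies user-mode access (USER_PL is nonzero
// only at CPL 3), so (accessBits & (0x2 | USER_PL)) == 0 means the write may
// go straight to host memory; plain reads test only USER_PL. The second
// argument of BX_TLB_INDEX_OF() is the access length minus one, so an access
// that crosses into the next page indexes that page's TLB slot, fails the
// tag compare, and takes the slow path. decWriteStamp() appears here to
// invalidate instruction-cache traces compiled from a page that is about to
// be modified (self-modifying code detection).
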
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("write_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 2, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

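// Implementation note for the 2/4/8-byte accessors: alignment_check_mask is
// nonzero only while #AC checking is in force (CR0.AM and EFLAGS.AC set, at
// CPL 3), and AlignedAccessLPFOf() then keeps the masked low address bits in
// the computed tag. Stored TLB tags are page-aligned, so a misaligned access
// fails the (tlbEntry->lpf == lpf) compare, skips the fast path, and reaches
// the explicit #AC test before the slow-path access_*_linear() call.
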
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostDWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 4, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("write_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 8, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#if BX_CPU_LEVEL >= 6

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_32(unsigned s, Bit32u offset, const BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
          return;
        }
      }
      access_write_linear(laddr, 16, CPL, (void *) data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dqword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_aligned_32(unsigned s, Bit32u offset, const BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
          return;
        }
      }
      if (laddr & 15) {
        BX_ERROR(("write_virtual_dqword_aligned_32(): #GP misaligned access"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
      access_write_linear(laddr, 16, CPL, (void *) data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dqword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#endif

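// Two different alignment policies are used above: the "aligned" dqword
// accessors raise #GP on any 16-byte misalignment, matching MOVAPS-style
// SSE semantics, while the scalar accessors raise #AC only when alignment
// checking is active (see alignment_check()); otherwise a misaligned scalar
// access is legal and at worst takes the slow path.
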
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
          data = *hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }
      access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit16u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
          ReadHostWordFromLittleEndian(hostAddr, data);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
          ReadHostDWordFromLittleEndian(hostAddr, data);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("read_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr, data);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("read_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#if BX_CPU_LEVEL >= 6

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_32(unsigned s, Bit32u offset, BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_dqword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      if (laddr & 15) {
        BX_ERROR(("read_virtual_dqword_aligned_32(): #GP misaligned access"));
        exception(BX_GP_EXCEPTION, 0, 0);
      }
      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_dqword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

#endif

//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls //
//////////////////////////////////////////////////////////////

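// The RMW accessors come in pairs. The read half translates the address
// with *write* permission (note SegAccessWOK and write_virtual_checks()
// below) and records either a host pointer or the physical address(es) in
// BX_CPU_THIS_PTR address_xlation; the matching write_RMW_virtual_*() then
// stores the result with no second translation, so the store cannot fault
// halfway through the instruction. A hypothetical sketch of an ADD m16,r16
// style handler (operand names are illustrative):
//
//   Bit16u op1 = read_RMW_virtual_word_32(i->seg(), eaddr);
//   op1 += op2;
//   write_RMW_virtual_word(op1);
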
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          data = *hostAddr;
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }
      access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit16u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          ReadHostWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("read_RMW_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          ReadHostDWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("read_RMW_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          ReadHostQWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0, 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0, 0);
  goto accessOK;
}

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_byte(Bit8u val8)
{
  BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
      BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, (Bit8u*) &val8);

  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit8u *hostAddr = (Bit8u *) BX_CPU_THIS_PTR address_xlation.pages;
    *hostAddr = val8;
  }
  else {
    // address_xlation.pages must be 1
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val8);
  }
}

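// All write_RMW_virtual_* helpers decode address_xlation.pages the same
// way: a value greater than 2 is really the host pointer captured by the
// read half (any valid pointer compares greater than 2), 1 means the access
// hit a single physical page (paddress1), and 2 means it straddled two
// pages, described by paddress1/len1 and paddress2/len2. A byte can never
// straddle a page, hence the two-way branch above.
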
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_word(Bit16u val16)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit16u *hostAddr = (Bit16u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostWordToLittleEndian(hostAddr, val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, (Bit8u*) &val16);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, (Bit8u*) &val16);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, (Bit8u*) &val16);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, ((Bit8u*) &val16)+1);
#else
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, ((Bit8u*) &val16)+1);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, (Bit8u*) &val16);
#endif
  }
}

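// In the two-page case above, the value must reach guest memory in
// little-endian order whatever the host byte order. On a little-endian
// host the least significant byte of val16 is at (Bit8u*)&val16, so it is
// written to paddress1 (the lower linear address); on a big-endian host the
// least significant byte sits at (Bit8u*)&val16 + 1, hence the swapped
// pointers in the #else branch. The dword/qword versions below use len1 to
// split the value at the page boundary in the same way.
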
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_dword(Bit32u val32)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit32u *hostAddr = (Bit32u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostDWordToLittleEndian(hostAddr, val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, (Bit8u*) &val32);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, (Bit8u*) &val32);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, (Bit8u*) &val32);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, (Bit8u*) &val32);
#endif
  }
}

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_qword(Bit64u val64)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit64u *hostAddr = (Bit64u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostQWordToLittleEndian(hostAddr, val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, (Bit8u*) &val64);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, (Bit8u*) &val64);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, (Bit8u*) &val64);
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, (Bit8u*) &val64);
#endif
  }
}

// Write data to the new stack; these methods are required for emulation
// correctness but are not performance critical.

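// These writes differ from the ordinary ones above in three ways: the
// target privilege level is passed in explicitly (curr_pl) because the new
// stack may belong to a different CPL, the linear address is formed
// directly from the descriptor's base since the new SS is not loaded into
// sregs[] yet, and failures raise #SS with the new stack selector as the
// error code (when its RPL differs from CPL).
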
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit16u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | user))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 1) {
          BX_ERROR(("write_new_stack_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 2, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_word_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 2))
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
  goto accessOK;
}

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit32u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | user))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostDWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 3) {
          BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 4, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_dword_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 4))
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
  goto accessOK;
}

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_qword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit64u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (! (tlbEntry->accessBits & (0x2 | user))) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
              tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
          WriteHostQWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 7) {
          BX_ERROR(("write_new_stack_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif

      access_write_linear(laddr, 8, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_qword_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 8))
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
  goto accessOK;
}