[bochs-mirror.git] / cpu / access64.cc
/////////////////////////////////////////////////////////////////////////
// $Id: access64.cc,v 1.21 2008/12/11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (c) 2008 Stanislav Shwartsman
//    Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64
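
// This file provides the long-mode (64-bit) guest memory accessors:
// write_virtual_*_64, read_virtual_*_64, the read_RMW_*_64 variants used by
// read-modify-write instructions, and the write_new_stack_*_64 helpers.
// They all follow the same pattern: attempt a direct host-memory fast path
// through the cached TLB entry, and otherwise fall back to the paging slow
// path via access_read_linear() / access_write_linear().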
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}
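
// Note on the fast path above: the store goes straight through the cached
// host pointer (hostPageAddr | pageOffset) only when the TLB entry covers
// this linear page frame and its accessBits permit a write at the current
// privilege level. Anything else falls through to the canonicality check and
// to access_write_linear(), which performs the full translation and raises
// any fault. The same structure repeats in all accessors below.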
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("write_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("write_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 2, CPL, (void *) &data);
}
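
// For the multi-byte accessors, alignment checking (#AC) is folded into the
// TLB lookup: when alignment_check_mask is armed, AlignedAccessLPFOf() is
// expected to yield an lpf that no longer matches for a misaligned laddr, so
// a misaligned access bypasses the host fast path and reaches the explicit
// #AC test on the slow path instead.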
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("write_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("write_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 4, CPL, (void *) &data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("write_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("write_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, CPL, (void *) &data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("write_virtual_dqword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 16, CPL, (void *) data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }

  if (laddr & 15) {
    BX_ERROR(("write_virtual_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("write_virtual_dqword_aligned_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 16, CPL, (void *) data);
}
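
// The *_dqword_aligned_64 variants enforce 16-byte alignment themselves:
// AlignedAccessLPFOf(laddr, 15) keeps misaligned addresses off the fast path,
// and the slow path raises #GP for any address with the low four bits set,
// as required for aligned 128-bit (SSE) memory operands.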
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      data = *hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
  return data;
}
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit16u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      ReadHostWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("read_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("read_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
  return data;
}
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      ReadHostDWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("read_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("read_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
  return data;
}
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("read_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
  return data;
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr, data->xmm64u(0));
      ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("read_virtual_dqword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr, data->xmm64u(0));
      ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (laddr & 15) {
    BX_ERROR(("read_virtual_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("read_virtual_dqword_aligned_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
}
//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls  //
//////////////////////////////////////////////////////////////
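
// Note: on a fast-path hit the read_RMW_*_64 functions below record the
// translated host pointer in BX_CPU_THIS_PTR address_xlation.pages, so the
// matching write_RMW commit routine (implemented outside this file) can
// store the result without translating the address again; on the slow path
// access_read_linear() is responsible for recording the translation instead.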
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      data = *hostAddr;
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
  return data;
}
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit16u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("read_RMW_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("read_RMW_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
  return data;
}
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostDWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("read_RMW_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("read_RMW_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
  return data;
}
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
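
// The write_new_stack_*_64 helpers below take an already-formed linear
// address and an explicit privilege level rather than a segment:offset pair;
// they appear to be intended for pushes onto a new stack that has not been
// loaded yet (for example during inter-privilege transfers), which is why
// faults are reported as #SS instead of through int_number(s) and why the
// user/supervisor check uses curr_pl rather than the current CPL.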
void BX_CPU_C::write_new_stack_word_64(Bit64u laddr, unsigned curr_pl, Bit16u data)
{
  bx_bool user = (curr_pl == 3);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("write_new_stack_word_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 1) {
      BX_ERROR(("write_new_stack_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 2, curr_pl, (void *) &data);
}
void BX_CPU_C::write_new_stack_dword_64(Bit64u laddr, unsigned curr_pl, Bit32u data)
{
  bx_bool user = (curr_pl == 3);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("write_new_stack_dword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 3) {
      BX_ERROR(("write_new_stack_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 4, curr_pl, (void *) &data);
}
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
  bx_bool user = (curr_pl == 3);
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}

#endif