/////////////////////////////////////////////////////////////////////////
// $Id: access64.cc,v 1.20 2008/09/18 17:37:28 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2008 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64

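// Overview (descriptive comment added for readability): the routines below
// implement the long-mode (64-bit) data memory accessors: write_virtual_*_64,
// read_virtual_*_64, their read-modify-write (RMW) variants and the
// write_new_stack_*_64 helpers.  Each routine first tries the guest-to-host
// TLB fast path: if the TLB entry for the linear address matches and its
// accessBits allow the required access at the current privilege level, the
// data is transferred directly through the cached host page pointer.
// Otherwise the access falls back to access_read_linear() /
// access_write_linear(), after checking that the linear address is canonical
// and, where alignment checking applies, properly aligned.
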
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("write_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("write_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 2, CPL, (void *) &data);
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("write_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("write_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 4, CPL, (void *) &data);
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("write_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("write_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, CPL, (void *) &data);
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("write_virtual_dqword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 16, CPL, (void *) data);
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }
#endif

  if (laddr & 15) {
    BX_ERROR(("write_virtual_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("write_virtual_dqword_aligned_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 16, CPL, (void *) data);
}

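// Note (added comment): unlike write_virtual_dqword_64, the _aligned variant
// above uses AlignedAccessLPFOf(laddr, 15), so a misaligned address never
// matches the TLB fast path and is then rejected with #GP on the slow path.
// This is the behaviour needed by instructions that require 16-byte aligned
// 128-bit operands (e.g. the aligned SSE loads/stores).
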
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      data = *hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
  return data;
}

Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit16u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      ReadHostWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("read_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("read_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
  return data;
}

Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      ReadHostDWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("read_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("read_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
  return data;
}

Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("read_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
  return data;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
      ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("read_virtual_dqword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
      ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
      return;
    }
  }
#endif

  if (laddr & 15) {
    BX_ERROR(("read_virtual_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("read_virtual_dqword_aligned_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
}

//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls  //
//////////////////////////////////////////////////////////////

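// In the RMW read routines below, write permission is required already at
// read time (the accessBits test uses the same 0x2 mask as the write
// routines), and on the fast path the resolved host pointer is cached in
// BX_CPU_THIS_PTR address_xlation.pages so the matching write_RMW_virtual_*
// routine (implemented elsewhere) can complete the store without repeating
// the translation.
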
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      data = *hostAddr;
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
  return data;
}

Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit16u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("read_RMW_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("read_RMW_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
  return data;
}

Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostDWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("read_RMW_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("read_RMW_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
  return data;
}

Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);

#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}

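// The write_new_stack_*_64 helpers below are used when the CPU switches to a
// new stack (e.g. on an inter-privilege interrupt or call through a gate):
// they take an already-resolved linear address and an explicit privilege
// level instead of a segment:offset pair, perform the user/supervisor check
// against curr_pl, and report a canonical failure as #SS.
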
void BX_CPU_C::write_new_stack_word_64(Bit64u laddr, unsigned curr_pl, Bit16u data)
{
  bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("write_new_stack_word_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 1) {
      BX_ERROR(("write_new_stack_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 2, curr_pl, (void *) &data);
}

void BX_CPU_C::write_new_stack_dword_64(Bit64u laddr, unsigned curr_pl, Bit32u data)
{
  bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("write_new_stack_dword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 3) {
      BX_ERROR(("write_new_stack_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 4, curr_pl, (void *) &data);
}

void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
  bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}

#endif