1 /////////////////////////////////////////////////////////////////////////
2 // $Id: access64.cc,v 1.20 2008/09/18 17:37:28 sshwarts Exp $
3 /////////////////////////////////////////////////////////////////////////
5 // Copyright (c) 2008 Stanislav Shwartsman
6 // Written by Stanislav Shwartsman [sshwarts at sourceforge net]
8 // This library is free software; you can redistribute it and/or
9 // modify it under the terms of the GNU Lesser General Public
10 // License as published by the Free Software Foundation; either
11 // version 2 of the License, or (at your option) any later version.
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
20 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 /////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64
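
// All accessors below share the same two-tier structure.  When
// BX_SupportGuest2HostTLB is compiled in, the linear address is first
// looked up in the TLB: if the cached entry still maps the page and its
// accessBits permit the access at the current privilege level, the data
// is transferred directly through the cached host pointer
// (hostPageAddr | pageOffset) and the function returns.  Otherwise the
// code falls through to the generic path: canonical-address and, where
// applicable, alignment (#AC) checks, followed by access_read_linear() /
// access_write_linear(), which walk the page tables and may raise
// exceptions.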
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}
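
// Illustrative use (a sketch only, not code from this file): an
// instruction handler with a decoded memory destination would typically
// resolve the effective address and then store through one of these
// accessors, e.g.
//
//   Bit8u val = op2;                               // value to be stored
//   write_virtual_byte_64(i->seg(), RMAddr(i), val);
//
// where i->seg() and RMAddr(i) come from the decoded instruction; the
// exact handler code varies across the instruction implementation files.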
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("write_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("write_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 2, CPL, (void *) &data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("write_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("write_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 4, CPL, (void *) &data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("write_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("write_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, CPL, (void *) &data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("write_virtual_dqword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 16, CPL, (void *) data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }
#endif

  if (laddr & 15) {
    BX_ERROR(("write_virtual_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("write_virtual_dqword_aligned_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_write_linear(laddr, 16, CPL, (void *) data);
}
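
// The read accessors below differ from the write accessors in two ways:
// the fast path only tests the USER_PL bit of accessBits (write
// permission is not required), and the value is copied with the
// ReadHost*FromLittleEndian() helpers (a plain dereference for the byte
// case) so that guest little-endian data is converted correctly even on
// a big-endian host.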
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      data = *hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
  return data;
}
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit16u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      ReadHostWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("read_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("read_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
  return data;
}
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      ReadHostDWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("read_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("read_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
  return data;
}
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("read_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
  return data;
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
      ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("read_virtual_dqword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmRegister *data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
      ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
      return;
    }
  }
#endif

  if (laddr & 15) {
    BX_ERROR(("read_virtual_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
    BX_ERROR(("read_virtual_dqword_aligned_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
}
//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls  //
//////////////////////////////////////////////////////////////
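
// Each read_RMW_virtual_*_64() below performs the read half of a
// read-modify-write access.  On the fast path it latches the translated
// host pointer into BX_CPU_THIS_PTR address_xlation.pages, so the paired
// write_RMW_virtual_*() helper (implemented elsewhere) can commit the
// modified value without repeating the translation; on the slow path,
// access_read_linear() is called with BX_RW, which likewise keeps the
// translation around for the write half.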
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      data = *hostAddr;
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_byte_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

  access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
  return data;
}
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit16u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("read_RMW_virtual_word_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 1) {
      BX_ERROR(("read_RMW_virtual_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
  return data;
}
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostDWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("read_RMW_virtual_dword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 3) {
      BX_ERROR(("read_RMW_virtual_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
  return data;
}
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(s), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
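
// The write_new_stack_*_64() helpers below are used when pushing onto a
// stack that is in the process of being switched to (e.g. inter-privilege
// interrupts and far calls): the caller passes the linear address and the
// privilege level explicitly, canonical failures raise #SS rather than
// the usual segment exception, and the #AC check applies only to
// user-mode (curr_pl == 3) accesses.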
void BX_CPU_C::write_new_stack_word_64(Bit64u laddr, unsigned curr_pl, Bit16u data)
{
  bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("write_new_stack_word_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 1) {
      BX_ERROR(("write_new_stack_word_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 2, curr_pl, (void *) &data);
}
void BX_CPU_C::write_new_stack_dword_64(Bit64u laddr, unsigned curr_pl, Bit32u data)
{
  bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("write_new_stack_dword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 3) {
      BX_ERROR(("write_new_stack_dword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 4, curr_pl, (void *) &data);
}
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
  bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  Bit64u lpf = LPFOf(laddr);
#endif
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (! (tlbEntry->accessBits & (0x2 | user))) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && user) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}

#endif /* BX_SUPPORT_X86_64 */