/////////////////////////////////////////////////////////////////////////
// $Id: string.cc,v 1.69 2008/12/11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code: alias the 64-bit register
// names used below to their 32-bit counterparts on non-x86-64 builds.
#define RAX EAX
#define RCX ECX
#define RSI ESI
#define RDI EDI
#endif

//
// Repeat Speedups methods
//
#if BX_SupportRepeatSpeedups
Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address srcOff,
    unsigned dstSeg, bx_address dstOff, Bit32u count)
{
  Bit32u bytesFitSrc, bytesFitDst;
  signed int pointerDelta;
  bx_address laddrDst, laddrSrc;
  Bit8u *hostAddrSrc, *hostAddrDst;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
  if (!(srcSegPtr->cache.valid & SegAccessROK))
    return 0;
  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
    return 0;

  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
  if (!(dstSegPtr->cache.valid & SegAccessWOK))
    return 0;
  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
    return 0;

  laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);

  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
  if (! hostAddrSrc) return 0;

  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);

  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
  // Check that native host access was not vetoed for that page
  if (!hostAddrDst) return 0;
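
  // Explanatory note (illustrative, not part of the original sources):
  // v2h_read_byte()/v2h_write_byte() hand back a direct host pointer only
  // when the guest page already has a translation that permits direct host
  // access at the current privilege level.  A NULL result simply means
  // "no fast path available"; returning 0 here makes the caller fall back
  // to the ordinary one-element-per-iteration path, which performs full
  // translation and exception checks.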
  // See how many bytes can fit in the rest of this page.
  if (BX_CPU_THIS_PTR get_DF()) {
    // Counting downward.
    bytesFitSrc = 1 + PAGE_OFFSET(laddrSrc);
    bytesFitDst = 1 + PAGE_OFFSET(laddrDst);
    pointerDelta = (signed int) -1;
  }
  else {
    // Counting upward.
    bytesFitSrc = 0x1000 - PAGE_OFFSET(laddrSrc);
    bytesFitDst = 0x1000 - PAGE_OFFSET(laddrDst);
    pointerDelta = (signed int)  1;
  }

  // Restrict byte count to the number that will fit in either
  // source or dest pages.
  if (count > bytesFitSrc)
    count = bytesFitSrc;
  if (count > bytesFitDst)
    count = bytesFitDst;
  if (count > bx_pc_system.getNumCpuTicksLeftNextEvent())
    count = bx_pc_system.getNumCpuTicksLeftNextEvent();

  // If after all the restrictions, there is anything left to do...
  if (count) {
    // Transfer data directly using host addresses
    for (unsigned j=0; j<count; j++) {
      * (Bit8u *) hostAddrDst = * (Bit8u *) hostAddrSrc;
      hostAddrDst += pointerDelta;
      hostAddrSrc += pointerDelta;
    }

    return count;
  }

  return 0;
}
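
// Worked example of the page-fit clamping above (numbers are illustrative
// only): with DF=0, laddrSrc = 0x00012ffa and laddrDst = 0x00020004,
//   PAGE_OFFSET(laddrSrc) = 0xffa  ->  bytesFitSrc = 0x1000 - 0xffa = 6
//   PAGE_OFFSET(laddrDst) = 0x004  ->  bytesFitDst = 0x1000 - 0x004 = 4092
// so a REP MOVSB with ECX = 1000 is clamped to 6 bytes on this call; the
// remaining iterations are handled by later calls (or by the generic path)
// once the source pointer has moved on to the next page.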
Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address srcOff,
    unsigned dstSeg, bx_address dstOff, Bit32u count)
{
  Bit32u wordsFitSrc, wordsFitDst;
  signed int pointerDelta;
  bx_address laddrDst, laddrSrc;
  Bit8u *hostAddrSrc, *hostAddrDst;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
  if (!(srcSegPtr->cache.valid & SegAccessROK))
    return 0;
  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
    return 0;

  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
  if (!(dstSegPtr->cache.valid & SegAccessWOK))
    return 0;
  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
    return 0;

  laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);

  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
  if (! hostAddrSrc) return 0;

  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);

  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
  // Check that native host access was not vetoed for that page
  if (!hostAddrDst) return 0;

  // See how many words can fit in the rest of this page.
  if (BX_CPU_THIS_PTR get_DF()) {
    // Counting downward.
    // Note: 1st word must not cross page boundary.
    if (((laddrSrc & 0xfff) > 0xffe) || ((laddrDst & 0xfff) > 0xffe))
      return 0;
    wordsFitSrc = (2 + PAGE_OFFSET(laddrSrc)) >> 1;
    wordsFitDst = (2 + PAGE_OFFSET(laddrDst)) >> 1;
    pointerDelta = (signed int) -2;
  }
  else {
    // Counting upward.
    wordsFitSrc = (0x1000 - PAGE_OFFSET(laddrSrc)) >> 1;
    wordsFitDst = (0x1000 - PAGE_OFFSET(laddrDst)) >> 1;
    pointerDelta = (signed int)  2;
  }

  // Restrict word count to the number that will fit in either
  // source or dest pages.
  if (count > wordsFitSrc)
    count = wordsFitSrc;
  if (count > wordsFitDst)
    count = wordsFitDst;
  if (count > bx_pc_system.getNumCpuTicksLeftNextEvent())
    count = bx_pc_system.getNumCpuTicksLeftNextEvent();

  // If after all the restrictions, there is anything left to do...
  if (count) {
    // Transfer data directly using host addresses
    for (unsigned j=0; j<count; j++) {
      CopyHostWordLittleEndian(hostAddrDst, hostAddrSrc);
      hostAddrDst += pointerDelta;
      hostAddrSrc += pointerDelta;
    }

    return count;
  }

  return 0;
}
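
// Why the "1st word must not cross page boundary" guard above (example with
// assumed numbers): with DF=1 and laddrDst & 0xfff == 0xfff, the first 16-bit
// store would touch offsets 0xfff and 0x1000 of the destination, i.e. two
// different pages, so a single host pointer cannot cover it; returning 0
// hands that iteration to the generic path, which handles the split access
// with proper per-page translation.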
Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address srcOff,
    unsigned dstSeg, bx_address dstOff, Bit32u count)
{
  Bit32u dwordsFitSrc, dwordsFitDst;
  signed int pointerDelta;
  bx_address laddrDst, laddrSrc;
  Bit8u *hostAddrSrc, *hostAddrDst;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
  if (!(srcSegPtr->cache.valid & SegAccessROK))
    return 0;
  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
    return 0;

  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
  if (!(dstSegPtr->cache.valid & SegAccessWOK))
    return 0;
  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
    return 0;

  laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);

  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
  if (! hostAddrSrc) return 0;

  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);

  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
  // Check that native host access was not vetoed for that page
  if (!hostAddrDst) return 0;

  // See how many dwords can fit in the rest of this page.
  if (BX_CPU_THIS_PTR get_DF()) {
    // Counting downward.
    // Note: 1st dword must not cross page boundary.
    if (((laddrSrc & 0xfff) > 0xffc) || ((laddrDst & 0xfff) > 0xffc))
      return 0;
    dwordsFitSrc = (4 + PAGE_OFFSET(laddrSrc)) >> 2;
    dwordsFitDst = (4 + PAGE_OFFSET(laddrDst)) >> 2;
    pointerDelta = (signed int) -4;
  }
  else {
    // Counting upward.
    dwordsFitSrc = (0x1000 - PAGE_OFFSET(laddrSrc)) >> 2;
    dwordsFitDst = (0x1000 - PAGE_OFFSET(laddrDst)) >> 2;
    pointerDelta = (signed int)  4;
  }

  // Restrict dword count to the number that will fit in either
  // source or dest pages.
  if (count > dwordsFitSrc)
    count = dwordsFitSrc;
  if (count > dwordsFitDst)
    count = dwordsFitDst;
  if (count > bx_pc_system.getNumCpuTicksLeftNextEvent())
    count = bx_pc_system.getNumCpuTicksLeftNextEvent();

  // If after all the restrictions, there is anything left to do...
  if (count) {
    // Transfer data directly using host addresses
    for (unsigned j=0; j<count; j++) {
      CopyHostDWordLittleEndian(hostAddrDst, hostAddrSrc);
      hostAddrDst += pointerDelta;
      hostAddrSrc += pointerDelta;
    }

    return count;
  }

  return 0;
}
Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, bx_address dstOff,
    Bit8u val, Bit32u count)
{
  Bit32u bytesFitDst;
  signed int pointerDelta;
  bx_address laddrDst;
  Bit8u *hostAddrDst;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
  if (!(dstSegPtr->cache.valid & SegAccessWOK))
    return 0;
  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
    return 0;

  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);

  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
  // Check that native host access was not vetoed for that page
  if (!hostAddrDst) return 0;

  // See how many bytes can fit in the rest of this page.
  if (BX_CPU_THIS_PTR get_DF()) {
    // Counting downward.
    bytesFitDst = 1 + PAGE_OFFSET(laddrDst);
    pointerDelta = (signed int) -1;
  }
  else {
    // Counting upward.
    bytesFitDst = 0x1000 - PAGE_OFFSET(laddrDst);
    pointerDelta = (signed int)  1;
  }

  // Restrict byte count to the number that will fit in the dest page.
  if (count > bytesFitDst)
    count = bytesFitDst;
  if (count > bx_pc_system.getNumCpuTicksLeftNextEvent())
    count = bx_pc_system.getNumCpuTicksLeftNextEvent();

  // If after all the restrictions, there is anything left to do...
  if (count) {
    // Transfer data directly using host addresses
    for (unsigned j=0; j<count; j++) {
      * (Bit8u *) hostAddrDst = val;
      hostAddrDst += pointerDelta;
    }

    return count;
  }

  return 0;
}
Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, bx_address dstOff,
    Bit16u val, Bit32u count)
{
  Bit32u wordsFitDst;
  signed int pointerDelta;
  bx_address laddrDst;
  Bit8u *hostAddrDst;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
  if (!(dstSegPtr->cache.valid & SegAccessWOK))
    return 0;
  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
    return 0;

  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);

  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
  // Check that native host access was not vetoed for that page
  if (!hostAddrDst) return 0;

  // See how many words can fit in the rest of this page.
  if (BX_CPU_THIS_PTR get_DF()) {
    // Counting downward.
    // Note: 1st word must not cross page boundary.
    if ((laddrDst & 0xfff) > 0xffe) return 0;
    wordsFitDst = (2 + PAGE_OFFSET(laddrDst)) >> 1;
    pointerDelta = (signed int) -2;
  }
  else {
    // Counting upward.
    wordsFitDst = (0x1000 - PAGE_OFFSET(laddrDst)) >> 1;
    pointerDelta = (signed int)  2;
  }

  // Restrict word count to the number that will fit in the dest page.
  if (count > wordsFitDst)
    count = wordsFitDst;
  if (count > bx_pc_system.getNumCpuTicksLeftNextEvent())
    count = bx_pc_system.getNumCpuTicksLeftNextEvent();

  // If after all the restrictions, there is anything left to do...
  if (count) {
    // Transfer data directly using host addresses
    for (unsigned j=0; j<count; j++) {
      WriteHostWordToLittleEndian(hostAddrDst, val);
      hostAddrDst += pointerDelta;
    }

    return count;
  }

  return 0;
}
Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, bx_address dstOff,
    Bit32u val, Bit32u count)
{
  Bit32u dwordsFitDst;
  signed int pointerDelta;
  bx_address laddrDst;
  Bit8u *hostAddrDst;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
  if (!(dstSegPtr->cache.valid & SegAccessWOK))
    return 0;
  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
    return 0;

  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);

  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
  // Check that native host access was not vetoed for that page
  if (!hostAddrDst) return 0;

  // See how many dwords can fit in the rest of this page.
  if (BX_CPU_THIS_PTR get_DF()) {
    // Counting downward.
    // Note: 1st dword must not cross page boundary.
    if ((laddrDst & 0xfff) > 0xffc) return 0;
    dwordsFitDst = (4 + PAGE_OFFSET(laddrDst)) >> 2;
    pointerDelta = (signed int) -4;
  }
  else {
    // Counting upward.
    dwordsFitDst = (0x1000 - PAGE_OFFSET(laddrDst)) >> 2;
    pointerDelta = (signed int)  4;
  }

  // Restrict dword count to the number that will fit in the dest page.
  if (count > dwordsFitDst)
    count = dwordsFitDst;
  if (count > bx_pc_system.getNumCpuTicksLeftNextEvent())
    count = bx_pc_system.getNumCpuTicksLeftNextEvent();

  // If after all the restrictions, there is anything left to do...
  if (count) {
    // Transfer data directly using host addresses
    for (unsigned j=0; j<count; j++) {
      WriteHostDWordToLittleEndian(hostAddrDst, val);
      hostAddrDst += pointerDelta;
    }

    return count;
  }

  return 0;
}
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_MOVSB_XbYb(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSB64_XbYb);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSB32_XbYb);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSB16_XbYb);
  }
}
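
// Background on the BX_CLEAR_64BIT_HIGH calls (explanatory note, not from the
// original sources): in 64-bit mode an instruction with a 32-bit address size
// updates ESI/EDI, and architecturally any write to a 32-bit register must
// zero the upper half of the corresponding 64-bit register.  Clearing RSI/RDI
// here after the 32-bit worker returns enforces that rule for the whole
// repeated sequence.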
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_MOVSW_XwYw(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSW64_XwYw);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSW32_XwYw);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSW16_XwYw);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_MOVSD_XdYd(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSD64_XdYd);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSD32_XdYd);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSD16_XdYd);
  }
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_MOVSQ_XqYq(bxInstruction_c *i)
{
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSQ64_XqYq);
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::MOVSQ32_XqYq);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
}
#endif
//
// MOVSB/MOVSW/MOVSD/MOVSQ methods
//

// 16 bit address size
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSB16_XbYb(bxInstruction_c *i)
{
  Bit8u temp8 = read_virtual_byte_32(i->seg(), SI);
  write_virtual_byte_32(BX_SEG_REG_ES, DI, temp8);

  if (BX_CPU_THIS_PTR get_DF()) {
    /* decrement SI, DI */
    SI--;
    DI--;
  }
  else {
    /* increment SI, DI */
    SI++;
    DI++;
  }
}
// 32 bit address size
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSB32_XbYb(bxInstruction_c *i)
{
  Bit8u temp8;

  Bit32u incr = 1;

#if (BX_SupportRepeatSpeedups) && (BX_DEBUGGER == 0)
  /* If conditions are right, we can transfer the data to physical memory
   * in a batch, rather than one instruction at a time */
  if (i->repUsedL() && !BX_CPU_THIS_PTR async_event)
  {
    Bit32u byteCount = FastRepMOVSB(i, i->seg(), ESI, BX_SEG_REG_ES, EDI, ECX);
    if (byteCount) {
      // Decrement the ticks count by the number of iterations, minus
      // one, since the main cpu loop will decrement one.  Also,
      // the count is pre-decremented before it is examined, so definitely
      // don't roll it under zero.
      BX_TICKN(byteCount-1);

      // Decrement eCX.  Note, the main loop will decrement 1 also, so
      // decrement by one less than expected, like the case above.
      RCX = ECX - (byteCount-1);

      incr = byteCount;
    }
    else {
      temp8 = read_virtual_byte(i->seg(), ESI);
      write_virtual_byte(BX_SEG_REG_ES, EDI, temp8);
    }
  }
  else
#endif
  {
    temp8 = read_virtual_byte(i->seg(), ESI);
    write_virtual_byte(BX_SEG_REG_ES, EDI, temp8);
  }

  if (BX_CPU_THIS_PTR get_DF()) {
    /* decrement ESI, EDI */
    RSI = ESI - incr;
    RDI = EDI - incr;
  }
  else {
    /* increment ESI, EDI */
    RSI = ESI + incr;
    RDI = EDI + incr;
  }
}
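
// Worked example of the off-by-one accounting above (numbers illustrative):
// if FastRepMOVSB reports byteCount = 100, this single invocation already did
// the work of 100 iterations.  The main cpu loop will still charge one tick
// and decrement RCX once for the current iteration, so only the other 99 are
// accounted for here: BX_TICKN(99) and RCX = ECX - 99.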
#if BX_SUPPORT_X86_64
// 64 bit address size
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSB64_XbYb(bxInstruction_c *i)
{
  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  Bit8u temp8 = read_virtual_byte_64(i->seg(), rsi);
  write_virtual_byte_64(BX_SEG_REG_ES, rdi, temp8);

  if (BX_CPU_THIS_PTR get_DF()) {
    /* decrement RSI, RDI */
    rsi--;
    rdi--;
  }
  else {
    /* increment RSI, RDI */
    rsi++;
    rdi++;
  }

  RSI = rsi;
  RDI = rdi;
}
#endif
/* 16 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSW16_XwYw(bxInstruction_c *i)
{
  Bit16u si = SI;
  Bit16u di = DI;

  Bit16u temp16 = read_virtual_word_32(i->seg(), si);
  write_virtual_word_32(BX_SEG_REG_ES, di, temp16);

  if (BX_CPU_THIS_PTR get_DF()) {
    /* decrement SI, DI */
    si -= 2;
    di -= 2;
  }
  else {
    /* increment SI, DI */
    si += 2;
    di += 2;
  }

  SI = si;
  DI = di;
}
/* 16 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSW32_XwYw(bxInstruction_c *i)
{
  Bit32u esi = ESI;
  Bit32u edi = EDI;

  Bit16u temp16 = read_virtual_word(i->seg(), esi);
  write_virtual_word(BX_SEG_REG_ES, edi, temp16);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 2;
    edi -= 2;
  }
  else {
    esi += 2;
    edi += 2;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
#if BX_SUPPORT_X86_64
/* 16 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSW64_XwYw(bxInstruction_c *i)
{
  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  Bit16u temp16 = read_virtual_word_64(i->seg(), rsi);
  write_virtual_word_64(BX_SEG_REG_ES, rdi, temp16);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 2;
    rdi -= 2;
  }
  else {
    rsi += 2;
    rdi += 2;
  }

  RSI = rsi;
  RDI = rdi;
}
#endif
/* 32 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSD16_XdYd(bxInstruction_c *i)
{
  Bit16u si = SI;
  Bit16u di = DI;

  Bit32u temp32 = read_virtual_dword_32(i->seg(), si);
  write_virtual_dword_32(BX_SEG_REG_ES, di, temp32);

  if (BX_CPU_THIS_PTR get_DF()) {
    si -= 4;
    di -= 4;
  }
  else {
    si += 4;
    di += 4;
  }

  SI = si;
  DI = di;
}
/* 32 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSD32_XdYd(bxInstruction_c *i)
{
  Bit32u temp32;

  Bit32u incr = 4;

  Bit32u esi = ESI;
  Bit32u edi = EDI;

#if (BX_SupportRepeatSpeedups) && (BX_DEBUGGER == 0)
  /* If conditions are right, we can transfer the data to physical memory
   * in a batch, rather than one instruction at a time.
   */
  if (i->repUsedL() && !BX_CPU_THIS_PTR async_event)
  {
    Bit32u dwordCount = FastRepMOVSD(i, i->seg(), esi, BX_SEG_REG_ES, edi, ECX);
    if (dwordCount) {
      // Decrement the ticks count by the number of iterations, minus
      // one, since the main cpu loop will decrement one.  Also,
      // the count is pre-decremented before it is examined, so definitely
      // don't roll it under zero.
      BX_TICKN(dwordCount-1);

      // Decrement eCX.  Note, the main loop will decrement 1 also, so
      // decrement by one less than expected, like the case above.
      RCX = ECX - (dwordCount-1);

      incr = dwordCount << 2; // count * 4
    }
    else {
      temp32 = read_virtual_dword(i->seg(), esi);
      write_virtual_dword(BX_SEG_REG_ES, edi, temp32);
    }
  }
  else
#endif
  {
    temp32 = read_virtual_dword(i->seg(), esi);
    write_virtual_dword(BX_SEG_REG_ES, edi, temp32);
  }

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= incr;
    edi -= incr;
  }
  else {
    esi += incr;
    edi += incr;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
#if BX_SUPPORT_X86_64

/* 32 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSD64_XdYd(bxInstruction_c *i)
{
  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  Bit32u temp32 = read_virtual_dword_64(i->seg(), rsi);
  write_virtual_dword_64(BX_SEG_REG_ES, rdi, temp32);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 4;
    rdi -= 4;
  }
  else {
    rsi += 4;
    rdi += 4;
  }

  RSI = rsi;
  RDI = rdi;
}
/* 64 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSQ32_XqYq(bxInstruction_c *i)
{
  Bit32u esi = ESI;
  Bit32u edi = EDI;

  Bit64u temp64 = read_virtual_qword_64(i->seg(), esi);
  write_virtual_qword_64(BX_SEG_REG_ES, edi, temp64);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 8;
    edi -= 8;
  }
  else {
    esi += 8;
    edi += 8;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
/* 64 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSQ64_XqYq(bxInstruction_c *i)
{
  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  Bit64u temp64 = read_virtual_qword_64(i->seg(), rsi);
  write_virtual_qword_64(BX_SEG_REG_ES, rdi, temp64);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 8;
    rdi -= 8;
  }
  else {
    rsi += 8;
    rdi += 8;
  }

  RSI = rsi;
  RDI = rdi;
}

#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_CMPSB_XbYb(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSB64_XbYb);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSB32_XbYb);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSB16_XbYb);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_CMPSW_XwYw(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSW64_XwYw);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSW32_XwYw);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSW16_XwYw);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_CMPSD_XdYd(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSD64_XdYd);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSD32_XdYd);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSD16_XdYd);
  }
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_CMPSQ_XqYq(bxInstruction_c *i)
{
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSQ64_XqYq);
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::CMPSQ32_XqYq);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI/RDI
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI);
  }
}
#endif
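
// Note on repeat_ZF vs repeat (explanatory): CMPS and SCAS are used with the
// REPE/REPNE prefixes, which terminate not only when the count register
// reaches zero but also depending on ZF after each comparison, so their
// dispatchers go through repeat_ZF(); the plain MOVS/STOS/LODS forms use
// repeat(), which only counts iterations.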
//
// CMPSB/CMPSW/CMPSD/CMPSQ methods
//

/* 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSB16_XbYb(bxInstruction_c *i)
{
  Bit8u op1_8, op2_8, diff_8;

  Bit16u si = SI;
  Bit16u di = DI;

  op1_8 = read_virtual_byte_32(i->seg(), si);
  op2_8 = read_virtual_byte_32(BX_SEG_REG_ES, di);

  diff_8 = op1_8 - op2_8;

  SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8);

  if (BX_CPU_THIS_PTR get_DF()) {
    si--;
    di--;
  }
  else {
    si++;
    di++;
  }

  SI = si;
  DI = di;
}
/* 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSB32_XbYb(bxInstruction_c *i)
{
  Bit8u op1_8, op2_8, diff_8;

  Bit32u esi = ESI;
  Bit32u edi = EDI;

  op1_8 = read_virtual_byte(i->seg(), esi);
  op2_8 = read_virtual_byte(BX_SEG_REG_ES, edi);

  diff_8 = op1_8 - op2_8;

  SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi--;
    edi--;
  }
  else {
    esi++;
    edi++;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
#if BX_SUPPORT_X86_64
/* 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSB64_XbYb(bxInstruction_c *i)
{
  Bit8u op1_8, op2_8, diff_8;

  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  op1_8 = read_virtual_byte_64(i->seg(), rsi);
  op2_8 = read_virtual_byte_64(BX_SEG_REG_ES, rdi);

  diff_8 = op1_8 - op2_8;

  SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi--;
    rdi--;
  }
  else {
    rsi++;
    rdi++;
  }

  RSI = rsi;
  RDI = rdi;
}
#endif
/* 16 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSW16_XwYw(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16, diff_16;

  Bit16u si = SI;
  Bit16u di = DI;

  op1_16 = read_virtual_word_32(i->seg(), si);
  op2_16 = read_virtual_word_32(BX_SEG_REG_ES, di);

  diff_16 = op1_16 - op2_16;

  SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);

  if (BX_CPU_THIS_PTR get_DF()) {
    si -= 2;
    di -= 2;
  }
  else {
    si += 2;
    di += 2;
  }

  SI = si;
  DI = di;
}
/* 16 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSW32_XwYw(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16, diff_16;

  Bit32u esi = ESI;
  Bit32u edi = EDI;

  op1_16 = read_virtual_word(i->seg(), esi);
  op2_16 = read_virtual_word(BX_SEG_REG_ES, edi);

  diff_16 = op1_16 - op2_16;

  SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 2;
    edi -= 2;
  }
  else {
    esi += 2;
    edi += 2;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
#if BX_SUPPORT_X86_64
/* 16 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSW64_XwYw(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16, diff_16;

  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  op1_16 = read_virtual_word_64(i->seg(), rsi);
  op2_16 = read_virtual_word_64(BX_SEG_REG_ES, rdi);

  diff_16 = op1_16 - op2_16;

  SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 2;
    rdi -= 2;
  }
  else {
    rsi += 2;
    rdi += 2;
  }

  RSI = rsi;
  RDI = rdi;
}
#endif
/* 32 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSD16_XdYd(bxInstruction_c *i)
{
  Bit32u op1_32, op2_32, diff_32;

  Bit16u si = SI;
  Bit16u di = DI;

  op1_32 = read_virtual_dword_32(i->seg(), si);
  op2_32 = read_virtual_dword_32(BX_SEG_REG_ES, di);

  diff_32 = op1_32 - op2_32;

  SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);

  if (BX_CPU_THIS_PTR get_DF()) {
    si -= 4;
    di -= 4;
  }
  else {
    si += 4;
    di += 4;
  }

  SI = si;
  DI = di;
}
/* 32 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSD32_XdYd(bxInstruction_c *i)
{
  Bit32u op1_32, op2_32, diff_32;

  Bit32u esi = ESI;
  Bit32u edi = EDI;

  op1_32 = read_virtual_dword(i->seg(), esi);
  op2_32 = read_virtual_dword(BX_SEG_REG_ES, edi);

  diff_32 = op1_32 - op2_32;

  SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 4;
    edi -= 4;
  }
  else {
    esi += 4;
    edi += 4;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
#if BX_SUPPORT_X86_64

/* 32 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSD64_XdYd(bxInstruction_c *i)
{
  Bit32u op1_32, op2_32, diff_32;

  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  op1_32 = read_virtual_dword_64(i->seg(), rsi);
  op2_32 = read_virtual_dword_64(BX_SEG_REG_ES, rdi);

  diff_32 = op1_32 - op2_32;

  SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 4;
    rdi -= 4;
  }
  else {
    rsi += 4;
    rdi += 4;
  }

  RSI = rsi;
  RDI = rdi;
}
/* 64 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSQ32_XqYq(bxInstruction_c *i)
{
  Bit64u op1_64, op2_64, diff_64;

  Bit32u esi = ESI;
  Bit32u edi = EDI;

  op1_64 = read_virtual_qword_64(i->seg(), esi);
  op2_64 = read_virtual_qword_64(BX_SEG_REG_ES, edi);

  diff_64 = op1_64 - op2_64;

  SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 8;
    edi -= 8;
  }
  else {
    esi += 8;
    edi += 8;
  }

  // zero extension of RSI/RDI
  RSI = esi;
  RDI = edi;
}
/* 64 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSQ64_XqYq(bxInstruction_c *i)
{
  Bit64u op1_64, op2_64, diff_64;

  Bit64u rsi = RSI;
  Bit64u rdi = RDI;

  op1_64 = read_virtual_qword_64(i->seg(), rsi);
  op2_64 = read_virtual_qword_64(BX_SEG_REG_ES, rdi);

  diff_64 = op1_64 - op2_64;

  SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 8;
    rdi -= 8;
  }
  else {
    rsi += 8;
    rdi += 8;
  }

  RSI = rsi;
  RDI = rdi;
}

#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_SCASB_ALXb(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASB64_ALXb);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASB32_ALXb);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASB16_ALXb);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_SCASW_AXXw(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASW64_AXXw);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASW32_AXXw);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASW16_AXXw);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_SCASD_EAXXd(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASD64_EAXXd);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASD32_EAXXd);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASD16_EAXXd);
  }
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_SCASQ_RAXXq(bxInstruction_c *i)
{
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASQ64_RAXXq);
  }
  else {
    BX_CPU_THIS_PTR repeat_ZF(i, &BX_CPU_C::SCASQ32_RAXXq);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
}
#endif
//
// SCASB/SCASW/SCASD/SCASQ methods
//

/* 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASB16_ALXb(bxInstruction_c *i)
{
  Bit8u op1_8 = AL, op2_8, diff_8;

  Bit16u di = DI;

  op2_8 = read_virtual_byte_32(BX_SEG_REG_ES, di);

  diff_8 = op1_8 - op2_8;

  SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8);

  if (BX_CPU_THIS_PTR get_DF()) {
    di--;
  }
  else {
    di++;
  }

  DI = di;
}
/* 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASB32_ALXb(bxInstruction_c *i)
{
  Bit8u op1_8 = AL, op2_8, diff_8;

  Bit32u edi = EDI;

  op2_8 = read_virtual_byte(BX_SEG_REG_ES, edi);

  diff_8 = op1_8 - op2_8;

  SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi--;
  }
  else {
    edi++;
  }

  // zero extension of RDI
  RDI = edi;
}
#if BX_SUPPORT_X86_64
/* 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASB64_ALXb(bxInstruction_c *i)
{
  Bit8u op1_8 = AL, op2_8, diff_8;

  Bit64u rdi = RDI;

  op2_8 = read_virtual_byte_64(BX_SEG_REG_ES, rdi);

  diff_8 = op1_8 - op2_8;

  SET_FLAGS_OSZAPC_SUB_8(op1_8, op2_8, diff_8);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi--;
  }
  else {
    rdi++;
  }

  RDI = rdi;
}
#endif
/* 16 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASW16_AXXw(bxInstruction_c *i)
{
  Bit16u op1_16 = AX, op2_16, diff_16;

  Bit16u di = DI;

  op2_16 = read_virtual_word_32(BX_SEG_REG_ES, di);
  diff_16 = op1_16 - op2_16;

  SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);

  if (BX_CPU_THIS_PTR get_DF()) {
    di -= 2;
  }
  else {
    di += 2;
  }

  DI = di;
}
/* 16 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASW32_AXXw(bxInstruction_c *i)
{
  Bit16u op1_16 = AX, op2_16, diff_16;

  Bit32u edi = EDI;

  op2_16 = read_virtual_word(BX_SEG_REG_ES, edi);
  diff_16 = op1_16 - op2_16;

  SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= 2;
  }
  else {
    edi += 2;
  }

  // zero extension of RDI
  RDI = edi;
}
#if BX_SUPPORT_X86_64
/* 16 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASW64_AXXw(bxInstruction_c *i)
{
  Bit16u op1_16 = AX, op2_16, diff_16;

  Bit64u rdi = RDI;

  op2_16 = read_virtual_word_64(BX_SEG_REG_ES, rdi);

  diff_16 = op1_16 - op2_16;

  SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi -= 2;
  }
  else {
    rdi += 2;
  }

  RDI = rdi;
}
#endif
/* 32 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASD16_EAXXd(bxInstruction_c *i)
{
  Bit32u op1_32 = EAX, op2_32, diff_32;

  Bit16u di = DI;

  op2_32 = read_virtual_dword_32(BX_SEG_REG_ES, di);
  diff_32 = op1_32 - op2_32;

  SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);

  if (BX_CPU_THIS_PTR get_DF()) {
    di -= 4;
  }
  else {
    di += 4;
  }

  DI = di;
}
/* 32 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASD32_EAXXd(bxInstruction_c *i)
{
  Bit32u op1_32 = EAX, op2_32, diff_32;

  Bit32u edi = EDI;

  op2_32 = read_virtual_dword(BX_SEG_REG_ES, edi);
  diff_32 = op1_32 - op2_32;

  SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= 4;
  }
  else {
    edi += 4;
  }

  // zero extension of RDI
  RDI = edi;
}
#if BX_SUPPORT_X86_64

/* 32 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASD64_EAXXd(bxInstruction_c *i)
{
  Bit32u op1_32 = EAX, op2_32, diff_32;

  Bit64u rdi = RDI;

  op2_32 = read_virtual_dword_64(BX_SEG_REG_ES, rdi);

  diff_32 = op1_32 - op2_32;

  SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi -= 4;
  }
  else {
    rdi += 4;
  }

  RDI = rdi;
}
/* 64 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASQ32_RAXXq(bxInstruction_c *i)
{
  Bit64u op1_64 = RAX, op2_64, diff_64;

  Bit32u edi = EDI;

  op2_64 = read_virtual_qword_64(BX_SEG_REG_ES, edi);

  diff_64 = op1_64 - op2_64;

  SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= 8;
  }
  else {
    edi += 8;
  }

  // zero extension of RDI
  RDI = edi;
}
/* 64 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASQ64_RAXXq(bxInstruction_c *i)
{
  Bit64u op1_64 = RAX, op2_64, diff_64;

  Bit64u rdi = RDI;

  op2_64 = read_virtual_qword_64(BX_SEG_REG_ES, rdi);

  diff_64 = op1_64 - op2_64;

  SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi -= 8;
  }
  else {
    rdi += 8;
  }

  RDI = rdi;
}

#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_STOSB_YbAL(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSB64_YbAL);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSB32_YbAL);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSB16_YbAL);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_STOSW_YwAX(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSW64_YwAX);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSW32_YwAX);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSW16_YwAX);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_STOSD_YdEAX(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSD64_YdEAX);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSD32_YdEAX);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSD16_YdEAX);
  }
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_STOSQ_YqRAX(bxInstruction_c *i)
{
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSQ64_YqRAX);
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::STOSQ32_YqRAX);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RDI); // always clear upper part of RDI
  }
}
#endif
//
// STOSB/STOSW/STOSD/STOSQ methods
//

// 16 bit address size
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSB16_YbAL(bxInstruction_c *i)
{
  Bit16u di = DI;

  write_virtual_byte_32(BX_SEG_REG_ES, di, AL);

  if (BX_CPU_THIS_PTR get_DF()) {
    di--;
  }
  else {
    di++;
  }

  DI = di;
}
// 32 bit address size
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSB32_YbAL(bxInstruction_c *i)
{
  Bit32u incr = 1;

  Bit32u edi = EDI;

#if (BX_SupportRepeatSpeedups) && (BX_DEBUGGER == 0)
  /* If conditions are right, we can transfer the data to physical memory
   * in a batch, rather than one instruction at a time.
   */
  if (i->repUsedL() && !BX_CPU_THIS_PTR async_event)
  {
    Bit32u byteCount = FastRepSTOSB(i, BX_SEG_REG_ES, edi, AL, ECX);
    if (byteCount) {
      // Decrement the ticks count by the number of iterations, minus
      // one, since the main cpu loop will decrement one.  Also,
      // the count is pre-decremented before it is examined, so definitely
      // don't roll it under zero.
      BX_TICKN(byteCount-1);

      // Decrement eCX.  Note, the main loop will decrement 1 also, so
      // decrement by one less than expected, like the case above.
      RCX = ECX - (byteCount-1);

      incr = byteCount;
    }
    else {
      write_virtual_byte(BX_SEG_REG_ES, edi, AL);
    }
  }
  else
#endif
  {
    write_virtual_byte(BX_SEG_REG_ES, edi, AL);
  }

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= incr;
  }
  else {
    edi += incr;
  }

  // zero extension of RDI
  RDI = edi;
}
#if BX_SUPPORT_X86_64
// 64 bit address size
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSB64_YbAL(bxInstruction_c *i)
{
  Bit64u rdi = RDI;

  write_virtual_byte_64(BX_SEG_REG_ES, rdi, AL);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi--;
  }
  else {
    rdi++;
  }

  RDI = rdi;
}
#endif
/* 16 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSW16_YwAX(bxInstruction_c *i)
{
  Bit16u di = DI;

  write_virtual_word_32(BX_SEG_REG_ES, di, AX);

  if (BX_CPU_THIS_PTR get_DF()) {
    di -= 2;
  }
  else {
    di += 2;
  }

  DI = di;
}
/* 16 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSW32_YwAX(bxInstruction_c *i)
{
  Bit32u edi = EDI;

  write_virtual_word(BX_SEG_REG_ES, edi, AX);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= 2;
  }
  else {
    edi += 2;
  }

  // zero extension of RDI
  RDI = edi;
}
#if BX_SUPPORT_X86_64
/* 16 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSW64_YwAX(bxInstruction_c *i)
{
  Bit64u rdi = RDI;

  write_virtual_word_64(BX_SEG_REG_ES, rdi, AX);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi -= 2;
  }
  else {
    rdi += 2;
  }

  RDI = rdi;
}
#endif
/* 32 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSD16_YdEAX(bxInstruction_c *i)
{
  Bit16u di = DI;

  write_virtual_dword_32(BX_SEG_REG_ES, di, EAX);

  if (BX_CPU_THIS_PTR get_DF()) {
    di -= 4;
  }
  else {
    di += 4;
  }

  DI = di;
}
/* 32 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSD32_YdEAX(bxInstruction_c *i)
{
  Bit32u edi = EDI;

  write_virtual_dword(BX_SEG_REG_ES, edi, EAX);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= 4;
  }
  else {
    edi += 4;
  }

  // zero extension of RDI
  RDI = edi;
}
#if BX_SUPPORT_X86_64

/* 32 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSD64_YdEAX(bxInstruction_c *i)
{
  Bit64u rdi = RDI;

  write_virtual_dword_64(BX_SEG_REG_ES, rdi, EAX);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi -= 4;
  }
  else {
    rdi += 4;
  }

  RDI = rdi;
}
/* 64 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSQ32_YqRAX(bxInstruction_c *i)
{
  Bit32u edi = EDI;

  write_virtual_qword_64(BX_SEG_REG_ES, edi, RAX);

  if (BX_CPU_THIS_PTR get_DF()) {
    edi -= 8;
  }
  else {
    edi += 8;
  }

  // zero extension of RDI
  RDI = edi;
}
/* 64 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSQ64_YqRAX(bxInstruction_c *i)
{
  Bit64u rdi = RDI;

  write_virtual_qword_64(BX_SEG_REG_ES, rdi, RAX);

  if (BX_CPU_THIS_PTR get_DF()) {
    rdi -= 8;
  }
  else {
    rdi += 8;
  }

  RDI = rdi;
}

#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_LODSB_ALXb(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSB64_ALXb);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSB32_ALXb);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSB16_ALXb);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_LODSW_AXXw(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSW64_AXXw);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSW32_AXXw);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSW16_AXXw);
  }
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_LODSD_EAXXd(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSD64_EAXXd);
  }
  else
#endif
  if (i->as32L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSD32_EAXXd);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSD16_EAXXd);
  }
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_LODSQ_RAXXq(bxInstruction_c *i)
{
  if (i->as64L()) {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSQ64_RAXXq);
  }
  else {
    BX_CPU_THIS_PTR repeat(i, &BX_CPU_C::LODSQ32_RAXXq);
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSI); // always clear upper part of RSI
  }
}
#endif
//
// LODSB/LODSW/LODSD/LODSQ methods
//

/* 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSB16_ALXb(bxInstruction_c *i)
{
  Bit16u si = SI;

  AL = read_virtual_byte_32(i->seg(), si);

  if (BX_CPU_THIS_PTR get_DF()) {
    si--;
  }
  else {
    si++;
  }

  SI = si;
}
/* 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSB32_ALXb(bxInstruction_c *i)
{
  Bit32u esi = ESI;

  AL = read_virtual_byte(i->seg(), esi);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi--;
  }
  else {
    esi++;
  }

  // zero extension of RSI
  RSI = esi;
}
#if BX_SUPPORT_X86_64
/* 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSB64_ALXb(bxInstruction_c *i)
{
  Bit64u rsi = RSI;

  AL = read_virtual_byte_64(i->seg(), rsi);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi--;
  }
  else {
    rsi++;
  }

  RSI = rsi;
}
#endif
/* 16 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSW16_AXXw(bxInstruction_c *i)
{
  Bit16u si = SI;

  AX = read_virtual_word_32(i->seg(), si);

  if (BX_CPU_THIS_PTR get_DF()) {
    si -= 2;
  }
  else {
    si += 2;
  }

  SI = si;
}
/* 16 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSW32_AXXw(bxInstruction_c *i)
{
  Bit32u esi = ESI;

  AX = read_virtual_word(i->seg(), esi);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 2;
  }
  else {
    esi += 2;
  }

  // zero extension of RSI
  RSI = esi;
}
#if BX_SUPPORT_X86_64
/* 16 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSW64_AXXw(bxInstruction_c *i)
{
  Bit64u rsi = RSI;

  AX = read_virtual_word_64(i->seg(), rsi);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 2;
  }
  else {
    rsi += 2;
  }

  RSI = rsi;
}
#endif
/* 32 bit opsize mode, 16 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSD16_EAXXd(bxInstruction_c *i)
{
  Bit16u si = SI;

  RAX = read_virtual_dword_32(i->seg(), si);

  if (BX_CPU_THIS_PTR get_DF()) {
    si -= 4;
  }
  else {
    si += 4;
  }

  SI = si;
}
/* 32 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSD32_EAXXd(bxInstruction_c *i)
{
  Bit32u esi = ESI;

  RAX = read_virtual_dword(i->seg(), esi);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 4;
  }
  else {
    esi += 4;
  }

  // zero extension of RSI
  RSI = esi;
}
#if BX_SUPPORT_X86_64

/* 32 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSD64_EAXXd(bxInstruction_c *i)
{
  Bit64u rsi = RSI;

  RAX = read_virtual_dword_64(i->seg(), rsi);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 4;
  }
  else {
    rsi += 4;
  }

  RSI = rsi;
}
/* 64 bit opsize mode, 32 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSQ32_RAXXq(bxInstruction_c *i)
{
  Bit32u esi = ESI;

  RAX = read_virtual_qword_64(i->seg(), esi);

  if (BX_CPU_THIS_PTR get_DF()) {
    esi -= 8;
  }
  else {
    esi += 8;
  }

  // zero extension of RSI
  RSI = esi;
}
/* 64 bit opsize mode, 64 bit address size */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSQ64_RAXXq(bxInstruction_c *i)
{
  Bit64u rsi = RSI;

  RAX = read_virtual_qword_64(i->seg(), rsi);

  if (BX_CPU_THIS_PTR get_DF()) {
    rsi -= 8;
  }
  else {
    rsi += 8;
  }

  RSI = rsi;
}

#endif