/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*****************************************************************************/
26 #if !defined(CONFIG_USER_ONLY)
27 #include "softmmu_exec.h"
29 #define MMUSUFFIX _mmu
32 #include "softmmu_template.h"
35 #include "softmmu_template.h"
38 #include "softmmu_template.h"
41 #include "softmmu_template.h"
43 /* try to fill the TLB and return an exception if error. If retaddr is
44 NULL, it means that the function was called in C code (i.e. not
45 from generated code or from helper.c) */
46 /* XXX: fix it to restore all registers */
47 void tlb_fill(CPUS390XState
*env
, target_ulong addr
, int is_write
, int mmu_idx
,
53 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
54 if (unlikely(ret
!= 0)) {
55 if (likely(retaddr
)) {
56 /* now we have a real cpu fault */
57 tb
= tb_find_pc(retaddr
);
59 /* the PC is inside the translated code. It means that we have
60 a virtual CPU fault */
61 cpu_restore_state(tb
, env
, retaddr
);
70 /* #define DEBUG_HELPER */
72 #define HELPER_LOG(x...) qemu_log(x)
74 #define HELPER_LOG(x...)
77 #ifndef CONFIG_USER_ONLY
78 static void mvc_fast_memset(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
81 target_phys_addr_t dest_phys
;
82 target_phys_addr_t len
= l
;
84 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
87 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
88 cpu_stb_data(env
, dest
, byte
);
89 cpu_abort(env
, "should never reach here");
91 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
93 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
95 memset(dest_p
, byte
, len
);
97 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
100 static void mvc_fast_memmove(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
103 target_phys_addr_t dest_phys
;
104 target_phys_addr_t src_phys
;
105 target_phys_addr_t len
= l
;
108 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
111 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
112 cpu_stb_data(env
, dest
, 0);
113 cpu_abort(env
, "should never reach here");
115 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
117 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
118 cpu_ldub_data(env
, src
);
119 cpu_abort(env
, "should never reach here");
121 src_phys
|= src
& ~TARGET_PAGE_MASK
;
123 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
124 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
126 memmove(dest_p
, src_p
, len
);
128 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
129 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
134 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
141 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
142 __func__
, l
, dest
, src
);
143 for (i
= 0; i
<= l
; i
++) {
144 x
= cpu_ldub_data(env
, dest
+ i
) & cpu_ldub_data(env
, src
+ i
);
148 cpu_stb_data(env
, dest
+ i
, x
);
154 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
161 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
162 __func__
, l
, dest
, src
);
164 #ifndef CONFIG_USER_ONLY
165 /* xor with itself is the same as memset(0) */
166 if ((l
> 32) && (src
== dest
) &&
167 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
168 mvc_fast_memset(env
, l
+ 1, dest
, 0);
173 memset(g2h(dest
), 0, l
+ 1);
178 for (i
= 0; i
<= l
; i
++) {
179 x
= cpu_ldub_data(env
, dest
+ i
) ^ cpu_ldub_data(env
, src
+ i
);
183 cpu_stb_data(env
, dest
+ i
, x
);
189 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
196 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
197 __func__
, l
, dest
, src
);
198 for (i
= 0; i
<= l
; i
++) {
199 x
= cpu_ldub_data(env
, dest
+ i
) | cpu_ldub_data(env
, src
+ i
);
203 cpu_stb_data(env
, dest
+ i
, x
);
209 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
213 uint32_t l_64
= (l
+ 1) / 8;
215 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
216 __func__
, l
, dest
, src
);
218 #ifndef CONFIG_USER_ONLY
220 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
221 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
222 if (dest
== (src
+ 1)) {
223 mvc_fast_memset(env
, l
+ 1, dest
, cpu_ldub_data(env
, src
));
225 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
226 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
231 if (dest
== (src
+ 1)) {
232 memset(g2h(dest
), cpu_ldub_data(env
, src
), l
+ 1);
235 memmove(g2h(dest
), g2h(src
), l
+ 1);
240 /* handle the parts that fit into 8-byte loads/stores */
241 if (dest
!= (src
+ 1)) {
242 for (i
= 0; i
< l_64
; i
++) {
243 cpu_stq_data(env
, dest
+ x
, cpu_ldq_data(env
, src
+ x
));
248 /* slow version crossing pages with byte accesses */
249 for (i
= x
; i
<= l
; i
++) {
250 cpu_stb_data(env
, dest
+ i
, cpu_ldub_data(env
, src
+ i
));
254 /* compare unsigned byte arrays */
255 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
261 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
262 __func__
, l
, s1
, s2
);
263 for (i
= 0; i
<= l
; i
++) {
264 x
= cpu_ldub_data(env
, s1
+ i
);
265 y
= cpu_ldub_data(env
, s2
+ i
);
266 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
281 /* compare logical under mask */
282 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
288 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
293 d
= cpu_ldub_data(env
, addr
);
294 r
= (r1
& 0xff000000UL
) >> 24;
295 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
306 mask
= (mask
<< 1) & 0xf;
313 /* store character under mask */
314 void HELPER(stcm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
319 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __func__
, r1
, mask
,
323 r
= (r1
& 0xff000000UL
) >> 24;
324 cpu_stb_data(env
, addr
, r
);
325 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask
, r
, addr
);
328 mask
= (mask
<< 1) & 0xf;
334 static inline uint64_t get_address(CPUS390XState
*env
, int x2
, int b2
, int d2
)
347 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
354 static inline uint64_t get_address_31fix(CPUS390XState
*env
, int reg
)
356 uint64_t r
= env
->regs
[reg
];
359 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
366 /* search string (c is byte to search, r2 is string, r1 end of string) */
367 uint32_t HELPER(srst
)(CPUS390XState
*env
, uint32_t c
, uint32_t r1
, uint32_t r2
)
371 uint64_t str
= get_address_31fix(env
, r2
);
372 uint64_t end
= get_address_31fix(env
, r1
);
374 HELPER_LOG("%s: c %d *r1 0x%" PRIx64
" *r2 0x%" PRIx64
"\n", __func__
,
375 c
, env
->regs
[r1
], env
->regs
[r2
]);
377 for (i
= str
; i
!= end
; i
++) {
378 if (cpu_ldub_data(env
, i
) == c
) {
388 /* unsigned string compare (c is string terminator) */
389 uint32_t HELPER(clst
)(CPUS390XState
*env
, uint32_t c
, uint32_t r1
, uint32_t r2
)
391 uint64_t s1
= get_address_31fix(env
, r1
);
392 uint64_t s2
= get_address_31fix(env
, r2
);
397 #ifdef CONFIG_USER_ONLY
399 HELPER_LOG("%s: comparing '%s' and '%s'\n",
400 __func__
, (char *)g2h(s1
), (char *)g2h(s2
));
404 v1
= cpu_ldub_data(env
, s1
);
405 v2
= cpu_ldub_data(env
, s2
);
406 if ((v1
== c
|| v2
== c
) || (v1
!= v2
)) {
416 cc
= (v1
< v2
) ? 1 : 2;
417 /* FIXME: 31-bit mode! */
425 void HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
427 /* XXX missing r0 handling */
428 #ifdef CONFIG_USER_ONLY
431 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
++) {
432 cpu_stb_data(env
, r1
+ i
, cpu_ldub_data(env
, r2
+ i
));
435 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
439 /* string copy (c is string terminator) */
440 void HELPER(mvst
)(CPUS390XState
*env
, uint32_t c
, uint32_t r1
, uint32_t r2
)
442 uint64_t dest
= get_address_31fix(env
, r1
);
443 uint64_t src
= get_address_31fix(env
, r2
);
447 #ifdef CONFIG_USER_ONLY
449 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __func__
, (char *)g2h(src
),
454 v
= cpu_ldub_data(env
, src
);
455 cpu_stb_data(env
, dest
, v
);
462 env
->regs
[r1
] = dest
; /* FIXME: 31-bit mode! */
465 /* compare and swap 64-bit */
466 uint32_t HELPER(csg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
468 /* FIXME: locking? */
470 uint64_t v2
= cpu_ldq_data(env
, a2
);
472 if (env
->regs
[r1
] == v2
) {
474 cpu_stq_data(env
, a2
, env
->regs
[r3
]);
482 /* compare double and swap 64-bit */
483 uint32_t HELPER(cdsg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
485 /* FIXME: locking? */
487 uint64_t v2_hi
= cpu_ldq_data(env
, a2
);
488 uint64_t v2_lo
= cpu_ldq_data(env
, a2
+ 8);
489 uint64_t v1_hi
= env
->regs
[r1
];
490 uint64_t v1_lo
= env
->regs
[r1
+ 1];
492 if ((v1_hi
== v2_hi
) && (v1_lo
== v2_lo
)) {
494 cpu_stq_data(env
, a2
, env
->regs
[r3
]);
495 cpu_stq_data(env
, a2
+ 8, env
->regs
[r3
+ 1]);
498 env
->regs
[r1
] = v2_hi
;
499 env
->regs
[r1
+ 1] = v2_lo
;
505 /* compare and swap 32-bit */
506 uint32_t HELPER(cs
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
508 /* FIXME: locking? */
510 uint32_t v2
= cpu_ldl_data(env
, a2
);
512 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __func__
, r1
, a2
, r3
);
513 if (((uint32_t)env
->regs
[r1
]) == v2
) {
515 cpu_stl_data(env
, a2
, (uint32_t)env
->regs
[r3
]);
518 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | v2
;
523 static uint32_t helper_icm(CPUS390XState
*env
, uint32_t r1
, uint64_t address
,
526 int pos
= 24; /* top of the lower half of r1 */
527 uint64_t rmask
= 0xff000000ULL
;
534 env
->regs
[r1
] &= ~rmask
;
535 val
= cpu_ldub_data(env
, address
);
536 if ((val
& 0x80) && !ccd
) {
540 if (val
&& cc
== 0) {
543 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
546 mask
= (mask
<< 1) & 0xf;
554 /* execute instruction
555 this instruction executes an insn modified with the contents of r1
556 it does not change the executed instruction in memory
557 it does not change the program counter
558 in other words: tricky...
559 currently implemented by interpreting the cases it is most commonly used in
561 uint32_t HELPER(ex
)(CPUS390XState
*env
, uint32_t cc
, uint64_t v1
,
562 uint64_t addr
, uint64_t ret
)
564 uint16_t insn
= cpu_lduw_code(env
, addr
);
566 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__
, v1
, addr
,
568 if ((insn
& 0xf0ff) == 0xd000) {
569 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
572 insn2
= cpu_ldl_code(env
, addr
+ 2);
573 b1
= (insn2
>> 28) & 0xf;
574 b2
= (insn2
>> 12) & 0xf;
575 d1
= (insn2
>> 16) & 0xfff;
577 switch (insn
& 0xf00) {
579 helper_mvc(env
, l
, get_address(env
, 0, b1
, d1
),
580 get_address(env
, 0, b2
, d2
));
583 cc
= helper_clc(env
, l
, get_address(env
, 0, b1
, d1
),
584 get_address(env
, 0, b2
, d2
));
587 cc
= helper_xc(env
, l
, get_address(env
, 0, b1
, d1
),
588 get_address(env
, 0, b2
, d2
));
591 helper_tr(env
, l
, get_address(env
, 0, b1
, d1
),
592 get_address(env
, 0, b2
, d2
));
598 } else if ((insn
& 0xff00) == 0x0a00) {
599 /* supervisor call */
600 HELPER_LOG("%s: svc %ld via execute\n", __func__
, (insn
| v1
) & 0xff);
601 env
->psw
.addr
= ret
- 4;
602 env
->int_svc_code
= (insn
| v1
) & 0xff;
603 env
->int_svc_ilc
= 4;
604 helper_exception(env
, EXCP_SVC
);
605 } else if ((insn
& 0xff00) == 0xbf00) {
606 uint32_t insn2
, r1
, r3
, b2
, d2
;
608 insn2
= cpu_ldl_code(env
, addr
+ 2);
609 r1
= (insn2
>> 20) & 0xf;
610 r3
= (insn2
>> 16) & 0xf;
611 b2
= (insn2
>> 12) & 0xf;
613 cc
= helper_icm(env
, r1
, get_address(env
, 0, b2
, d2
), r3
);
616 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
622 /* store character under mask high operates on the upper half of r1 */
623 void HELPER(stcmh
)(CPUS390XState
*env
, uint32_t r1
, uint64_t address
,
626 int pos
= 56; /* top of the upper half of r1 */
630 cpu_stb_data(env
, address
, (env
->regs
[r1
] >> pos
) & 0xff);
633 mask
= (mask
<< 1) & 0xf;
638 /* insert character under mask high; same as icm, but operates on the
640 uint32_t HELPER(icmh
)(CPUS390XState
*env
, uint32_t r1
, uint64_t address
,
643 int pos
= 56; /* top of the upper half of r1 */
644 uint64_t rmask
= 0xff00000000000000ULL
;
651 env
->regs
[r1
] &= ~rmask
;
652 val
= cpu_ldub_data(env
, address
);
653 if ((val
& 0x80) && !ccd
) {
657 if (val
&& cc
== 0) {
660 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
663 mask
= (mask
<< 1) & 0xf;
671 /* load access registers r1 to r3 from memory at a2 */
672 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
676 for (i
= r1
;; i
= (i
+ 1) % 16) {
677 env
->aregs
[i
] = cpu_ldl_data(env
, a2
);
686 /* store access registers r1 to r3 in memory at a2 */
687 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
691 for (i
= r1
;; i
= (i
+ 1) % 16) {
692 cpu_stl_data(env
, a2
, env
->aregs
[i
]);
702 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
704 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
705 uint64_t dest
= get_address_31fix(env
, r1
);
706 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
707 uint64_t src
= get_address_31fix(env
, r2
);
708 uint8_t pad
= src
>> 24;
712 if (destlen
== srclen
) {
714 } else if (destlen
< srclen
) {
720 if (srclen
> destlen
) {
724 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
725 v
= cpu_ldub_data(env
, src
);
726 cpu_stb_data(env
, dest
, v
);
729 for (; destlen
; dest
++, destlen
--) {
730 cpu_stb_data(env
, dest
, pad
);
733 env
->regs
[r1
+ 1] = destlen
;
734 /* can't use srclen here, we trunc'ed it */
735 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
736 env
->regs
[r1
] = dest
;
742 /* move long extended another memcopy insn with more bells and whistles */
743 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
746 uint64_t destlen
= env
->regs
[r1
+ 1];
747 uint64_t dest
= env
->regs
[r1
];
748 uint64_t srclen
= env
->regs
[r3
+ 1];
749 uint64_t src
= env
->regs
[r3
];
750 uint8_t pad
= a2
& 0xff;
754 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
755 destlen
= (uint32_t)destlen
;
756 srclen
= (uint32_t)srclen
;
761 if (destlen
== srclen
) {
763 } else if (destlen
< srclen
) {
769 if (srclen
> destlen
) {
773 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
774 v
= cpu_ldub_data(env
, src
);
775 cpu_stb_data(env
, dest
, v
);
778 for (; destlen
; dest
++, destlen
--) {
779 cpu_stb_data(env
, dest
, pad
);
782 env
->regs
[r1
+ 1] = destlen
;
783 /* can't use srclen here, we trunc'ed it */
784 /* FIXME: 31-bit mode! */
785 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
786 env
->regs
[r1
] = dest
;
792 /* compare logical long extended memcompare insn with padding */
793 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
796 uint64_t destlen
= env
->regs
[r1
+ 1];
797 uint64_t dest
= get_address_31fix(env
, r1
);
798 uint64_t srclen
= env
->regs
[r3
+ 1];
799 uint64_t src
= get_address_31fix(env
, r3
);
800 uint8_t pad
= a2
& 0xff;
801 uint8_t v1
= 0, v2
= 0;
804 if (!(destlen
|| srclen
)) {
808 if (srclen
> destlen
) {
812 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
813 v1
= srclen
? cpu_ldub_data(env
, src
) : pad
;
814 v2
= destlen
? cpu_ldub_data(env
, dest
) : pad
;
816 cc
= (v1
< v2
) ? 1 : 2;
821 env
->regs
[r1
+ 1] = destlen
;
822 /* can't use srclen here, we trunc'ed it */
823 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
824 env
->regs
[r1
] = dest
;
831 void HELPER(cksm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
833 uint64_t src
= get_address_31fix(env
, r2
);
834 uint64_t src_len
= env
->regs
[(r2
+ 1) & 15];
835 uint64_t cksm
= (uint32_t)env
->regs
[r1
];
837 while (src_len
>= 4) {
838 cksm
+= cpu_ldl_data(env
, src
);
840 /* move to next word */
849 cksm
+= cpu_ldub_data(env
, src
) << 24;
852 cksm
+= cpu_lduw_data(env
, src
) << 16;
855 cksm
+= cpu_lduw_data(env
, src
) << 16;
856 cksm
+= cpu_ldub_data(env
, src
+ 2) << 8;
860 /* indicate we've processed everything */
861 env
->regs
[r2
] = src
+ src_len
;
862 env
->regs
[(r2
+ 1) & 15] = 0;
865 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
866 ((uint32_t)cksm
+ (cksm
>> 32));
869 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
872 int len_dest
= len
>> 4;
873 int len_src
= len
& 0xf;
875 int second_nibble
= 0;
880 /* last byte is special, it only flips the nibbles */
881 b
= cpu_ldub_data(env
, src
);
882 cpu_stb_data(env
, dest
, (b
<< 4) | (b
>> 4));
886 /* now pad every nibble with 0xf0 */
888 while (len_dest
> 0) {
889 uint8_t cur_byte
= 0;
892 cur_byte
= cpu_ldub_data(env
, src
);
898 /* only advance one nibble at a time */
904 second_nibble
= !second_nibble
;
907 cur_byte
= (cur_byte
& 0xf);
911 cpu_stb_data(env
, dest
, cur_byte
);
915 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
920 for (i
= 0; i
<= len
; i
++) {
921 uint8_t byte
= cpu_ldub_data(env
, array
+ i
);
922 uint8_t new_byte
= cpu_ldub_data(env
, trans
+ byte
);
924 cpu_stb_data(env
, array
+ i
, new_byte
);
928 #if !defined(CONFIG_USER_ONLY)
929 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
934 for (i
= r1
;; i
= (i
+ 1) % 16) {
935 env
->cregs
[i
] = cpu_ldq_data(env
, src
);
936 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
937 i
, src
, env
->cregs
[i
]);
938 src
+= sizeof(uint64_t);
948 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
953 for (i
= r1
;; i
= (i
+ 1) % 16) {
954 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) |
955 cpu_ldl_data(env
, src
);
956 src
+= sizeof(uint32_t);
966 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
971 for (i
= r1
;; i
= (i
+ 1) % 16) {
972 cpu_stq_data(env
, dest
, env
->cregs
[i
]);
973 dest
+= sizeof(uint64_t);
981 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
986 for (i
= r1
;; i
= (i
+ 1) % 16) {
987 cpu_stl_data(env
, dest
, env
->cregs
[i
]);
988 dest
+= sizeof(uint32_t);
996 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
1003 /* insert storage key extended */
1004 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
1006 uint64_t addr
= get_address(env
, 0, 0, r2
);
1008 if (addr
> ram_size
) {
1012 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
1015 /* set storage key extended */
1016 void HELPER(sske
)(CPUS390XState
*env
, uint32_t r1
, uint64_t r2
)
1018 uint64_t addr
= get_address(env
, 0, 0, r2
);
1020 if (addr
> ram_size
) {
1024 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
1027 /* reset reference bit extended */
1028 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint32_t r1
, uint64_t r2
)
1033 if (r2
> ram_size
) {
1037 key
= env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
];
1038 re
= key
& (SK_R
| SK_C
);
1039 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] = (key
& ~SK_R
);
1044 * 0 Reference bit zero; change bit zero
1045 * 1 Reference bit zero; change bit one
1046 * 2 Reference bit one; change bit zero
1047 * 3 Reference bit one; change bit one
1053 /* compare and swap and purge */
1054 uint32_t HELPER(csp
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
1057 uint32_t o1
= env
->regs
[r1
];
1058 uint64_t a2
= get_address_31fix(env
, r2
) & ~3ULL;
1059 uint32_t o2
= cpu_ldl_data(env
, a2
);
1062 cpu_stl_data(env
, a2
, env
->regs
[(r1
+ 1) & 15]);
1063 if (env
->regs
[r2
] & 0x3) {
1064 /* flush TLB / ALB */
1069 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
1076 static uint32_t mvc_asc(CPUS390XState
*env
, int64_t l
, uint64_t a1
,
1077 uint64_t mode1
, uint64_t a2
, uint64_t mode2
)
1079 target_ulong src
, dest
;
1080 int flags
, cc
= 0, i
;
1084 } else if (l
> 256) {
1090 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
1093 dest
|= a1
& ~TARGET_PAGE_MASK
;
1095 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
1098 src
|= a2
& ~TARGET_PAGE_MASK
;
1100 /* XXX replace w/ memcpy */
1101 for (i
= 0; i
< l
; i
++) {
1102 /* XXX be more clever */
1103 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
1104 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
1105 mvc_asc(env
, l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
1108 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
1114 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1116 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1117 __func__
, l
, a1
, a2
);
1119 return mvc_asc(env
, l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
1122 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1124 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1125 __func__
, l
, a1
, a2
);
1127 return mvc_asc(env
, l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
1130 /* invalidate pte */
1131 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pte_addr
, uint64_t vaddr
)
1133 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1136 /* XXX broadcast to other CPUs */
1138 /* XXX Linux is nice enough to give us the exact pte address.
1139 According to spec we'd have to find it out ourselves */
1140 /* XXX Linux is fine with overwriting the pte, the spec requires
1141 us to only set the invalid bit */
1142 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
1144 /* XXX we exploit the fact that Linux passes the exact virtual
1145 address here - it's not obliged to! */
1146 tlb_flush_page(env
, page
);
1148 /* XXX 31-bit hack */
1149 if (page
& 0x80000000) {
1150 tlb_flush_page(env
, page
& ~0x80000000);
1152 tlb_flush_page(env
, page
| 0x80000000);
1156 /* flush local tlb */
1157 void HELPER(ptlb
)(CPUS390XState
*env
)
1162 /* store using real address */
1163 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint32_t v1
)
1165 stw_phys(get_address(env
, 0, 0, addr
), v1
);
1168 /* load real address */
1169 uint32_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
, uint32_t r1
)
1172 int old_exc
= env
->exception_index
;
1173 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
1177 /* XXX incomplete - has more corner cases */
1178 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
1179 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
1182 env
->exception_index
= old_exc
;
1183 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
1186 if (env
->exception_index
== EXCP_PGM
) {
1187 ret
= env
->int_pgm_code
| 0x80000000;
1189 ret
|= addr
& ~TARGET_PAGE_MASK
;
1191 env
->exception_index
= old_exc
;
1193 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
1194 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1195 (ret
& 0xffffffffULL
);
1197 env
->regs
[r1
] = ret
;