/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

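/*
 * A quick illustration of the two cases above:
 *
 *   guest build            MSR[LE]   needs_byteswap()
 *   big-endian target        0             false
 *   big-endian target        1             true
 *   little-endian target     0             true
 *   little-endian target     1             false
 *
 * i.e. a swap is needed exactly when the byte order selected by
 * MSR[LE] differs from the byte order the translated memory
 * accesses assume for this target.
 */
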
/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

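/*
 * For example, in 32-bit mode on a 64-bit CPU (msr_is_64bit() false)
 * effective addresses wrap at 4 GiB:
 *
 *   addr_add(env, 0xfffffffc, 8) == 0x00000004
 *
 * whereas in 64-bit mode the full 64-bit sum is returned unchanged.
 */
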
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

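/*
 * Worked example: "lmw r29, 0(r1)" reaches helper_lmw() with reg == 29
 * and fills r29, r30 and r31 from the words at r1, r1+4 and r1+8;
 * "stmw r29, 0(r1)" does the reverse through helper_stmw().  The
 * byteswap branch only triggers when the access endianness and the
 * endianness assumed by the translated code disagree (see
 * needs_byteswap() above).
 */
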
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

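/*
 * Worked example for the residual-byte loop above: with nb == 3 the
 * remaining bytes b0, b1, b2 are merged into the target GPR as
 *
 *   gpr = (b0 << 24) | (b1 << 16) | (b2 << 8);
 *
 * i.e. the string bytes occupy the most-significant end of the
 * register and the unused low byte stays zero, as lswi/lswx require.
 */
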
void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

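/*
 * Example of the invalid-form check: "lswx r30, rA, rB" with
 * XER[bc] == 9 uses DIV_ROUND_UP(9, 4) == 3 registers, i.e. r30, r31
 * and (wrapping) r0.  If rA (when non-zero) or rB falls inside that
 * range, a program interrupt is raised instead of loading anything.
 */
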
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
            addr = addr_add(env, addr, 1);
        }
    }
}

static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            if (epid) {
#if !defined(CONFIG_USER_ONLY)
                /* Does not make sense on USER_ONLY config */
                cpu_stq_eps_ra(env, addr + i, 0, retaddr);
#endif
            } else {
                cpu_stq_data_ra(env, addr + i, 0, retaddr);
            }
        }
    }
}

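/*
 * Example of the alignment step above: with a 128-byte cache line,
 * mask == ~0x7f, so an effective address of 0x12345 is rounded down
 * to 0x12300 and the whole 128-byte block starting there is zeroed.
 * On the 970, plain dcbz (as opposed to dcbzl) may instead clear a
 * 32-byte block depending on HID5, which is what the check at the
 * top of dcbz_common() implements.
 */
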
void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_epl_ra(env, addr, GETPC());
#endif
}

/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

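/*
 * Rough sketch of the semantics implemented above: up to XER[bc]
 * bytes are loaded from EA into successive registers, four bytes per
 * GPR starting at the most-significant byte (d == 24) and wrapping
 * from r31 to r0; the loop stops early once a byte equal to the
 * XER[cmp] match byte has been stored.  The helper returns the number
 * of bytes actually transferred.
 */
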
#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

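/*
 * Note on the calling convention of the two helpers above: a TCG
 * helper can return only a single 64-bit value, so the low half of
 * the quadword travels back through the normal return value while
 * the high half is parked in env->retxh for the translator to pick
 * up afterwards.
 */
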
void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

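/*
 * The value returned above is the CR0 field for stqcx.: EQ reflects
 * whether the 16-byte compare-and-swap against the reservation
 * succeeded, with SO copied from XER[SO].  A guest retry loop such as
 *
 *   1: lqarx  r6, 0, r3
 *      ...
 *      stqcx. r4, 0, r3
 *      bne-   1b
 *
 * therefore spins until the conditional store wins, just like the
 * word/doubleword stwcx./stdcx. forms.
 */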
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by msr_le.  We also need to
 * take into account endianness of the target.  This is done for the
 * little-endian PPC64 user-mode target.
 */
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

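/*
 * Example of the index arithmetic in LVE above: for lvewx, sh == 2,
 * so an effective address with low nibble 0x8 gives index == 2, i.e.
 * the 32-bit element at byte offset 8 of the 16-byte aligned
 * quadword.  The LO_IDX/HI_IDX and msr_le adjustments then map that
 * architected element number onto the correct slot of the host-endian
 * ppc_avr_t array.  Only that single element is written; the rest of
 * the VR is left alone (the architecture leaves it undefined).
 */
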
#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                \
                   ppc_vsr_t *xt, target_ulong rb)                      \
{                                                                       \
    ppc_vsr_t t;                                                        \
    uint64_t nb = GET_NB(rb);                                           \
    int i;                                                              \
                                                                        \
    t.s128 = int128_zero();                                             \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (msr_le && !lj) {                                            \
            for (i = 16; i > 16 - nb; i--) {                            \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());   \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());       \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    *xt = t;                                                            \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

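/*
 * Usage sketch for the macro above: lxvl takes a byte count from the
 * top byte of rB (GET_NB), capped at 16, loads that many bytes into
 * the left-justified bytes of XT and zeroes the remainder; e.g.
 * nb == 5 fills VsrB(0)..VsrB(4) in big-endian mode.  lxvll (the
 * "left-justified" variant) uses the same byte numbering regardless
 * of MSR[LE], which is why the lj parameter suppresses the msr_le
 * branch.
 */
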
#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                 \
{                                                                  \
    target_ulong nb = GET_NB(rb);                                  \
    int i;                                                         \
                                                                   \
    if (!nb) {                                                     \
        return;                                                    \
    }                                                              \
                                                                   \
    nb = (nb >= 16) ? 16 : nb;                                     \
    if (msr_le && !lj) {                                           \
        for (i = 16; i > 16 - nb; i--) {                           \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC());  \
            addr = addr_add(env, addr, 1);                         \
        }                                                          \
    } else {                                                       \
        for (i = 0; i < nb; i++) {                                 \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());      \
            addr = addr_add(env, addr, 1);                         \
        }                                                          \
    }                                                              \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1010 = transaction failure */
}

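/*
 * Practical effect of the degenerate implementation above: a guest
 * sequence along the lines of
 *
 *   tbegin.
 *   beq-    failure_handler   # CR0[EQ] is set on failure
 *   ...transactional body...
 *   tend.
 *
 * always takes the branch to its failure handler, and TEXASR marks
 * the failure as persistent, so well-behaved software falls back to
 * its non-transactional path (e.g. taking a lock) instead of
 * retrying forever.
 */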