/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "qemu/qemu-print.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/pv.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/tcg.h"
#endif

#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    cpu_inject_clock_comparator((S390CPU *) opaque);
}

void s390x_cpu_timer(void *opaque)
{
    cpu_inject_cpu_timer((S390CPU *) opaque);
}
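
/*
 * Translate a guest virtual address into a guest-physical (real) address for
 * debug accesses (gdbstub/monitor), without raising guest exceptions.
 */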
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t tec;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    /* We want to read the code (e.g., see what we are single-stepping). */
    if (asc != PSW_ASC_HOME) {
        asc = PSW_ASC_PRIMARY;
    }

    /*
     * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
     * of MMU_INST_FETCH.
     */
    if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
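
/*
 * A disabled-wait PSW whose address ends in 0xfff is the "signal quiesce"
 * PSW; loading it is treated as a guest-initiated shutdown request
 * (see s390_handle_wait() below).
 */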
static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    return (psw_addr & 0xfffUL) == 0xfffUL;
}

void s390_handle_wait(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (s390_cpu_halt(cpu) == 0) {
        if (is_special_wait_psw(cpu->env.psw.addr)) {
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        } else {
            cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
            qemu_system_guest_panicked(cpu_get_crash_info(cs));
        }
    }
}
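
/*
 * Install a new PSW. Under TCG this also re-derives the lazily computed
 * condition code, the PER watchpoints and the wait state; KVM handles WAIT
 * states itself.
 */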
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;

    /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
    if (!tcg_enabled()) {
        return;
    }

    env->cc_op = (mask >> 44) & 3;

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    if (mask & PSW_MASK_WAIT) {
        s390_handle_wait(env_archcpu(env));
    }
}
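
/*
 * Return the PSW mask. With TCG the condition code is computed lazily, so
 * fold the current cc value into the CC field of the mask first.
 */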
uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
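
/*
 * Map the CPU's prefix (lowcore) area into host memory so interrupt
 * parameters and old/new PSWs can be read and written directly.
 */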
LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, true);

    if (len < sizeof(LowCore)) {
        cpu_abort(env_cpu(env), "Could not map lowcore\n");
    }

    return lowcore;
}

void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    env->pending_int &= ~INTERRUPT_RESTART;

    load_psw(env, mask, addr);
}
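
/*
 * Map the PER storage-alteration range described by CR10 (start) and CR11
 * (end) onto QEMU memory watchpoints. Called whenever the PER configuration
 * may have changed.
 */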
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First remove them all. */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled. */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts. */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints. */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint. */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

typedef struct SigpSaveArea {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);
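
/*
 * SIGP STORE STATUS AT ADDRESS: write the architected 512-byte status save
 * area for this CPU to guest memory at @addr.
 */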
int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    SigpSaveArea *sa;
    hwaddr len = sizeof(*sa);
    int i;

    /* For PVMs storing will occur when this cpu enters SIE again */
    if (s390_is_pv()) {
        return 0;
    }

    sa = cpu_physical_memory_map(addr, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);

    return 0;
}
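
/*
 * Additional (adtl) status save area: vector registers and, if the area is
 * large enough, the guarded-storage control block.
 */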
typedef struct SigpAdtlSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
    uint64_t    gscb[4];                          /* 0x0400 */
    uint8_t     pad_0x0420[0x1000 - 0x0420];      /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);

#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    SigpAdtlSaveArea *sa;
    hwaddr save = len;
    int i;

    sa = cpu_physical_memory_map(addr, &save, true);
    if (!sa) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(S390_FEAT_VECTOR)) {
        for (i = 0; i < 32; i++) {
            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
        }
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        for (i = 0; i < 4; i++) {
            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
        }
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}
#endif /* CONFIG_USER_ONLY */
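
/*
 * Dump the CPU state (PSW, general, floating-point/vector and control
 * registers) for 'info registers' and debug logging.
 */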
void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                     env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                     env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (flags & CPU_DUMP_FPU) {
        if (s390_has_feat(S390_FEAT_VECTOR)) {
            for (i = 0; i < 32; i++) {
                qemu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c",
                             i, env->vregs[i][0], env->vregs[i][1],
                             i % 2 ? '\n' : ' ');
            }
        } else {
            for (i = 0; i < 16; i++) {
                qemu_fprintf(f, "F%02d=%016" PRIx64 "%c",
                             i, *get_freg(env, i),
                             (i % 4) == 3 ? '\n' : ' ');
            }
        }
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        qemu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                     inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    qemu_fprintf(f, "\n");
}

const char *cc_name(enum cc_op cc_op)
{
    static const char * const cc_names[] = {
        [CC_OP_CONST0]    = "CC_OP_CONST0",
        [CC_OP_CONST1]    = "CC_OP_CONST1",
        [CC_OP_CONST2]    = "CC_OP_CONST2",
        [CC_OP_CONST3]    = "CC_OP_CONST3",
        [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
        [CC_OP_STATIC]    = "CC_OP_STATIC",
        [CC_OP_NZ]        = "CC_OP_NZ",
        [CC_OP_LTGT_32]   = "CC_OP_LTGT_32",
        [CC_OP_LTGT_64]   = "CC_OP_LTGT_64",
        [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
        [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
        [CC_OP_LTGT0_32]  = "CC_OP_LTGT0_32",
        [CC_OP_LTGT0_64]  = "CC_OP_LTGT0_64",
        [CC_OP_ADD_64]    = "CC_OP_ADD_64",
        [CC_OP_ADDU_64]   = "CC_OP_ADDU_64",
        [CC_OP_ADDC_64]   = "CC_OP_ADDC_64",
        [CC_OP_SUB_64]    = "CC_OP_SUB_64",
        [CC_OP_SUBU_64]   = "CC_OP_SUBU_64",
        [CC_OP_SUBB_64]   = "CC_OP_SUBB_64",
        [CC_OP_ABS_64]    = "CC_OP_ABS_64",
        [CC_OP_NABS_64]   = "CC_OP_NABS_64",
        [CC_OP_ADD_32]    = "CC_OP_ADD_32",
        [CC_OP_ADDU_32]   = "CC_OP_ADDU_32",
        [CC_OP_ADDC_32]   = "CC_OP_ADDC_32",
        [CC_OP_SUB_32]    = "CC_OP_SUB_32",
        [CC_OP_SUBU_32]   = "CC_OP_SUBU_32",
        [CC_OP_SUBB_32]   = "CC_OP_SUBB_32",
        [CC_OP_ABS_32]    = "CC_OP_ABS_32",
        [CC_OP_NABS_32]   = "CC_OP_NABS_32",
        [CC_OP_COMP_32]   = "CC_OP_COMP_32",
        [CC_OP_COMP_64]   = "CC_OP_COMP_64",
        [CC_OP_TM_32]     = "CC_OP_TM_32",
        [CC_OP_TM_64]     = "CC_OP_TM_64",
        [CC_OP_NZ_F32]    = "CC_OP_NZ_F32",
        [CC_OP_NZ_F64]    = "CC_OP_NZ_F64",
        [CC_OP_NZ_F128]   = "CC_OP_NZ_F128",
        [CC_OP_ICM]       = "CC_OP_ICM",
        [CC_OP_SLA_32]    = "CC_OP_SLA_32",
        [CC_OP_SLA_64]    = "CC_OP_SLA_64",
        [CC_OP_FLOGR]     = "CC_OP_FLOGR",
        [CC_OP_LCBB]      = "CC_OP_LCBB",
        [CC_OP_VC]        = "CC_OP_VC",
    };

    return cc_names[cc_op];
}