/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif
/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
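
/*
 * Raise a program interrupt from TCG-generated code: resolve the guest
 * state from the host return address, log the event, and queue the
 * program exception. cpu_loop_exit() longjmps back to the main
 * execution loop, so this never returns.
 */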
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
                                              int ilen, uintptr_t ra)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(cs);
}
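
/*
 * Data exceptions report a data-exception code (DXC). It is stored in
 * the data_exc_code field of the lowcore and, when the
 * additional-floating-point facility is enabled in CR0, also in the
 * DXC field of the FPC register.
 */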
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(CPU(s390_env_get_cpu(env))->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
}
void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}
#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem. Since this
       is userland, simply put this someplace that cpu_loop can find it. */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */
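
/* Map a QEMU MMU index to the address-space-control (ASC) mode it
   models, so the translation code can pick the matching address
   space. */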
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
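
/*
 * TLB fill path: translate the virtual (or real) address, check that
 * the resulting absolute address is actually backed by memory, and
 * install the mapping in the QEMU TLB. Returns 0 on success and 1 if
 * a program exception was triggered instead.
 */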
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw,
                                    MEMTXATTRS_UNSPECIFIED)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
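
/*
 * Deliver a program interrupt: store the old PSW and the interruption
 * code in the lowcore and load the program-new PSW from there. For
 * non-nullifying exceptions the PSW is first advanced past the
 * faulting instruction, whose length is decoded from the opcode when
 * the caller passed ILEN_AUTO.
 */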
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
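
/* Deliver a SUPERVISOR CALL interruption via the SVC old/new PSW pair
   in the lowcore. */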
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
#define VIRTIO_SUBCODE_64 0x0D00
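
/*
 * Deliver one pending and enabled external interruption. Each source
 * is gated by its subclass-mask bit in CR0; the order of the checks
 * below determines which source is delivered first when several are
 * pending.
 */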
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
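
/* Deliver one I/O interruption dequeued from the FLIC: the subchannel
   identification and interruption parameters go to the lowcore, then
   the I/O old/new PSW pair is swapped. */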
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask,
            env->psw.addr);
    load_psw(env, mask, addr);
}
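
/*
 * Deliver a channel-report machine-check interruption. Besides the
 * PSW swap, this fills the machine-check save areas in the lowcore
 * with the current register state and stores a validity MCIC.
 */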
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
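
/*
 * Deliver whatever is deliverable, highest priority first: machine
 * check, external, I/O, restart, stop. The try_deliver loop keeps
 * injecting until nothing deliverable remains or a STOP interrupt
 * stopped the CPU.
 */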
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
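
/* cpu-exec hook: returns true if an interrupt was delivered. Nothing
   is injected while in the middle of an EXECUTE instruction, as the
   target insn must complete together with its parent. */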
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
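
/*
 * PER storage-alteration events are modelled with QEMU watchpoints:
 * when one fires, record the PER event in the CPU state and restart
 * the code without watchpoints so that the PER exception is raised by
 * the normal interrupt machinery.
 */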
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint API. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint. For now just assume it is the
           current default ASC. This turns out to be true except when the
           MVCP and MVCS instructions are used. */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code. A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception. */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */