/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif
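
/*
 * Note on the delivery pattern used throughout this file: an s390x
 * interruption is delivered by saving the current (old) PSW and the
 * interruption parameters into the per-CPU lowcore (addressed via the
 * prefix register) and then loading the new PSW from the lowcore slot
 * of the corresponding interruption class.  The do_*_interrupt()
 * helpers below all follow this map-lowcore / store-old-PSW /
 * fetch-new-PSW / load_psw() sequence.
 */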

void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}
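
/*
 * Note: GETPC() must be evaluated in this outermost helper, not in the
 * functions it tail-calls; it captures the host return address that
 * cpu_restore_state() later uses to unwind the TB and resynchronize the
 * guest PSW before the exception is raised.
 */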

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!excp &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        excp = PGM_ADDRESSING;
        tec = 0; /* unused */
    }

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}
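
/*
 * The do_*_interrupt() helpers below perform the actual delivery for one
 * interruption class each.  They assume the corresponding interrupt is
 * pending and enabled; the external, I/O and machine-check variants
 * additionally dequeue their source from the FLIC.
 */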

static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
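
/*
 * The machine-check extended save area receives the 32 vector registers.
 * Its origin is taken from the MCESAD field in the lowcore (see
 * do_mchk_interrupt() below), masked to the 1KiB alignment of the area;
 * if the store fails, the vector-register validity bit is cleared in the
 * MCIC instead.
 */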

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
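
/*
 * Entry point from the TCG main loop when CPU_INTERRUPT_HARD is set:
 * returns true when an interrupt was actually delivered, so that
 * execution restarts at the PSW installed by s390_cpu_do_interrupt().
 */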

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered.  Go back to sleep.  */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint API.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        /* insert the PSW ASC bits into the ATMID */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#endif /* CONFIG_USER_ONLY */