/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
#include "qemu-timer.h"
#if !defined(CONFIG_USER_ONLY)
#include <linux/kvm.h>
#endif

//#define DEBUG_S390
//#define DEBUG_S390_PTE
//#define DEBUG_S390_STDOUT
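/*
 * Debug helpers: DPRINTF() is the general trace macro; PTE_DPRINTF() is
 * used for the (very noisy) page table walk traces.  Both compile to
 * no-ops unless the corresponding DEBUG_S390* switches above are enabled.
 */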
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifndef CONFIG_USER_ONLY
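/*
 * Timer callbacks: each one only marks the corresponding interrupt as
 * pending and kicks the CPU; the interrupt itself is delivered later
 * from do_interrupt().
 */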
static void s390x_tod_timer(void *opaque)
{
    CPUState *env = opaque;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
static void s390x_cpu_timer(void *opaque)
{
    CPUState *env = opaque;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif
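/*
 * CPU setup.  The TOD offset converts host wall-clock time to the s390
 * TOD clock, whose epoch is 1900-01-01; TOD_UNIX_EPOCH is the distance
 * between that and the Unix epoch.
 */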
CPUS390XState *cpu_s390x_init(const char *cpu_model)
{
    CPUS390XState *env;
#if !defined(CONFIG_USER_ONLY)
    struct tm tm;
#endif
    static int inited = 0;
    static int cpu_num = 0;

    env = qemu_mallocz(sizeof(CPUS390XState));
    cpu_exec_init(env);
    if (!inited) {
        inited = 1;
        s390x_translate_init();
    }

#if !defined(CONFIG_USER_ONLY)
    qemu_get_timedate(&tm, 0);
    env->tod_offset = TOD_UNIX_EPOCH +
                      (time2tod(mktimegm(&tm)) * 1000000000ULL);
    env->tod_basetime = 0;
    env->tod_timer = qemu_new_timer_ns(vm_clock, s390x_tod_timer, env);
    env->cpu_timer = qemu_new_timer_ns(vm_clock, s390x_cpu_timer, env);
#endif
    env->cpu_model_str = cpu_model;
    env->cpu_num = cpu_num++;
    env->ext_index = -1;

    cpu_reset(env);
    qemu_init_vcpu(env);
    return env;
}
#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int mmu_idx, int is_softmmu)
{
    /* fprintf(stderr, "%s: address 0x%lx rw %d mmu_idx %d is_softmmu %d\n",
               __FUNCTION__, address, rw, mmu_idx, is_softmmu); */
    env->exception_index = EXCP_ADDR;
    /* FIXME: find out how this works on a real machine */
    env->__excp_addr = address;
    return 1;
}

#endif /* CONFIG_USER_ONLY */
void cpu_reset(CPUS390XState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    memset(env, 0, offsetof(CPUS390XState, breakpoints));
    /* FIXME: reset vector? */
    tlb_flush(env, 1);
}
#ifndef CONFIG_USER_ONLY
/* Ensure to exit the TB after this call! */
static void trigger_pgm_exception(CPUState *env, uint32_t code, uint32_t ilc)
{
    env->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilc = ilc;
}
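/*
 * Map the PSW address-space-control mode to the bits that are stored in
 * the lowcore translation-exception code alongside the failing address.
 */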
static int trans_bits(CPUState *env, uint64_t mode)
{
    int bits = 0;

    switch (mode) {
    case PSW_ASC_PRIMARY:
        bits = 1;
        break;
    case PSW_ASC_SECONDARY:
        bits = 2;
        break;
    case PSW_ASC_HOME:
        bits = 3;
        break;
    default:
        cpu_abort(env, "unknown asc mode\n");
        break;
    }

    return bits;
}
static void trigger_prot_fault(CPUState *env, target_ulong vaddr, uint64_t mode)
{
    int ilc = ILC_LATER_INC_2;
    int bits = trans_bits(env, mode) | 4;

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, PGM_PROTECTION, ilc);
}
static void trigger_page_fault(CPUState *env, target_ulong vaddr, uint32_t type,
                               uint64_t asc, int rw)
{
    int ilc = ILC_LATER;
    int bits = trans_bits(env, asc);

    if (rw == 2) {
        /* code accesses have an undefined ilc */
        ilc = 2;
    }

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, type, ilc);
}
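/*
 * One step of the DAT table walk: check the table entry "asce" for the
 * current level, fault if it is invalid, otherwise fetch the next-level
 * entry and recurse until the segment level yields the page table entry.
 */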
static int mmu_translate_asce(CPUState *env, target_ulong vaddr, uint64_t asc,
                              uint64_t asce, int level, target_ulong *raddr,
                              int *flags, int rw)
{
    uint64_t offs = 0;
    uint64_t origin;
    uint64_t new_asce;

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __FUNCTION__, asce);

    if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
        ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
        /* XXX different regions have different faults */
        DPRINTF("%s: invalid region\n", __FUNCTION__);
        trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
        return -1;
    }

    if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
        return -1;
    }

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */
        *raddr = vaddr;
        return 0;
    }

    origin = asce & _ASCE_ORIGIN;

    switch (level) {
    case _ASCE_TYPE_REGION1 + 4:
        offs = (vaddr >> 50) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION1:
        offs = (vaddr >> 39) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION2:
        offs = (vaddr >> 28) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION3:
        offs = (vaddr >> 17) & 0x3ff8;
        break;
    case _ASCE_TYPE_SEGMENT:
        offs = (vaddr >> 9) & 0x07f8;
        origin = asce & _SEGMENT_ENTRY_ORIGIN;
        break;
    }

    /* XXX region protection flags */
    /* *flags &= ~PAGE_WRITE */

    new_asce = ldq_phys(origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __FUNCTION__, origin, offs, new_asce);

    if (level != _ASCE_TYPE_SEGMENT) {
        /* yet another region */
        return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
                                  flags, rw);
    }

    /* PTE */
    if (new_asce & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __FUNCTION__, new_asce);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
        return -1;
    }

    if (new_asce & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = new_asce & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __FUNCTION__, new_asce);

    return 0;
}
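/*
 * Select the address-space-control element (ASCE) for the current mode
 * from the control registers, verify that the address fits within the
 * reach of the top-level table, then start the table walk one faked
 * level above the one encoded in the ASCE.
 */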
static int mmu_translate_asc(CPUState *env, target_ulong vaddr, uint64_t asc,
                             target_ulong *raddr, int *flags, int rw)
{
    uint64_t asce = 0;
    int level, new_level;
    int r;

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __FUNCTION__);
        asce = env->cregs[1];
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __FUNCTION__);
        asce = env->cregs[7];
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __FUNCTION__);
        asce = env->cregs[13];
        break;
    }

    switch (asce & _ASCE_TYPE_MASK) {
    case _ASCE_TYPE_REGION1:
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffe0000000000000ULL\n", __FUNCTION__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xfffffc0000000000ULL\n", __FUNCTION__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffffffff80000000ULL\n", __FUNCTION__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    }

    /* fake level above current */
    level = asce & _ASCE_TYPE_MASK;
    new_level = level + 4;
    asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);

    r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);

    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc);
        return -1;
    }

    return r;
}
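/*
 * Top-level translation: with DAT disabled the address is used as-is;
 * in secondary mode instruction fetches go through the primary space
 * while data accesses go through the secondary space.  Real addresses
 * below 0x2000 are finally turned into absolute addresses by applying
 * the prefix (env->psa).
 */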
int mmu_translate(CPUState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags)
{
    int r = -1;

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
    case PSW_ASC_HOME:
        r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
        break;
    case PSW_ASC_SECONDARY:
        /*
         * Instruction: Primary
         * Data:        Secondary
         */
        if (rw == 2) {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

out:
    /* Convert real address -> absolute address */
    if (*raddr < 0x2000) {
        *raddr = *raddr + env->psa;
    }

    return r;
}
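/*
 * softmmu fault handler: translate the address, check that the result
 * lies within guest RAM, and install the mapping in the QEMU TLB.
 */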
int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong _vaddr, int rw,
                                int mmu_idx, int is_softmmu)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d is_softmmu %d\n",
            __FUNCTION__, _vaddr, rw, mmu_idx, is_softmmu);

    _vaddr &= TARGET_PAGE_MASK;
    vaddr = _vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __FUNCTION__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILC_LATER);
        return 1;
    }

    DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __FUNCTION__,
            (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(env, _vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong vaddr)
{
    target_ulong raddr;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
    env->exception_index = old_exc;

    return raddr;
}
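/*
 * Load a new PSW.  A set wait bit halts the CPU; if all interrupt
 * classes are masked as well, the CPU can never wake up again.  The
 * condition code is extracted from the mask into env->cc_op.
 */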
void load_psw(CPUState *env, uint64_t mask, uint64_t addr)
{
    if (mask & PSW_MASK_WAIT) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) {
            /* XXX disabled wait state - CPU is dead */
        }
    }

    env->psw.addr = addr;
    env->psw.mask = mask;
    env->cc_op = (mask >> 13) & 3;
}
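/*
 * The condition code is evaluated lazily from cc_op/cc_src/cc_dst/cc_vr;
 * fold the current value into the PSW mask whenever the full PSW is read.
 */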
static uint64_t get_psw_mask(CPUState *env)
{
    uint64_t r = env->psw.mask;

    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);

    r &= ~(3ULL << 13);
    assert(!(env->cc_op & ~3));
    r |= env->cc_op << 13;

    return r;
}
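/*
 * Interrupt delivery (SVC, program, external) follows the same pattern:
 * map the lowcore, save the current PSW and interrupt parameters into
 * the old-PSW slots, fetch the new PSW, unmap, and load the new PSW.
 */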
static void do_svc_interrupt(CPUState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    target_phys_addr_t len = TARGET_PAGE_SIZE;

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilc = cpu_to_be16(env->int_svc_ilc);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + (env->int_svc_ilc));
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    load_psw(env, mask, addr);
}
static void do_program_interrupt(CPUState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    target_phys_addr_t len = TARGET_PAGE_SIZE;
    int ilc = env->int_pgm_ilc;

    switch (ilc) {
    case ILC_LATER:
        ilc = get_ilc(ldub_code(env->psw.addr));
        break;
    case ILC_LATER_INC:
        ilc = get_ilc(ldub_code(env->psw.addr));
        env->psw.addr += ilc * 2;
        break;
    case ILC_LATER_INC_2:
        ilc = get_ilc(ldub_code(env->psw.addr)) * 2;
        env->psw.addr += ilc;
        break;
    }

    qemu_log("%s: code=0x%x ilc=%d\n", __FUNCTION__, env->int_pgm_code, ilc);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->pgm_ilc = cpu_to_be16(ilc);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __FUNCTION__,
            env->int_pgm_code, ilc, env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
#define VIRTIO_SUBCODE_64 0x0D00
static void do_ext_interrupt(CPUState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;
    target_phys_addr_t len = TARGET_PAGE_SIZE;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(env, "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) {
        cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __FUNCTION__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
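/*
 * Pending TOD (ext code 0x1004) and CPU-timer (0x1005) conditions are
 * converted into queued external interrupts here before dispatching on
 * exception_index.
 */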
void do_interrupt (CPUState *env)
{
    qemu_log("%s: %d at pc=%" PRIx64 "\n", __FUNCTION__, env->exception_index,
             env->psw.addr);

    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        env->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            env->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(env, 0x1004, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(env, 0x1005, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }

    switch (env->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    }
    env->exception_index = -1;

    if (!env->pending_int) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

#endif /* CONFIG_USER_ONLY */