4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "exec/gdbstub.h"
23 #include "qemu/timer.h"
24 #ifndef CONFIG_USER_ONLY
25 #include "sysemu/sysemu.h"
29 //#define DEBUG_S390_PTE
30 //#define DEBUG_S390_STDOUT
33 #ifdef DEBUG_S390_STDOUT
34 #define DPRINTF(fmt, ...) \
35 do { fprintf(stderr, fmt, ## __VA_ARGS__); \
36 qemu_log(fmt, ##__VA_ARGS__); } while (0)
38 #define DPRINTF(fmt, ...) \
39 do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
42 #define DPRINTF(fmt, ...) \
47 #define PTE_DPRINTF DPRINTF
49 #define PTE_DPRINTF(fmt, ...) \
53 #ifndef CONFIG_USER_ONLY
54 void s390x_tod_timer(void *opaque
)
56 S390CPU
*cpu
= opaque
;
57 CPUS390XState
*env
= &cpu
->env
;
59 env
->pending_int
|= INTERRUPT_TOD
;
60 cpu_interrupt(env
, CPU_INTERRUPT_HARD
);
63 void s390x_cpu_timer(void *opaque
)
65 S390CPU
*cpu
= opaque
;
66 CPUS390XState
*env
= &cpu
->env
;
68 env
->pending_int
|= INTERRUPT_CPUTIMER
;
69 cpu_interrupt(env
, CPU_INTERRUPT_HARD
);
/* Allocate and set up an S390CPU object for the given model string.
 * NOTE(review): the declarations of `cpu`, `env` and the static
 * `inited` flag, plus the final return, were lost in extraction —
 * confirm against the full file. */
73 S390CPU
*cpu_s390x_init(const char *cpu_model
)
/* Instantiate a fresh CPU object through QOM. */
79 cpu
= S390_CPU(object_new(TYPE_S390_CPU
));
/* One-time TCG translator initialization, guarded by `inited`. */
82 if (tcg_enabled() && !inited
) {
84 s390x_translate_init();
/* Remember the requested model string on the CPU state. */
87 env
->cpu_model_str
= cpu_model
;
92 #if defined(CONFIG_USER_ONLY)
94 void do_interrupt(CPUS390XState
*env
)
96 env
->exception_index
= -1;
/* User-mode MMU fault handler: linux-user has no DAT, so any fault is
 * reported as a PGM_ADDRESSING program exception for cpu_loop.
 * NOTE(review): the trailing parameters (rw/mmu_idx) and the return
 * statement are elided in this extract. */
99 int cpu_s390x_handle_mmu_fault(CPUS390XState
*env
, target_ulong address
,
102 env
->exception_index
= EXCP_PGM
;
103 env
->int_pgm_code
= PGM_ADDRESSING
;
104 /* On real machines this value is dropped into LowMem. Since this
105 is userland, simply put this someplace that cpu_loop can find it. */
106 env
->__excp_addr
= address
;
110 #else /* !CONFIG_USER_ONLY */
112 /* Ensure to exit the TB after this call! */
/* Record a program interruption in env; actual delivery happens later
 * in do_program_interrupt().  `ilen` is the instruction-length value
 * to report (its parameter line is elided in this extract). */
113 static void trigger_pgm_exception(CPUS390XState
*env
, uint32_t code
,
116 env
->exception_index
= EXCP_PGM
;
117 env
->int_pgm_code
= code
;
118 env
->int_pgm_ilen
= ilen
;
/* Map an address-space-control (ASC) mode to the translation-exception
 * identification bits stored into LowCore trans_exc_code.
 * NOTE(review): the switch header, the PSW_ASC_HOME case, the per-case
 * values and the return were lost in extraction. */
121 static int trans_bits(CPUS390XState
*env
, uint64_t mode
)
126 case PSW_ASC_PRIMARY
:
129 case PSW_ASC_SECONDARY
:
/* An unknown ASC value here is a QEMU bug, not a guest error. */
136 cpu_abort(env
, "unknown asc mode\n");
/* Raise a protection exception: store the failing address plus the
 * ASC-derived identification bits (| 4 marks a protection fault) into
 * LowCore trans_exc_code, then flag PGM_PROTECTION.
 * NOTE(review): the `uint64_t mode` parameter line was lost in
 * extraction; `mode` is used below. */
143 static void trigger_prot_fault(CPUS390XState
*env
, target_ulong vaddr
,
146 int ilen
= ILEN_LATER_INC
;
147 int bits
= trans_bits(env
, mode
) | 4;
149 DPRINTF("%s: vaddr=%016" PRIx64
" bits=%d\n", __func__
, vaddr
, bits
);
/* Architected: failing address + cause bits go to the lowcore. */
151 stq_phys(env
->psa
+ offsetof(LowCore
, trans_exc_code
), vaddr
| bits
);
152 trigger_pgm_exception(env
, PGM_PROTECTION
, ilen
);
/* Raise a DAT translation exception of the given `type` (segment /
 * page translation, etc.) for `vaddr`, recording the address and ASC
 * bits in LowCore trans_exc_code first.
 * NOTE(review): the branch adjusting ilen for code accesses (rw == 2)
 * appears elided between the comment and the DPRINTF. */
155 static void trigger_page_fault(CPUS390XState
*env
, target_ulong vaddr
,
156 uint32_t type
, uint64_t asc
, int rw
)
158 int ilen
= ILEN_LATER
;
159 int bits
= trans_bits(env
, asc
);
161 /* Code accesses have an undefined ilc. */
166 DPRINTF("%s: vaddr=%016" PRIx64
" bits=%d\n", __func__
, vaddr
, bits
);
168 stq_phys(env
->psa
+ offsetof(LowCore
, trans_exc_code
), vaddr
| bits
);
169 trigger_pgm_exception(env
, type
, ilen
);
/* Walk one level of the DAT translation hierarchy.  `asce` is the
 * table entry for `level` (_ASCE_TYPE_*); the function validates it,
 * selects the vaddr index bits for this level, fetches the next entry
 * from guest memory, and recurses with level - 4 until the segment
 * level, where the fetched entry is treated as the PTE.  Fills *raddr
 * and may clear PAGE_WRITE in *flags.
 * NOTE(review): the return statements and the _ASCE_REAL_SPACE branch
 * body are elided in this extract. */
172 static int mmu_translate_asce(CPUS390XState
*env
, target_ulong vaddr
,
173 uint64_t asc
, uint64_t asce
, int level
,
174 target_ulong
*raddr
, int *flags
, int rw
)
180 PTE_DPRINTF("%s: 0x%" PRIx64
"\n", __func__
, asce
);
/* Invalid bit set for this entry type -> segment-translation fault. */
182 if (((level
!= _ASCE_TYPE_SEGMENT
) && (asce
& _REGION_ENTRY_INV
)) ||
183 ((level
== _ASCE_TYPE_SEGMENT
) && (asce
& _SEGMENT_ENTRY_INV
))) {
184 /* XXX different regions have different faults */
185 DPRINTF("%s: invalid region\n", __func__
);
186 trigger_page_fault(env
, vaddr
, PGM_SEGMENT_TRANS
, asc
, rw
);
/* The entry's type field must match the level we expect to walk. */
190 if ((level
<= _ASCE_TYPE_MASK
) && ((asce
& _ASCE_TYPE_MASK
) != level
)) {
191 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
/* Real-space ASCE: no translation (branch body elided here). */
195 if (asce
& _ASCE_REAL_SPACE
) {
202 origin
= asce
& _ASCE_ORIGIN
;
/* Select the vaddr index bits for this table level. */
205 case _ASCE_TYPE_REGION1
+ 4:
206 offs
= (vaddr
>> 50) & 0x3ff8;
208 case _ASCE_TYPE_REGION1
:
209 offs
= (vaddr
>> 39) & 0x3ff8;
211 case _ASCE_TYPE_REGION2
:
212 offs
= (vaddr
>> 28) & 0x3ff8;
214 case _ASCE_TYPE_REGION3
:
215 offs
= (vaddr
>> 17) & 0x3ff8;
217 case _ASCE_TYPE_SEGMENT
:
218 offs
= (vaddr
>> 9) & 0x07f8;
219 origin
= asce
& _SEGMENT_ENTRY_ORIGIN
;
223 /* XXX region protection flags */
224 /* *flags &= ~PAGE_WRITE */
/* Fetch the next-lower table entry from guest physical memory. */
226 new_asce
= ldq_phys(origin
+ offs
);
227 PTE_DPRINTF("%s: 0x%" PRIx64
" + 0x%" PRIx64
" => 0x%016" PRIx64
"\n",
228 __func__
, origin
, offs
, new_asce
);
230 if (level
!= _ASCE_TYPE_SEGMENT
) {
231 /* yet another region */
232 return mmu_translate_asce(env
, vaddr
, asc
, new_asce
, level
- 4, raddr
,
/* Below segment level the fetched entry is the page-table entry. */
237 if (new_asce
& _PAGE_INVALID
) {
238 DPRINTF("%s: PTE=0x%" PRIx64
" invalid\n", __func__
, new_asce
);
239 trigger_page_fault(env
, vaddr
, PGM_PAGE_TRANS
, asc
, rw
);
243 if (new_asce
& _PAGE_RO
) {
244 *flags
&= ~PAGE_WRITE
;
247 *raddr
= new_asce
& _ASCE_ORIGIN
;
249 PTE_DPRINTF("%s: PTE=0x%" PRIx64
"\n", __func__
, new_asce
);
/* Translate vaddr under one specific ASC mode: pick the ASCE from the
 * matching control register (CR1 primary, CR7 secondary, CR13 home),
 * check that vaddr fits the addressable range of the top-level table
 * type, then synthesize one level above the current table so
 * mmu_translate_asce() can recurse uniformly.  A write (rw == 1) to a
 * page that lost PAGE_WRITE raises a protection fault.
 * NOTE(review): switch headers, break statements and the final return
 * are elided in this extract. */
254 static int mmu_translate_asc(CPUS390XState
*env
, target_ulong vaddr
,
255 uint64_t asc
, target_ulong
*raddr
, int *flags
,
259 int level
, new_level
;
/* Choose the address-space-control element for this mode. */
263 case PSW_ASC_PRIMARY
:
264 PTE_DPRINTF("%s: asc=primary\n", __func__
);
265 asce
= env
->cregs
[1];
267 case PSW_ASC_SECONDARY
:
268 PTE_DPRINTF("%s: asc=secondary\n", __func__
);
269 asce
= env
->cregs
[7];
/* PSW_ASC_HOME (its case label is elided in this extract). */
272 PTE_DPRINTF("%s: asc=home\n", __func__
);
273 asce
= env
->cregs
[13];
/* Reject addresses above what the top-level table type can map. */
277 switch (asce
& _ASCE_TYPE_MASK
) {
278 case _ASCE_TYPE_REGION1
:
280 case _ASCE_TYPE_REGION2
:
281 if (vaddr
& 0xffe0000000000000ULL
) {
282 DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
283 " 0xffe0000000000000ULL\n", __func__
, vaddr
);
284 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
288 case _ASCE_TYPE_REGION3
:
289 if (vaddr
& 0xfffffc0000000000ULL
) {
290 DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
291 " 0xfffffc0000000000ULL\n", __func__
, vaddr
);
292 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
296 case _ASCE_TYPE_SEGMENT
:
297 if (vaddr
& 0xffffffff80000000ULL
) {
298 DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
299 " 0xffffffff80000000ULL\n", __func__
, vaddr
);
300 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
306 /* fake level above current */
307 level
= asce
& _ASCE_TYPE_MASK
;
308 new_level
= level
+ 4;
309 asce
= (asce
& ~_ASCE_TYPE_MASK
) | (new_level
& _ASCE_TYPE_MASK
);
311 r
= mmu_translate_asce(env
, vaddr
, asc
, asce
, new_level
, raddr
, flags
, rw
);
/* Enforce write protection discovered during the walk. */
313 if ((rw
== 1) && !(*flags
& PAGE_WRITE
)) {
314 trigger_prot_fault(env
, vaddr
, asc
);
/* Main translation entry point: vaddr -> absolute address plus page
 * protection flags for access type rw under ASC mode `asc`.  Starts
 * from full RWX, applies DAT if enabled, then low-address prefixing
 * and storage-key reference/change updates.
 * NOTE(review): the DAT-off branch body, the home/access-register
 * cases, the storage-key bit updates and the returns are elided in
 * this extract. */
321 int mmu_translate(CPUS390XState
*env
, target_ulong vaddr
, int rw
, uint64_t asc
,
322 target_ulong
*raddr
, int *flags
)
327 *flags
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
328 vaddr
&= TARGET_PAGE_MASK
;
/* DAT disabled: real address equals virtual (branch body elided). */
330 if (!(env
->psw
.mask
& PSW_MASK_DAT
)) {
337 case PSW_ASC_PRIMARY
:
339 r
= mmu_translate_asc(env
, vaddr
, asc
, raddr
, flags
, rw
);
/* Secondary mode splits accesses: instruction fetches translate via
 * the primary space, data accesses via the secondary space; the two
 * sub-translations keep disjoint permission bits. */
341 case PSW_ASC_SECONDARY
:
343 * Instruction: Primary
347 r
= mmu_translate_asc(env
, vaddr
, PSW_ASC_PRIMARY
, raddr
, flags
,
349 *flags
&= ~(PAGE_READ
| PAGE_WRITE
);
351 r
= mmu_translate_asc(env
, vaddr
, PSW_ASC_SECONDARY
, raddr
, flags
,
353 *flags
&= ~(PAGE_EXEC
);
358 hw_error("guest switched to unknown asc mode\n");
363 /* Convert real address -> absolute address */
364 if (*raddr
< 0x2000) {
365 *raddr
= *raddr
+ env
->psa
;
/* Update the page's storage key reference/change bits for RAM.
 * NOTE(review): `<=` allows *raddr == ram_size, which would index one
 * past the end of storage_keys[] — looks like it should be `<`;
 * verify against the array's allocation. */
368 if (*raddr
<= ram_size
) {
369 sk
= &env
->storage_keys
[*raddr
/ TARGET_PAGE_SIZE
];
370 if (*flags
& PAGE_READ
) {
374 if (*flags
& PAGE_WRITE
) {
/* Softmmu TLB-fill handler: translate orig_vaddr and install the
 * mapping with tlb_set_page(); on translation failure the program
 * exception has already been queued by the translation code.
 * NOTE(review): `_vaddr` (line 390) and `aaddr` (line 408) are not
 * declared anywhere visible — presumably orig_vaddr and raddr.  They
 * only compile while DEBUG_S390_STDOUT is off because DPRINTF then
 * discards its arguments; confirm and rename against the full file.
 * The prot declaration, 31-bit address masking and the returns are
 * elided in this extract. */
382 int cpu_s390x_handle_mmu_fault(CPUS390XState
*env
, target_ulong orig_vaddr
,
385 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
386 target_ulong vaddr
, raddr
;
389 DPRINTF("%s: address 0x%" PRIx64
" rw %d mmu_idx %d\n",
390 __func__
, _vaddr
, rw
, mmu_idx
);
392 orig_vaddr
&= TARGET_PAGE_MASK
;
/* 31-bit addressing mode: truncate the address (body elided). */
396 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
400 if (mmu_translate(env
, vaddr
, rw
, asc
, &raddr
, &prot
)) {
401 /* Translation ended in exception */
405 /* check out of RAM access */
406 if (raddr
> (ram_size
+ virtio_size
)) {
407 DPRINTF("%s: aaddr %" PRIx64
" > ram_size %" PRIx64
"\n", __func__
,
408 (uint64_t)aaddr
, (uint64_t)ram_size
);
409 trigger_pgm_exception(env
, PGM_ADDRESSING
, ILEN_LATER
);
413 DPRINTF("%s: set tlb %" PRIx64
" -> %" PRIx64
" (%x)\n", __func__
,
414 (uint64_t)vaddr
, (uint64_t)raddr
, prot
);
/* Install the translation for this page in QEMU's softmmu TLB. */
416 tlb_set_page(env
, orig_vaddr
, raddr
, prot
,
417 mmu_idx
, TARGET_PAGE_SIZE
);
/* Debugger (gdbstub) physical-address probe: run the MMU translation
 * with rw == 2 under the current ASC mode, then restore
 * exception_index so the probe leaves no guest-visible state behind.
 * NOTE(review): the vaddr parameter line, the raddr declaration and
 * the return of raddr are elided in this extract. */
422 hwaddr
cpu_get_phys_page_debug(CPUS390XState
*env
,
426 int prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
427 int old_exc
= env
->exception_index
;
428 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
431 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
/* rw == 2 treats the probe as a code fetch; any queued fault is
 * discarded by restoring exception_index below. */
435 mmu_translate(env
, vaddr
, 2, asc
, &raddr
, &prot
);
436 env
->exception_index
= old_exc
;
/* Install a new PSW (mask + address).  A wait-state PSW with no
 * I/O/external/machine-check mask bits can never be woken: if this
 * was the last running CPU, request machine shutdown.  Wait states
 * otherwise halt via EXCP_HLT.  The condition code is reloaded from
 * the PSW mask (bits at >> 44).
 * NOTE(review): the #endif, closing braces and any halted-state setup
 * between lines 447 and 452 are elided in this extract. */
441 void load_psw(CPUS390XState
*env
, uint64_t mask
, uint64_t addr
)
443 if (mask
& PSW_MASK_WAIT
) {
444 if (!(mask
& (PSW_MASK_IO
| PSW_MASK_EXT
| PSW_MASK_MCHECK
))) {
/* Disabled wait on the last running CPU: the guest is done. */
445 if (s390_del_running_cpu(env
) == 0) {
446 #ifndef CONFIG_USER_ONLY
447 qemu_system_shutdown_request();
452 env
->exception_index
= EXCP_HLT
;
455 env
->psw
.addr
= addr
;
456 env
->psw
.mask
= mask
;
/* The CC occupies two bits of the PSW mask at position 44. */
457 env
->cc_op
= (mask
>> 44) & 3;
/* Assemble the architected PSW mask for storing into lowcore: force
 * the lazily-evaluated condition code into env->cc_op via calc_cc(),
 * then merge it into the mask's CC field (<< 44).
 * NOTE(review): the declaration of `r`, the clearing of the old CC
 * field in it, and the return are elided in this extract. */
460 static uint64_t get_psw_mask(CPUS390XState
*env
)
464 env
->cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
, env
->cc_vr
);
/* After calc_cc() the CC must be a valid 2-bit value. */
468 assert(!(env
->cc_op
& ~3));
469 r
|= (uint64_t)env
->cc_op
<< 44;
/* Deliver a supervisor-call interruption: store the SVC code/ilen and
 * the old PSW (advanced past the SVC instruction) into the lowcore
 * page at env->psa, then load the SVC new PSW from lowcore.
 * NOTE(review): the declarations of lowcore/mask/addr are elided in
 * this extract. */
474 static void do_svc_interrupt(CPUS390XState
*env
)
478 hwaddr len
= TARGET_PAGE_SIZE
;
/* Map the guest's prefix (lowcore) page for direct struct access. */
480 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, 1);
482 lowcore
->svc_code
= cpu_to_be16(env
->int_svc_code
);
483 lowcore
->svc_ilen
= cpu_to_be16(env
->int_svc_ilen
);
484 lowcore
->svc_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
/* The stored old PSW points past the SVC instruction that trapped. */
485 lowcore
->svc_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
+ env
->int_svc_ilen
);
486 mask
= be64_to_cpu(lowcore
->svc_new_psw
.mask
);
487 addr
= be64_to_cpu(lowcore
->svc_new_psw
.addr
);
489 cpu_physical_memory_unmap(lowcore
, len
, 1, len
);
491 load_psw(env
, mask
, addr
);
/* Deliver a program interruption.  The ILEN_LATER / ILEN_LATER_INC
 * placeholders left by trigger_pgm_exception() are resolved here by
 * decoding the current instruction's length; the _INC variant also
 * advances the PSW past the instruction (the switch header selecting
 * between them is elided in this extract).  Code/ilen and the old PSW
 * go to lowcore, then the program new PSW is loaded. */
494 static void do_program_interrupt(CPUS390XState
*env
)
498 hwaddr len
= TARGET_PAGE_SIZE
;
499 int ilen
= env
->int_pgm_ilen
;
/* ILEN_LATER: derive the ilc from the opcode at the current PSW. */
503 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
/* ILEN_LATER_INC: same, but also step the PSW past the instruction. */
506 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
507 env
->psw
.addr
+= ilen
;
/* Only 2/4/6-byte instruction lengths exist on s390x. */
510 assert(ilen
== 2 || ilen
== 4 || ilen
== 6);
513 qemu_log_mask(CPU_LOG_INT
, "%s: code=0x%x ilen=%d\n",
514 __func__
, env
->int_pgm_code
, ilen
);
516 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, 1);
518 lowcore
->pgm_ilen
= cpu_to_be16(ilen
);
519 lowcore
->pgm_code
= cpu_to_be16(env
->int_pgm_code
);
520 lowcore
->program_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
521 lowcore
->program_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
522 mask
= be64_to_cpu(lowcore
->program_new_psw
.mask
);
523 addr
= be64_to_cpu(lowcore
->program_new_psw
.addr
);
525 cpu_physical_memory_unmap(lowcore
, len
, 1, len
);
527 DPRINTF("%s: %x %x %" PRIx64
" %" PRIx64
"\n", __func__
,
528 env
->int_pgm_code
, ilen
, env
->psw
.mask
,
531 load_psw(env
, mask
, addr
);
534 #define VIRTIO_SUBCODE_64 0x0D00
/* Deliver one queued external interruption: take the newest entry of
 * env->ext_queue, store its code/params and the old PSW into lowcore,
 * tag cpu_addr with VIRTIO_SUBCODE_64, then load the external new
 * PSW.  Clears INTERRUPT_EXT once the queue drains.
 * NOTE(review): the `env->ext_index--` between unmap and the -1 test
 * is elided in this extract.  Also, the bound check uses
 * `> MAX_EXT_QUEUE` where `>=` looks intended — an index equal to
 * MAX_EXT_QUEUE would read past ext_queue[]; verify against the
 * array's declared size. */
536 static void do_ext_interrupt(CPUS390XState
*env
)
540 hwaddr len
= TARGET_PAGE_SIZE
;
/* Delivering with external interrupts masked is a QEMU bug. */
543 if (!(env
->psw
.mask
& PSW_MASK_EXT
)) {
544 cpu_abort(env
, "Ext int w/o ext mask\n");
547 if (env
->ext_index
< 0 || env
->ext_index
> MAX_EXT_QUEUE
) {
548 cpu_abort(env
, "Ext queue overrun: %d\n", env
->ext_index
);
551 q
= &env
->ext_queue
[env
->ext_index
];
552 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, 1);
554 lowcore
->ext_int_code
= cpu_to_be16(q
->code
);
555 lowcore
->ext_params
= cpu_to_be32(q
->param
);
556 lowcore
->ext_params2
= cpu_to_be64(q
->param64
);
557 lowcore
->external_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
558 lowcore
->external_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
559 lowcore
->cpu_addr
= cpu_to_be16(env
->cpu_num
| VIRTIO_SUBCODE_64
);
560 mask
= be64_to_cpu(lowcore
->external_new_psw
.mask
);
561 addr
= be64_to_cpu(lowcore
->external_new_psw
.addr
);
563 cpu_physical_memory_unmap(lowcore
, len
, 1, len
);
/* Queue empty again: drop the pending-external flag. */
566 if (env
->ext_index
== -1) {
567 env
->pending_int
&= ~INTERRUPT_EXT
;
570 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
571 env
->psw
.mask
, env
->psw
.addr
);
573 load_psw(env
, mask
, addr
);
576 void do_interrupt(CPUS390XState
*env
)
578 qemu_log_mask(CPU_LOG_INT
, "%s: %d at pc=%" PRIx64
"\n",
579 __func__
, env
->exception_index
, env
->psw
.addr
);
581 s390_add_running_cpu(env
);
582 /* handle external interrupts */
583 if ((env
->psw
.mask
& PSW_MASK_EXT
) &&
584 env
->exception_index
== -1) {
585 if (env
->pending_int
& INTERRUPT_EXT
) {
586 /* code is already in env */
587 env
->exception_index
= EXCP_EXT
;
588 } else if (env
->pending_int
& INTERRUPT_TOD
) {
589 cpu_inject_ext(env
, 0x1004, 0, 0);
590 env
->exception_index
= EXCP_EXT
;
591 env
->pending_int
&= ~INTERRUPT_EXT
;
592 env
->pending_int
&= ~INTERRUPT_TOD
;
593 } else if (env
->pending_int
& INTERRUPT_CPUTIMER
) {
594 cpu_inject_ext(env
, 0x1005, 0, 0);
595 env
->exception_index
= EXCP_EXT
;
596 env
->pending_int
&= ~INTERRUPT_EXT
;
597 env
->pending_int
&= ~INTERRUPT_TOD
;
601 switch (env
->exception_index
) {
603 do_program_interrupt(env
);
606 do_svc_interrupt(env
);
609 do_ext_interrupt(env
);
612 env
->exception_index
= -1;
614 if (!env
->pending_int
) {
615 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
619 #endif /* CONFIG_USER_ONLY */