Merge remote-tracking branch 'remotes/agraf/tags/signed-ppc-for-upstream' into staging
[qemu/qmp-unstable.git] / target-s390x / helper.c
blobe0fd8fc3797c4d9549707236b903a1bcb7d2d4b5
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "cpu.h"
22 #include "exec/gdbstub.h"
23 #include "qemu/timer.h"
24 #include "exec/cpu_ldst.h"
25 #ifndef CONFIG_USER_ONLY
26 #include "sysemu/sysemu.h"
27 #endif
/* Debug tracing: enable DEBUG_S390 for qemu_log output; additionally
 * enable DEBUG_S390_STDOUT to mirror the messages to stderr.  When
 * disabled, DPRINTF compiles away entirely. */
//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
47 #ifndef CONFIG_USER_ONLY
48 void s390x_tod_timer(void *opaque)
50 S390CPU *cpu = opaque;
51 CPUS390XState *env = &cpu->env;
53 env->pending_int |= INTERRUPT_TOD;
54 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
57 void s390x_cpu_timer(void *opaque)
59 S390CPU *cpu = opaque;
60 CPUS390XState *env = &cpu->env;
62 env->pending_int |= INTERRUPT_CPUTIMER;
63 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
65 #endif
67 S390CPU *cpu_s390x_init(const char *cpu_model)
69 S390CPU *cpu;
71 cpu = S390_CPU(object_new(TYPE_S390_CPU));
73 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
75 return cpu;
78 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation: there is nothing to deliver here; just clear the
 * pending exception so the main loop resumes.  cpu_loop() in the user
 * front end does the actual handling. */
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
/* User-mode fault handler: every fault is reported as an addressing
 * program interrupt (there is no guest MMU in user emulation).
 * Always returns 1 so the caller raises the exception. */
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem. Since this
       is userland, simply put this someplace that cpu_loop can find it. */
    cpu->env.__excp_addr = address;
    return 1;
}
98 #else /* !CONFIG_USER_ONLY */
100 /* Ensure to exit the TB after this call! */
101 void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
103 CPUState *cs = CPU(s390_env_get_cpu(env));
105 cs->exception_index = EXCP_PGM;
106 env->int_pgm_code = code;
107 env->int_pgm_ilen = ilen;
/* Softmmu fault handler: translate @orig_vaddr via the guest MMU and
 * install a TLB entry for it.
 * Returns 0 on success, 1 if a program exception was triggered (the
 * caller must deliver it and exit the TB). */
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    /* NOTE(review): '>' accepts raddr == ram_size + virtio_size; if these
     * sizes are exclusive limits that looks like an off-by-one — confirm
     * against the machine's memory layout before changing. */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    /* Key the TLB on the page-aligned untranslated address so future
     * accesses hit without re-translating. */
    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
152 hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
154 S390CPU *cpu = S390_CPU(cs);
155 CPUS390XState *env = &cpu->env;
156 target_ulong raddr;
157 int prot;
158 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
160 /* 31-Bit mode */
161 if (!(env->psw.mask & PSW_MASK_64)) {
162 vaddr &= 0x7fffffff;
165 mmu_translate(env, vaddr, 2, asc, &raddr, &prot, false);
167 return raddr;
170 hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
172 hwaddr phys_addr;
173 target_ulong page;
175 page = vaddr & TARGET_PAGE_MASK;
176 phys_addr = cpu_get_phys_page_debug(cs, page);
177 phys_addr += (vaddr & ~TARGET_PAGE_MASK);
179 return phys_addr;
/* Load a new PSW (mask + address) into the CPU state.  Also unpacks the
 * condition code from the mask and, for an enabled-wait PSW, halts the
 * CPU — shutting the machine down if no wakeup is possible. */
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    env->psw.addr = addr;
    env->psw.mask = mask;
    /* PSW bits 18-19 (<< 44 from the low end) hold the condition code. */
    env->cc_op = (mask >> 44) & 3;

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            /* s390_cpu_halt() returned 0: nothing can wake this guest
             * any more, so request a machine shutdown. */
            qemu_system_shutdown_request();
#endif
        }
    }
}
198 static uint64_t get_psw_mask(CPUS390XState *env)
200 uint64_t r;
202 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
204 r = env->psw.mask;
205 r &= ~PSW_MASK_CC;
206 assert(!(env->cc_op & ~3));
207 r |= (uint64_t)env->cc_op << 44;
209 return r;
212 static LowCore *cpu_map_lowcore(CPUS390XState *env)
214 S390CPU *cpu = s390_env_get_cpu(env);
215 LowCore *lowcore;
216 hwaddr len = sizeof(LowCore);
218 lowcore = cpu_physical_memory_map(env->psa, &len, 1);
220 if (len < sizeof(LowCore)) {
221 cpu_abort(CPU(cpu), "Could not map lowcore\n");
224 return lowcore;
/* Release a mapping obtained from cpu_map_lowcore(); the whole structure
 * is marked dirty since it was mapped for writing. */
static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
/* Deliver a supervisor-call interruption: store the SVC code, ILC and
 * old PSW into the lowcore, then load the SVC new PSW from it. */
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    /* The old PSW address points past the SVC instruction. */
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
/* Deliver a program interruption: resolve the instruction length, store
 * code/ILC and the old PSW into the lowcore, then load the program new
 * PSW from it. */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        /* Length was deferred: decode it from the opcode at the PSW. */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        /* As above, but the PSW must also advance past the instruction
         * (nullifying vs. suppressing exceptions). */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        /* Caller supplied a concrete length; it must be architecturally
         * valid (2, 4 or 6 bytes). */
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
290 #define VIRTIO_SUBCODE_64 0x0D00
/* Deliver the most recently queued external interruption: store its
 * code/parameters and the old PSW into the lowcore, pop it from the
 * queue, and load the external new PSW. */
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    /* Must only be called while external interrupts are enabled. */
    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    /* The queue is used as a stack: deliver the newest entry. */
    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    /* Pop the entry; clear the pending bit once the queue is empty. */
    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
/* Deliver at most one pending I/O interruption, scanning the per-ISC
 * queues in priority order.  An ISC whose bit in CR6 is not set is
 * skipped but keeps INTERRUPT_IO pending; the bit is only cleared when
 * every queue is empty or masked off. */
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;   /* becomes 0 if any interrupt remains queued */
    int found = 0;     /* only the first deliverable entry is delivered */

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        /* Empty queue for this ISC. */
        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        /* ISC masked off in CR6: leave it queued and pending. */
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        /* Entries left in this queue keep INTERRUPT_IO pending. */
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
/* Deliver a machine-check interruption: save the full register state
 * into the lowcore save areas, store a fixed CRW-pending interruption
 * code and the old PSW, then load the machine-check new PSW. */
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    /* Only type-1 (CRW pending) machine checks are implemented. */
    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    /* Dump the complete architected register state into the save areas. */
    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(env->fregs[i].ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    /* Fixed interruption code: presumably "CRW pending" with validity
     * bits for the saved state — verify against the PoP before reuse. */
    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    /* Pop the queue entry; clear the pending bit when empty. */
    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
460 void s390_cpu_do_interrupt(CPUState *cs)
462 S390CPU *cpu = S390_CPU(cs);
463 CPUS390XState *env = &cpu->env;
465 qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
466 __func__, cs->exception_index, env->psw.addr);
468 s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
469 /* handle machine checks */
470 if ((env->psw.mask & PSW_MASK_MCHECK) &&
471 (cs->exception_index == -1)) {
472 if (env->pending_int & INTERRUPT_MCHK) {
473 cs->exception_index = EXCP_MCHK;
476 /* handle external interrupts */
477 if ((env->psw.mask & PSW_MASK_EXT) &&
478 cs->exception_index == -1) {
479 if (env->pending_int & INTERRUPT_EXT) {
480 /* code is already in env */
481 cs->exception_index = EXCP_EXT;
482 } else if (env->pending_int & INTERRUPT_TOD) {
483 cpu_inject_ext(cpu, 0x1004, 0, 0);
484 cs->exception_index = EXCP_EXT;
485 env->pending_int &= ~INTERRUPT_EXT;
486 env->pending_int &= ~INTERRUPT_TOD;
487 } else if (env->pending_int & INTERRUPT_CPUTIMER) {
488 cpu_inject_ext(cpu, 0x1005, 0, 0);
489 cs->exception_index = EXCP_EXT;
490 env->pending_int &= ~INTERRUPT_EXT;
491 env->pending_int &= ~INTERRUPT_TOD;
494 /* handle I/O interrupts */
495 if ((env->psw.mask & PSW_MASK_IO) &&
496 (cs->exception_index == -1)) {
497 if (env->pending_int & INTERRUPT_IO) {
498 cs->exception_index = EXCP_IO;
502 switch (cs->exception_index) {
503 case EXCP_PGM:
504 do_program_interrupt(env);
505 break;
506 case EXCP_SVC:
507 do_svc_interrupt(env);
508 break;
509 case EXCP_EXT:
510 do_ext_interrupt(env);
511 break;
512 case EXCP_IO:
513 do_io_interrupt(env);
514 break;
515 case EXCP_MCHK:
516 do_mchk_interrupt(env);
517 break;
519 cs->exception_index = -1;
521 if (!env->pending_int) {
522 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
526 bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
528 if (interrupt_request & CPU_INTERRUPT_HARD) {
529 S390CPU *cpu = S390_CPU(cs);
530 CPUS390XState *env = &cpu->env;
532 if (env->psw.mask & PSW_MASK_EXT) {
533 s390_cpu_do_interrupt(cs);
534 return true;
537 return false;
539 #endif /* CONFIG_USER_ONLY */