hw/intc/openpic.c
1 /*
2 * OpenPIC emulation
4 * Copyright (c) 2004 Jocelyn Mayer
5 * 2011 Alexander Graf
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
27 * Based on OpenPIC implementations:
28 * - Motorola MPC8245 & MPC8540 user manuals.
29 * - Motorola Harrier programmer's manual.
33 #include "qemu/osdep.h"
34 #include "hw/irq.h"
35 #include "hw/pci/pci.h"
36 #include "hw/ppc/openpic.h"
37 #include "hw/ppc/ppc_e500.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/sysbus.h"
40 #include "migration/vmstate.h"
41 #include "hw/pci/msi.h"
42 #include "qapi/error.h"
43 #include "qemu/bitops.h"
44 #include "qapi/qmp/qerror.h"
45 #include "qemu/module.h"
46 #include "qemu/timer.h"
47 #include "qemu/error-report.h"
49 /* #define DEBUG_OPENPIC */
51 #ifdef DEBUG_OPENPIC
52 static const int debug_openpic = 1;
53 #else
54 static const int debug_openpic = 0;
55 #endif
57 static int get_current_cpu(void);
58 #define DPRINTF(fmt, ...) do { \
59 if (debug_openpic) { \
60 info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
61 } \
62 } while (0)
64 /* OpenPIC capability flags */
65 #define OPENPIC_FLAG_IDR_CRIT (1 << 0)
66 #define OPENPIC_FLAG_ILR (2 << 0)
68 /* OpenPIC address map */
69 #define OPENPIC_GLB_REG_START 0x0
70 #define OPENPIC_GLB_REG_SIZE 0x10F0
71 #define OPENPIC_TMR_REG_START 0x10F0
72 #define OPENPIC_TMR_REG_SIZE 0x220
73 #define OPENPIC_MSI_REG_START 0x1600
74 #define OPENPIC_MSI_REG_SIZE 0x200
75 #define OPENPIC_SUMMARY_REG_START 0x3800
76 #define OPENPIC_SUMMARY_REG_SIZE 0x800
77 #define OPENPIC_SRC_REG_START 0x10000
78 #define OPENPIC_SRC_REG_SIZE (OPENPIC_MAX_SRC * 0x20)
79 #define OPENPIC_CPU_REG_START 0x20000
80 #define OPENPIC_CPU_REG_SIZE 0x100 + ((MAX_CPU - 1) * 0x1000)
82 static FslMpicInfo fsl_mpic_20 = {
83 .max_ext = 12,
86 static FslMpicInfo fsl_mpic_42 = {
87 .max_ext = 12,
90 #define FRR_NIRQ_SHIFT 16
91 #define FRR_NCPU_SHIFT 8
92 #define FRR_VID_SHIFT 0
94 #define VID_REVISION_1_2 2
95 #define VID_REVISION_1_3 3
97 #define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
98 #define VIR_MPIC2A 0x00004614 /* IBM MPIC-2A */
100 #define GCR_RESET 0x80000000
101 #define GCR_MODE_PASS 0x00000000
102 #define GCR_MODE_MIXED 0x20000000
103 #define GCR_MODE_PROXY 0x60000000
105 #define TBCR_CI 0x80000000 /* count inhibit */
106 #define TCCR_TOG 0x80000000 /* toggles when decremented to zero */
108 #define IDR_EP_SHIFT 31
109 #define IDR_EP_MASK (1U << IDR_EP_SHIFT)
110 #define IDR_CI0_SHIFT 30
111 #define IDR_CI1_SHIFT 29
112 #define IDR_P1_SHIFT 1
113 #define IDR_P0_SHIFT 0
115 #define ILR_INTTGT_MASK 0x000000ff
116 #define ILR_INTTGT_INT 0x00
117 #define ILR_INTTGT_CINT 0x01 /* critical */
118 #define ILR_INTTGT_MCP 0x02 /* machine check */
121 * The currently supported INTTGT values happen to be the same as QEMU's
122 * openpic output codes, but don't depend on this. The output codes
123 * could change (unlikely, but...) or support could be added for
124 * more INTTGT values.
126 static const int inttgt_output[][2] = {
127 { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
128 { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
129 { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
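/*
 * Translate an ILR INTTGT field value to the corresponding QEMU output line;
 * unsupported values fall back to the normal INT output with a warning.
 */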
132 static int inttgt_to_output(int inttgt)
134 int i;
136 for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
137 if (inttgt_output[i][0] == inttgt) {
138 return inttgt_output[i][1];
142 error_report("%s: unsupported inttgt %d", __func__, inttgt);
143 return OPENPIC_OUTPUT_INT;
146 static int output_to_inttgt(int output)
148 int i;
150 for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
151 if (inttgt_output[i][1] == output) {
152 return inttgt_output[i][0];
156 abort();
159 #define MSIIR_OFFSET 0x140
160 #define MSIIR_SRS_SHIFT 29
161 #define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
162 #define MSIIR_IBS_SHIFT 24
163 #define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
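/* Index of the CPU whose access is being handled, or -1 when no CPU context is current. */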
165 static int get_current_cpu(void)
167 if (!current_cpu) {
168 return -1;
171 return current_cpu->cpu_index;
174 static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
175 int idx);
176 static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
177 uint32_t val, int idx);
178 static void openpic_reset(DeviceState *d);
181 * Convert between openpic clock ticks and nanoseconds. In the hardware the clock
182 * frequency is driven by board inputs to the PIC which the PIC would then
183 * divide by 4 or 8. For now hard-code it to 25 MHz.
185 #define OPENPIC_TIMER_FREQ_MHZ 25
186 #define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)
187 static inline uint64_t ns_to_ticks(uint64_t ns)
189 return ns / OPENPIC_TIMER_NS_PER_TICK;
191 static inline uint64_t ticks_to_ns(uint64_t ticks)
193 return ticks * OPENPIC_TIMER_NS_PER_TICK;
196 static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
198 set_bit(n_IRQ, q->queue);
201 static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
203 clear_bit(n_IRQ, q->queue);
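/* Scan the queue and cache the highest-priority pending IRQ in q->next / q->priority. */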
206 static void IRQ_check(OpenPICState *opp, IRQQueue *q)
208 int irq = -1;
209 int next = -1;
210 int priority = -1;
212 for (;;) {
213 irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
214 if (irq == opp->max_irq) {
215 break;
218 DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
219 irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
221 if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
222 next = irq;
223 priority = IVPR_PRIORITY(opp->src[irq].ivpr);
227 q->next = next;
228 q->priority = priority;
231 static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
233 /* XXX: optimize */
234 IRQ_check(opp, q);
236 return q->next;
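/*
 * Deliver or withdraw interrupt n_IRQ for CPU n_CPU: non-INT outputs are
 * raised/lowered directly via a per-output active count, while the normal
 * INT output is gated on the CPU's CTPR and any interrupt in service.
 */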
239 static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
240 bool active, bool was_active)
242 IRQDest *dst;
243 IRQSource *src;
244 int priority;
246 dst = &opp->dst[n_CPU];
247 src = &opp->src[n_IRQ];
249 DPRINTF("%s: IRQ %d active %d was %d",
250 __func__, n_IRQ, active, was_active);
252 if (src->output != OPENPIC_OUTPUT_INT) {
253 DPRINTF("%s: output %d irq %d active %d was %d count %d",
254 __func__, src->output, n_IRQ, active, was_active,
255 dst->outputs_active[src->output]);
258 * On Freescale MPIC, critical interrupts ignore priority,
259 * IACK, EOI, etc. Before MPIC v4.1 they also ignore
260 * masking.
262 if (active) {
263 if (!was_active && dst->outputs_active[src->output]++ == 0) {
264 DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
265 __func__, src->output, n_CPU, n_IRQ);
266 qemu_irq_raise(dst->irqs[src->output]);
268 } else {
269 if (was_active && --dst->outputs_active[src->output] == 0) {
270 DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
271 __func__, src->output, n_CPU, n_IRQ);
272 qemu_irq_lower(dst->irqs[src->output]);
276 return;
279 priority = IVPR_PRIORITY(src->ivpr);
282 * Even if the interrupt doesn't have enough priority,
283 * it is still raised, in case ctpr is lowered later.
285 if (active) {
286 IRQ_setbit(&dst->raised, n_IRQ);
287 } else {
288 IRQ_resetbit(&dst->raised, n_IRQ);
291 IRQ_check(opp, &dst->raised);
293 if (active && priority <= dst->ctpr) {
294 DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
295 __func__, n_IRQ, priority, dst->ctpr, n_CPU);
296 active = 0;
299 if (active) {
300 if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
301 priority <= dst->servicing.priority) {
302 DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
303 __func__, n_IRQ, dst->servicing.next, n_CPU);
304 } else {
305 DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
306 __func__, n_CPU, n_IRQ, dst->raised.next);
307 qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
309 } else {
310 IRQ_get_next(opp, &dst->servicing);
311 if (dst->raised.priority > dst->ctpr &&
312 dst->raised.priority > dst->servicing.priority) {
313 DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
314 __func__, n_IRQ, dst->raised.next, dst->raised.priority,
315 dst->ctpr, dst->servicing.priority, n_CPU);
316 /* IRQ line stays asserted */
317 } else {
318 DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
319 __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
320 qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
325 /* update pic state because registers for n_IRQ have changed value */
326 static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
328 IRQSource *src;
329 bool active, was_active;
330 int i;
332 src = &opp->src[n_IRQ];
333 active = src->pending;
335 if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
336 /* Interrupt source is disabled */
337 DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
338 active = false;
341 was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);
344 * We don't have a similar check for already-active because
345 * ctpr may have changed and we need to withdraw the interrupt.
347 if (!active && !was_active) {
348 DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
349 return;
352 if (active) {
353 src->ivpr |= IVPR_ACTIVITY_MASK;
354 } else {
355 src->ivpr &= ~IVPR_ACTIVITY_MASK;
358 if (src->destmask == 0) {
359 /* No target */
360 DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
361 return;
364 if (src->destmask == (1 << src->last_cpu)) {
365 /* Only one CPU is allowed to receive this IRQ */
366 IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
367 } else if (!(src->ivpr & IVPR_MODE_MASK)) {
368 /* Directed delivery mode */
369 for (i = 0; i < opp->nb_cpus; i++) {
370 if (src->destmask & (1 << i)) {
371 IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
374 } else {
375 /* Distributed delivery mode */
376 for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
377 if (i == opp->nb_cpus) {
378 i = 0;
380 if (src->destmask & (1 << i)) {
381 IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
382 src->last_cpu = i;
383 break;
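/*
 * qemu_irq input handler: level-sensitive sources track the input level,
 * edge-sensitive sources latch a rising edge (and auto-clear for non-INT
 * outputs to avoid an interrupt storm).
 */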
389 static void openpic_set_irq(void *opaque, int n_IRQ, int level)
391 OpenPICState *opp = opaque;
392 IRQSource *src;
394 if (n_IRQ >= OPENPIC_MAX_IRQ) {
395 error_report("%s: IRQ %d out of range", __func__, n_IRQ);
396 abort();
399 src = &opp->src[n_IRQ];
400 DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
401 n_IRQ, level, src->ivpr);
402 if (src->level) {
403 /* level-sensitive irq */
404 src->pending = level;
405 openpic_update_irq(opp, n_IRQ);
406 } else {
407 /* edge-sensitive irq */
408 if (level) {
409 src->pending = 1;
410 openpic_update_irq(opp, n_IRQ);
413 if (src->output != OPENPIC_OUTPUT_INT) {
415 * Edge-triggered interrupts shouldn't be used
416 * with non-INT delivery, but just in case,
417 * try to make it do something sane rather than
418 * cause an interrupt storm. This is close to
419 * what you'd probably see happen in real hardware.
421 src->pending = 0;
422 openpic_update_irq(opp, n_IRQ);
427 static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
429 return opp->src[n_IRQ].idr;
432 static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
434 if (opp->flags & OPENPIC_FLAG_ILR) {
435 return output_to_inttgt(opp->src[n_IRQ].output);
438 return 0xffffffff;
441 static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
443 return opp->src[n_IRQ].ivpr;
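/*
 * Write the Interrupt Destination Register.  On models with critical
 * interrupt support the CI bits select the critical output and override
 * the normal destination mask.
 */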
446 static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
448 IRQSource *src = &opp->src[n_IRQ];
449 uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
450 uint32_t crit_mask = 0;
451 uint32_t mask = normal_mask;
452 int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
453 int i;
455 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
456 crit_mask = mask << crit_shift;
457 mask |= crit_mask | IDR_EP;
460 src->idr = val & mask;
461 DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);
463 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
464 if (src->idr & crit_mask) {
465 if (src->idr & normal_mask) {
466 DPRINTF("%s: IRQ configured for multiple output types, using "
467 "critical", __func__);
470 src->output = OPENPIC_OUTPUT_CINT;
471 src->nomask = true;
472 src->destmask = 0;
474 for (i = 0; i < opp->nb_cpus; i++) {
475 int n_ci = IDR_CI0_SHIFT - i;
477 if (src->idr & (1UL << n_ci)) {
478 src->destmask |= 1UL << i;
481 } else {
482 src->output = OPENPIC_OUTPUT_INT;
483 src->nomask = false;
484 src->destmask = src->idr & normal_mask;
486 } else {
487 src->destmask = src->idr;
491 static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
493 if (opp->flags & OPENPIC_FLAG_ILR) {
494 IRQSource *src = &opp->src[n_IRQ];
496 src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
497 DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,
498 src->output);
500 /* TODO: on MPIC v4.0 only, set nomask for non-INT */
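/*
 * Write the vector/priority register: the ACTIVITY bit is preserved, and
 * sense/polarity handling depends on the source type (normal, FSL internal,
 * timer/IPI) before delivery is re-evaluated.
 */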
504 static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
506 uint32_t mask;
509 * NOTE when implementing newer FSL MPIC models: starting with v4.0,
510 * the polarity bit is read-only on internal interrupts.
512 mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
513 IVPR_POLARITY_MASK | opp->vector_mask;
515 /* ACTIVITY bit is read-only */
516 opp->src[n_IRQ].ivpr =
517 (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);
520 * For FSL internal interrupts, the sense bit is reserved and zero,
521 * and the interrupt is always level-triggered. Timers and IPIs
522 * have no sense or polarity bits, and are edge-triggered.
524 switch (opp->src[n_IRQ].type) {
525 case IRQ_TYPE_NORMAL:
526 opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
527 break;
529 case IRQ_TYPE_FSLINT:
530 opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
531 break;
533 case IRQ_TYPE_FSLSPECIAL:
534 opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
535 break;
538 openpic_update_irq(opp, n_IRQ);
539 DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
540 opp->src[n_IRQ].ivpr);
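/*
 * Global Configuration Register write: the RESET bit resets the whole
 * controller, otherwise update the operating mode and tell the e500 core
 * whether external proxy mode is in effect.
 */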
543 static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
545 bool mpic_proxy = false;
547 if (val & GCR_RESET) {
548 openpic_reset(DEVICE(opp));
549 return;
552 opp->gcr &= ~opp->mpic_mode_mask;
553 opp->gcr |= val & opp->mpic_mode_mask;
555 /* Set external proxy mode */
556 if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {
557 mpic_proxy = true;
560 ppce500_set_mpic_proxy(mpic_proxy);
563 static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
564 unsigned len)
566 OpenPICState *opp = opaque;
567 IRQDest *dst;
568 int idx;
570 DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
571 __func__, addr, val);
572 if (addr & 0xF) {
573 return;
575 switch (addr) {
576 case 0x00: /* Block Revision Register 1 (BRR1) is read-only */
577 break;
578 case 0x40:
579 case 0x50:
580 case 0x60:
581 case 0x70:
582 case 0x80:
583 case 0x90:
584 case 0xA0:
585 case 0xB0:
586 openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
587 break;
588 case 0x1000: /* FRR */
589 break;
590 case 0x1020: /* GCR */
591 openpic_gcr_write(opp, val);
592 break;
593 case 0x1080: /* VIR */
594 break;
595 case 0x1090: /* PIR */
596 for (idx = 0; idx < opp->nb_cpus; idx++) {
597 if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
598 DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
599 dst = &opp->dst[idx];
600 qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
601 } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
602 DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
603 dst = &opp->dst[idx];
604 qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
607 opp->pir = val;
608 break;
609 case 0x10A0: /* IPI_IVPR */
610 case 0x10B0:
611 case 0x10C0:
612 case 0x10D0:
614 int idx;
615 idx = (addr - 0x10A0) >> 4;
616 write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
618 break;
619 case 0x10E0: /* SPVE */
620 opp->spve = val & opp->vector_mask;
621 break;
622 default:
623 break;
627 static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
629 OpenPICState *opp = opaque;
630 uint32_t retval;
632 DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
633 retval = 0xFFFFFFFF;
634 if (addr & 0xF) {
635 return retval;
637 switch (addr) {
638 case 0x1000: /* FRR */
639 retval = opp->frr;
640 break;
641 case 0x1020: /* GCR */
642 retval = opp->gcr;
643 break;
644 case 0x1080: /* VIR */
645 retval = opp->vir;
646 break;
647 case 0x1090: /* PIR */
648 retval = 0x00000000;
649 break;
650 case 0x00: /* Block Revision Register1 (BRR1) */
651 retval = opp->brr1;
652 break;
653 case 0x40:
654 case 0x50:
655 case 0x60:
656 case 0x70:
657 case 0x80:
658 case 0x90:
659 case 0xA0:
660 case 0xB0:
661 retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
662 break;
663 case 0x10A0: /* IPI_IVPR */
664 case 0x10B0:
665 case 0x10C0:
666 case 0x10D0:
668 int idx;
669 idx = (addr - 0x10A0) >> 4;
670 retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
672 break;
673 case 0x10E0: /* SPVE */
674 retval = opp->spve;
675 break;
676 default:
677 break;
679 DPRINTF("%s: => 0x%08x", __func__, retval);
681 return retval;
684 static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);
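/*
 * Timer expiry callback: reload the current count from the base count,
 * flip the toggle bit, rearm the timer and pulse the timer interrupt.
 */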
686 static void qemu_timer_cb(void *opaque)
688 OpenPICTimer *tmr = opaque;
689 OpenPICState *opp = tmr->opp;
690 uint32_t n_IRQ = tmr->n_IRQ;
691 uint32_t val = tmr->tbcr & ~TBCR_CI;
692 uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */
694 DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
695 /* Reload current count from base count and setup timer. */
696 tmr->tccr = val | tog;
697 openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
698 /* Raise the interrupt. */
699 opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
700 openpic_set_irq(opp, n_IRQ, 1);
701 openpic_set_irq(opp, n_IRQ, 0);
705 * If enabled is true, arranges for an interrupt to be raised val clocks into
706 * the future; if enabled is false, cancels the timer.
708 static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
710 uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
712 * A count of zero causes a timer to be set to expire immediately. This
713 * effectively stops the simulation since the timer is constantly expiring,
714 * which prevents guest code execution, so we don't honor that
715 * configuration. On real hardware, this situation would generate an
716 * interrupt on every clock cycle if the interrupt was unmasked.
718 if ((ns == 0) || !enabled) {
719 tmr->qemu_timer_active = false;
720 tmr->tccr = tmr->tccr & TCCR_TOG;
721 timer_del(tmr->qemu_timer); /* set timer to never expire. */
722 } else {
723 tmr->qemu_timer_active = true;
724 uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
725 tmr->origin_time = now;
726 timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
731 * Returns the current tccr value, i.e., timer value (in clocks) with
732 * appropriate TOG.
734 static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
736 uint64_t retval;
737 if (!tmr->qemu_timer_active) {
738 retval = tmr->tccr;
739 } else {
740 uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
741 uint64_t used = now - tmr->origin_time; /* nsecs */
742 uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
743 uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
744 retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
746 return retval;
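/*
 * Timer register block write: TFRR lives at offset 0; the per-timer TCCR,
 * TBCR, TVPR and TDR registers follow, with TBCR writes starting or
 * inhibiting the underlying QEMU timer.
 */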
749 static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
750 unsigned len)
752 OpenPICState *opp = opaque;
753 int idx;
755 DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
756 __func__, (addr + 0x10f0), val);
757 if (addr & 0xF) {
758 return;
761 if (addr == 0) {
762 /* TFRR */
763 opp->tfrr = val;
764 return;
766 addr -= 0x10; /* correct for TFRR */
767 idx = (addr >> 6) & 0x3;
769 switch (addr & 0x30) {
770 case 0x00: /* TCCR */
771 break;
772 case 0x10: /* TBCR */
773 /* Did the enable status change? */
774 if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
775 /* Did "Count Inhibit" transition from 1 to 0? */
776 if ((val & TBCR_CI) == 0) {
777 opp->timers[idx].tccr = val & ~TCCR_TOG;
779 openpic_tmr_set_tmr(&opp->timers[idx],
780 (val & ~TBCR_CI),
781 /*enabled=*/((val & TBCR_CI) == 0));
783 opp->timers[idx].tbcr = val;
784 break;
785 case 0x20: /* TVPR */
786 write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
787 break;
788 case 0x30: /* TDR */
789 write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
790 break;
794 static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
796 OpenPICState *opp = opaque;
797 uint32_t retval = -1;
798 int idx;
800 DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
801 if (addr & 0xF) {
802 goto out;
804 if (addr == 0) {
805 /* TFRR */
806 retval = opp->tfrr;
807 goto out;
809 addr -= 0x10; /* correct for TFRR */
810 idx = (addr >> 6) & 0x3;
811 switch (addr & 0x30) {
812 case 0x00: /* TCCR */
813 retval = openpic_tmr_get_timer(&opp->timers[idx]);
814 break;
815 case 0x10: /* TBCR */
816 retval = opp->timers[idx].tbcr;
817 break;
818 case 0x20: /* TVPR */
819 retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
820 break;
821 case 0x30: /* TDR */
822 retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
823 break;
826 out:
827 DPRINTF("%s: => 0x%08x", __func__, retval);
829 return retval;
832 static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
833 unsigned len)
835 OpenPICState *opp = opaque;
836 int idx;
838 DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
839 __func__, addr, val);
841 addr = addr & 0xffff;
842 idx = addr >> 5;
844 switch (addr & 0x1f) {
845 case 0x00:
846 write_IRQreg_ivpr(opp, idx, val);
847 break;
848 case 0x10:
849 write_IRQreg_idr(opp, idx, val);
850 break;
851 case 0x18:
852 write_IRQreg_ilr(opp, idx, val);
853 break;
857 static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
859 OpenPICState *opp = opaque;
860 uint32_t retval;
861 int idx;
863 DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
864 retval = 0xFFFFFFFF;
866 addr = addr & 0xffff;
867 idx = addr >> 5;
869 switch (addr & 0x1f) {
870 case 0x00:
871 retval = read_IRQreg_ivpr(opp, idx);
872 break;
873 case 0x10:
874 retval = read_IRQreg_idr(opp, idx);
875 break;
876 case 0x18:
877 retval = read_IRQreg_ilr(opp, idx);
878 break;
881 DPRINTF("%s: => 0x%08x", __func__, retval);
882 return retval;
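/*
 * MSI block write: a write to MSIIR sets the IBS bit in the selected MSIR
 * and raises the corresponding shared-MSI interrupt; the remaining registers
 * are read-only and ignored.
 */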
885 static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
886 unsigned size)
888 OpenPICState *opp = opaque;
889 int idx = opp->irq_msi;
890 int srs, ibs;
892 DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
893 __func__, addr, val);
894 if (addr & 0xF) {
895 return;
898 switch (addr) {
899 case MSIIR_OFFSET:
900 srs = val >> MSIIR_SRS_SHIFT;
901 idx += srs;
902 ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
903 opp->msi[srs].msir |= 1 << ibs;
904 openpic_set_irq(opp, idx, 1);
905 break;
906 default:
907 /* most registers are read-only, thus ignored */
908 break;
912 static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
914 OpenPICState *opp = opaque;
915 uint64_t r = 0;
916 int i, srs;
918 DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
919 if (addr & 0xF) {
920 return -1;
923 srs = addr >> 4;
925 switch (addr) {
926 case 0x00:
927 case 0x10:
928 case 0x20:
929 case 0x30:
930 case 0x40:
931 case 0x50:
932 case 0x60:
933 case 0x70: /* MSIRs */
934 r = opp->msi[srs].msir;
935 /* Clear on read */
936 opp->msi[srs].msir = 0;
937 openpic_set_irq(opp, opp->irq_msi + srs, 0);
938 break;
939 case 0x120: /* MSISR */
940 for (i = 0; i < MAX_MSI; i++) {
941 r |= (opp->msi[i].msir ? 1 : 0) << i;
943 break;
946 return r;
949 static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
951 uint64_t r = 0;
953 DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
955 /* TODO: EISR/EIMR */
957 return r;
960 static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
961 unsigned size)
963 DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
964 __func__, addr, val);
966 /* TODO: EISR/EIMR */
969 static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
970 uint32_t val, int idx)
972 OpenPICState *opp = opaque;
973 IRQSource *src;
974 IRQDest *dst;
975 int s_IRQ, n_IRQ;
977 DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
978 addr, val);
980 if (idx < 0 || idx >= opp->nb_cpus) {
981 return;
984 if (addr & 0xF) {
985 return;
987 dst = &opp->dst[idx];
988 addr &= 0xFF0;
989 switch (addr) {
990 case 0x40: /* IPIDR */
991 case 0x50:
992 case 0x60:
993 case 0x70:
994 idx = (addr - 0x40) >> 4;
995 /* we still use IDE as a mask of which CPUs to deliver the IPI to. */
996 opp->src[opp->irq_ipi0 + idx].destmask |= val;
997 openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
998 openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
999 break;
1000 case 0x80: /* CTPR */
1001 dst->ctpr = val & 0x0000000F;
1003 DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
1004 __func__, idx, dst->ctpr, dst->raised.priority,
1005 dst->servicing.priority);
1007 if (dst->raised.priority <= dst->ctpr) {
1008 DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
1009 __func__, idx);
1010 qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
1011 } else if (dst->raised.priority > dst->servicing.priority) {
1012 DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
1013 __func__, idx, dst->raised.next);
1014 qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
1017 break;
1018 case 0x90: /* WHOAMI */
1019 /* Read-only register */
1020 break;
1021 case 0xA0: /* IACK */
1022 /* Read-only register */
1023 break;
1024 case 0xB0: /* EOI */
1025 DPRINTF("EOI");
1026 s_IRQ = IRQ_get_next(opp, &dst->servicing);
1028 if (s_IRQ < 0) {
1029 DPRINTF("%s: EOI with no interrupt in service", __func__);
1030 break;
1033 IRQ_resetbit(&dst->servicing, s_IRQ);
1034 /* Set up next servicing IRQ */
1035 s_IRQ = IRQ_get_next(opp, &dst->servicing);
1036 /* Check queued interrupts. */
1037 n_IRQ = IRQ_get_next(opp, &dst->raised);
1038 src = &opp->src[n_IRQ];
1039 if (n_IRQ != -1 &&
1040 (s_IRQ == -1 ||
1041 IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
1042 DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
1043 idx, n_IRQ);
1044 qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
1046 break;
1047 default:
1048 break;
1052 static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
1053 unsigned len)
1055 openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
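/*
 * Interrupt acknowledge: return the vector of the highest-priority raised
 * IRQ and move it into the servicing queue, clearing activity for
 * edge-sensitive sources and handling IPI/timer multicast delivery.
 */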
1059 static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
1061 IRQSource *src;
1062 int retval, irq;
1064 DPRINTF("Lower OpenPIC INT output");
1065 qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
1067 irq = IRQ_get_next(opp, &dst->raised);
1068 DPRINTF("IACK: irq=%d", irq);
1070 if (irq == -1) {
1071 /* No more interrupt pending */
1072 return opp->spve;
1075 src = &opp->src[irq];
1076 if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
1077 !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
1078 error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
1079 __func__, irq, dst->ctpr, src->ivpr);
1080 openpic_update_irq(opp, irq);
1081 retval = opp->spve;
1082 } else {
1083 /* IRQ enters servicing state */
1084 IRQ_setbit(&dst->servicing, irq);
1085 retval = IVPR_VECTOR(opp, src->ivpr);
1088 if (!src->level) {
1089 /* edge-sensitive IRQ */
1090 src->ivpr &= ~IVPR_ACTIVITY_MASK;
1091 src->pending = 0;
1092 IRQ_resetbit(&dst->raised, irq);
1095 /* Timers and IPIs support multicast. */
1096 if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
1097 ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
1098 DPRINTF("irq is IPI or TMR");
1099 src->destmask &= ~(1 << cpu);
1100 if (src->destmask && !src->level) {
1101 /* trigger on CPUs that didn't know about it yet */
1102 openpic_set_irq(opp, irq, 1);
1103 openpic_set_irq(opp, irq, 0);
1104 /* if all CPUs knew about it, set active bit again */
1105 src->ivpr |= IVPR_ACTIVITY_MASK;
1109 return retval;
1112 static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
1113 int idx)
1115 OpenPICState *opp = opaque;
1116 IRQDest *dst;
1117 uint32_t retval;
1119 DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
1120 retval = 0xFFFFFFFF;
1122 if (idx < 0 || idx >= opp->nb_cpus) {
1123 return retval;
1126 if (addr & 0xF) {
1127 return retval;
1129 dst = &opp->dst[idx];
1130 addr &= 0xFF0;
1131 switch (addr) {
1132 case 0x80: /* CTPR */
1133 retval = dst->ctpr;
1134 break;
1135 case 0x90: /* WHOAMI */
1136 retval = idx;
1137 break;
1138 case 0xA0: /* IACK */
1139 retval = openpic_iack(opp, dst, idx);
1140 break;
1141 case 0xB0: /* EOI */
1142 retval = 0;
1143 break;
1144 default:
1145 break;
1147 DPRINTF("%s: => 0x%08x", __func__, retval);
1149 return retval;
1152 static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
1154 return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
1157 static const MemoryRegionOps openpic_glb_ops_le = {
1158 .write = openpic_gbl_write,
1159 .read = openpic_gbl_read,
1160 .endianness = DEVICE_LITTLE_ENDIAN,
1161 .impl = {
1162 .min_access_size = 4,
1163 .max_access_size = 4,
1167 static const MemoryRegionOps openpic_glb_ops_be = {
1168 .write = openpic_gbl_write,
1169 .read = openpic_gbl_read,
1170 .endianness = DEVICE_BIG_ENDIAN,
1171 .impl = {
1172 .min_access_size = 4,
1173 .max_access_size = 4,
1177 static const MemoryRegionOps openpic_tmr_ops_le = {
1178 .write = openpic_tmr_write,
1179 .read = openpic_tmr_read,
1180 .endianness = DEVICE_LITTLE_ENDIAN,
1181 .impl = {
1182 .min_access_size = 4,
1183 .max_access_size = 4,
1187 static const MemoryRegionOps openpic_tmr_ops_be = {
1188 .write = openpic_tmr_write,
1189 .read = openpic_tmr_read,
1190 .endianness = DEVICE_BIG_ENDIAN,
1191 .impl = {
1192 .min_access_size = 4,
1193 .max_access_size = 4,
1197 static const MemoryRegionOps openpic_cpu_ops_le = {
1198 .write = openpic_cpu_write,
1199 .read = openpic_cpu_read,
1200 .endianness = DEVICE_LITTLE_ENDIAN,
1201 .impl = {
1202 .min_access_size = 4,
1203 .max_access_size = 4,
1207 static const MemoryRegionOps openpic_cpu_ops_be = {
1208 .write = openpic_cpu_write,
1209 .read = openpic_cpu_read,
1210 .endianness = DEVICE_BIG_ENDIAN,
1211 .impl = {
1212 .min_access_size = 4,
1213 .max_access_size = 4,
1217 static const MemoryRegionOps openpic_src_ops_le = {
1218 .write = openpic_src_write,
1219 .read = openpic_src_read,
1220 .endianness = DEVICE_LITTLE_ENDIAN,
1221 .impl = {
1222 .min_access_size = 4,
1223 .max_access_size = 4,
1227 static const MemoryRegionOps openpic_src_ops_be = {
1228 .write = openpic_src_write,
1229 .read = openpic_src_read,
1230 .endianness = DEVICE_BIG_ENDIAN,
1231 .impl = {
1232 .min_access_size = 4,
1233 .max_access_size = 4,
1237 static const MemoryRegionOps openpic_msi_ops_be = {
1238 .read = openpic_msi_read,
1239 .write = openpic_msi_write,
1240 .endianness = DEVICE_BIG_ENDIAN,
1241 .impl = {
1242 .min_access_size = 4,
1243 .max_access_size = 4,
1247 static const MemoryRegionOps openpic_summary_ops_be = {
1248 .read = openpic_summary_read,
1249 .write = openpic_summary_write,
1250 .endianness = DEVICE_BIG_ENDIAN,
1251 .impl = {
1252 .min_access_size = 4,
1253 .max_access_size = 4,
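/*
 * Device reset: reprogram the global registers to their defaults, reset
 * every IRQ source and destination, stop the timers, then leave the RESET
 * state.
 */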
1257 static void openpic_reset(DeviceState *d)
1259 OpenPICState *opp = OPENPIC(d);
1260 int i;
1262 opp->gcr = GCR_RESET;
1263 /* Initialise controller registers */
1264 opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
1265 ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
1266 (opp->vid << FRR_VID_SHIFT);
1268 opp->pir = 0;
1269 opp->spve = -1 & opp->vector_mask;
1270 opp->tfrr = opp->tfrr_reset;
1271 /* Initialise IRQ sources */
1272 for (i = 0; i < opp->max_irq; i++) {
1273 opp->src[i].ivpr = opp->ivpr_reset;
1274 switch (opp->src[i].type) {
1275 case IRQ_TYPE_NORMAL:
1276 opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
1277 break;
1279 case IRQ_TYPE_FSLINT:
1280 opp->src[i].ivpr |= IVPR_POLARITY_MASK;
1281 break;
1283 case IRQ_TYPE_FSLSPECIAL:
1284 break;
1287 /* Mask all IPI interrupts for Freescale OpenPIC */
1288 if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) ||
1289 (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) {
1290 if (i >= opp->irq_ipi0 && i < opp->irq_tim0) {
1291 write_IRQreg_idr(opp, i, 0);
1292 continue;
1296 write_IRQreg_idr(opp, i, opp->idr_reset);
1298 /* Initialise IRQ destinations */
1299 for (i = 0; i < opp->nb_cpus; i++) {
1300 opp->dst[i].ctpr = 15;
1301 opp->dst[i].raised.next = -1;
1302 opp->dst[i].raised.priority = 0;
1303 bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
1304 opp->dst[i].servicing.next = -1;
1305 opp->dst[i].servicing.priority = 0;
1306 bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
1308 /* Initialise timers */
1309 for (i = 0; i < OPENPIC_MAX_TMR; i++) {
1310 opp->timers[i].tccr = 0;
1311 opp->timers[i].tbcr = TBCR_CI;
1312 if (opp->timers[i].qemu_timer_active) {
1313 timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */
1314 opp->timers[i].qemu_timer_active = false;
1317 /* Go out of RESET state */
1318 opp->gcr = 0;
1321 typedef struct MemReg {
1322 const char *name;
1323 MemoryRegionOps const *ops;
1324 hwaddr start_addr;
1325 ram_addr_t size;
1326 } MemReg;
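/*
 * Shared setup for the Freescale MPIC models: IPI and timer sources are
 * allocated above the hardware sources, MSI support is enabled and a QEMU
 * timer is created for each OpenPIC timer.
 */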
1328 static void fsl_common_init(OpenPICState *opp)
1330 int i;
1331 int virq = OPENPIC_MAX_SRC;
1333 opp->vid = VID_REVISION_1_2;
1334 opp->vir = VIR_GENERIC;
1335 opp->vector_mask = 0xFFFF;
1336 opp->tfrr_reset = 0;
1337 opp->ivpr_reset = IVPR_MASK_MASK;
1338 opp->idr_reset = 1 << 0;
1339 opp->max_irq = OPENPIC_MAX_IRQ;
1341 opp->irq_ipi0 = virq;
1342 virq += OPENPIC_MAX_IPI;
1343 opp->irq_tim0 = virq;
1344 virq += OPENPIC_MAX_TMR;
1346 assert(virq <= OPENPIC_MAX_IRQ);
1348 opp->irq_msi = 224;
1350 msi_nonbroken = true;
1351 for (i = 0; i < opp->fsl->max_ext; i++) {
1352 opp->src[i].level = false;
1355 /* Internal interrupts, including message and MSI */
1356 for (i = 16; i < OPENPIC_MAX_SRC; i++) {
1357 opp->src[i].type = IRQ_TYPE_FSLINT;
1358 opp->src[i].level = true;
1361 /* timers and IPIs */
1362 for (i = OPENPIC_MAX_SRC; i < virq; i++) {
1363 opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
1364 opp->src[i].level = false;
1367 for (i = 0; i < OPENPIC_MAX_TMR; i++) {
1368 opp->timers[i].n_IRQ = opp->irq_tim0 + i;
1369 opp->timers[i].qemu_timer_active = false;
1370 opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1371 &qemu_timer_cb,
1372 &opp->timers[i]);
1373 opp->timers[i].opp = opp;
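/*
 * Map each entry of a NULL-terminated MemReg list as an MMIO subregion of
 * the controller's container region.
 */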
1377 static void map_list(OpenPICState *opp, const MemReg *list, int *count)
1379 while (list->name) {
1380 assert(*count < ARRAY_SIZE(opp->sub_io_mem));
1382 memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
1383 opp, list->name, list->size);
1385 memory_region_add_subregion(&opp->mem, list->start_addr,
1386 &opp->sub_io_mem[*count]);
1388 (*count)++;
1389 list++;
1393 static const VMStateDescription vmstate_openpic_irq_queue = {
1394 .name = "openpic_irq_queue",
1395 .version_id = 0,
1396 .minimum_version_id = 0,
1397 .fields = (VMStateField[]) {
1398 VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
1399 VMSTATE_INT32(next, IRQQueue),
1400 VMSTATE_INT32(priority, IRQQueue),
1401 VMSTATE_END_OF_LIST()
1405 static const VMStateDescription vmstate_openpic_irqdest = {
1406 .name = "openpic_irqdest",
1407 .version_id = 0,
1408 .minimum_version_id = 0,
1409 .fields = (VMStateField[]) {
1410 VMSTATE_INT32(ctpr, IRQDest),
1411 VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
1412 IRQQueue),
1413 VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
1414 IRQQueue),
1415 VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
1416 VMSTATE_END_OF_LIST()
1420 static const VMStateDescription vmstate_openpic_irqsource = {
1421 .name = "openpic_irqsource",
1422 .version_id = 0,
1423 .minimum_version_id = 0,
1424 .fields = (VMStateField[]) {
1425 VMSTATE_UINT32(ivpr, IRQSource),
1426 VMSTATE_UINT32(idr, IRQSource),
1427 VMSTATE_UINT32(destmask, IRQSource),
1428 VMSTATE_INT32(last_cpu, IRQSource),
1429 VMSTATE_INT32(pending, IRQSource),
1430 VMSTATE_END_OF_LIST()
1434 static const VMStateDescription vmstate_openpic_timer = {
1435 .name = "openpic_timer",
1436 .version_id = 0,
1437 .minimum_version_id = 0,
1438 .fields = (VMStateField[]) {
1439 VMSTATE_UINT32(tccr, OpenPICTimer),
1440 VMSTATE_UINT32(tbcr, OpenPICTimer),
1441 VMSTATE_END_OF_LIST()
1445 static const VMStateDescription vmstate_openpic_msi = {
1446 .name = "openpic_msi",
1447 .version_id = 0,
1448 .minimum_version_id = 0,
1449 .fields = (VMStateField[]) {
1450 VMSTATE_UINT32(msir, OpenPICMSI),
1451 VMSTATE_END_OF_LIST()
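/*
 * After migration, replay the IDR and IVPR writes so that derived fields
 * (destmask, output, level, ...) are recomputed from the loaded values.
 */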
1455 static int openpic_post_load(void *opaque, int version_id)
1457 OpenPICState *opp = (OpenPICState *)opaque;
1458 int i;
1460 /* Update internal ivpr and idr variables */
1461 for (i = 0; i < opp->max_irq; i++) {
1462 write_IRQreg_idr(opp, i, opp->src[i].idr);
1463 write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);
1466 return 0;
1469 static const VMStateDescription vmstate_openpic = {
1470 .name = "openpic",
1471 .version_id = 3,
1472 .minimum_version_id = 3,
1473 .post_load = openpic_post_load,
1474 .fields = (VMStateField[]) {
1475 VMSTATE_UINT32(gcr, OpenPICState),
1476 VMSTATE_UINT32(vir, OpenPICState),
1477 VMSTATE_UINT32(pir, OpenPICState),
1478 VMSTATE_UINT32(spve, OpenPICState),
1479 VMSTATE_UINT32(tfrr, OpenPICState),
1480 VMSTATE_UINT32(max_irq, OpenPICState),
1481 VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
1482 vmstate_openpic_irqsource, IRQSource),
1483 VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
1484 VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
1485 vmstate_openpic_irqdest, IRQDest),
1486 VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
1487 vmstate_openpic_timer, OpenPICTimer),
1488 VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
1489 vmstate_openpic_msi, OpenPICMSI),
1490 VMSTATE_UINT32(irq_ipi0, OpenPICState),
1491 VMSTATE_UINT32(irq_tim0, OpenPICState),
1492 VMSTATE_UINT32(irq_msi, OpenPICState),
1493 VMSTATE_END_OF_LIST()
1497 static void openpic_init(Object *obj)
1499 OpenPICState *opp = OPENPIC(obj);
1501 memory_region_init(&opp->mem, obj, "openpic", 0x40000);
1504 static void openpic_realize(DeviceState *dev, Error **errp)
1506 SysBusDevice *d = SYS_BUS_DEVICE(dev);
1507 OpenPICState *opp = OPENPIC(dev);
1508 int i, j;
1509 int list_count = 0;
1510 static const MemReg list_le[] = {
1511 {"glb", &openpic_glb_ops_le,
1512 OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
1513 {"tmr", &openpic_tmr_ops_le,
1514 OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
1515 {"src", &openpic_src_ops_le,
1516 OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
1517 {"cpu", &openpic_cpu_ops_le,
1518 OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
1519 {NULL}
1521 static const MemReg list_be[] = {
1522 {"glb", &openpic_glb_ops_be,
1523 OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
1524 {"tmr", &openpic_tmr_ops_be,
1525 OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
1526 {"src", &openpic_src_ops_be,
1527 OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
1528 {"cpu", &openpic_cpu_ops_be,
1529 OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
1530 {NULL}
1532 static const MemReg list_fsl[] = {
1533 {"msi", &openpic_msi_ops_be,
1534 OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
1535 {"summary", &openpic_summary_ops_be,
1536 OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
1537 {NULL}
1540 if (opp->nb_cpus > MAX_CPU) {
1541 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
1542 TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
1543 (uint64_t)0, (uint64_t)MAX_CPU);
1544 return;
1547 switch (opp->model) {
1548 case OPENPIC_MODEL_FSL_MPIC_20:
1549 default:
1550 opp->fsl = &fsl_mpic_20;
1551 opp->brr1 = 0x00400200;
1552 opp->flags |= OPENPIC_FLAG_IDR_CRIT;
1553 opp->nb_irqs = 80;
1554 opp->mpic_mode_mask = GCR_MODE_MIXED;
1556 fsl_common_init(opp);
1557 map_list(opp, list_be, &list_count);
1558 map_list(opp, list_fsl, &list_count);
1560 break;
1562 case OPENPIC_MODEL_FSL_MPIC_42:
1563 opp->fsl = &fsl_mpic_42;
1564 opp->brr1 = 0x00400402;
1565 opp->flags |= OPENPIC_FLAG_ILR;
1566 opp->nb_irqs = 196;
1567 opp->mpic_mode_mask = GCR_MODE_PROXY;
1569 fsl_common_init(opp);
1570 map_list(opp, list_be, &list_count);
1571 map_list(opp, list_fsl, &list_count);
1573 break;
1575 case OPENPIC_MODEL_KEYLARGO:
1576 opp->nb_irqs = KEYLARGO_MAX_EXT;
1577 opp->vid = VID_REVISION_1_2;
1578 opp->vir = VIR_GENERIC;
1579 opp->vector_mask = 0xFF;
1580 opp->tfrr_reset = 4160000;
1581 opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
1582 opp->idr_reset = 0;
1583 opp->max_irq = KEYLARGO_MAX_IRQ;
1584 opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
1585 opp->irq_tim0 = KEYLARGO_TMR_IRQ;
1586 opp->brr1 = -1;
1587 opp->mpic_mode_mask = GCR_MODE_MIXED;
1589 if (opp->nb_cpus != 1) {
1590 error_setg(errp, "Only UP supported today");
1591 return;
1594 map_list(opp, list_le, &list_count);
1595 break;
1598 for (i = 0; i < opp->nb_cpus; i++) {
1599 opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
1600 for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
1601 sysbus_init_irq(d, &opp->dst[i].irqs[j]);
1604 opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
1605 opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
1606 opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
1607 opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
1610 sysbus_init_mmio(d, &opp->mem);
1611 qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
1614 static Property openpic_properties[] = {
1615 DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
1616 DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
1617 DEFINE_PROP_END_OF_LIST(),
1620 static void openpic_class_init(ObjectClass *oc, void *data)
1622 DeviceClass *dc = DEVICE_CLASS(oc);
1624 dc->realize = openpic_realize;
1625 device_class_set_props(dc, openpic_properties);
1626 dc->reset = openpic_reset;
1627 dc->vmsd = &vmstate_openpic;
1628 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
1631 static const TypeInfo openpic_info = {
1632 .name = TYPE_OPENPIC,
1633 .parent = TYPE_SYS_BUS_DEVICE,
1634 .instance_size = sizeof(OpenPICState),
1635 .instance_init = openpic_init,
1636 .class_init = openpic_class_init,
1639 static void openpic_register_types(void)
1641 type_register_static(&openpic_info);
1644 type_init(openpic_register_types)