/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */
#include "hw/sysbus.h"
#include "gic_internal.h"

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

#define NUM_CPU(s) ((s)->num_cpu)
static inline int gic_get_current_cpu(GICState *s)
{
    return current_cpu->cpu_index;
}
/* Return true if this GIC config has interrupt groups, which is
 * true if we're a GICv2, or a GICv1 with the security extensions.
 */
static inline bool gic_has_groups(GICState *s)
{
    return s->revision == 2 || s->security_extn;
}
/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed. */
void gic_update(GICState *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int irq_level, fiq_level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        cm = 1 << cpu;
        s->current_pending[cpu] = 1023;
        if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
            || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
            qemu_irq_lower(s->parent_irq[cpu]);
            qemu_irq_lower(s->parent_fiq[cpu]);
            continue;
        }
        best_prio = 0x100;
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
                (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }

        irq_level = fiq_level = 0;

        if (best_prio < s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                int group = GIC_TEST_GROUP(best_irq, cm);

                if (extract32(s->ctlr, group, 1) &&
                    extract32(s->cpu_ctlr[cpu], group, 1)) {
                    if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) {
                        DPRINTF("Raised pending FIQ %d (cpu %d)\n",
                                best_irq, cpu);
                        fiq_level = 1;
                    } else {
                        DPRINTF("Raised pending IRQ %d (cpu %d)\n",
                                best_irq, cpu);
                        irq_level = 1;
                    }
                }
            }
        }

        qemu_set_irq(s->parent_irq[cpu], irq_level);
        qemu_set_irq(s->parent_fiq[cpu], fiq_level);
    }
}
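
/* Illustrative worked example (hypothetical values): with
 * priority_mask[cpu] = 0xf0 and running_priority[cpu] = 0x100 (nothing
 * active), an enabled pending interrupt of priority 0xa0 wins the scan above
 * (0xa0 < 0xf0 and 0xa0 < 0x100), so current_pending[cpu] is set to it and,
 * provided the relevant group enable bits are set, either the IRQ line or
 * (for a Group0 interrupt with GICC_CTLR_FIQ_EN) the FIQ line is raised.
 * Lower numeric priority values are more urgent throughout.
 */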
void gic_set_pending_private(GICState *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (gic_test_pending(s, irq, cm)) {
        return;
    }

    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
    gic_update(s);
}
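
/* Note (illustrative, based on the build description at the top of this
 * file): gic_set_pending_private() is presumably the entry point used by
 * code outside this file, such as the ARMv7-M NVIC build, to pend a banked
 * per-CPU (SGI/PPI) interrupt for a single CPU; cm restricts the pending
 * bit to that one CPU's bank.
 */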
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
                                 int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}
static void gic_set_irq_generic(GICState *s, int irq, int level,
                                int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        DPRINTF("Set %d pending mask %x\n", irq, target);
        if (GIC_TEST_EDGE_TRIGGER(irq)) {
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}
/* Process a change in an external IRQ input. */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     */
    GICState *s = (GICState *)opaque;
    int cm, target;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    assert(irq >= GIC_NR_SGIS);

    if (level == GIC_TEST_LEVEL(irq, cm)) {
        return;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
    } else {
        gic_set_irq_generic(s, irq, level, cm, target);
    }

    gic_update(s);
}
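
/* Worked example (illustrative, assuming a GIC instantiated with
 * num_irq = 96, i.e. 64 external lines): qdev GPIO input 10 takes the first
 * branch and becomes internal interrupt 10 + 32 = 42 with target
 * GIC_TARGET(42), while input 64 + 29 = 93 takes the second branch and
 * becomes PPI 29 of CPU 0 (cm = target = 1 << 0).
 */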
static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
                                            MemTxAttrs attrs)
{
    uint16_t pending_irq = s->current_pending[cpu];

    if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
        int group = GIC_TEST_GROUP(pending_irq, (1 << cpu));
        /* On a GIC without the security extensions, reading this register
         * behaves in the same way as a secure access to a GIC with them.
         */
        bool secure = !s->security_extn || attrs.secure;

        if (group == 0 && !secure) {
            /* Group0 interrupts hidden from Non-secure access */
            return 1023;
        }
        if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) {
            /* Group1 interrupts only seen by Secure access if
             * AckCtl bit set.
             */
            return 1022;
        }
    }
    return pending_irq;
}
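
/* The two special values returned above are architecturally defined IDs:
 * 1023 is the "spurious interrupt" ID (nothing suitable to acknowledge),
 * and 1022 tells Secure software that the highest priority pending
 * interrupt is Group1 while GICC_CTLR.AckCtl is clear.
 */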
static void gic_set_running_irq(GICState *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    if (irq == 1023) {
        s->running_priority[cpu] = 0x100;
    } else {
        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
    }
    gic_update(s);
}
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
{
    int ret, irq, src;
    int cm = 1 << cpu;

    /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
     * for the case where this GIC supports grouping and the pending interrupt
     * is in the wrong group.
     */
    irq = gic_get_current_pending_irq(s, cpu, attrs);

    if (irq >= GIC_MAXIRQ) {
        DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
        return irq;
    }

    if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
        return 1023;
    }
    s->last_active[irq][cpu] = s->running_irq[cpu];

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Clear pending flags for both level and edge triggered interrupts.
         * Level triggered IRQs will be reasserted once they become inactive.
         */
        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
        ret = irq;
    } else {
        if (irq < GIC_NR_SGIS) {
            /* Lookup the source CPU for the SGI and clear this in the
             * sgi_pending map.  Return the src and clear the overall pending
             * state on this CPU if the SGI is not pending from any CPUs.
             */
            assert(s->sgi_pending[irq][cpu] != 0);
            src = ctz32(s->sgi_pending[irq][cpu]);
            s->sgi_pending[irq][cpu] &= ~(1 << src);
            if (s->sgi_pending[irq][cpu] == 0) {
                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            }
            ret = irq | ((src & 0x7) << 10);
        } else {
            /* Clear pending state for both level and edge triggered
             * interrupts. (level triggered interrupts with an active line
             * remain pending, see gic_test_pending)
             */
            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            ret = irq;
        }
    }

    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
    return ret;
}
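
/* Worked example (illustrative): if CPU 2 had sent SGI 3 to this CPU, the
 * acknowledge above returns 3 | (2 << 10) = 0x803, i.e. the source CPU is
 * reported in bits [12:10] of GICC_IAR as the architecture requires for
 * SGIs.
 */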
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
                      MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
            return; /* Ignore Non-secure access of Group0 IRQ */
        }
        val = 0x80 | (val >> 1); /* Non-secure view */
    }

    if (irq < GIC_INTERNAL) {
        s->priority1[irq][cpu] = val;
    } else {
        s->priority2[(irq) - GIC_INTERNAL] = val;
    }
}
static uint32_t gic_get_priority(GICState *s, int cpu, int irq,
                                 MemTxAttrs attrs)
{
    uint32_t prio = GIC_GET_PRIORITY(irq, cpu);

    if (s->security_extn && !attrs.secure) {
        if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
            return 0; /* Non-secure access cannot read priority of Group0 IRQ */
        }
        prio = (prio << 1) & 0xff; /* Non-secure view */
    }
    return prio;
}
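
/* Worked example (illustrative): a Group1 interrupt whose Secure-view
 * priority is 0xd0 reads back as (0xd0 << 1) & 0xff = 0xa0 on the Non-secure
 * side, and a Non-secure write of 0xa0 is stored by gic_set_priority() above
 * as 0x80 | (0xa0 >> 1) = 0xd0 again: the Non-secure world only ever sees
 * the lower half of the priority range.
 */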
static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask,
                                  MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (s->priority_mask[cpu] & 0x80) {
            /* Priority Mask in upper half */
            pmask = 0x80 | (pmask >> 1);
        } else {
            /* Non-secure write ignored if priority mask is in lower half */
            return;
        }
    }
    s->priority_mask[cpu] = pmask;
}
static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs)
{
    uint32_t pmask = s->priority_mask[cpu];

    if (s->security_extn && !attrs.secure) {
        if (pmask & 0x80) {
            /* Priority Mask in upper half, return Non-secure view */
            pmask = (pmask << 1) & 0xff;
        } else {
            /* Priority Mask in lower half, RAZ */
            pmask = 0;
        }
    }
    return pmask;
}
static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
{
    uint32_t ret = s->cpu_ctlr[cpu];

    if (s->security_extn && !attrs.secure) {
        /* Construct the NS banked view of GICC_CTLR from the correct
         * bits of the S banked view. We don't need to move the bypass
         * control bits because we don't implement that (IMPDEF) part
         * of the GIC architecture.
         */
        ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1;
    }
    return ret;
}
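
/* Illustrative note: the >> 1 works because the banked Non-secure GICC_CTLR
 * layout places its Enable and (on GICv2) EOImodeNS bits one position below
 * the Secure bank's EnableGrp1 and EOImodeNS bits, e.g. a Secure-bank value
 * of 0x02 (EnableGrp1) appears as 0x01 (Enable) in the Non-secure view.
 */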
static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
                                MemTxAttrs attrs)
{
    uint32_t mask;

    if (s->security_extn && !attrs.secure) {
        /* The NS view can only write certain bits in the register;
         * the rest are unchanged
         */
        mask = GICC_CTLR_EN_GRP1;
        if (s->revision == 2) {
            mask |= GICC_CTLR_EOIMODE_NS;
        }
        s->cpu_ctlr[cpu] &= ~mask;
        s->cpu_ctlr[cpu] |= (value << 1) & mask;
    } else {
        if (s->revision == 2) {
            mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK;
        } else {
            mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK;
        }
        s->cpu_ctlr[cpu] = value & mask;
    }
    DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, "
            "Group1 Interrupts %sabled\n", cpu,
            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis",
            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis");
}
static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
{
    if (s->security_extn && !attrs.secure) {
        if (s->running_priority[cpu] & 0x80) {
            /* Running priority in upper half of range: return the Non-secure
             * view of the priority.
             */
            return s->running_priority[cpu] << 1;
        } else {
            /* Running priority in lower half of range: RAZ */
            return 0;
        }
    } else {
        return s->running_priority[cpu];
    }
}
void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
    int update = 0;
    int cm = 1 << cpu;

    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
            update = 1;
        }
    }

    if (s->security_extn && !attrs.secure && !GIC_TEST_GROUP(irq, cm)) {
        DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
        return;
    }

    /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1
     * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1,
     * i.e. go ahead and complete the irq anyway.
     */

    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}
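
/* The last_active[][] entries form a per-CPU singly linked list of
 * previously acknowledged interrupts, terminated by 1023.  Completing an
 * interrupt that is not the currently running one just unlinks it from that
 * list (the while loop above); completing the running interrupt pops the
 * list head back into running_irq via gic_set_running_irq().
 */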
static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        if (offset == 0) { /* GICD_CTLR */
            if (s->security_extn && !attrs.secure) {
                /* The NS bank of this register is just an alias of the
                 * EnableGrp1 bit in the S bank version.
                 */
                return extract32(s->ctlr, 1, 1);
            }
            return s->ctlr;
        }
        if (offset == 4) {
            /* Interrupt Controller Type Register */
            return ((s->num_irq / 32) - 1)
                    | ((NUM_CPU(s) - 1) << 5)
                    | (s->security_extn << 10);
        }
        if (offset >= 0x80) {
            /* Interrupt Group Registers: these RAZ/WI if this is an NS
             * access to a GIC with the security extensions, or if the GIC
             * doesn't have groups at all.
             */
            res = 0;
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x080) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    if (GIC_TEST_GROUP(irq + i, cm)) {
                        res |= (1 << i);
                    }
                }
            }
            return res;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180) {
            irq = (offset - 0x100) * 8;
        } else {
            irq = (offset - 0x180) * 8;
        }
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280) {
            irq = (offset - 0x200) * 8;
        } else {
            irq = (offset - 0x280) * 8;
        }
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = gic_get_priority(s, cpu, irq, attrs);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xf10) {
        goto bad_reg;
    } else if (offset < 0xf30) {
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
        } else {
            irq = (offset - 0xf20);
            /* GICD_SPENDSGIRn */
        }
        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}
static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                                 unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        *data = gic_dist_readb(opaque, offset, attrs);
        return MEMTX_OK;
    case 2:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        return MEMTX_OK;
    case 4:
        *data = gic_dist_readb(opaque, offset, attrs);
        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
        *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
        *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
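
/* Distributor accesses of every size are decomposed into byte operations,
 * so gic_dist_readb() above and gic_dist_writeb() below contain all of the
 * register decode; the only exception is GICD_SGIR, which is handled in the
 * 32-bit path of gic_dist_write().
 */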
static void gic_dist_writeb(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            if (s->security_extn && !attrs.secure) {
                /* NS version is just an alias of the S version's bit 1 */
                s->ctlr = deposit32(s->ctlr, 1, 1, value);
            } else if (gic_has_groups(s)) {
                s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
            } else {
                s->ctlr = value & GICD_CTLR_EN_GRP0;
            }
            DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n",
                    s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
                    s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Group Registers: RAZ/WI for NS access to secure
             * GIC, or for GICs without groups.
             */
            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
                /* Every byte offset holds 8 group status bits */
                irq = (offset - 0x80) * 8 + GIC_BASE_IRQ;
                if (irq >= s->num_irq) {
                    goto bad_reg;
                }
                for (i = 0; i < 8; i++) {
                    /* Group bits are banked for private interrupts */
                    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
                    if (value & (1 << i)) {
                        /* Group1 (Non-secure) */
                        GIC_SET_GROUP(irq + i, cm);
                    } else {
                        /* Group0 (Secure) */
                        GIC_CLEAR_GROUP(irq + i, cm);
                    }
                }
            }
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0xff;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask =
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ enabled then mark
                   it as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behaviour.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        gic_set_priority(s, cpu, irq, value, attrs);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq < 29) {
                value = 0;
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            }
            s->irq_target[irq] = value & ALL_CPU_MASK;
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS)
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
                if (value & (1 << (i * 2))) {
                    GIC_SET_MODEL(irq + i);
                } else {
                    GIC_CLEAR_MODEL(irq + i);
                }
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
            }
        }
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf10);

        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            GIC_CLEAR_PENDING(irq, 1 << cpu);
        }
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf20);

        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    } else {
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
}
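
/* GICD_SGIR (offset 0xf00) is the one distributor register that is not
 * handled byte-by-byte; gic_dist_writel() below deals with it directly.
 * Worked example (illustrative): CPU 1 writing 0x00010003 requests SGI 3
 * with TargetListFilter 0 and CPUTargetList 0x01, so mask becomes 0x01,
 * SGI 3 is set pending on CPU 0 and sgi_pending[3][0] records (1 << 1) to
 * identify CPU 1 as the source.
 */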
static void gic_dist_writew(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    gic_dist_writeb(opaque, offset, value & 0xff, attrs);
    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
}
static void gic_dist_writel(void *opaque, hwaddr offset,
                            uint32_t value, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;
        int target_cpu;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
        }
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff, attrs);
    gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
}

static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
                                  unsigned size, MemTxAttrs attrs)
{
    switch (size) {
    case 1:
        gic_dist_writeb(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 2:
        gic_dist_writew(opaque, offset, data, attrs);
        return MEMTX_OK;
    case 4:
        gic_dist_writel(opaque, offset, data, attrs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static const MemoryRegionOps gic_dist_ops = {
    .read_with_attrs = gic_dist_read,
    .write_with_attrs = gic_dist_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
                                uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        *data = gic_get_cpu_control(s, cpu, attrs);
        break;
    case 0x04: /* Priority mask */
        *data = gic_get_priority_mask(s, cpu, attrs);
        break;
    case 0x08: /* Binary Point */
        if (s->security_extn && !attrs.secure) {
            /* BPR is banked. Non-secure copy stored in ABPR. */
            *data = s->abpr[cpu];
        } else {
            *data = s->bpr[cpu];
        }
        break;
    case 0x0c: /* Acknowledge */
        *data = gic_acknowledge_irq(s, cpu, attrs);
        break;
    case 0x14: /* Running Priority */
        *data = gic_get_running_priority(s, cpu, attrs);
        break;
    case 0x18: /* Highest Pending Interrupt */
        *data = gic_get_current_pending_irq(s, cpu, attrs);
        break;
    case 0x1c: /* Aliased Binary Point */
        /* GIC v2, no security: ABPR
         * GIC v1, no security: not implemented (RAZ/WI)
         * With security extensions, secure access: ABPR (alias of NS BPR)
         * With security extensions, nonsecure access: RAZ/WI
         */
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            *data = 0;
        } else {
            *data = s->abpr[cpu];
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        *data = s->apr[(offset - 0xd0) / 4][cpu];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_read: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    return MEMTX_OK;
}

static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
                                 uint32_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case 0x00: /* Control */
        gic_set_cpu_control(s, cpu, value, attrs);
        break;
    case 0x04: /* Priority mask */
        gic_set_priority_mask(s, cpu, value, attrs);
        break;
    case 0x08: /* Binary Point */
        if (s->security_extn && !attrs.secure) {
            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
        } else {
            s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR);
        }
        break;
    case 0x10: /* End Of Interrupt */
        gic_complete_irq(s, cpu, value & 0x3ff, attrs);
        return MEMTX_OK;
    case 0x1c: /* Aliased Binary Point */
        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
            /* unimplemented, or NS access: RAZ/WI */
            return MEMTX_OK;
        } else {
            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_write: Bad offset %x\n", (int)offset);
        return MEMTX_ERROR;
    }
    gic_update(s);
    return MEMTX_OK;
}

/* Wrappers to read/write the GIC CPU interface for the current CPU */
static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                    unsigned size, MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs);
}

static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs);
}

/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into GICState* + cpu id.
 */
static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr, data, attrs);
}

static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_write(s, id, addr, value, attrs);
}

static const MemoryRegionOps gic_thiscpu_ops = {
    .read_with_attrs = gic_thiscpu_read,
    .write_with_attrs = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gic_cpu_ops = {
    .read_with_attrs = gic_do_cpu_read,
    .write_with_attrs = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

void gic_init_irqs_and_distributor(GICState *s)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;

    i = s->num_irq - GIC_INTERNAL;
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     */
    if (s->revision != REV_NVIC) {
        i += (GIC_INTERNAL * s->num_cpu);
    }
    qdev_init_gpio_in(DEVICE(s), gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_irq[i]);
    }
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_fiq[i]);
    }
    memory_region_init_io(&s->iomem, OBJECT(s), &gic_dist_ops, s,
                          "gic_dist", 0x1000);
}

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    int i;
    GICState *s = ARM_GIC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gic_init_irqs_and_distributor(s);

    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], OBJECT(s), &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
                              &s->backref[i], "gic_cpu", 0x100);
    }
    sysbus_init_mmio(sbd, &s->iomem);
    /* cpu interfaces (one for "current cpu" plus one per cpu) */
    for (i = 0; i <= NUM_CPU(s); i++) {
        sysbus_init_mmio(sbd, &s->cpuiomem[i]);
    }
}

static void arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICClass *agc = ARM_GIC_CLASS(klass);

    agc->parent_realize = dc->realize;
    dc->realize = arm_gic_realize;
}

static const TypeInfo arm_gic_info = {
    .name = TYPE_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = arm_gic_class_init,
    .class_size = sizeof(ARMGICClass),
};

static void arm_gic_register_types(void)
{
    type_register_static(&arm_gic_info);
}

type_init(arm_gic_register_types)