/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
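    /* Illustrative example (values are made up): if the recorded best is
     * INTID 40 at priority 0x80, then INTID 33 arriving at priority 0x80
     * takes precedence (equal priority, lower INTID), whereas INTID 50 at
     * priority 0x80 does not. Remember that lower numeric priority values
     * mean higher priority.
     */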
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;
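
    /* Worked example with made-up values for one 32-bit slice:
     *   pending = 0x00000001, edge_trigger = 0x00000003, level = 0x00000006,
     *   enable = 0x00000007, active = 0x00000004
     *   pend = pending | (~edge_trigger & level) = 0x00000001 | 0x00000004
     *        = 0x00000005
     *   pend &= enable   ->  0x00000005
     *   pend &= ~active  ->  0x00000001
     * so only bit 0 is still eligible before the group mask is applied below.
     */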

    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }

    pend &= grpmask;

    return pend;
}

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }

    pend &= grpmask;

    return pend;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    for (i = 0; i < GIC_INTERNAL; i++) {
        if (!(pend & (1 << i))) {
            continue;
        }
        prio = cs->gicr_ipriorityr[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            seenbetter = true;
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }
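
    /* If LPIs are enabled, also let the cached highest priority pending
     * LPI (cs->hpplpi, maintained by the redistributor LPI code) compete
     * with the best SGI/PPI found above.
     */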
    if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
        (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
        (cs->hpplpi.prio != 0xff)) {
        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
            cs->hppi.irq = cs->hpplpi.irq;
            cs->hppi.prio = cs->hpplpi.prio;
            cs->hppi.grp = cs->hpplpi.grp;
            seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
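    /* For instance (illustrative scenario): if the previous best was PPI 22
     * at priority 0x40, the guest has just disabled PPI 22, and no other
     * redistributor interrupt is pending at priority 0x40 or better, then
     * seenbetter stays false while the stale hppi still records priority
     * 0x40, so we fall back to a full update that also rescans the SPIs.
     */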
    if (!seenbetter && cs->hppi.prio != 0xff &&
        (cs->hppi.irq < GIC_INTERNAL ||
         cs->hppi.irq >= GICV3_LPI_INTID_START)) {
        gicv3_full_update_noirqset(cs->gic);
    }
}

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

    assert(start >= GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }
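
        /* Illustrative example: with start == 40 and len == 4, the first
         * iteration computes the pending word for the 32-interrupt block
         * starting at 32 (40 & ~0x1f), and bits 8..11 of pend are the ones
         * examined below.
         */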
        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
        }
    }
}

void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICv3State *s = opaque;
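
    /* Illustrative example (made-up numbers): if num_irq is 288 then there
     * are N = 256 external SPI inputs, so input 5 raises SPI INTID 37
     * (5 + GIC_INTERNAL), while input 256 + 32 + 27 = 315 raises PPI
     * INTID 27 on CPU 1.
     */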

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}

static void arm_gicv3_post_load(GICv3State *s)
{
    int i;

    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
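    /* First recalculate each CPU's cached highest priority pending LPI
     * (cs->hpplpi) from the LPI tables, so that the full update below can
     * take LPIs into account.
     */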
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_lpi_only(&s->cpu[i]);
    }
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}

static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    }
};
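
/* Element 0 of gic_ops backs the distributor MMIO region and element 1 the
 * redistributor region(s); gicv3_init_irqs_and_mmio() (called from
 * arm_gic_realize() below) registers them with the memory subsystem.
 */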

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);

    gicv3_init_cpuif(s);
}
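
/* A board or SoC model typically wires this device up via the sysbus API,
 * along these general lines (an illustrative sketch only, not taken from
 * this file; addresses and counts are made up, and some required properties
 * such as the redistributor region layout are omitted; see hw/arm/virt.c
 * for a complete example):
 *
 *   DeviceState *gicdev = qdev_new(TYPE_ARM_GICV3);
 *   qdev_prop_set_uint32(gicdev, "revision", 3);
 *   qdev_prop_set_uint32(gicdev, "num-cpu", num_cpus);
 *   qdev_prop_set_uint32(gicdev, "num-irq", 32 + num_spis);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(gicdev), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 0, dist_base);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 1, redist_base);
 *
 * Individual interrupt inputs are then obtained with
 * qdev_get_gpio_in(gicdev, n) and connected to the interrupt sources.
 */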

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
    device_class_set_parent_realize(dc, arm_gic_realize,
                                    &agc->parent_realize);
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)