hw/intc/arm_gicv3.c
/*
 * ARM Generic Interrupt Controller v3
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"
static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
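    /* For example (illustrative values): if cs->hppi is currently
     * {irq = 40, prio = 0x80}, a newly scanned interrupt 35 at priority
     * 0x80 also returns true here (same priority, lower interrupt number),
     * while interrupt 50 at priority 0x80 does not. Remember that lower
     * priority values are more urgent.
     */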
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}
static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
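    /* Worked example with made-up register values: if within one
     * 32-interrupt block pending == 0x5, edge_trigger == 0x1, level == 0x2,
     * enable == 0x7 and active == 0x4, then
     * pend = (0x5 | (~0x1 & 0x2)) & 0x7 & ~0x4 = 0x3, i.e. interrupts 0 and
     * 1 are eligible (before the group-enable mask below is applied) and
     * interrupt 2 is excluded because it is already active.
     */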
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;

    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}
static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}
/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            prio = cs->gicr_ipriorityr[i];
            if (irqbetter(cs, i, prio)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff && cs->hppi.irq < GIC_INTERNAL) {
        gicv3_full_update_noirqset(cs->gic);
    }
}
/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}
/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
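    /* For instance (made-up numbers): suppose CPU 0's hppi was
     * {irq = 40, prio = 0x40} and this update covers interrupts 32..63
     * because irq 40's priority register was rewritten to 0xa0. If no
     * interrupt in the range now beats the cached 0x40, seenbetter stays
     * false, and because hppi.irq (40) lies inside [start, start + len)
     * the cached value can no longer be trusted, so we fall back to a
     * full update below.
     */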
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}
void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}
void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
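    /* Concretely: when the SPI pass below runs, every hppi.prio is still
     * 0xff, and when the redistributor pass runs, any hppi set by the SPI
     * pass refers to an interrupt >= GIC_INTERNAL, so neither pass can see
     * a stale "previous best" inside the range it is scanning.
     */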
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}
void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}
/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     * [0..N-1] : external interrupts
     * [N..N+31] : PPI (internal) interrupts for CPU 0
     * [N+32..N+63] : PPI (internal) interrupts for CPU 1
     * ...
     */
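    /* Worked example, assuming GIC_INTERNAL == 32 and a board that created
     * the GIC with num_irq == 288 (so N == 256): an 'irq' argument of 317
     * decodes as 317 - 256 = 61, i.e. cpu = 61 / 32 = 1 and PPI 61 % 32 = 29
     * on that CPU's redistributor.
     */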
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}
static void arm_gicv3_post_load(GICv3State *s)
{
    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}
static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    }
};
static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->nb_redist_regions != 1) {
        error_setg(errp, "VGICv3 redist region number(%d) not equal to 1",
                   s->nb_redist_regions);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_cpuif(s);
}
static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
    device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
}
static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};
static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)
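/* For reference, a rough sketch of how a board model might instantiate this
 * sysbus device. This is not part of this file; the property values, MMIO
 * base addresses and the smp_cpus/redist0_count variables are illustrative,
 * and the property names are the ones registered by the common parent class
 * (TYPE_ARM_GICV3_COMMON):
 *
 *     DeviceState *gicdev = qdev_new(TYPE_ARM_GICV3);
 *     qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
 *     qdev_prop_set_uint32(gicdev, "num-irq", 32 + 256);
 *     qdev_prop_set_uint32(gicdev, "len-redist-region-count", 1);
 *     qdev_prop_set_uint32(gicdev, "redist-region-count[0]", redist0_count);
 *     sysbus_realize_and_unref(SYS_BUS_DEVICE(gicdev), &error_fatal);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 0, 0x08000000);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 1, 0x080a0000);
 *
 * MMIO region 0 is the distributor and region 1 the first redistributor
 * region, as set up by gicv3_init_irqs_and_mmio() during realize.
 */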