/*
 * ARM GIC support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GIC_INTERNAL_H
#define QEMU_ARM_GIC_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gic.h"

#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))

#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm))
#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm))
#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm))
#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm))
#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm))
#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true)
#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false)
#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model)
#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm))
#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm))
#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true)
#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \
    (s->irq_state[irq].edge_trigger = false)
#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
                                        s->priority1[irq][cpu] : \
                                        s->priority2[(irq) - GIC_INTERNAL])
#define GIC_DIST_TARGET(irq) (s->irq_target[irq])
#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
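
/*
 * Usage note (illustrative, not taken from the callers): these macros assume
 * a local variable `s' of type GICState * is in scope at the point of use,
 * and their `cm' argument is a bitmask of CPU interfaces, since each
 * irq_state field is a per-CPU bitmask. The same accessor can therefore
 * touch a single CPU interface or all of them, e.g.:
 *
 *     GIC_DIST_SET_ENABLED(irq, 1 << cpu);      enable irq for one CPU
 *     GIC_DIST_SET_ENABLED(irq, ALL_CPU_MASK);  enable irq for every CPU
 *     if (GIC_DIST_TEST_ENABLED(irq, 1 << cpu)) { ... }
 */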

#define GICD_CTLR_EN_GRP0 (1U << 0)
#define GICD_CTLR_EN_GRP1 (1U << 1)

#define GICC_CTLR_EN_GRP0    (1U << 0)
#define GICC_CTLR_EN_GRP1    (1U << 1)
#define GICC_CTLR_ACK_CTL    (1U << 2)
#define GICC_CTLR_FIQ_EN     (1U << 3)
#define GICC_CTLR_CBPR       (1U << 4) /* GICv1: SBPR */
#define GICC_CTLR_EOIMODE    (1U << 9)
#define GICC_CTLR_EOIMODE_NS (1U << 10)

REG32(GICH_HCR, 0x0)
    FIELD(GICH_HCR, EN, 0, 1)
    FIELD(GICH_HCR, UIE, 1, 1)
    FIELD(GICH_HCR, LRENPIE, 2, 1)
    FIELD(GICH_HCR, NPIE, 3, 1)
    FIELD(GICH_HCR, VGRP0EIE, 4, 1)
    FIELD(GICH_HCR, VGRP0DIE, 5, 1)
    FIELD(GICH_HCR, VGRP1EIE, 6, 1)
    FIELD(GICH_HCR, VGRP1DIE, 7, 1)
    FIELD(GICH_HCR, EOICount, 27, 5)

#define GICH_HCR_MASK \
    (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \
     R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \
     R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \
     R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \
     R_GICH_HCR_EOICount_MASK)

REG32(GICH_VTR, 0x4)
    FIELD(GICH_VTR, ListRegs, 0, 6)
    FIELD(GICH_VTR, PREbits, 26, 3)
    FIELD(GICH_VTR, PRIbits, 29, 3)

REG32(GICH_VMCR, 0x8)
    FIELD(GICH_VMCR, VMCCtlr, 0, 10)
    FIELD(GICH_VMCR, VMABP, 18, 3)
    FIELD(GICH_VMCR, VMBP, 21, 3)
    FIELD(GICH_VMCR, VMPriMask, 27, 5)

REG32(GICH_MISR, 0x10)
    FIELD(GICH_MISR, EOI, 0, 1)
    FIELD(GICH_MISR, U, 1, 1)
    FIELD(GICH_MISR, LRENP, 2, 1)
    FIELD(GICH_MISR, NP, 3, 1)
    FIELD(GICH_MISR, VGrp0E, 4, 1)
    FIELD(GICH_MISR, VGrp0D, 5, 1)
    FIELD(GICH_MISR, VGrp1E, 6, 1)
    FIELD(GICH_MISR, VGrp1D, 7, 1)

REG32(GICH_EISR0, 0x20)
REG32(GICH_EISR1, 0x24)
REG32(GICH_ELRSR0, 0x30)
REG32(GICH_ELRSR1, 0x34)
REG32(GICH_APR, 0xf0)

REG32(GICH_LR0, 0x100)
    FIELD(GICH_LR0, VirtualID, 0, 10)
    FIELD(GICH_LR0, PhysicalID, 10, 10)
    FIELD(GICH_LR0, CPUID, 10, 3)
    FIELD(GICH_LR0, EOI, 19, 1)
    FIELD(GICH_LR0, Priority, 23, 5)
    FIELD(GICH_LR0, State, 28, 2)
    FIELD(GICH_LR0, Grp1, 30, 1)
    FIELD(GICH_LR0, HW, 31, 1)

/* Last LR register */
REG32(GICH_LR63, 0x1fc)

#define GICH_LR_MASK \
    (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
     R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
     R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
     R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)

#define GICH_LR_STATE_INVALID         0
#define GICH_LR_STATE_PENDING         1
#define GICH_LR_STATE_ACTIVE          2
#define GICH_LR_STATE_ACTIVE_PENDING  3

#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
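
/*
 * Usage sketch (illustrative only, the values are made up): an LR word can
 * be composed with FIELD_DP32() from "hw/registerfields.h" and read back
 * with the accessors above. Note that GICH_LR_PRIORITY() rescales the 5-bit
 * Priority field into the 8-bit priority space used by the rest of the
 * model:
 *
 *     uint32_t lr = 0;
 *     lr = FIELD_DP32(lr, GICH_LR0, VirtualID, 42);
 *     lr = FIELD_DP32(lr, GICH_LR0, State, GICH_LR_STATE_PENDING);
 *     lr = FIELD_DP32(lr, GICH_LR0, Priority, 0x10);
 *     assert(GICH_LR_VIRT_ID(lr) == 42);
 *     assert(GICH_LR_STATE(lr) == GICH_LR_STATE_PENDING);
 *     assert(GICH_LR_PRIORITY(lr) == (0x10 << 3));
 */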

#define GICH_LR_CLEAR_PENDING(entry) \
        ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
#define GICH_LR_SET_ACTIVE(entry) \
        ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
#define GICH_LR_CLEAR_ACTIVE(entry) \
        ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
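
/*
 * Descriptive note: the 2-bit State field is a pair of independent pending
 * and active flags, so for an entry currently in GICH_LR_STATE_PENDING,
 * GICH_LR_SET_ACTIVE() moves it to GICH_LR_STATE_ACTIVE_PENDING, and a
 * following GICH_LR_CLEAR_PENDING() leaves it in GICH_LR_STATE_ACTIVE.
 */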

/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
 * GICv2 and GICv2 with security extensions:
 */
#define GICC_CTLR_V1_MASK    0x1
#define GICC_CTLR_V1_S_MASK  0x1f
#define GICC_CTLR_V2_MASK    0x21f
#define GICC_CTLR_V2_S_MASK  0x61f
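
/*
 * For reference, these masks decompose in terms of the GICC_CTLR_* bits
 * defined above as:
 *   GICC_CTLR_V1_MASK   = EN_GRP0
 *   GICC_CTLR_V1_S_MASK = EN_GRP0 | EN_GRP1 | ACK_CTL | FIQ_EN | CBPR
 *   GICC_CTLR_V2_MASK   = GICC_CTLR_V1_S_MASK | EOIMODE
 *   GICC_CTLR_V2_S_MASK = GICC_CTLR_V2_MASK | EOIMODE_NS
 */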

/* The special cases for the revision property: */
#define REV_11MPCORE 0

uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
                           MemTxAttrs attrs);

static inline bool gic_test_pending(GICState *s, int irq, int cm)
{
    if (s->revision == REV_11MPCORE) {
        return s->irq_state[irq].pending & cm;
    } else {
        /* Edge-triggered interrupts are marked pending on a rising edge, but
         * level-triggered interrupts are considered pending either when the
         * level is active or when software has explicitly written to
         * GICD_ISPENDR to set the state pending.
         */
        return (s->irq_state[irq].pending & cm) ||
            (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
    }
}

static inline bool gic_is_vcpu(int cpu)
{
    return cpu >= GIC_NCPU;
}

static inline int gic_get_vcpu_real_id(int cpu)
{
    return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
}
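
/*
 * Descriptive note: virtual CPU interfaces are addressed by offsetting the
 * physical CPU index by GIC_NCPU. For example, the vCPU interface of
 * physical CPU 1 uses cpu == GIC_NCPU + 1, so gic_is_vcpu(GIC_NCPU + 1) is
 * true and gic_get_vcpu_real_id(GIC_NCPU + 1) == 1.
 */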

/* Return true if the given vIRQ state exists in an LR and is either active
 * or pending and active.
 *
 * This function is used to check that a guest's `end of interrupt' or
 * `interrupt deactivation' request is valid, and matches an LR of an
 * already acknowledged vIRQ (i.e. has the active bit set in its state).
 */
static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
{
    int cpu = gic_get_vcpu_real_id(vcpu);
    int lr_idx;

    for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
        uint32_t *entry = &s->h_lr[lr_idx][cpu];

        if ((GICH_LR_VIRT_ID(*entry) == irq) &&
            (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
            return true;
        }
    }

    return false;
}

/* Return a pointer to the LR entry matching the given vIRQ.
 *
 * This function is used to retrieve an LR for which we know for sure that the
 * corresponding vIRQ exists in the current context (i.e. its current state is
 * not `invalid'):
 *   - Either the corresponding vIRQ has been validated with gic_virq_is_valid()
 *     so it is `active' or `active and pending',
 *   - Or it was pending and has been selected by gic_get_best_virq(). It is now
 *     `pending', `active' or `active and pending', depending on what the guest
 *     already did with this vIRQ.
 *
 * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE
 * behaviour in the GIC. We choose to return the first one that matches.
 */
static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
{
    int cpu = gic_get_vcpu_real_id(vcpu);
    int lr_idx;

    for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
        uint32_t *entry = &s->h_lr[lr_idx][cpu];

        if ((GICH_LR_VIRT_ID(*entry) == irq) &&
            (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) {
            return entry;
        }
    }

    g_assert_not_reached();
}
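
/*
 * Typical call pattern (sketch only, not taken verbatim from the callers):
 * code handling a guest EOI or deactivation request first checks
 * gic_virq_is_valid() and only then fetches the entry, so the
 * g_assert_not_reached() above is never hit on that path:
 *
 *     if (gic_virq_is_valid(s, irq, vcpu)) {
 *         uint32_t *entry = gic_get_lr_entry(s, irq, vcpu);
 *         GICH_LR_CLEAR_ACTIVE(*entry);
 *     }
 */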

static inline bool gic_test_group(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        return GICH_LR_GROUP(*entry);
    } else {
        return GIC_DIST_TEST_GROUP(irq, 1 << cpu);
    }
}

static inline void gic_clear_pending(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        GICH_LR_CLEAR_PENDING(*entry);
    } else {
        /* Clear pending state for both level and edge triggered
         * interrupts. (level triggered interrupts with an active line
         * remain pending, see gic_test_pending)
         */
        GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
                                                             : (1 << cpu));
    }
}

static inline void gic_set_active(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        GICH_LR_SET_ACTIVE(*entry);
    } else {
        GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
    }
}

static inline void gic_clear_active(GICState *s, int irq, int cpu)
{
    int cm;

    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        GICH_LR_CLEAR_ACTIVE(*entry);

        if (GICH_LR_HW(*entry)) {
            /* Hardware interrupt. We must forward the deactivation request to
             * the distributor.
             */
            int phys_irq = GICH_LR_PHYS_ID(*entry);
            int rcpu = gic_get_vcpu_real_id(cpu);

            if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) {
                /* UNPREDICTABLE behaviour, we choose to ignore the request */
                return;
            }

            /* This is equivalent to a NS write to DIR on the physical CPU
             * interface. Hence group0 interrupt deactivation is ignored if
             * the GIC has the security extensions enabled.
             */
            if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) {
                cm = phys_irq < GIC_INTERNAL ? 1 << rcpu : ALL_CPU_MASK;
                GIC_DIST_CLEAR_ACTIVE(phys_irq, cm);
            }
        }
    } else {
        cm = irq < GIC_INTERNAL ? 1 << cpu : ALL_CPU_MASK;
        GIC_DIST_CLEAR_ACTIVE(irq, cm);
    }
}

static inline int gic_get_priority(GICState *s, int irq, int cpu)
{
    if (gic_is_vcpu(cpu)) {
        uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
        return GICH_LR_PRIORITY(*entry);
    }

    return GIC_DIST_GET_PRIORITY(irq, cpu);
}

#endif /* QEMU_ARM_GIC_INTERNAL_H */