/*
 * ARM GICv3 support - common bits of emulated and KVM kernel model
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "qemu/module.h"
27 #include "qemu/error-report.h"
28 #include "hw/core/cpu.h"
29 #include "hw/intc/arm_gicv3_common.h"
30 #include "hw/qdev-properties.h"
31 #include "migration/vmstate.h"
32 #include "gicv3_internal.h"
33 #include "hw/arm/linux-boot-if.h"
34 #include "sysemu/kvm.h"
/*
 * Post-load fixup for migration streams coming from a QEMU with the
 * old GICD bitmap shift bug: shift the distributor bitmap state down
 * by GIC_INTERNAL bits if the incoming stream did not declare itself
 * bug-free (gicd_no_migration_shift_bug still false after load).
 */
static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        /* Source already sent correctly-placed data; nothing to fix. */
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
    /* memmove (not memcpy): source and destination ranges overlap. */
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * While this new version QEMU doesn't have this kind of bug as we fix it,
     * so it needs to set the flag to true to indicate that and it's necessary
     * for next migration to work from this new version QEMU.
     */
    cs->gicd_no_migration_shift_bug = true;
}
/*
 * vmstate pre_save hook: let the model-specific subclass (emulated or
 * in-kernel KVM) sync its state back into the GICv3State struct before
 * the fields are serialized.
 */
static int gicv3_pre_save(void *opaque)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    if (c->pre_save) {
        c->pre_save(s);
    }

    return 0;
}
/*
 * vmstate post_load hook: apply the migration-shift-bug fixup first,
 * then let the subclass push the restored state into its backend.
 */
static int gicv3_post_load(void *opaque, int version_id)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    gicv3_gicd_no_migration_shift_bug_post_load(s);

    if (c->post_load) {
        c->post_load(s);
    }
    return 0;
}
96 static bool virt_state_needed(void *opaque
)
98 GICv3CPUState
*cs
= opaque
;
100 return cs
->num_list_regs
!= 0;
/*
 * Per-CPU virtualization (EL2 hypervisor view) state subsection.
 * Field order is migration stream ABI: do not reorder.
 */
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Per-CPU vmstate pre_load hook: establish the default ICC_SRE_EL1
 * value before the (optional) sre_el1 subsection possibly overwrites it.
 */
static int vmstate_gicv3_cpu_pre_load(void *opaque)
{
    GICv3CPUState *cs = opaque;

    /*
     * If the sre_el1 subsection is not transferred this
     * means SRE_EL1 is 0x7 (which might not be the same as
     * our reset value).
     */
    cs->icc_sre_el1 = 0x7;
    return 0;
}
130 static bool icc_sre_el1_reg_needed(void *opaque
)
132 GICv3CPUState
*cs
= opaque
;
134 return cs
->icc_sre_el1
!= 7;
/*
 * Optional per-CPU subsection carrying ICC_SRE_EL1; absent when the
 * register holds the default value 0x7 (see icc_sre_el1_reg_needed).
 */
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
148 static bool gicv4_needed(void *opaque
)
150 GICv3CPUState
*cs
= opaque
;
152 return cs
->gic
->revision
> 3;
/*
 * GICv4-only per-CPU subsection: virtual LPI table base registers.
 */
const VMStateDescription vmstate_gicv3_gicv4 = {
    .name = "arm_gicv3_cpu/gicv4",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = gicv4_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
167 static bool gicv3_cpu_nmi_needed(void *opaque
)
169 GICv3CPUState
*cs
= opaque
;
171 return cs
->gic
->nmi_support
;
/*
 * Per-CPU NMI subsection: redistributor GICR_INMIR0 state.
 */
static const VMStateDescription vmstate_gicv3_cpu_nmi = {
    .name = "arm_gicv3_cpu/nmi",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = gicv3_cpu_nmi_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(gicr_inmir0, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main per-CPU (redistributor + CPU interface) migration description.
 * Field order is migration stream ABI: do not reorder or insert fields;
 * new state goes into subsections.
 */
static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        &vmstate_gicv3_gicv4,
        &vmstate_gicv3_cpu_nmi,
        NULL
    }
};
/*
 * Top-level vmstate pre_load hook: choose the compatibility default for
 * gicd_no_migration_shift_bug before the stream may override it.
 */
static int gicv3_pre_load(void *opaque)
{
    GICv3State *cs = opaque;

    /*
     * The gicd_no_migration_shift_bug flag is used for migration compatibility
     * for old version QEMU which may have the GICD bmp shift bug under KVM mode.
     * Strictly, what we want to know is whether the migration source is using
     * KVM. Since we don't have any way to determine that, we look at whether the
     * destination is using KVM; this is close enough because for the older QEMU
     * versions with this bug KVM -> TCG migration didn't work anyway. If the
     * source is a newer QEMU without this bug it will transmit the migration
     * subsection which sets the flag to true; otherwise it will remain set to
     * the value we select here.
     */
    if (kvm_enabled()) {
        cs->gicd_no_migration_shift_bug = false;
    }

    return 0;
}
/*
 * Subsection predicate that unconditionally includes its subsection,
 * used so the gicd_no_migration_shift_bug marker is always transmitted.
 */
static bool needed_always(void *opaque)
{
    return true;
}
/*
 * Always-sent subsection whose presence tells the destination that this
 * source does not have the old GICD bitmap shift bug.
 */
const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};
260 static bool gicv3_nmi_needed(void *opaque
)
262 GICv3State
*cs
= opaque
;
264 return cs
->nmi_support
;
/*
 * Distributor NMI subsection: the per-interrupt NMI property bitmap.
 */
const VMStateDescription vmstate_gicv3_gicd_nmi = {
    .name = "arm_gicv3/gicd_nmi",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = gicv3_nmi_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(nmi, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Top-level GICv3 (distributor) migration description; per-CPU state is
 * pulled in via the variable-length cpu array. Field order is migration
 * stream ABI: do not reorder.
 */
static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        &vmstate_gicv3_gicd_nmi,
        NULL
    }
};
/*
 * Wire up the GIC's external interfaces: incoming GPIO lines (SPIs plus
 * per-CPU PPIs), the per-CPU outgoing IRQ/FIQ/virtual/NMI lines, the
 * distributor MMIO region and one MMIO region per redistributor region.
 *
 * @s: the GICv3 device (already realized enough that num_irq/num_cpu and
 *     redist_region_count are valid)
 * @handler: callback for the incoming GPIO interrupt lines
 * @ops: MemoryRegionOps pair; ops[0] is the distributor, ops[1] the
 *       redistributor (may be NULL for the KVM model, which has no
 *       userspace MMIO emulation)
 */
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;
    int cpuidx;

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    /* One sysbus IRQ per CPU for each output line type; the ordering of
     * these loops fixes the sysbus IRQ numbering that boards rely on.
     */
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_nmi);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vnmi);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

    s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions);
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
        GICv3RedistRegion *region = &s->redist_regions[i];

        region->gic = s;
        /* Each region covers the redistributors for a contiguous run of
         * CPUs starting at cpuidx.
         */
        region->cpuidx = cpuidx;
        cpuidx += s->redist_region_count[i];

        memory_region_init_io(&region->iomem, OBJECT(s),
                              ops ? &ops[1] : NULL, region, name,
                              s->redist_region_count[i] * gicv3_redist_size(s));
        sysbus_init_mmio(sbd, &region->iomem);
        g_free(name);
    }
}
/*
 * Common realize: validate properties (revision, interrupt-line and CPU
 * counts, redistributor region capacity), allocate per-CPU state and
 * pre-compute each CPU's GICR_TYPER value. Subclasses call this before
 * doing their model-specific realize work.
 */
static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i, rdist_capacity, cpuidx;

    /*
     * This GIC device supports only revisions 3 and 4. The GICv1/v2
     * is a separate device.
     * Note that subclasses of this device may impose further restrictions
     * on the GIC revision: notably, the in-kernel KVM GIC doesn't
     * support GICv4.
     */
    if (s->revision != 3 && s->revision != 4) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }
    if (s->num_cpu == 0) {
        error_setg(errp, "num-cpu must be at least 1");
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1, so this is an
     * implementation imposed restriction, not an architectural one,
     * so we don't have to deal with bitfields where only some of the
     * bits in a 32-bit word should be valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    if (s->lpi_enable && !s->dma) {
        error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
        return;
    }

    /* The redistributor regions must provide exactly one redistributor
     * per CPU between them.
     */
    rdist_capacity = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity != s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "does not match the number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    if (s->lpi_enable) {
        address_space_init(&s->dma_as, s->dma,
                           "gicv3-dist-sysmem");
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 1 if vLPIs supported (GICv4 and up)
         *  PLPIS == 1 if LPIs supported
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                     (cpu_affid & 0xFFFFFF);
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
                               (1 << 24) |
                               (i << 8);

        if (s->lpi_enable) {
            s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
            if (s->revision > 3) {
                s->cpu[i].gicr_typer |= GICR_TYPER_VLPIS;
            }
        }
    }

    /*
     * Now go through and set GICR_TYPER.Last for the final
     * redistributor in each region.
     */
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        cpuidx += s->redist_region_count[i];
        s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
    }

    s->itslist = g_ptr_array_new();
}
491 static void arm_gicv3_finalize(Object
*obj
)
493 GICv3State
*s
= ARM_GICV3_COMMON(obj
);
495 g_free(s
->redist_region_count
);
/*
 * Resettable "hold" phase: put the distributor and every redistributor
 * back to their reset values. CPU interface state is deliberately left
 * alone (see comment in the loop).
 */
static void arm_gicv3_common_reset_hold(Object *obj, ResetType type)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        if (s->lpi_enable) {
            /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
            cs->gicr_ctlr |= GICR_CTLR_CES;
        }
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        cs->gicr_vpropbaser = 0;
        cs->gicr_vpendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        /* PPIs (the upper half of the low 32 interrupts) are edge-triggered */
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        /* 0xff == lowest possible priority: no pending interrupt wins */
        cs->hppi.prio = 0xff;
        cs->hppi.nmi = false;
        cs->hpplpi.prio = 0xff;
        cs->hpplpi.nmi = false;
        cs->hppvlpi.prio = 0xff;
        cs->hppvlpi.nmi = false;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    /* A freshly-reset device trivially doesn't carry shifted bitmap state */
    s->gicd_no_migration_shift_bug = true;
}
/*
 * ARMLinuxBootIf hook: called when QEMU is acting as bootloader and is
 * about to boot a kernel directly, so the GIC can be configured the way
 * firmware would have left it.
 */
static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
                                      bool secure_boot)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    if (s->security_extn && !secure_boot) {
        /* We're directly booting a kernel into NonSecure. If this GIC
         * implements the security extensions then we must configure it
         * to have all the interrupts be NonSecure (this is a job that
         * is done by the Secure boot firmware in real hardware, and in
         * this mode QEMU is acting as a minimalist firmware-and-bootloader
         * equivalent).
         */
        s->irq_reset_nonsecure = true;
    }
}
/*
 * QOM properties shared by the emulated and KVM GICv3 models.
 */
static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
    DEFINE_PROP_BOOL("has-nmi", GICv3State, nmi_support, 0),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    /*
     * Compatibility property: force 8 bits of physical priority, even
     * if the CPU being emulated should have fewer.
     */
    DEFINE_PROP_BOOL("force-8-bit-prio", GICv3State, force_8bit_prio, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
                     MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Class init: wire the common realize/reset/migration/property hooks
 * into the device, resettable and linux-boot interfaces.
 */
static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);

    rc->phases.hold = arm_gicv3_common_reset_hold;
    dc->realize = arm_gicv3_common_realize;
    device_class_set_props(dc, arm_gicv3_common_properties);
    dc->vmsd = &vmstate_gicv3;
    albifc->arm_linux_init = arm_gic_common_linux_init;
}
/*
 * Abstract base type; the emulated and KVM GICv3 devices subclass it.
 */
static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};
/* Register the abstract common type with the QOM type system. */
static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)
661 const char *gicv3_class_name(void)
663 if (kvm_irqchip_in_kernel()) {
664 return "kvm-arm-gicv3";
667 error_report("Userspace GICv3 is not supported with KVM");