/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xffffffffU;
}

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

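/*
 * The 2-bit NS_access value returned above is consumed by
 * gicv3_redist_send_sgi() below: 0 blocks Non-secure generation of a
 * Secure SGI entirely, 1 permits it when the SGI is configured as
 * Group 0, and 2 also permits it when the SGI is Secure Group 1.
 * Non-secure Group 1 SGIs are not subject to this check.
 */
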
static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

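/*
 * These two helpers implement the usual GIC set/clear register pairs
 * (e.g. GICR_ISENABLER0/GICR_ICENABLER0): writing 1s to the "set"
 * register sets the corresponding state bits, writing 1s to the
 * "clear" register clears them, and writes to bits which this access
 * is not permitted to touch (per mask_group()) are ignored.
 */
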
static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

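/*
 * Note on the Non-secure priority view above: when two Security states
 * are in use, Non-secure software sees only the lower half of the
 * priority range. A Non-secure write of 0x40 is stored internally as
 * 0x80 | (0x40 >> 1) = 0xA0, and a Non-secure read returns
 * (0xA0 << 1) & 0xff = 0x40 again, while Secure reads see the full
 * 8-bit value.
 */
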
static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

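/*
 * Only the priority registers are byte-accessible here; byte accesses
 * to anything else fall through to the default case and are handled
 * via the MEMTX_ERROR path in gicv3_redist_read()/write() below.
 */
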
static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
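    /*
     * Example of the shuffle above: if the extracted edge_trigger half
     * is 0b1011, half_shuffle32() spreads it into the even bits as
     * 0b01000101, and the final << 1 moves each bit into the odd
     * Int_config[1] position of its 2-bit ICFGR field, giving
     * 0b10001010, which is how the guest expects "edge-triggered" to
     * be encoded in GICR_ICFGR<n>.
     */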
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(offset - GICR_IDREGS);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
                gicv3_redist_update(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writeable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached
         * the quiescent state.
         * Setting ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32 bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
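    /*
     * half_unshuffle32() is the inverse of the half_shuffle32() used on
     * the read side: after dropping the read-only Int_config[0] bits
     * with ">> 1", each remaining 2-bit field collapses back to a
     * single edge/level bit, and the "<< 16" places the result in the
     * PPI half (bits [31:16]) of the edge_trigger bitmap, which is the
     * half that GICR_ICFGR1 controls.
     */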
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

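/*
 * GICR_PROPBASER and GICR_PENDBASER are 64-bit registers: the 32-bit
 * accessors above update one half at a time with deposit64(), while
 * these 64-bit accessors read or write the whole register in one go.
 */
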
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

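/*
 * Example of the address decode above: with 0x20000 bytes (two 64KB
 * frames, RD_base followed by SGI_base) per CPU, an access at region
 * offset 0x40008 targets cpuidx 2 and register offset 0x8 (GICR_TYPER)
 * within that CPU's redistributor frames.
 */
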
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpict_baddr;
    uint8_t lpite;
    uint8_t prio;

    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) *
                       sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite,
                       sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < cs->hpplpi.prio) ||
        ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) {
        cs->hpplpi.irq = irq;
        cs->hpplpi.prio = prio;
        /* LPIs are always non-secure Grp1 interrupts */
        cs->hpplpi.grp = GICV3_G1NS;
    }
}

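/*
 * Each LPI configuration table entry read above is one byte: bit 0 is
 * the enable bit (LPI_CTE_ENABLED) and the upper bits hold the
 * priority (LPI_PRIORITY_MASK), which is why the table is indexed by
 * (irq - GICV3_LPI_INTID_START) with a one-byte stride.
 */
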
void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending
     * LPI, reads the corresponding entry from the LPI configuration
     * table to extract the priority info. If that priority is better
     * (numerically lower) than the current best candidate, the LPI
     * becomes the new highest-priority pending LPI (cs->hpplpi).
     */
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpipt_baddr;
    uint32_t pendt_size = 0;
    uint8_t pend;
    int i, bit;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
        !cs->gicr_pendbaser) {
        return;
    }

    cs->hpplpi.prio = 0xff;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    /* Determine the highest priority pending interrupt among LPIs */
    pendt_size = (1ULL << (idbits + 1));

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
                           sizeof(pend));

        while (pend) {
            bit = ctz32(pend);
            gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
            pend &= ~(1 << bit);
        }
    }
}

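/*
 * The pending table is a simple bitmap, one bit per INTID, so its
 * useful size in bytes is 2^(IDbits + 1) / 8 and the scan above can
 * skip the first GICV3_LPI_INTID_START / 8 bytes, which cover the
 * non-LPI INTID range below 8192.
 */
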
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table
     * for the irq being activated or deactivated.
     */
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t lpipt_baddr;
    bool ispend = false;
    uint8_t pend;

    /*
     * get the bit value corresponding to this irq in the
     * lpi pending table
     */
    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
                       MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));

    ispend = extract32(pend, irq % 8, 1);

    /* no change in the value of pending bit, return */
    if (ispend == level) {
        return;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);

    address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
                        MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));

    if (level) {
        /*
         * check if this LPI is better than the current hpplpi, if yes
         * just set hpplpi.prio and .irq without doing a full rescan
         */
        gicv3_redist_check_lpi_priority(cs, irq);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

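/*
 * The update at the end of gicv3_redist_lpi_pending() is deliberately
 * asymmetric: a newly pending LPI can only improve the cached best
 * candidate, so a single gicv3_redist_check_lpi_priority() suffices,
 * whereas clearing the currently-best LPI means we no longer know
 * which pending LPI is highest priority and must rescan the table.
 */
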
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
        !cs->gicr_pendbaser || (irq > (1ULL << (idbits + 1)) - 1) ||
        irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);

    gicv3_redist_update(cs);
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}