/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xffffffff;
}

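/*
 * Worked example: if GICR_IGROUPR0 is 0xffff0000 (only interrupts
 * 16..31 are NS Group 1), a Non-secure write to GICR_ISENABLER0 can
 * only affect bits 16..31; a Secure access, or any access when
 * GICD_CTLR.DS is set, gets the full 0xffffffff mask.
 */
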
static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

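/*
 * As used by gicv3_redist_send_sgi() below: an NS_access value of at
 * least 1 lets a Non-secure source generate this SGI when it is
 * configured as Group 0, and a value of at least 2 when it is
 * configured as Secure Group 1.
 */
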
static void gicr_write_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                  uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set" register */
    val &= mask_group(cs, attrs);
    *reg = val;
    gicv3_redist_update(cs);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    /* Helper routine to implement reading a "set/clear" register */
    reg &= mask_group(cs, attrs);
    return reg;
}

static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
{
    /*
     * Return true if a vCPU is resident, which is defined by
     * whether the GICR_VPENDBASER register is marked VALID and
     * has the right virtual pending table address.
     */
    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        return false;
    }
    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
}

/*
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        hpp->nmi = false;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}

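/*
 * Each LPI Configuration table entry is one byte: bit 0 is the enable
 * bit and LPI_PRIORITY_MASK (bits 7:2) holds the priority. For example,
 * an entry of 0xa1 is an enabled LPI whose priority reads as 0xa0 when
 * GICD_CTLR.DS is set, or as 0xd0 (shifted into the Non-secure half of
 * the priority range) when security is enabled.
 */
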
/*
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;
    hpp->nmi = false;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}

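/*
 * Sizing example: GICR_PROPBASER.IDBits = 15 means 16 interrupt ID
 * bits, so @ptsizebits is 15, pendt_size is 1 << 16 = 65536 IDs, and
 * the loop above scans bytes 1024..8191 of the pending table (the
 * first 1024 bytes cover IDs 0..8191, below GICV3_LPI_INTID_START).
 */
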
/*
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}

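/*
 * The pending table is a simple bitmap, one bit per interrupt ID:
 * for example, the lowest LPI (irq 8192) lives at byte offset
 * 8192 / 8 = 1024, bit 8192 % 8 = 0 of the table.
 */
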
static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

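/*
 * Worked example of the Non-secure priority view: a stored priority of
 * 0x30 reads back as (0x30 << 1) & 0xff = 0x60 from the Non-secure
 * side, and a Non-secure write of 0x60 is stored as
 * 0x80 | (0x60 >> 1) = 0xb0 by gicr_write_ipriorityr() below, keeping
 * all Non-secure priorities in the lower half of the range.
 */
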
static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        cs->hppvlpi.nmi = false;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}

static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}

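/*
 * To summarise the four VALID transitions handled above:
 *   0 -> 0: plain register update
 *   0 -> 1: vPE is being scheduled; rescan tables, PendingLast is RES1
 *   1 -> 0: vPE is being descheduled; set PendingLast if a vLPI was pending
 *   1 -> 1: UNPREDICTABLE if anything else changes; we log and ignore
 */
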
static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        *data = cs->gic->nmi_support ?
                gicr_read_bitmap_reg(cs, attrs, cs->gicr_inmir0) : 0;
        return MEMTX_OK;
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
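        /*
         * For example, if interrupts 16 and 18 are edge-triggered,
         * the extracted half-word is 0b101; half_shuffle32() spreads
         * it into the even bits (0b10001) and the << 1 moves it to the
         * odd bits (0b100010), i.e. the Int_config field for each
         * edge-triggered interrupt reads as 0b10.
         */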
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_INMIR0:
        if (cs->gic->nmi_support) {
            gicr_write_bitmap_reg(cs, attrs, &cs->gicr_inmir0, value);
        }
        return MEMTX_OK;
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32 bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        uint32_t mask;

        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);
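    /*
     * For example, with the GICv3 stride of 2 * 64K = 128K per CPU, an
     * access at offset 0x30000 into a region whose cpuidx is 0 resolves
     * to CPU 1, register offset 0x10000 (the second, SGI_base, frame).
     */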
    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and, for each pending
     * LPI, reads the corresponding entry from the LPI configuration
     * table to extract the priority and enable information, replacing
     * the cached highest-priority pending LPI whenever it finds a
     * better candidate.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICV3_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in the LPI pending table
     * for the irq being activated or deactivated.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* no change in the value of the pending bit, return */
        return;
    }

    if (level) {
        /*
         * check if this LPI is better than the current hpplpi, if yes
         * just set hpplpi.prio and .irq without doing a full rescan
         */
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICV3_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}

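/*
 * Range check example: with GICR_PROPBASER.IDBits = 15, valid LPI
 * INTIDs run from GICV3_LPI_INTID_START (8192) up to
 * (1 << 16) - 1 = 65535; anything outside that window, or any LPI
 * while GICR_CTLR.Enable_LPIs is 0, is silently ignored.
 */
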
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}

void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICV3_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICV3_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * Change the pending state of the specified vLPI.
     * Unlike gicv3_redist_process_vlpi(), we know here that the
     * vCPU is definitely resident on this redistributor, and that
     * the irq is in range.
     */
    uint64_t vptbase, ctbase;

    vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;

    if (set_pending_table_bit(cs, vptbase, irq, level)) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    bool bit_changed;
    bool resident = vcpu_resident(cs, vptaddr);
    uint64_t ctbase;

    if (resident) {
        uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER,
                                     IDBITS);
        if (irq >= (1ULL << (idbits + 1))) {
            return;
        }
    }

    bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
    if (resident && bit_changed) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }

    if (!resident && level && doorbell != INTID_SPURIOUS &&
        (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        /* vCPU is not currently resident: ring the doorbell */
        gicv3_redist_process_lpi(cs, doorbell, 1);
    }
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * Move the specified vLPI's pending state from the source redistributor
     * to the destination.
     */
    if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
        /*
         * Update src's cached highest-priority pending vLPI if we just made
         * it not-pending
         */
        gicv3_redist_update_vlpi(src);
    }
    /*
     * Mark the vLPI pending on the destination (ringing the doorbell
     * if the vCPU isn't resident)
     */
    gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    if (!vcpu_resident(cs, vptaddr)) {
        /* We don't have anything cached if the vCPU isn't resident */
        return;
    }

    /* Otherwise, our only cached information is the HPPVLPI info */
    gicv3_redist_update_vlpi(cs);
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The only cached information for vLPIs we have is the HPPVLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_vinvall(cs, vptaddr);
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}