/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */
14 #include "qemu/osdep.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
23 typedef struct GICv3ITSClass GICv3ITSClass
;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState
, GICv3ITSClass
,
26 ARM_GICV3_ITS
, TYPE_ARM_GICV3_ITS
)
28 struct GICv3ITSClass
{
29 GICv3ITSCommonClass parent_class
;
30 ResettablePhases parent_phases
;
34 * This is an internal enum used to distinguish between LPI triggered
35 * via command queue and LPI triggered via gits_translater write.
37 typedef enum ItsCmdType
{
38 NONE
= 0, /* internal indication for GITS_TRANSLATER write */
44 typedef struct DTEntry
{
50 typedef struct CTEntry
{
55 typedef struct ITEntry
{
64 typedef struct VTEntry
{
72 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
73 * if a command parameter is not correct. These include both "stall
74 * processing of the command queue" and "ignore this command, and
75 * keep processing the queue". In our implementation we choose that
76 * memory transaction errors reading the command packet provoke a
77 * stall, but errors in parameters cause us to ignore the command
78 * and continue processing.
79 * The process_* functions which handle individual ITS commands all
80 * return an ItsCmdResult which tells process_cmdq() whether it should
81 * stall, keep going because of an error, or keep going because the
82 * command was a success.
84 typedef enum ItsCmdResult
{
90 /* True if the ITS supports the GICv4 virtual LPI feature */
91 static bool its_feature_virtual(GICv3ITSState
*s
)
93 return s
->typer
& R_GITS_TYPER_VIRTUAL_MASK
;
96 static inline bool intid_in_lpi_range(uint32_t id
)
98 return id
>= GICV3_LPI_INTID_START
&&
99 id
< (1 << (GICD_TYPER_IDBITS
+ 1));
102 static inline bool valid_doorbell(uint32_t id
)
104 /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
105 return id
== INTID_SPURIOUS
|| intid_in_lpi_range(id
);
108 static uint64_t baser_base_addr(uint64_t value
, uint32_t page_sz
)
113 case GITS_PAGE_SIZE_4K
:
114 case GITS_PAGE_SIZE_16K
:
115 result
= FIELD_EX64(value
, GITS_BASER
, PHYADDR
) << 12;
118 case GITS_PAGE_SIZE_64K
:
119 result
= FIELD_EX64(value
, GITS_BASER
, PHYADDRL_64K
) << 16;
120 result
|= FIELD_EX64(value
, GITS_BASER
, PHYADDRH_64K
) << 48;
129 static uint64_t table_entry_addr(GICv3ITSState
*s
, TableDesc
*td
,
130 uint32_t idx
, MemTxResult
*res
)
133 * Given a TableDesc describing one of the ITS in-guest-memory
134 * tables and an index into it, return the guest address
135 * corresponding to that table entry.
136 * If there was a memory error reading the L1 table of an
137 * indirect table, *res is set accordingly, and we return -1.
138 * If the L1 table entry is marked not valid, we return -1 with
139 * *res set to MEMTX_OK.
141 * The specification defines the format of level 1 entries of a
142 * 2-level table, but the format of level 2 entries and the format
143 * of flat-mapped tables is IMPDEF.
145 AddressSpace
*as
= &s
->gicv3
->dma_as
;
148 uint32_t num_l2_entries
;
153 /* Single level table */
154 return td
->base_addr
+ idx
* td
->entry_sz
;
157 /* Two level table */
158 l2idx
= idx
/ (td
->page_sz
/ L1TABLE_ENTRY_SIZE
);
160 l2
= address_space_ldq_le(as
,
161 td
->base_addr
+ (l2idx
* L1TABLE_ENTRY_SIZE
),
162 MEMTXATTRS_UNSPECIFIED
, res
);
163 if (*res
!= MEMTX_OK
) {
166 if (!(l2
& L2_TABLE_VALID_MASK
)) {
170 num_l2_entries
= td
->page_sz
/ td
->entry_sz
;
171 return (l2
& ((1ULL << 51) - 1)) + (idx
% num_l2_entries
) * td
->entry_sz
;
175 * Read the Collection Table entry at index @icid. On success (including
176 * successfully determining that there is no valid CTE for this index),
177 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
178 * If there is an error reading memory then we return the error code.
180 static MemTxResult
get_cte(GICv3ITSState
*s
, uint16_t icid
, CTEntry
*cte
)
182 AddressSpace
*as
= &s
->gicv3
->dma_as
;
183 MemTxResult res
= MEMTX_OK
;
184 uint64_t entry_addr
= table_entry_addr(s
, &s
->ct
, icid
, &res
);
187 if (entry_addr
== -1) {
188 /* No L2 table entry, i.e. no valid CTE, or a memory error */
193 cteval
= address_space_ldq_le(as
, entry_addr
, MEMTXATTRS_UNSPECIFIED
, &res
);
194 if (res
!= MEMTX_OK
) {
197 cte
->valid
= FIELD_EX64(cteval
, CTE
, VALID
);
198 cte
->rdbase
= FIELD_EX64(cteval
, CTE
, RDBASE
);
200 if (res
!= MEMTX_OK
) {
201 trace_gicv3_its_cte_read_fault(icid
);
203 trace_gicv3_its_cte_read(icid
, cte
->valid
, cte
->rdbase
);
209 * Update the Interrupt Table entry at index @evinted in the table specified
210 * by the dte @dte. Returns true on success, false if there was a memory
213 static bool update_ite(GICv3ITSState
*s
, uint32_t eventid
, const DTEntry
*dte
,
216 AddressSpace
*as
= &s
->gicv3
->dma_as
;
217 MemTxResult res
= MEMTX_OK
;
218 hwaddr iteaddr
= dte
->ittaddr
+ eventid
* ITS_ITT_ENTRY_SIZE
;
222 trace_gicv3_its_ite_write(dte
->ittaddr
, eventid
, ite
->valid
,
223 ite
->inttype
, ite
->intid
, ite
->icid
,
224 ite
->vpeid
, ite
->doorbell
);
227 itel
= FIELD_DP64(itel
, ITE_L
, VALID
, 1);
228 itel
= FIELD_DP64(itel
, ITE_L
, INTTYPE
, ite
->inttype
);
229 itel
= FIELD_DP64(itel
, ITE_L
, INTID
, ite
->intid
);
230 itel
= FIELD_DP64(itel
, ITE_L
, ICID
, ite
->icid
);
231 itel
= FIELD_DP64(itel
, ITE_L
, VPEID
, ite
->vpeid
);
232 iteh
= FIELD_DP32(iteh
, ITE_H
, DOORBELL
, ite
->doorbell
);
235 address_space_stq_le(as
, iteaddr
, itel
, MEMTXATTRS_UNSPECIFIED
, &res
);
236 if (res
!= MEMTX_OK
) {
239 address_space_stl_le(as
, iteaddr
+ 8, iteh
, MEMTXATTRS_UNSPECIFIED
, &res
);
240 return res
== MEMTX_OK
;
244 * Read the Interrupt Table entry at index @eventid from the table specified
245 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
246 * struct @ite accordingly. If there is an error reading memory then we return
249 static MemTxResult
get_ite(GICv3ITSState
*s
, uint32_t eventid
,
250 const DTEntry
*dte
, ITEntry
*ite
)
252 AddressSpace
*as
= &s
->gicv3
->dma_as
;
253 MemTxResult res
= MEMTX_OK
;
256 hwaddr iteaddr
= dte
->ittaddr
+ eventid
* ITS_ITT_ENTRY_SIZE
;
258 itel
= address_space_ldq_le(as
, iteaddr
, MEMTXATTRS_UNSPECIFIED
, &res
);
259 if (res
!= MEMTX_OK
) {
260 trace_gicv3_its_ite_read_fault(dte
->ittaddr
, eventid
);
264 iteh
= address_space_ldl_le(as
, iteaddr
+ 8, MEMTXATTRS_UNSPECIFIED
, &res
);
265 if (res
!= MEMTX_OK
) {
266 trace_gicv3_its_ite_read_fault(dte
->ittaddr
, eventid
);
270 ite
->valid
= FIELD_EX64(itel
, ITE_L
, VALID
);
271 ite
->inttype
= FIELD_EX64(itel
, ITE_L
, INTTYPE
);
272 ite
->intid
= FIELD_EX64(itel
, ITE_L
, INTID
);
273 ite
->icid
= FIELD_EX64(itel
, ITE_L
, ICID
);
274 ite
->vpeid
= FIELD_EX64(itel
, ITE_L
, VPEID
);
275 ite
->doorbell
= FIELD_EX64(iteh
, ITE_H
, DOORBELL
);
276 trace_gicv3_its_ite_read(dte
->ittaddr
, eventid
, ite
->valid
,
277 ite
->inttype
, ite
->intid
, ite
->icid
,
278 ite
->vpeid
, ite
->doorbell
);
283 * Read the Device Table entry at index @devid. On success (including
284 * successfully determining that there is no valid DTE for this index),
285 * we return MEMTX_OK and populate the DTEntry struct accordingly.
286 * If there is an error reading memory then we return the error code.
288 static MemTxResult
get_dte(GICv3ITSState
*s
, uint32_t devid
, DTEntry
*dte
)
290 MemTxResult res
= MEMTX_OK
;
291 AddressSpace
*as
= &s
->gicv3
->dma_as
;
292 uint64_t entry_addr
= table_entry_addr(s
, &s
->dt
, devid
, &res
);
295 if (entry_addr
== -1) {
296 /* No L2 table entry, i.e. no valid DTE, or a memory error */
300 dteval
= address_space_ldq_le(as
, entry_addr
, MEMTXATTRS_UNSPECIFIED
, &res
);
301 if (res
!= MEMTX_OK
) {
304 dte
->valid
= FIELD_EX64(dteval
, DTE
, VALID
);
305 dte
->size
= FIELD_EX64(dteval
, DTE
, SIZE
);
306 /* DTE word field stores bits [51:8] of the ITT address */
307 dte
->ittaddr
= FIELD_EX64(dteval
, DTE
, ITTADDR
) << ITTADDR_SHIFT
;
309 if (res
!= MEMTX_OK
) {
310 trace_gicv3_its_dte_read_fault(devid
);
312 trace_gicv3_its_dte_read(devid
, dte
->valid
, dte
->size
, dte
->ittaddr
);
318 * Read the vPE Table entry at index @vpeid. On success (including
319 * successfully determining that there is no valid entry for this index),
320 * we return MEMTX_OK and populate the VTEntry struct accordingly.
321 * If there is an error reading memory then we return the error code.
323 static MemTxResult
get_vte(GICv3ITSState
*s
, uint32_t vpeid
, VTEntry
*vte
)
325 MemTxResult res
= MEMTX_OK
;
326 AddressSpace
*as
= &s
->gicv3
->dma_as
;
327 uint64_t entry_addr
= table_entry_addr(s
, &s
->vpet
, vpeid
, &res
);
330 if (entry_addr
== -1) {
331 /* No L2 table entry, i.e. no valid VTE, or a memory error */
333 trace_gicv3_its_vte_read_fault(vpeid
);
336 vteval
= address_space_ldq_le(as
, entry_addr
, MEMTXATTRS_UNSPECIFIED
, &res
);
337 if (res
!= MEMTX_OK
) {
338 trace_gicv3_its_vte_read_fault(vpeid
);
341 vte
->valid
= FIELD_EX64(vteval
, VTE
, VALID
);
342 vte
->vptsize
= FIELD_EX64(vteval
, VTE
, VPTSIZE
);
343 vte
->vptaddr
= FIELD_EX64(vteval
, VTE
, VPTADDR
);
344 vte
->rdbase
= FIELD_EX64(vteval
, VTE
, RDBASE
);
345 trace_gicv3_its_vte_read(vpeid
, vte
->valid
, vte
->vptsize
,
346 vte
->vptaddr
, vte
->rdbase
);
351 * Given a (DeviceID, EventID), look up the corresponding ITE, including
352 * checking for the various invalid-value cases. If we find a valid ITE,
353 * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
354 * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
355 * should not be relied on).
357 * The string @who is purely for the LOG_GUEST_ERROR messages,
358 * and should indicate the name of the calling function or similar.
360 static ItsCmdResult
lookup_ite(GICv3ITSState
*s
, const char *who
,
361 uint32_t devid
, uint32_t eventid
, ITEntry
*ite
,
364 uint64_t num_eventids
;
366 if (devid
>= s
->dt
.num_entries
) {
367 qemu_log_mask(LOG_GUEST_ERROR
,
368 "%s: invalid command attributes: devid %d>=%d",
369 who
, devid
, s
->dt
.num_entries
);
373 if (get_dte(s
, devid
, dte
) != MEMTX_OK
) {
377 qemu_log_mask(LOG_GUEST_ERROR
,
378 "%s: invalid command attributes: "
379 "invalid dte for %d\n", who
, devid
);
383 num_eventids
= 1ULL << (dte
->size
+ 1);
384 if (eventid
>= num_eventids
) {
385 qemu_log_mask(LOG_GUEST_ERROR
,
386 "%s: invalid command attributes: eventid %d >= %"
387 PRId64
"\n", who
, eventid
, num_eventids
);
391 if (get_ite(s
, eventid
, dte
, ite
) != MEMTX_OK
) {
396 qemu_log_mask(LOG_GUEST_ERROR
,
397 "%s: invalid command attributes: invalid ITE\n", who
);
401 return CMD_CONTINUE_OK
;
405 * Given an ICID, look up the corresponding CTE, including checking for various
406 * invalid-value cases. If we find a valid CTE, fill in @cte and return
407 * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the
408 * contents of @cte should not be relied on).
410 * The string @who is purely for the LOG_GUEST_ERROR messages,
411 * and should indicate the name of the calling function or similar.
413 static ItsCmdResult
lookup_cte(GICv3ITSState
*s
, const char *who
,
414 uint32_t icid
, CTEntry
*cte
)
416 if (icid
>= s
->ct
.num_entries
) {
417 qemu_log_mask(LOG_GUEST_ERROR
, "%s: invalid ICID 0x%x\n", who
, icid
);
420 if (get_cte(s
, icid
, cte
) != MEMTX_OK
) {
424 qemu_log_mask(LOG_GUEST_ERROR
, "%s: invalid CTE\n", who
);
427 if (cte
->rdbase
>= s
->gicv3
->num_cpu
) {
430 return CMD_CONTINUE_OK
;
434 * Given a VPEID, look up the corresponding VTE, including checking
435 * for various invalid-value cases. if we find a valid VTE, fill in @vte
436 * and return CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE
437 * (and the contents of @vte should not be relied on).
439 * The string @who is purely for the LOG_GUEST_ERROR messages,
440 * and should indicate the name of the calling function or similar.
442 static ItsCmdResult
lookup_vte(GICv3ITSState
*s
, const char *who
,
443 uint32_t vpeid
, VTEntry
*vte
)
445 if (vpeid
>= s
->vpet
.num_entries
) {
446 qemu_log_mask(LOG_GUEST_ERROR
, "%s: invalid VPEID 0x%x\n", who
, vpeid
);
450 if (get_vte(s
, vpeid
, vte
) != MEMTX_OK
) {
454 qemu_log_mask(LOG_GUEST_ERROR
,
455 "%s: invalid VTE for VPEID 0x%x\n", who
, vpeid
);
459 if (vte
->rdbase
>= s
->gicv3
->num_cpu
) {
462 return CMD_CONTINUE_OK
;
465 static ItsCmdResult
process_its_cmd_phys(GICv3ITSState
*s
, const ITEntry
*ite
,
471 cmdres
= lookup_cte(s
, __func__
, ite
->icid
, &cte
);
472 if (cmdres
!= CMD_CONTINUE_OK
) {
475 gicv3_redist_process_lpi(&s
->gicv3
->cpu
[cte
.rdbase
], ite
->intid
, irqlevel
);
476 return CMD_CONTINUE_OK
;
479 static ItsCmdResult
process_its_cmd_virt(GICv3ITSState
*s
, const ITEntry
*ite
,
485 cmdres
= lookup_vte(s
, __func__
, ite
->vpeid
, &vte
);
486 if (cmdres
!= CMD_CONTINUE_OK
) {
490 if (!intid_in_lpi_range(ite
->intid
) ||
491 ite
->intid
>= (1ULL << (vte
.vptsize
+ 1))) {
492 qemu_log_mask(LOG_GUEST_ERROR
, "%s: intid 0x%x out of range\n",
493 __func__
, ite
->intid
);
498 * For QEMU the actual pending of the vLPI is handled in the
501 gicv3_redist_process_vlpi(&s
->gicv3
->cpu
[vte
.rdbase
], ite
->intid
,
502 vte
.vptaddr
<< 16, ite
->doorbell
, irqlevel
);
503 return CMD_CONTINUE_OK
;
507 * This function handles the processing of following commands based on
508 * the ItsCmdType parameter passed:-
509 * 1. triggering of lpi interrupt translation via ITS INT command
510 * 2. triggering of lpi interrupt translation via gits_translater register
511 * 3. handling of ITS CLEAR command
512 * 4. handling of ITS DISCARD command
514 static ItsCmdResult
do_process_its_cmd(GICv3ITSState
*s
, uint32_t devid
,
515 uint32_t eventid
, ItsCmdType cmd
)
522 cmdres
= lookup_ite(s
, __func__
, devid
, eventid
, &ite
, &dte
);
523 if (cmdres
!= CMD_CONTINUE_OK
) {
527 irqlevel
= (cmd
== CLEAR
|| cmd
== DISCARD
) ? 0 : 1;
529 switch (ite
.inttype
) {
530 case ITE_INTTYPE_PHYSICAL
:
531 cmdres
= process_its_cmd_phys(s
, &ite
, irqlevel
);
533 case ITE_INTTYPE_VIRTUAL
:
534 if (!its_feature_virtual(s
)) {
535 /* Can't happen unless guest is illegally writing to table memory */
536 qemu_log_mask(LOG_GUEST_ERROR
,
537 "%s: invalid type %d in ITE (table corrupted?)\n",
538 __func__
, ite
.inttype
);
541 cmdres
= process_its_cmd_virt(s
, &ite
, irqlevel
);
544 g_assert_not_reached();
547 if (cmdres
== CMD_CONTINUE_OK
&& cmd
== DISCARD
) {
549 /* remove mapping from interrupt translation table */
551 return update_ite(s
, eventid
, &dte
, &i
) ? CMD_CONTINUE_OK
: CMD_STALL
;
553 return CMD_CONTINUE_OK
;
556 static ItsCmdResult
process_its_cmd(GICv3ITSState
*s
, const uint64_t *cmdpkt
,
559 uint32_t devid
, eventid
;
561 devid
= (cmdpkt
[0] & DEVID_MASK
) >> DEVID_SHIFT
;
562 eventid
= cmdpkt
[1] & EVENTID_MASK
;
565 trace_gicv3_its_cmd_int(devid
, eventid
);
568 trace_gicv3_its_cmd_clear(devid
, eventid
);
571 trace_gicv3_its_cmd_discard(devid
, eventid
);
574 g_assert_not_reached();
576 return do_process_its_cmd(s
, devid
, eventid
, cmd
);
579 static ItsCmdResult
process_mapti(GICv3ITSState
*s
, const uint64_t *cmdpkt
,
582 uint32_t devid
, eventid
;
584 uint64_t num_eventids
;
589 devid
= (cmdpkt
[0] & DEVID_MASK
) >> DEVID_SHIFT
;
590 eventid
= cmdpkt
[1] & EVENTID_MASK
;
591 icid
= cmdpkt
[2] & ICID_MASK
;
595 trace_gicv3_its_cmd_mapi(devid
, eventid
, icid
);
597 pIntid
= (cmdpkt
[1] & pINTID_MASK
) >> pINTID_SHIFT
;
598 trace_gicv3_its_cmd_mapti(devid
, eventid
, icid
, pIntid
);
601 if (devid
>= s
->dt
.num_entries
) {
602 qemu_log_mask(LOG_GUEST_ERROR
,
603 "%s: invalid command attributes: devid %d>=%d",
604 __func__
, devid
, s
->dt
.num_entries
);
608 if (get_dte(s
, devid
, &dte
) != MEMTX_OK
) {
611 num_eventids
= 1ULL << (dte
.size
+ 1);
613 if (icid
>= s
->ct
.num_entries
) {
614 qemu_log_mask(LOG_GUEST_ERROR
,
615 "%s: invalid ICID 0x%x >= 0x%x\n",
616 __func__
, icid
, s
->ct
.num_entries
);
621 qemu_log_mask(LOG_GUEST_ERROR
,
622 "%s: no valid DTE for devid 0x%x\n", __func__
, devid
);
626 if (eventid
>= num_eventids
) {
627 qemu_log_mask(LOG_GUEST_ERROR
,
628 "%s: invalid event ID 0x%x >= 0x%" PRIx64
"\n",
629 __func__
, eventid
, num_eventids
);
633 if (!intid_in_lpi_range(pIntid
)) {
634 qemu_log_mask(LOG_GUEST_ERROR
,
635 "%s: invalid interrupt ID 0x%x\n", __func__
, pIntid
);
639 /* add ite entry to interrupt translation table */
641 ite
.inttype
= ITE_INTTYPE_PHYSICAL
;
644 ite
.doorbell
= INTID_SPURIOUS
;
646 return update_ite(s
, eventid
, &dte
, &ite
) ? CMD_CONTINUE_OK
: CMD_STALL
;
649 static ItsCmdResult
process_vmapti(GICv3ITSState
*s
, const uint64_t *cmdpkt
,
652 uint32_t devid
, eventid
, vintid
, doorbell
, vpeid
;
653 uint32_t num_eventids
;
657 if (!its_feature_virtual(s
)) {
661 devid
= FIELD_EX64(cmdpkt
[0], VMAPTI_0
, DEVICEID
);
662 eventid
= FIELD_EX64(cmdpkt
[1], VMAPTI_1
, EVENTID
);
663 vpeid
= FIELD_EX64(cmdpkt
[1], VMAPTI_1
, VPEID
);
664 doorbell
= FIELD_EX64(cmdpkt
[2], VMAPTI_2
, DOORBELL
);
667 trace_gicv3_its_cmd_vmapi(devid
, eventid
, vpeid
, doorbell
);
669 vintid
= FIELD_EX64(cmdpkt
[2], VMAPTI_2
, VINTID
);
670 trace_gicv3_its_cmd_vmapti(devid
, eventid
, vpeid
, vintid
, doorbell
);
673 if (devid
>= s
->dt
.num_entries
) {
674 qemu_log_mask(LOG_GUEST_ERROR
,
675 "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
676 __func__
, devid
, s
->dt
.num_entries
);
680 if (get_dte(s
, devid
, &dte
) != MEMTX_OK
) {
685 qemu_log_mask(LOG_GUEST_ERROR
,
686 "%s: no entry in device table for DeviceID 0x%x\n",
691 num_eventids
= 1ULL << (dte
.size
+ 1);
693 if (eventid
>= num_eventids
) {
694 qemu_log_mask(LOG_GUEST_ERROR
,
695 "%s: EventID 0x%x too large for DeviceID 0x%x "
696 "(must be less than 0x%x)\n",
697 __func__
, eventid
, devid
, num_eventids
);
700 if (!intid_in_lpi_range(vintid
)) {
701 qemu_log_mask(LOG_GUEST_ERROR
,
702 "%s: VIntID 0x%x not a valid LPI\n",
706 if (!valid_doorbell(doorbell
)) {
707 qemu_log_mask(LOG_GUEST_ERROR
,
708 "%s: Doorbell %d not 1023 and not a valid LPI\n",
712 if (vpeid
>= s
->vpet
.num_entries
) {
713 qemu_log_mask(LOG_GUEST_ERROR
,
714 "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
715 __func__
, vpeid
, s
->vpet
.num_entries
);
718 /* add ite entry to interrupt translation table */
720 ite
.inttype
= ITE_INTTYPE_VIRTUAL
;
723 ite
.doorbell
= doorbell
;
725 return update_ite(s
, eventid
, &dte
, &ite
) ? CMD_CONTINUE_OK
: CMD_STALL
;
729 * Update the Collection Table entry for @icid to @cte. Returns true
730 * on success, false if there was a memory access error.
732 static bool update_cte(GICv3ITSState
*s
, uint16_t icid
, const CTEntry
*cte
)
734 AddressSpace
*as
= &s
->gicv3
->dma_as
;
737 MemTxResult res
= MEMTX_OK
;
739 trace_gicv3_its_cte_write(icid
, cte
->valid
, cte
->rdbase
);
742 /* add mapping entry to collection table */
743 cteval
= FIELD_DP64(cteval
, CTE
, VALID
, 1);
744 cteval
= FIELD_DP64(cteval
, CTE
, RDBASE
, cte
->rdbase
);
747 entry_addr
= table_entry_addr(s
, &s
->ct
, icid
, &res
);
748 if (res
!= MEMTX_OK
) {
749 /* memory access error: stall */
752 if (entry_addr
== -1) {
753 /* No L2 table for this index: discard write and continue */
757 address_space_stq_le(as
, entry_addr
, cteval
, MEMTXATTRS_UNSPECIFIED
, &res
);
758 return res
== MEMTX_OK
;
761 static ItsCmdResult
process_mapc(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
766 icid
= cmdpkt
[2] & ICID_MASK
;
767 cte
.valid
= cmdpkt
[2] & CMD_FIELD_VALID_MASK
;
769 cte
.rdbase
= (cmdpkt
[2] & R_MAPC_RDBASE_MASK
) >> R_MAPC_RDBASE_SHIFT
;
770 cte
.rdbase
&= RDBASE_PROCNUM_MASK
;
774 trace_gicv3_its_cmd_mapc(icid
, cte
.rdbase
, cte
.valid
);
776 if (icid
>= s
->ct
.num_entries
) {
777 qemu_log_mask(LOG_GUEST_ERROR
, "ITS MAPC: invalid ICID 0x%x\n", icid
);
780 if (cte
.valid
&& cte
.rdbase
>= s
->gicv3
->num_cpu
) {
781 qemu_log_mask(LOG_GUEST_ERROR
,
782 "ITS MAPC: invalid RDBASE %u\n", cte
.rdbase
);
786 return update_cte(s
, icid
, &cte
) ? CMD_CONTINUE_OK
: CMD_STALL
;
790 * Update the Device Table entry for @devid to @dte. Returns true
791 * on success, false if there was a memory access error.
793 static bool update_dte(GICv3ITSState
*s
, uint32_t devid
, const DTEntry
*dte
)
795 AddressSpace
*as
= &s
->gicv3
->dma_as
;
798 MemTxResult res
= MEMTX_OK
;
800 trace_gicv3_its_dte_write(devid
, dte
->valid
, dte
->size
, dte
->ittaddr
);
803 /* add mapping entry to device table */
804 dteval
= FIELD_DP64(dteval
, DTE
, VALID
, 1);
805 dteval
= FIELD_DP64(dteval
, DTE
, SIZE
, dte
->size
);
806 dteval
= FIELD_DP64(dteval
, DTE
, ITTADDR
, dte
->ittaddr
);
809 entry_addr
= table_entry_addr(s
, &s
->dt
, devid
, &res
);
810 if (res
!= MEMTX_OK
) {
811 /* memory access error: stall */
814 if (entry_addr
== -1) {
815 /* No L2 table for this index: discard write and continue */
818 address_space_stq_le(as
, entry_addr
, dteval
, MEMTXATTRS_UNSPECIFIED
, &res
);
819 return res
== MEMTX_OK
;
822 static ItsCmdResult
process_mapd(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
827 devid
= (cmdpkt
[0] & DEVID_MASK
) >> DEVID_SHIFT
;
828 dte
.size
= cmdpkt
[1] & SIZE_MASK
;
829 dte
.ittaddr
= (cmdpkt
[2] & ITTADDR_MASK
) >> ITTADDR_SHIFT
;
830 dte
.valid
= cmdpkt
[2] & CMD_FIELD_VALID_MASK
;
832 trace_gicv3_its_cmd_mapd(devid
, dte
.size
, dte
.ittaddr
, dte
.valid
);
834 if (devid
>= s
->dt
.num_entries
) {
835 qemu_log_mask(LOG_GUEST_ERROR
,
836 "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
837 devid
, s
->dt
.num_entries
);
841 if (dte
.size
> FIELD_EX64(s
->typer
, GITS_TYPER
, IDBITS
)) {
842 qemu_log_mask(LOG_GUEST_ERROR
,
843 "ITS MAPD: invalid size %d\n", dte
.size
);
847 return update_dte(s
, devid
, &dte
) ? CMD_CONTINUE_OK
: CMD_STALL
;
850 static ItsCmdResult
process_movall(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
854 rd1
= FIELD_EX64(cmdpkt
[2], MOVALL_2
, RDBASE1
);
855 rd2
= FIELD_EX64(cmdpkt
[3], MOVALL_3
, RDBASE2
);
857 trace_gicv3_its_cmd_movall(rd1
, rd2
);
859 if (rd1
>= s
->gicv3
->num_cpu
) {
860 qemu_log_mask(LOG_GUEST_ERROR
,
861 "%s: RDBASE1 %" PRId64
862 " out of range (must be less than %d)\n",
863 __func__
, rd1
, s
->gicv3
->num_cpu
);
866 if (rd2
>= s
->gicv3
->num_cpu
) {
867 qemu_log_mask(LOG_GUEST_ERROR
,
868 "%s: RDBASE2 %" PRId64
869 " out of range (must be less than %d)\n",
870 __func__
, rd2
, s
->gicv3
->num_cpu
);
875 /* Move to same target must succeed as a no-op */
876 return CMD_CONTINUE_OK
;
879 /* Move all pending LPIs from redistributor 1 to redistributor 2 */
880 gicv3_redist_movall_lpis(&s
->gicv3
->cpu
[rd1
], &s
->gicv3
->cpu
[rd2
]);
882 return CMD_CONTINUE_OK
;
885 static ItsCmdResult
process_movi(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
887 uint32_t devid
, eventid
;
890 CTEntry old_cte
, new_cte
;
894 devid
= FIELD_EX64(cmdpkt
[0], MOVI_0
, DEVICEID
);
895 eventid
= FIELD_EX64(cmdpkt
[1], MOVI_1
, EVENTID
);
896 new_icid
= FIELD_EX64(cmdpkt
[2], MOVI_2
, ICID
);
898 trace_gicv3_its_cmd_movi(devid
, eventid
, new_icid
);
900 cmdres
= lookup_ite(s
, __func__
, devid
, eventid
, &old_ite
, &dte
);
901 if (cmdres
!= CMD_CONTINUE_OK
) {
905 if (old_ite
.inttype
!= ITE_INTTYPE_PHYSICAL
) {
906 qemu_log_mask(LOG_GUEST_ERROR
,
907 "%s: invalid command attributes: invalid ITE\n",
912 cmdres
= lookup_cte(s
, __func__
, old_ite
.icid
, &old_cte
);
913 if (cmdres
!= CMD_CONTINUE_OK
) {
916 cmdres
= lookup_cte(s
, __func__
, new_icid
, &new_cte
);
917 if (cmdres
!= CMD_CONTINUE_OK
) {
921 if (old_cte
.rdbase
!= new_cte
.rdbase
) {
922 /* Move the LPI from the old redistributor to the new one */
923 gicv3_redist_mov_lpi(&s
->gicv3
->cpu
[old_cte
.rdbase
],
924 &s
->gicv3
->cpu
[new_cte
.rdbase
],
928 /* Update the ICID field in the interrupt translation table entry */
929 old_ite
.icid
= new_icid
;
930 return update_ite(s
, eventid
, &dte
, &old_ite
) ? CMD_CONTINUE_OK
: CMD_STALL
;
934 * Update the vPE Table entry at index @vpeid with the entry @vte.
935 * Returns true on success, false if there was a memory access error.
937 static bool update_vte(GICv3ITSState
*s
, uint32_t vpeid
, const VTEntry
*vte
)
939 AddressSpace
*as
= &s
->gicv3
->dma_as
;
942 MemTxResult res
= MEMTX_OK
;
944 trace_gicv3_its_vte_write(vpeid
, vte
->valid
, vte
->vptsize
, vte
->vptaddr
,
948 vteval
= FIELD_DP64(vteval
, VTE
, VALID
, 1);
949 vteval
= FIELD_DP64(vteval
, VTE
, VPTSIZE
, vte
->vptsize
);
950 vteval
= FIELD_DP64(vteval
, VTE
, VPTADDR
, vte
->vptaddr
);
951 vteval
= FIELD_DP64(vteval
, VTE
, RDBASE
, vte
->rdbase
);
954 entry_addr
= table_entry_addr(s
, &s
->vpet
, vpeid
, &res
);
955 if (res
!= MEMTX_OK
) {
958 if (entry_addr
== -1) {
959 /* No L2 table for this index: discard write and continue */
962 address_space_stq_le(as
, entry_addr
, vteval
, MEMTXATTRS_UNSPECIFIED
, &res
);
963 return res
== MEMTX_OK
;
966 static ItsCmdResult
process_vmapp(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
971 if (!its_feature_virtual(s
)) {
975 vpeid
= FIELD_EX64(cmdpkt
[1], VMAPP_1
, VPEID
);
976 vte
.rdbase
= FIELD_EX64(cmdpkt
[2], VMAPP_2
, RDBASE
);
977 vte
.valid
= FIELD_EX64(cmdpkt
[2], VMAPP_2
, V
);
978 vte
.vptsize
= FIELD_EX64(cmdpkt
[3], VMAPP_3
, VPTSIZE
);
979 vte
.vptaddr
= FIELD_EX64(cmdpkt
[3], VMAPP_3
, VPTADDR
);
981 trace_gicv3_its_cmd_vmapp(vpeid
, vte
.rdbase
, vte
.valid
,
982 vte
.vptaddr
, vte
.vptsize
);
985 * For GICv4.0 the VPT_size field is only 5 bits, whereas we
986 * define our field macros to include the full GICv4.1 8 bits.
987 * The range check on VPT_size will catch the cases where
988 * the guest set the RES0-in-GICv4.0 bits [7:6].
990 if (vte
.vptsize
> FIELD_EX64(s
->typer
, GITS_TYPER
, IDBITS
)) {
991 qemu_log_mask(LOG_GUEST_ERROR
,
992 "%s: invalid VPT_size 0x%x\n", __func__
, vte
.vptsize
);
996 if (vte
.valid
&& vte
.rdbase
>= s
->gicv3
->num_cpu
) {
997 qemu_log_mask(LOG_GUEST_ERROR
,
998 "%s: invalid rdbase 0x%x\n", __func__
, vte
.rdbase
);
1002 if (vpeid
>= s
->vpet
.num_entries
) {
1003 qemu_log_mask(LOG_GUEST_ERROR
,
1004 "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
1005 __func__
, vpeid
, s
->vpet
.num_entries
);
1006 return CMD_CONTINUE
;
1009 return update_vte(s
, vpeid
, &vte
) ? CMD_CONTINUE_OK
: CMD_STALL
;
1012 typedef struct VmovpCallbackData
{
1016 * Overall command result. If more than one callback finds an
1017 * error, STALL beats CONTINUE.
1019 ItsCmdResult result
;
1020 } VmovpCallbackData
;
1022 static void vmovp_callback(gpointer data
, gpointer opaque
)
1025 * This function is called to update the VPEID field in a VPE
1026 * table entry for this ITS. This might be because of a VMOVP
1027 * command executed on any ITS that is connected to the same GIC
1028 * as this ITS. We need to read the VPE table entry for the VPEID
1029 * and update its RDBASE field.
1031 GICv3ITSState
*s
= data
;
1032 VmovpCallbackData
*cbdata
= opaque
;
1034 ItsCmdResult cmdres
;
1036 cmdres
= lookup_vte(s
, __func__
, cbdata
->vpeid
, &vte
);
1039 cbdata
->result
= CMD_STALL
;
1042 if (cbdata
->result
!= CMD_STALL
) {
1043 cbdata
->result
= CMD_CONTINUE
;
1046 case CMD_CONTINUE_OK
:
1050 vte
.rdbase
= cbdata
->rdbase
;
1051 if (!update_vte(s
, cbdata
->vpeid
, &vte
)) {
1052 cbdata
->result
= CMD_STALL
;
1056 static ItsCmdResult
process_vmovp(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
1058 VmovpCallbackData cbdata
;
1060 if (!its_feature_virtual(s
)) {
1061 return CMD_CONTINUE
;
1064 cbdata
.vpeid
= FIELD_EX64(cmdpkt
[1], VMOVP_1
, VPEID
);
1065 cbdata
.rdbase
= FIELD_EX64(cmdpkt
[2], VMOVP_2
, RDBASE
);
1067 trace_gicv3_its_cmd_vmovp(cbdata
.vpeid
, cbdata
.rdbase
);
1069 if (cbdata
.rdbase
>= s
->gicv3
->num_cpu
) {
1070 return CMD_CONTINUE
;
1074 * Our ITS implementation reports GITS_TYPER.VMOVP == 1, which means
1075 * that when the VMOVP command is executed on an ITS to change the
1076 * VPEID field in a VPE table entry the change must be propagated
1077 * to all the ITSes connected to the same GIC.
1079 cbdata
.result
= CMD_CONTINUE_OK
;
1080 gicv3_foreach_its(s
->gicv3
, vmovp_callback
, &cbdata
);
1081 return cbdata
.result
;
1084 static ItsCmdResult
process_vmovi(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
1086 uint32_t devid
, eventid
, vpeid
, doorbell
;
1087 bool doorbell_valid
;
1090 VTEntry old_vte
, new_vte
;
1091 ItsCmdResult cmdres
;
1093 if (!its_feature_virtual(s
)) {
1094 return CMD_CONTINUE
;
1097 devid
= FIELD_EX64(cmdpkt
[0], VMOVI_0
, DEVICEID
);
1098 eventid
= FIELD_EX64(cmdpkt
[1], VMOVI_1
, EVENTID
);
1099 vpeid
= FIELD_EX64(cmdpkt
[1], VMOVI_1
, VPEID
);
1100 doorbell_valid
= FIELD_EX64(cmdpkt
[2], VMOVI_2
, D
);
1101 doorbell
= FIELD_EX64(cmdpkt
[2], VMOVI_2
, DOORBELL
);
1103 trace_gicv3_its_cmd_vmovi(devid
, eventid
, vpeid
, doorbell_valid
, doorbell
);
1105 if (doorbell_valid
&& !valid_doorbell(doorbell
)) {
1106 qemu_log_mask(LOG_GUEST_ERROR
,
1107 "%s: invalid doorbell 0x%x\n", __func__
, doorbell
);
1108 return CMD_CONTINUE
;
1111 cmdres
= lookup_ite(s
, __func__
, devid
, eventid
, &ite
, &dte
);
1112 if (cmdres
!= CMD_CONTINUE_OK
) {
1116 if (ite
.inttype
!= ITE_INTTYPE_VIRTUAL
) {
1117 qemu_log_mask(LOG_GUEST_ERROR
, "%s: ITE is not for virtual interrupt\n",
1119 return CMD_CONTINUE
;
1122 cmdres
= lookup_vte(s
, __func__
, ite
.vpeid
, &old_vte
);
1123 if (cmdres
!= CMD_CONTINUE_OK
) {
1126 cmdres
= lookup_vte(s
, __func__
, vpeid
, &new_vte
);
1127 if (cmdres
!= CMD_CONTINUE_OK
) {
1131 if (!intid_in_lpi_range(ite
.intid
) ||
1132 ite
.intid
>= (1ULL << (old_vte
.vptsize
+ 1)) ||
1133 ite
.intid
>= (1ULL << (new_vte
.vptsize
+ 1))) {
1134 qemu_log_mask(LOG_GUEST_ERROR
,
1135 "%s: ITE intid 0x%x out of range\n",
1136 __func__
, ite
.intid
);
1137 return CMD_CONTINUE
;
1141 if (doorbell_valid
) {
1142 ite
.doorbell
= doorbell
;
1146 * Move the LPI from the old redistributor to the new one. We don't
1147 * need to do anything if the guest somehow specified the
1148 * same pending table for source and destination.
1150 if (old_vte
.vptaddr
!= new_vte
.vptaddr
) {
1151 gicv3_redist_mov_vlpi(&s
->gicv3
->cpu
[old_vte
.rdbase
],
1152 old_vte
.vptaddr
<< 16,
1153 &s
->gicv3
->cpu
[new_vte
.rdbase
],
1154 new_vte
.vptaddr
<< 16,
1159 /* Update the ITE to the new VPEID and possibly doorbell values */
1160 return update_ite(s
, eventid
, &dte
, &ite
) ? CMD_CONTINUE_OK
: CMD_STALL
;
1163 static ItsCmdResult
process_vinvall(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
1167 ItsCmdResult cmdres
;
1169 if (!its_feature_virtual(s
)) {
1170 return CMD_CONTINUE
;
1173 vpeid
= FIELD_EX64(cmdpkt
[1], VINVALL_1
, VPEID
);
1175 trace_gicv3_its_cmd_vinvall(vpeid
);
1177 cmdres
= lookup_vte(s
, __func__
, vpeid
, &vte
);
1178 if (cmdres
!= CMD_CONTINUE_OK
) {
1182 gicv3_redist_vinvall(&s
->gicv3
->cpu
[vte
.rdbase
], vte
.vptaddr
<< 16);
1183 return CMD_CONTINUE_OK
;
/*
 * Process an INV command: re-read the configuration for the interrupt
 * identified by (devid, eventid) and tell the relevant redistributor
 * to re-evaluate it, for either a physical LPI or a virtual LPI.
 */
static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    ITEntry ite;
    DTEntry dte;
    CTEntry cte;
    VTEntry vte;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], INV_1, EVENTID);

    trace_gicv3_its_cmd_inv(devid, eventid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        /* Physical LPI: route via the collection's target redistributor */
        cmdres = lookup_cte(s, __func__, ite.icid, &cte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        gicv3_redist_inv_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }

        cmdres = lookup_vte(s, __func__, ite.vpeid, &vte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        /* intid must fit within the vPE's pending table (vptsize + 1 bits) */
        if (!intid_in_lpi_range(ite.intid) ||
            ite.intid >= (1ULL << (vte.vptsize + 1))) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                          __func__, ite.intid);
            return CMD_CONTINUE;
        }
        /* vte.vptaddr holds the pending table address shifted right by 16 */
        gicv3_redist_inv_vlpi(&s->gicv3->cpu[vte.rdbase], ite.intid,
                              vte.vptaddr << 16);
        break;
    default:
        g_assert_not_reached();
    }

    return CMD_CONTINUE_OK;
}
/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    /* Nothing to do unless the ITS is enabled */
    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    /* Drain the command queue until the read pointer catches the write one */
    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE_OK;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            /* Memory transaction error reading the command: stall the queue */
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        /* Command packets are little-endian in guest memory */
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_VSYNC:
            /*
             * VSYNC also is a nop, because our implementation is always
             * in sync.
             */
            if (!its_feature_virtual(s)) {
                result = CMD_CONTINUE;
                break;
            }
            trace_gicv3_its_cmd_vsync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
            result = process_inv(s, cmdpkt);
            break;
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             * INVALL operates on a collection specified by ICID so
             * it only affects physical LPIs.
             */
            trace_gicv3_its_cmd_invall();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        case GITS_CMD_VMAPTI:
            result = process_vmapti(s, cmdpkt, false);
            break;
        case GITS_CMD_VMAPI:
            result = process_vmapti(s, cmdpkt, true);
            break;
        case GITS_CMD_VMAPP:
            result = process_vmapp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVP:
            result = process_vmovp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVI:
            result = process_vmovi(s, cmdpkt);
            break;
        case GITS_CMD_VINVALL:
            result = process_vinvall(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result != CMD_STALL) {
            /* CMD_CONTINUE or CMD_CONTINUE_OK */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL: set the stalled bit and stop processing */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: entries fill the whole allocation directly */
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* Two-level table: level 1 points at pages of entries */
            td->num_entries = (((num_pages * page_sz) /
                                L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        /* Cap the entry count at what the ID width can actually index */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
1523 static void extract_cmdq_params(GICv3ITSState
*s
)
1525 uint16_t num_pages
= 0;
1526 uint64_t value
= s
->cbaser
;
1528 num_pages
= FIELD_EX64(value
, GITS_CBASER
, SIZE
) + 1;
1530 memset(&s
->cq
, 0 , sizeof(s
->cq
));
1532 if (FIELD_EX64(value
, GITS_CBASER
, VALID
)) {
1533 s
->cq
.num_entries
= (num_pages
* GITS_PAGE_SIZE_4K
) /
1534 GITS_CMDQ_ENTRY_SIZE
;
1535 s
->cq
.base_addr
= FIELD_EX64(value
, GITS_CBASER
, PHYADDR
);
1536 s
->cq
.base_addr
<<= R_GITS_CBASER_PHYADDR_SHIFT
;
/*
 * Read handler for the interrupt translation space frame.
 * Reads always return zero: GITS_TRANSLATER is write-only and
 * everything else in this frame is RES0.
 */
static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}
/*
 * Write handler for the interrupt translation space frame.
 * A write to GITS_TRANSLATER while the ITS is enabled triggers the
 * translation of (requester_id, eventid-in-data) into an interrupt.
 */
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            /* NONE marks this as a GITS_TRANSLATER-originated request */
            result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}
/*
 * Handle a 32-bit write to an ITS control frame register.
 * Returns true if the offset matched a register (even a read-only one,
 * where the write is logged and ignored); false for reserved offsets,
 * so the caller can log a guest error while still treating it as WI.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /* Enabling the ITS: latch table/queue params, then drain queue */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            /* Writing CBASER resets the read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* New commands were queued: process them now */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                /* Odd word: update the top half, preserving RO bits */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Even word: update the bottom half, preserving RO bits */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
/*
 * Handle a 32-bit read from an ITS control frame register.
 * Returns false for reserved offsets so the caller can log the access
 * while still returning RAZ to the guest.
 */
static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(s->gicv3, offset - GITS_IDREGS, GICV3_PIDR0_ITS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        /* Odd word selects the top half of the 64-bit register */
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}
/*
 * Handle a 64-bit write to an ITS control frame register.
 * Returns false for reserved offsets so the caller can log the access
 * while still treating it as WI.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Keep the read-only bits, take the rest from the write */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            /* Writing CBASER resets the read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        /* New commands were queued: process them now */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
/*
 * Handle a 64-bit read from an ITS control frame register.
 * Returns false for reserved offsets so the caller can log the access
 * while still returning RAZ to the guest.
 */
static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}
/*
 * Top-level MMIO read dispatcher for the ITS control frame:
 * routes to the 32-bit or 64-bit leaf handler by access size.
 */
static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badread(offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}
/*
 * Top-level MMIO write dispatcher for the ITS control frame:
 * routes to the 32-bit or 64-bit leaf handler by access size.
 */
static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}
/* MMIO ops for the ITS control register frame (GITS_* registers) */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* MMIO ops for the interrupt translation frame (GITS_TRANSLATER) */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/*
 * Realize the ITS device: verify every CPU's redistributor supports
 * physical LPIs, register with the parent GIC, map the MMIO frames,
 * and populate the read-only GITS_TYPER feature fields.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    /* The ITS is useless unless every redistributor handles physical LPIs */
    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_add_its(s->gicv3, dev);

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
    if (s->gicv3->revision >= 4) {
        /* Our VMOVP handles cross-ITS synchronization itself */
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VMOVP, 1);
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VIRTUAL, 1);
    }
}
/*
 * Resettable "hold" phase: chain to the parent class's hold phase,
 * then set the ITS registers to their architectural reset values.
 */
static void gicv3_its_reset_hold(Object *obj, ResetType type)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj, type);
    }

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    /* BASER2 (vPE table) exists only when the GICv4 virtual feature is on */
    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}
1997 static void gicv3_its_post_load(GICv3ITSState
*s
)
1999 if (s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
) {
2000 extract_table_params(s
);
2001 extract_cmdq_params(s
);
/* QOM properties: the mandatory link to the parent GICv3 device */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Class init: wire up realize, properties, the reset "hold" phase
 * (saving the parent's phases for chaining), and the migration
 * post_load hook.
 */
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    resettable_class_set_parent_phases(rc, NULL, gicv3_its_reset_hold, NULL,
                                       &ic->parent_phases);
    icc->post_load = gicv3_its_post_load;
}
/* QOM type registration record for the emulated ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
/* Register the ITS QOM type with the type system at startup */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)