/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct {
    uint32_t iteh;
    uint64_t itel;
} IteEntry;

static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

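/*
 * Worked example (illustrative only, values assumed): with a 64KB page
 * size, GITS_BASER splits the physical address across two fields, so a
 * value whose PHYADDRL_64K field is 0x12345 and whose PHYADDRH_64K field
 * is 0x3 yields (0x12345 << 16) | (0x3 << 48) = 0x3000123450000.
 * For 4KB and 16KB pages the whole address comes from PHYADDR << 12.
 */
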
static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
                    MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;

    if (s->ct.indirect) {
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                max_l2_entries = s->ct.page_sz / s->ct.entry_sz;

                l2t_addr = value & ((1ULL << 51) - 1);

                *cte = address_space_ldq_le(as, l2t_addr +
                                    ((icid % max_l2_entries) * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        *cte = address_space_ldq_le(as, s->ct.base_addr +
                                    (icid * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
    }

    return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
}

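/*
 * Note on the two-level walk above (illustrative, numbers assumed): with a
 * 4KB page size and 8-byte level-1 entries, icid / 512 picks the level-1
 * entry, whose low 51 bits point at a level-2 page; within that page the
 * collection's entry sits at (icid % (page_sz / entry_sz)) * GITS_CTE_SIZE.
 * get_dte() below applies the same scheme to the device table.
 */
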
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

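/*
 * Layout note (derived from the arithmetic above): the ITT is treated as
 * an array of 12-byte entries, sizeof(uint64_t) + sizeof(uint32_t) per
 * eventid, holding the 64-bit itel word and the 32-bit iteh word;
 * get_ite() below reads the entry back using the same stride.
 */
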
static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (ite.itel & TABLE_ENTRY_VALID_MASK) {
                if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
                    GITS_TYPE_PHYSICAL) {
                    *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
                              ITE_ENTRY_INTID_SHIFT;
                    *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
                    status = true;
                }
            }
        }
    }
    return status;
}

static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;

    if (s->dt.indirect) {
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                max_l2_entries = s->dt.page_sz / s->dt.entry_sz;

                l2t_addr = value & ((1ULL << 51) - 1);

                value = address_space_ldq_le(as, l2t_addr +
                                   ((devid % max_l2_entries) * GITS_DTE_SIZE),
                                   MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        value = address_space_ldq_le(as, s->dt.base_addr +
                                     (devid * GITS_DTE_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);
    }

    return value;
}

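/*
 * DTE sketch (as consumed by the callers below, informal): bit 0 is the
 * valid bit, (dte >> 1) & SIZE_MASK encodes the supported EventID width
 * minus one (so the callers compute max_eventid = 1 << (size + 1)), and
 * the 256-byte-aligned ITT base is recovered via GITS_DTE_ITTADDR_MASK.
 */
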
/*
 * This function handles the processing of following commands based on
 * the ItsCmdType parameter passed:-
 * 1. triggering of lpi interrupt translation via ITS INT command
 * 2. triggering of lpi interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
                            ItsCmdType cmd)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid, eventid;
    MemTxResult res = MEMTX_OK;
    bool dte_valid = false;
    uint64_t dte = 0;
    uint32_t max_eventid;
    uint16_t icid = 0;
    uint32_t pIntid = 0;
    bool ite_valid = false;
    uint64_t cte = 0;
    bool cte_valid = false;
    bool result = false;
    uint64_t rdbase;

    if (cmd == NONE) {
        devid = offset;
    } else {
        devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

        offset += NUM_BYTES_IN_DW;
        value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                     MEMTXATTRS_UNSPECIFIED, &res);
    }

    if (res != MEMTX_OK) {
        return result;
    }

    eventid = (value & EVENTID_MASK);

    dte = get_dte(s, devid, &res);

    if (res != MEMTX_OK) {
        return result;
    }
    dte_valid = dte & TABLE_ENTRY_VALID_MASK;

    if (dte_valid) {
        max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));

        ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);

        if (res != MEMTX_OK) {
            return result;
        }

        if (ite_valid) {
            cte_valid = get_cte(s, icid, &cte, &res);
        }

        if (res != MEMTX_OK) {
            return result;
        }
    }

    if ((devid > s->dt.maxids.max_devids) || !dte_valid || !ite_valid ||
            !cte_valid || (eventid > max_eventid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes "
                      "devid %d or eventid %d or invalid dte %d or "
                      "invalid cte %d or invalid ite %d\n",
                      __func__, devid, eventid, dte_valid, cte_valid,
                      ite_valid);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        /*
         * Current implementation only supports rdbase == procnum
         * Hence rdbase physical address is ignored
         */
        rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;

        if (rdbase > s->gicv3->num_cpu) {
            return result;
        }

        if ((cmd == CLEAR) || (cmd == DISCARD)) {
            gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
        } else {
            gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
        }

        if (cmd == DISCARD) {
            IteEntry ite = {};
            /* remove mapping from interrupt translation table */
            result = update_ite(s, eventid, dte, ite);
        }
    }

    return result;
}

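/*
 * Informal summary of the lookup chain above: the DeviceID selects a DTE,
 * the DTE's ITT plus the EventID selects an ITE, the ITE's ICID selects a
 * CTE, and the CTE names the target redistributor. CLEAR and DISCARD then
 * clear the LPI's pending state, while INT and GITS_TRANSLATER writes set
 * it; DISCARD additionally invalidates the ITE.
 */
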
static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
                          bool ignore_pInt)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint32_t max_eventid, max_Intid;
    bool dte_valid;
    MemTxResult res = MEMTX_OK;
    uint16_t icid = 0;
    uint64_t dte = 0;
    IteEntry ite;
    uint32_t int_spurious = INTID_SPURIOUS;
    bool result = false;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    eventid = (value & EVENTID_MASK);

    pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    icid = value & ICID_MASK;

    dte = get_dte(s, devid, &res);

    if (res != MEMTX_OK) {
        return result;
    }
    dte_valid = dte & TABLE_ENTRY_VALID_MASK;

    max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));

    max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;

    if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
            || !dte_valid || (eventid > max_eventid) ||
            (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
              (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes "
                      "devid %d or icid %d or eventid %d or pIntid %d or "
                      "unmapped dte %d\n", __func__, devid, icid, eventid,
                      pIntid, dte_valid);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        /* add ite entry to interrupt translation table */
        ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
                   (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);

        if (ignore_pInt) {
            ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
        } else {
            ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
        }
        ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
        ite.iteh = icid;

        result = update_ite(s, eventid, dte, ite);
    }

    return result;
}

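/*
 * Packing note (follows from the shifts above): the new ITE carries the
 * valid bit, a GITS_TYPE_PHYSICAL interrupt type, the physical INTID (for
 * MAPI, i.e. ignore_pInt, the EventID doubles as the INTID), a spurious
 * INTID placeholder, and the target ICID in the iteh word.
 */
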
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
                       uint64_t rdbase)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;
    uint64_t cte = 0;
    MemTxResult res = MEMTX_OK;

    if (!s->ct.valid) {
        return true;
    }

    if (valid) {
        /* add mapping entry to collection table */
        cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->ct.indirect) {
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        if (valid_l2t) {
            max_l2_entries = s->ct.page_sz / s->ct.entry_sz;

            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((icid % max_l2_entries) * GITS_CTE_SIZE),
                                 cte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
                             cte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

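/*
 * CTE sketch (matching the encoding above and the decode in
 * process_its_cmd): bit 0 is the valid bit and the target processor
 * number is stored shifted left by one, which is why the reader extracts
 * it with GITS_CTE_RDBASE_PROCNUM_MASK followed by a right shift of one.
 */
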
static bool process_mapc(GICv3ITSState *s, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint16_t icid;
    uint64_t rdbase;
    bool valid;
    MemTxResult res = MEMTX_OK;
    bool result = false;
    uint64_t value;

    offset += NUM_BYTES_IN_DW;
    offset += NUM_BYTES_IN_DW;

    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    icid = value & ICID_MASK;

    rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
    rdbase &= RDBASE_PROCNUM_MASK;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((icid > s->ct.maxids.max_collids) || (rdbase > s->gicv3->num_cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid collection table attributes "
                      "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        result = update_cte(s, icid, valid, rdbase);
    }

    return result;
}

static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
                       uint8_t size, uint64_t itt_addr)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t max_l2_entries;
    uint64_t dte = 0;
    MemTxResult res = MEMTX_OK;

    if (s->dt.valid) {
        if (valid) {
            /* add mapping entry to device table */
            dte = (valid & TABLE_ENTRY_VALID_MASK) |
                  ((size & SIZE_MASK) << 1U) |
                  (itt_addr << GITS_DTE_ITTADDR_SHIFT);
        }
    } else {
        return true;
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->dt.indirect) {
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        if (valid_l2t) {
            max_l2_entries = s->dt.page_sz / s->dt.entry_sz;

            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((devid % max_l2_entries) * GITS_DTE_SIZE),
                                 dte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
                             dte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid;
    uint8_t size;
    uint64_t itt_addr;
    bool valid;
    MemTxResult res = MEMTX_OK;
    bool result = false;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    size = (value & SIZE_MASK);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((devid > s->dt.maxids.max_devids) ||
        (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device table attributes "
                      "devid %d or size %d\n", devid, size);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        result = update_dte(s, devid, valid, size, itt_addr);
    }

    return result;
}

/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    uint64_t data;
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    bool result = true;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & ITS_CTLR_ENABLED)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset > s->cq.max_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset > s->cq.max_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
        data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
                                    MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            result = false;
        }
        cmd = (data & CMD_MASK);

        switch (cmd) {
        case GITS_CMD_INT:
            res = process_its_cmd(s, data, cq_offset, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            res = process_its_cmd(s, data, cq_offset, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, data, cq_offset);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cq_offset);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, data, cq_offset, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, data, cq_offset, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, data, cq_offset, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        default:
            break;
        }
        if (result) {
            rd_offset++;
            rd_offset %= s->cq.max_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /*
             * in this implementation, in case of dma read/write error
             * we stall the command processing
             */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: %x cmd processing failed\n", __func__, cmd);
            break;
        }
    }
}

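/*
 * Queue geometry (informal): the OFFSET fields of GITS_CREADR/GITS_CWRITER
 * are used here as command indexes, each command occupies
 * GITS_CMDQ_ENTRY_SIZE bytes (four doublewords, hence the per-field
 * NUM_BYTES_IN_DW steps in the handlers above), and the read index wraps
 * modulo cq.max_entries.
 */
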
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            memset(&s->dt, 0, sizeof(s->dt));
            s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);

            if (!s->dt.valid) {
                break;
            }

            s->dt.page_sz = page_sz;
            s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
            s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);

            if (!s->dt.indirect) {
                s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
            } else {
                s->dt.max_entries = (((num_pages * page_sz) /
                                     L1TABLE_ENTRY_SIZE) *
                                     (page_sz / s->dt.entry_sz));
            }

            s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
                                       DEVBITS) + 1));

            s->dt.base_addr = baser_base_addr(value, page_sz);

            break;

        case GITS_BASER_TYPE_COLLECTION:
            memset(&s->ct, 0, sizeof(s->ct));
            s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);

            /*
             * GITS_TYPER.HCC is 0 for this implementation
             * hence writes are discarded if ct.valid is 0
             */
            if (!s->ct.valid) {
                break;
            }

            s->ct.page_sz = page_sz;
            s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
            s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);

            if (!s->ct.indirect) {
                s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
            } else {
                s->ct.max_entries = (((num_pages * page_sz) /
                                     L1TABLE_ENTRY_SIZE) *
                                     (page_sz / s->ct.entry_sz));
            }

            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
                                            GITS_TYPER, CIDBITS) + 1));
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                s->ct.maxids.max_collids = (1UL << 16);
            }

            s->ct.base_addr = baser_base_addr(value, page_sz);

            break;

        default:
            break;
        }
    }
}

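/*
 * Sizing example (illustrative, values assumed): one 64KB page with 8-byte
 * entries gives 65536 / 8 = 8192 flat entries; with indirection the same
 * allocation instead holds 8192 level-1 pointers, each covering a further
 * 8192 level-2 entries, matching the max_entries formulas above.
 */
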
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));
    s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);

    s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                        GITS_CMDQ_ENTRY_SIZE;
    s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
    s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
}

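/*
 * Example (illustrative): GITS_CBASER.Size is the number of 4KB pages
 * minus one, so Size == 0 allocates a single page and, with the 32-byte
 * command entries assumed by GITS_CMDQ_ENTRY_SIZE, cq.max_entries is
 * 4096 / 32 = 128 queued commands.
 */
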
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;
    uint32_t devid = 0;

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & ITS_CTLR_ENABLED) {
            devid = attrs.requester_id;
            result = process_its_cmd(s, data, devid, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        s->ctlr |= (value & ~(s->ctlr));

        if (s->ctlr & ITS_CTLR_ENABLED) {
            extract_table_params(s);
            extract_cmdq_params(s);
            s->creadr = 0;
            process_cmdq(s);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            index = (offset - GITS_BASER) / 8;

            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            index = (offset - GITS_BASER) / 8;
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & ITS_CTLR_ENABLED)) {
            s->cbaser = value;
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
                          GITS_TYPE_PHYSICAL);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type, where n = 2 to 7, to 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}

static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & ITS_CTLR_ENABLED) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)