2 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
4 * Copyright (c) 2019-2022, IBM Corporation.
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
10 #include "qemu/osdep.h"
12 #include "qapi/error.h"
13 #include "target/ppc/cpu.h"
14 #include "sysemu/cpus.h"
15 #include "sysemu/dma.h"
16 #include "hw/ppc/fdt.h"
17 #include "hw/ppc/pnv.h"
18 #include "hw/ppc/pnv_chip.h"
19 #include "hw/ppc/pnv_core.h"
20 #include "hw/ppc/pnv_xscom.h"
21 #include "hw/ppc/xive2.h"
22 #include "hw/ppc/pnv_xive.h"
23 #include "hw/ppc/xive_regs.h"
24 #include "hw/ppc/xive2_regs.h"
25 #include "hw/ppc/ppc.h"
26 #include "hw/qdev-properties.h"
27 #include "sysemu/reset.h"
28 #include "sysemu/qtest.h"
32 #include "pnv_xive2_regs.h"
36 /* XIVE Sync or Flush Notification Block */
37 typedef struct XiveSfnBlock
{
41 /* XIVE Thread Sync or Flush Notification Area */
42 typedef struct XiveThreadNA
{
43 XiveSfnBlock topo
[16];
47 * Virtual structures table (VST)
49 #define SBE_PER_BYTE 4
51 typedef struct XiveVstInfo
{
57 static const XiveVstInfo vst_infos
[] = {
59 [VST_EAS
] = { "EAT", sizeof(Xive2Eas
), 16 },
60 [VST_ESB
] = { "ESB", 1, 16 },
61 [VST_END
] = { "ENDT", sizeof(Xive2End
), 16 },
63 [VST_NVP
] = { "NVPT", sizeof(Xive2Nvp
), 16 },
64 [VST_NVG
] = { "NVGT", sizeof(Xive2Nvgc
), 16 },
65 [VST_NVC
] = { "NVCT", sizeof(Xive2Nvgc
), 16 },
67 [VST_IC
] = { "IC", 1, /* ? */ 16 }, /* Topology # */
68 [VST_SYNC
] = { "SYNC", sizeof(XiveThreadNA
), 16 }, /* Topology # */
71 * This table contains the backing store pages for the interrupt
72 * fifos of the VC sub-engine in case of overflow.
82 [VST_ERQ
] = { "ERQ", 1, VC_QUEUE_COUNT
},
85 #define xive2_error(xive, fmt, ...) \
86 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \
87 (xive)->chip->chip_id, ## __VA_ARGS__);
90 * TODO: Document block id override
92 static uint32_t pnv_xive2_block_id(PnvXive2
*xive
)
94 uint8_t blk
= xive
->chip
->chip_id
;
95 uint64_t cfg_val
= xive
->cq_regs
[CQ_XIVE_CFG
>> 3];
97 if (cfg_val
& CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE
) {
98 blk
= GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID
, cfg_val
);
105 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
106 * of the chips is good enough.
108 * TODO: Block scope support
110 static PnvXive2
*pnv_xive2_get_remote(uint8_t blk
)
112 PnvMachineState
*pnv
= PNV_MACHINE(qdev_get_machine());
115 for (i
= 0; i
< pnv
->num_chips
; i
++) {
116 Pnv10Chip
*chip10
= PNV10_CHIP(pnv
->chips
[i
]);
117 PnvXive2
*xive
= &chip10
->xive
;
119 if (pnv_xive2_block_id(xive
) == blk
) {
127 * VST accessors for ESB, EAT, ENDT, NVP
129 * Indirect VST tables are arrays of VSDs pointing to a page (of same
130 * size). Each page is a direct VST table.
133 #define XIVE_VSD_SIZE 8
/*
 * Valid page sizes for an indirect VST: 4K, 64K, 2M or 16M
 * (i.e. page shifts 12, 16, 21 and 24). Returns non-zero when
 * @page_shift is one of the allowed values.
 */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    switch (page_shift) {
    case 12: /* 4K */
    case 16: /* 64K */
    case 21: /* 2M */
    case 24: /* 16M */
        return 1;
    default:
        return 0;
    }
}
142 static uint64_t pnv_xive2_vst_addr_direct(PnvXive2
*xive
, uint32_t type
,
143 uint64_t vsd
, uint32_t idx
)
145 const XiveVstInfo
*info
= &vst_infos
[type
];
146 uint64_t vst_addr
= vsd
& VSD_ADDRESS_MASK
;
147 uint64_t vst_tsize
= 1ull << (GETFIELD(VSD_TSIZE
, vsd
) + 12);
150 idx_max
= vst_tsize
/ info
->size
- 1;
153 xive2_error(xive
, "VST: %s entry %x out of range [ 0 .. %x ] !?",
154 info
->name
, idx
, idx_max
);
159 return vst_addr
+ idx
* info
->size
;
162 static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2
*xive
, uint32_t type
,
163 uint64_t vsd
, uint32_t idx
)
165 const XiveVstInfo
*info
= &vst_infos
[type
];
169 uint32_t vst_per_page
;
171 /* Get the page size of the indirect table. */
172 vsd_addr
= vsd
& VSD_ADDRESS_MASK
;
173 ldq_be_dma(&address_space_memory
, vsd_addr
, &vsd
, MEMTXATTRS_UNSPECIFIED
);
175 if (!(vsd
& VSD_ADDRESS_MASK
)) {
177 xive2_error(xive
, "VST: invalid %s entry %x !?", info
->name
, idx
);
182 page_shift
= GETFIELD(VSD_TSIZE
, vsd
) + 12;
184 if (!pnv_xive2_vst_page_size_allowed(page_shift
)) {
185 xive2_error(xive
, "VST: invalid %s page shift %d", info
->name
,
190 vst_per_page
= (1ull << page_shift
) / info
->size
;
191 vsd_idx
= idx
/ vst_per_page
;
193 /* Load the VSD we are looking for, if not already done */
195 vsd_addr
= vsd_addr
+ vsd_idx
* XIVE_VSD_SIZE
;
196 ldq_be_dma(&address_space_memory
, vsd_addr
, &vsd
,
197 MEMTXATTRS_UNSPECIFIED
);
199 if (!(vsd
& VSD_ADDRESS_MASK
)) {
201 xive2_error(xive
, "VST: invalid %s entry %x !?", info
->name
, idx
);
207 * Check that the pages have a consistent size across the
210 if (page_shift
!= GETFIELD(VSD_TSIZE
, vsd
) + 12) {
211 xive2_error(xive
, "VST: %s entry %x indirect page size differ !?",
217 return pnv_xive2_vst_addr_direct(xive
, type
, vsd
, (idx
% vst_per_page
));
220 static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2
*xive
)
222 uint8_t shift
= GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS
,
223 xive
->pc_regs
[PC_NXC_PROC_CONFIG
>> 3]);
224 return shift
> 8 ? 0 : shift
;
227 static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2
*xive
)
229 uint8_t shift
= GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS
,
230 xive
->pc_regs
[PC_NXC_PROC_CONFIG
>> 3]);
231 return shift
> 8 ? 0 : shift
;
234 static uint64_t pnv_xive2_vst_addr(PnvXive2
*xive
, uint32_t type
, uint8_t blk
,
237 const XiveVstInfo
*info
= &vst_infos
[type
];
240 if (blk
>= info
->max_blocks
) {
241 xive2_error(xive
, "VST: invalid block id %d for VST %s %d !?",
242 blk
, info
->name
, idx
);
246 vsd
= xive
->vsds
[type
][blk
];
248 xive2_error(xive
, "VST: vsd == 0 block id %d for VST %s %d !?",
249 blk
, info
->name
, idx
);
253 /* Remote VST access */
254 if (GETFIELD(VSD_MODE
, vsd
) == VSD_MODE_FORWARD
) {
255 xive
= pnv_xive2_get_remote(blk
);
257 return xive
? pnv_xive2_vst_addr(xive
, type
, blk
, idx
) : 0;
260 if (type
== VST_NVG
) {
261 idx
>>= pnv_xive2_nvg_table_compress_shift(xive
);
262 } else if (type
== VST_NVC
) {
263 idx
>>= pnv_xive2_nvc_table_compress_shift(xive
);
266 if (VSD_INDIRECT
& vsd
) {
267 return pnv_xive2_vst_addr_indirect(xive
, type
, vsd
, idx
);
270 return pnv_xive2_vst_addr_direct(xive
, type
, vsd
, idx
);
273 static int pnv_xive2_vst_read(PnvXive2
*xive
, uint32_t type
, uint8_t blk
,
274 uint32_t idx
, void *data
)
276 const XiveVstInfo
*info
= &vst_infos
[type
];
277 uint64_t addr
= pnv_xive2_vst_addr(xive
, type
, blk
, idx
);
284 result
= address_space_read(&address_space_memory
, addr
,
285 MEMTXATTRS_UNSPECIFIED
, data
,
287 if (result
!= MEMTX_OK
) {
288 xive2_error(xive
, "VST: read failed at @0x%" HWADDR_PRIx
289 " for VST %s %x/%x\n", addr
, info
->name
, blk
, idx
);
295 #define XIVE_VST_WORD_ALL -1
297 static int pnv_xive2_vst_write(PnvXive2
*xive
, uint32_t type
, uint8_t blk
,
298 uint32_t idx
, void *data
, uint32_t word_number
)
300 const XiveVstInfo
*info
= &vst_infos
[type
];
301 uint64_t addr
= pnv_xive2_vst_addr(xive
, type
, blk
, idx
);
308 if (word_number
== XIVE_VST_WORD_ALL
) {
309 result
= address_space_write(&address_space_memory
, addr
,
310 MEMTXATTRS_UNSPECIFIED
, data
,
313 result
= address_space_write(&address_space_memory
,
314 addr
+ word_number
* 4,
315 MEMTXATTRS_UNSPECIFIED
,
316 data
+ word_number
* 4, 4);
319 if (result
!= MEMTX_OK
) {
320 xive2_error(xive
, "VST: write failed at @0x%" HWADDR_PRIx
321 "for VST %s %x/%x\n", addr
, info
->name
, blk
, idx
);
327 static int pnv_xive2_get_pq(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
330 PnvXive2
*xive
= PNV_XIVE2(xrtr
);
332 if (pnv_xive2_block_id(xive
) != blk
) {
333 xive2_error(xive
, "VST: EAS %x is remote !?", XIVE_EAS(blk
, idx
));
337 *pq
= xive_source_esb_get(&xive
->ipi_source
, idx
);
341 static int pnv_xive2_set_pq(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
344 PnvXive2
*xive
= PNV_XIVE2(xrtr
);
346 if (pnv_xive2_block_id(xive
) != blk
) {
347 xive2_error(xive
, "VST: EAS %x is remote !?", XIVE_EAS(blk
, idx
));
351 *pq
= xive_source_esb_set(&xive
->ipi_source
, idx
, *pq
);
355 static int pnv_xive2_get_end(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
358 return pnv_xive2_vst_read(PNV_XIVE2(xrtr
), VST_END
, blk
, idx
, end
);
361 static int pnv_xive2_write_end(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
362 Xive2End
*end
, uint8_t word_number
)
364 return pnv_xive2_vst_write(PNV_XIVE2(xrtr
), VST_END
, blk
, idx
, end
,
368 static inline int pnv_xive2_get_current_pir(PnvXive2
*xive
)
370 if (!qtest_enabled()) {
371 PowerPCCPU
*cpu
= POWERPC_CPU(current_cpu
);
372 return ppc_cpu_pir(cpu
);
378 * After SW injects a Queue Sync or Cache Flush operation, HW will notify
379 * SW of the completion of the operation by writing a byte of all 1's (0xff)
380 * to a specific memory location. The memory location is calculated by first
381 * looking up a base address in the SYNC VSD using the Topology ID of the
382 * originating thread as the "block" number. This points to a
383 * 64k block of memory that is further divided into 128 512 byte chunks of
384 * memory, which is indexed by the thread id of the requesting thread.
385 * Finally, this 512 byte chunk of memory is divided into 16 32 byte
386 * chunks which are indexed by the topology id of the targeted IC's chip.
387 * The values below are the offsets into that 32 byte chunk of memory for
388 * each type of cache flush or queue sync operation.
390 #define PNV_XIVE2_QUEUE_IPI 0x00
391 #define PNV_XIVE2_QUEUE_HW 0x01
392 #define PNV_XIVE2_QUEUE_NXC 0x02
393 #define PNV_XIVE2_QUEUE_INT 0x03
394 #define PNV_XIVE2_QUEUE_OS 0x04
395 #define PNV_XIVE2_QUEUE_POOL 0x05
396 #define PNV_XIVE2_QUEUE_HARD 0x06
397 #define PNV_XIVE2_CACHE_ENDC 0x08
398 #define PNV_XIVE2_CACHE_ESBC 0x09
399 #define PNV_XIVE2_CACHE_EASC 0x0a
400 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
401 #define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
402 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
403 #define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
404 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
405 #define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
406 #define PNV_XIVE2_CACHE_NXC 0x18
408 static int pnv_xive2_inject_notify(PnvXive2
*xive
, int type
)
411 int pir
= pnv_xive2_get_current_pir(xive
);
412 int thread_nr
= PNV10_PIR2THREAD(pir
);
413 int thread_topo_id
= PNV10_PIR2CHIP(pir
);
414 int ic_topo_id
= xive
->chip
->chip_id
;
415 uint64_t offset
= ic_topo_id
* sizeof(XiveSfnBlock
);
419 /* Retrieve the address of requesting thread's notification area */
420 addr
= pnv_xive2_vst_addr(xive
, VST_SYNC
, thread_topo_id
, thread_nr
);
423 xive2_error(xive
, "VST: no SYNC entry %x/%x !?",
424 thread_topo_id
, thread_nr
);
428 address_space_stb(&address_space_memory
, addr
+ offset
+ type
, byte
,
429 MEMTXATTRS_UNSPECIFIED
, &result
);
430 assert(result
== MEMTX_OK
);
435 static int pnv_xive2_end_update(PnvXive2
*xive
, uint8_t watch_engine
)
439 int i
, spec_reg
, data_reg
;
440 uint64_t endc_watch
[4];
442 assert(watch_engine
< ARRAY_SIZE(endc_watch
));
444 spec_reg
= (VC_ENDC_WATCH0_SPEC
+ watch_engine
* 0x40) >> 3;
445 data_reg
= (VC_ENDC_WATCH0_DATA0
+ watch_engine
* 0x40) >> 3;
446 blk
= GETFIELD(VC_ENDC_WATCH_BLOCK_ID
, xive
->vc_regs
[spec_reg
]);
447 idx
= GETFIELD(VC_ENDC_WATCH_INDEX
, xive
->vc_regs
[spec_reg
]);
449 for (i
= 0; i
< ARRAY_SIZE(endc_watch
); i
++) {
450 endc_watch
[i
] = cpu_to_be64(xive
->vc_regs
[data_reg
+ i
]);
453 return pnv_xive2_vst_write(xive
, VST_END
, blk
, idx
, endc_watch
,
457 static void pnv_xive2_end_cache_load(PnvXive2
*xive
, uint8_t watch_engine
)
461 uint64_t endc_watch
[4] = { 0 };
462 int i
, spec_reg
, data_reg
;
464 assert(watch_engine
< ARRAY_SIZE(endc_watch
));
466 spec_reg
= (VC_ENDC_WATCH0_SPEC
+ watch_engine
* 0x40) >> 3;
467 data_reg
= (VC_ENDC_WATCH0_DATA0
+ watch_engine
* 0x40) >> 3;
468 blk
= GETFIELD(VC_ENDC_WATCH_BLOCK_ID
, xive
->vc_regs
[spec_reg
]);
469 idx
= GETFIELD(VC_ENDC_WATCH_INDEX
, xive
->vc_regs
[spec_reg
]);
471 if (pnv_xive2_vst_read(xive
, VST_END
, blk
, idx
, endc_watch
)) {
472 xive2_error(xive
, "VST: no END entry %x/%x !?", blk
, idx
);
475 for (i
= 0; i
< ARRAY_SIZE(endc_watch
); i
++) {
476 xive
->vc_regs
[data_reg
+ i
] = be64_to_cpu(endc_watch
[i
]);
480 static int pnv_xive2_get_nvp(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
483 return pnv_xive2_vst_read(PNV_XIVE2(xrtr
), VST_NVP
, blk
, idx
, nvp
);
486 static int pnv_xive2_write_nvp(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
487 Xive2Nvp
*nvp
, uint8_t word_number
)
489 return pnv_xive2_vst_write(PNV_XIVE2(xrtr
), VST_NVP
, blk
, idx
, nvp
,
493 static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type
, uint32_t *table_type
)
496 case PC_NXC_WATCH_NXC_NVP
:
497 *table_type
= VST_NVP
;
499 case PC_NXC_WATCH_NXC_NVG
:
500 *table_type
= VST_NVG
;
502 case PC_NXC_WATCH_NXC_NVC
:
503 *table_type
= VST_NVC
;
506 qemu_log_mask(LOG_GUEST_ERROR
,
507 "XIVE: invalid table type for nxc operation\n");
513 static int pnv_xive2_nxc_update(PnvXive2
*xive
, uint8_t watch_engine
)
515 uint8_t blk
, nxc_type
;
516 uint32_t idx
, table_type
= -1;
517 int i
, spec_reg
, data_reg
;
518 uint64_t nxc_watch
[4];
520 assert(watch_engine
< ARRAY_SIZE(nxc_watch
));
522 spec_reg
= (PC_NXC_WATCH0_SPEC
+ watch_engine
* 0x40) >> 3;
523 data_reg
= (PC_NXC_WATCH0_DATA0
+ watch_engine
* 0x40) >> 3;
524 nxc_type
= GETFIELD(PC_NXC_WATCH_NXC_TYPE
, xive
->pc_regs
[spec_reg
]);
525 blk
= GETFIELD(PC_NXC_WATCH_BLOCK_ID
, xive
->pc_regs
[spec_reg
]);
526 idx
= GETFIELD(PC_NXC_WATCH_INDEX
, xive
->pc_regs
[spec_reg
]);
528 assert(!pnv_xive2_nxc_to_table_type(nxc_type
, &table_type
));
530 for (i
= 0; i
< ARRAY_SIZE(nxc_watch
); i
++) {
531 nxc_watch
[i
] = cpu_to_be64(xive
->pc_regs
[data_reg
+ i
]);
534 return pnv_xive2_vst_write(xive
, table_type
, blk
, idx
, nxc_watch
,
538 static void pnv_xive2_nxc_cache_load(PnvXive2
*xive
, uint8_t watch_engine
)
540 uint8_t blk
, nxc_type
;
541 uint32_t idx
, table_type
= -1;
542 uint64_t nxc_watch
[4] = { 0 };
543 int i
, spec_reg
, data_reg
;
545 assert(watch_engine
< ARRAY_SIZE(nxc_watch
));
547 spec_reg
= (PC_NXC_WATCH0_SPEC
+ watch_engine
* 0x40) >> 3;
548 data_reg
= (PC_NXC_WATCH0_DATA0
+ watch_engine
* 0x40) >> 3;
549 nxc_type
= GETFIELD(PC_NXC_WATCH_NXC_TYPE
, xive
->pc_regs
[spec_reg
]);
550 blk
= GETFIELD(PC_NXC_WATCH_BLOCK_ID
, xive
->pc_regs
[spec_reg
]);
551 idx
= GETFIELD(PC_NXC_WATCH_INDEX
, xive
->pc_regs
[spec_reg
]);
553 assert(!pnv_xive2_nxc_to_table_type(nxc_type
, &table_type
));
555 if (pnv_xive2_vst_read(xive
, table_type
, blk
, idx
, nxc_watch
)) {
556 xive2_error(xive
, "VST: no NXC entry %x/%x in %s table!?",
557 blk
, idx
, vst_infos
[table_type
].name
);
560 for (i
= 0; i
< ARRAY_SIZE(nxc_watch
); i
++) {
561 xive
->pc_regs
[data_reg
+ i
] = be64_to_cpu(nxc_watch
[i
]);
565 static int pnv_xive2_get_eas(Xive2Router
*xrtr
, uint8_t blk
, uint32_t idx
,
568 PnvXive2
*xive
= PNV_XIVE2(xrtr
);
570 if (pnv_xive2_block_id(xive
) != blk
) {
571 xive2_error(xive
, "VST: EAS %x is remote !?", XIVE_EAS(blk
, idx
));
575 return pnv_xive2_vst_read(xive
, VST_EAS
, blk
, idx
, eas
);
578 static uint32_t pnv_xive2_get_config(Xive2Router
*xrtr
)
580 PnvXive2
*xive
= PNV_XIVE2(xrtr
);
583 if (xive
->cq_regs
[CQ_XIVE_CFG
>> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS
) {
584 cfg
|= XIVE2_GEN1_TIMA_OS
;
587 if (xive
->cq_regs
[CQ_XIVE_CFG
>> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE
) {
588 cfg
|= XIVE2_VP_SAVE_RESTORE
;
591 if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE
,
592 xive
->cq_regs
[CQ_XIVE_CFG
>> 3]) == CQ_XIVE_CFG_THREADID_8BITS
) {
593 cfg
|= XIVE2_THREADID_8BITS
;
599 static bool pnv_xive2_is_cpu_enabled(PnvXive2
*xive
, PowerPCCPU
*cpu
)
601 int pir
= ppc_cpu_pir(cpu
);
602 uint32_t fc
= PNV10_PIR2FUSEDCORE(pir
);
603 uint64_t reg
= fc
< 8 ? TCTXT_EN0
: TCTXT_EN1
;
604 uint32_t bit
= pir
& 0x3f;
606 return xive
->tctxt_regs
[reg
>> 3] & PPC_BIT(bit
);
609 static int pnv_xive2_match_nvt(XivePresenter
*xptr
, uint8_t format
,
610 uint8_t nvt_blk
, uint32_t nvt_idx
,
611 bool cam_ignore
, uint8_t priority
,
612 uint32_t logic_serv
, XiveTCTXMatch
*match
)
614 PnvXive2
*xive
= PNV_XIVE2(xptr
);
615 PnvChip
*chip
= xive
->chip
;
619 xive
->cq_regs
[CQ_XIVE_CFG
>> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS
;
621 for (i
= 0; i
< chip
->nr_cores
; i
++) {
622 PnvCore
*pc
= chip
->cores
[i
];
623 CPUCore
*cc
= CPU_CORE(pc
);
625 for (j
= 0; j
< cc
->nr_threads
; j
++) {
626 PowerPCCPU
*cpu
= pc
->threads
[j
];
630 if (!pnv_xive2_is_cpu_enabled(xive
, cpu
)) {
634 tctx
= XIVE_TCTX(pnv_cpu_state(cpu
)->intc
);
637 ring
= xive_presenter_tctx_match(xptr
, tctx
, format
, nvt_blk
,
641 ring
= xive2_presenter_tctx_match(xptr
, tctx
, format
, nvt_blk
,
647 * Save the context and follow on to catch duplicates,
648 * that we don't support yet.
652 qemu_log_mask(LOG_GUEST_ERROR
, "XIVE: already found a "
653 "thread context NVT %x/%x\n",
668 static uint32_t pnv_xive2_presenter_get_config(XivePresenter
*xptr
)
670 PnvXive2
*xive
= PNV_XIVE2(xptr
);
673 if (xive
->cq_regs
[CQ_XIVE_CFG
>> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS
) {
674 cfg
|= XIVE_PRESENTER_GEN1_TIMA_OS
;
679 static uint8_t pnv_xive2_get_block_id(Xive2Router
*xrtr
)
681 return pnv_xive2_block_id(PNV_XIVE2(xrtr
));
685 * The TIMA MMIO space is shared among the chips and to identify the
686 * chip from which the access is being done, we extract the chip id
689 static PnvXive2
*pnv_xive2_tm_get_xive(PowerPCCPU
*cpu
)
691 int pir
= ppc_cpu_pir(cpu
);
692 XivePresenter
*xptr
= XIVE_TCTX(pnv_cpu_state(cpu
)->intc
)->xptr
;
693 PnvXive2
*xive
= PNV_XIVE2(xptr
);
695 if (!pnv_xive2_is_cpu_enabled(xive
, cpu
)) {
696 xive2_error(xive
, "IC: CPU %x is not enabled", pir
);
702 * The internal sources of the interrupt controller have no knowledge
703 * of the XIVE2 chip on which they reside. Encode the block id in the
704 * source interrupt number before forwarding the source event
705 * notification to the Router. This is required on a multichip system.
707 static void pnv_xive2_notify(XiveNotifier
*xn
, uint32_t srcno
, bool pq_checked
)
709 PnvXive2
*xive
= PNV_XIVE2(xn
);
710 uint8_t blk
= pnv_xive2_block_id(xive
);
712 xive2_router_notify(xn
, XIVE_EAS(blk
, srcno
), pq_checked
);
716 * Set Translation Tables
718 * TODO add support for multiple sets
720 static int pnv_xive2_stt_set_data(PnvXive2
*xive
, uint64_t val
)
722 uint8_t tsel
= GETFIELD(CQ_TAR_SELECT
, xive
->cq_regs
[CQ_TAR
>> 3]);
723 uint8_t entry
= GETFIELD(CQ_TAR_ENTRY_SELECT
,
724 xive
->cq_regs
[CQ_TAR
>> 3]);
731 xive
->tables
[tsel
][entry
] = val
;
734 xive2_error(xive
, "IC: unsupported table %d", tsel
);
738 if (xive
->cq_regs
[CQ_TAR
>> 3] & CQ_TAR_AUTOINC
) {
739 xive
->cq_regs
[CQ_TAR
>> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT
,
740 xive
->cq_regs
[CQ_TAR
>> 3], ++entry
);
746 * Virtual Structure Tables (VST) configuration
748 static void pnv_xive2_vst_set_exclusive(PnvXive2
*xive
, uint8_t type
,
749 uint8_t blk
, uint64_t vsd
)
751 Xive2EndSource
*end_xsrc
= &xive
->end_source
;
752 XiveSource
*xsrc
= &xive
->ipi_source
;
753 const XiveVstInfo
*info
= &vst_infos
[type
];
754 uint32_t page_shift
= GETFIELD(VSD_TSIZE
, vsd
) + 12;
755 uint64_t vst_tsize
= 1ull << page_shift
;
756 uint64_t vst_addr
= vsd
& VSD_ADDRESS_MASK
;
760 if (VSD_INDIRECT
& vsd
) {
761 if (!pnv_xive2_vst_page_size_allowed(page_shift
)) {
762 xive2_error(xive
, "VST: invalid %s page shift %d", info
->name
,
768 if (!QEMU_IS_ALIGNED(vst_addr
, 1ull << page_shift
)) {
769 xive2_error(xive
, "VST: %s table address 0x%"PRIx64
770 " is not aligned with page shift %d",
771 info
->name
, vst_addr
, page_shift
);
775 /* Record the table configuration (in SRAM on HW) */
776 xive
->vsds
[type
][blk
] = vsd
;
778 /* Now tune the models with the configuration provided by the FW */
783 * Backing store pages for the source PQ bits. The model does
784 * not use these PQ bits backed in RAM because the XiveSource
787 * If the table is direct, we can compute the number of PQ
788 * entries provisioned by FW (such as skiboot) and resize the
789 * ESB window accordingly.
791 if (memory_region_is_mapped(&xsrc
->esb_mmio
)) {
792 memory_region_del_subregion(&xive
->esb_mmio
, &xsrc
->esb_mmio
);
794 if (!(VSD_INDIRECT
& vsd
)) {
795 memory_region_set_size(&xsrc
->esb_mmio
, vst_tsize
* SBE_PER_BYTE
796 * (1ull << xsrc
->esb_shift
));
799 memory_region_add_subregion(&xive
->esb_mmio
, 0, &xsrc
->esb_mmio
);
802 case VST_EAS
: /* Nothing to be done */
807 * Backing store pages for the END.
809 if (memory_region_is_mapped(&end_xsrc
->esb_mmio
)) {
810 memory_region_del_subregion(&xive
->end_mmio
, &end_xsrc
->esb_mmio
);
812 if (!(VSD_INDIRECT
& vsd
)) {
813 memory_region_set_size(&end_xsrc
->esb_mmio
, (vst_tsize
/ info
->size
)
814 * (1ull << end_xsrc
->esb_shift
));
816 memory_region_add_subregion(&xive
->end_mmio
, 0, &end_xsrc
->esb_mmio
);
819 case VST_NVP
: /* Not modeled */
820 case VST_NVG
: /* Not modeled */
821 case VST_NVC
: /* Not modeled */
822 case VST_IC
: /* Not modeled */
823 case VST_SYNC
: /* Not modeled */
824 case VST_ERQ
: /* Not modeled */
828 g_assert_not_reached();
833 * Both PC and VC sub-engines are configured as each use the Virtual
836 static void pnv_xive2_vst_set_data(PnvXive2
*xive
, uint64_t vsd
,
837 uint8_t type
, uint8_t blk
)
839 uint8_t mode
= GETFIELD(VSD_MODE
, vsd
);
840 uint64_t vst_addr
= vsd
& VSD_ADDRESS_MASK
;
842 if (type
> VST_ERQ
) {
843 xive2_error(xive
, "VST: invalid table type %d", type
);
847 if (blk
>= vst_infos
[type
].max_blocks
) {
848 xive2_error(xive
, "VST: invalid block id %d for"
849 " %s table", blk
, vst_infos
[type
].name
);
854 xive2_error(xive
, "VST: invalid %s table address",
855 vst_infos
[type
].name
);
860 case VSD_MODE_FORWARD
:
861 xive
->vsds
[type
][blk
] = vsd
;
864 case VSD_MODE_EXCLUSIVE
:
865 pnv_xive2_vst_set_exclusive(xive
, type
, blk
, vsd
);
869 xive2_error(xive
, "VST: unsupported table mode %d", mode
);
874 static void pnv_xive2_vc_vst_set_data(PnvXive2
*xive
, uint64_t vsd
)
876 uint8_t type
= GETFIELD(VC_VSD_TABLE_SELECT
,
877 xive
->vc_regs
[VC_VSD_TABLE_ADDR
>> 3]);
878 uint8_t blk
= GETFIELD(VC_VSD_TABLE_ADDRESS
,
879 xive
->vc_regs
[VC_VSD_TABLE_ADDR
>> 3]);
881 pnv_xive2_vst_set_data(xive
, vsd
, type
, blk
);
892 * Page 0: Internal CQ register accesses (reads & writes)
893 * Page 1: Internal PC register accesses (reads & writes)
894 * Page 2: Internal VC register accesses (reads & writes)
895 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
896 * Page 4: Notify Port page (writes only, w/data),
898 * Page 6: Sync Poll page (writes only, dataless)
899 * Page 7: Sync Inject page (writes only, dataless)
900 * Page 8: LSI Trigger page (writes only, dataless)
901 * Page 9: LSI SB Management page (reads & writes dataless)
902 * Pages 10-255: Reserved
903 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
904 * covering the 128 threads in P10.
905 * Pages 384-511: Reserved
907 typedef struct PnvXive2Region
{
911 const MemoryRegionOps
*ops
;
914 static const MemoryRegionOps pnv_xive2_ic_cq_ops
;
915 static const MemoryRegionOps pnv_xive2_ic_pc_ops
;
916 static const MemoryRegionOps pnv_xive2_ic_vc_ops
;
917 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops
;
918 static const MemoryRegionOps pnv_xive2_ic_notify_ops
;
919 static const MemoryRegionOps pnv_xive2_ic_sync_ops
;
920 static const MemoryRegionOps pnv_xive2_ic_lsi_ops
;
921 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops
;
923 /* 512 pages. 4K: 2M range, 64K: 32M range */
924 static const PnvXive2Region pnv_xive2_ic_regions
[] = {
925 { "xive-ic-cq", 0, 1, &pnv_xive2_ic_cq_ops
},
926 { "xive-ic-vc", 1, 1, &pnv_xive2_ic_vc_ops
},
927 { "xive-ic-pc", 2, 1, &pnv_xive2_ic_pc_ops
},
928 { "xive-ic-tctxt", 3, 1, &pnv_xive2_ic_tctxt_ops
},
929 { "xive-ic-notify", 4, 1, &pnv_xive2_ic_notify_ops
},
930 /* page 5 reserved */
931 { "xive-ic-sync", 6, 2, &pnv_xive2_ic_sync_ops
},
932 { "xive-ic-lsi", 8, 2, &pnv_xive2_ic_lsi_ops
},
933 /* pages 10-255 reserved */
934 { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops
},
935 /* pages 384-511 reserved */
942 static uint64_t pnv_xive2_ic_cq_read(void *opaque
, hwaddr offset
,
945 PnvXive2
*xive
= PNV_XIVE2(opaque
);
946 uint32_t reg
= offset
>> 3;
950 case CQ_XIVE_CAP
: /* Set at reset */
952 val
= xive
->cq_regs
[reg
];
954 case CQ_MSGSND
: /* TODO check the #cores of the machine */
955 val
= 0xffffffff00000000;
958 val
= CQ_CFG_PB_GEN_PB_INIT
; /* TODO: fix CQ_CFG_PB_GEN default value */
961 xive2_error(xive
, "CQ: invalid read @%"HWADDR_PRIx
, offset
);
967 static uint64_t pnv_xive2_bar_size(uint64_t val
)
969 return 1ull << (GETFIELD(CQ_BAR_RANGE
, val
) + 24);
972 static void pnv_xive2_ic_cq_write(void *opaque
, hwaddr offset
,
973 uint64_t val
, unsigned size
)
975 PnvXive2
*xive
= PNV_XIVE2(opaque
);
976 MemoryRegion
*sysmem
= get_system_memory();
977 uint32_t reg
= offset
>> 3;
982 case CQ_RST_CTL
: /* TODO: reset all BARs */
986 xive
->ic_shift
= val
& CQ_IC_BAR_64K
? 16 : 12;
987 if (!(val
& CQ_IC_BAR_VALID
)) {
989 if (xive
->cq_regs
[reg
] & CQ_IC_BAR_VALID
) {
990 for (i
= 0; i
< ARRAY_SIZE(xive
->ic_mmios
); i
++) {
991 memory_region_del_subregion(&xive
->ic_mmio
,
994 memory_region_del_subregion(sysmem
, &xive
->ic_mmio
);
997 xive
->ic_base
= val
& ~(CQ_IC_BAR_VALID
| CQ_IC_BAR_64K
);
998 if (!(xive
->cq_regs
[reg
] & CQ_IC_BAR_VALID
)) {
999 for (i
= 0; i
< ARRAY_SIZE(xive
->ic_mmios
); i
++) {
1000 memory_region_add_subregion(&xive
->ic_mmio
,
1001 pnv_xive2_ic_regions
[i
].pgoff
<< xive
->ic_shift
,
1002 &xive
->ic_mmios
[i
]);
1004 memory_region_add_subregion(sysmem
, xive
->ic_base
,
1011 xive
->tm_shift
= val
& CQ_TM_BAR_64K
? 16 : 12;
1012 if (!(val
& CQ_TM_BAR_VALID
)) {
1014 if (xive
->cq_regs
[reg
] & CQ_TM_BAR_VALID
) {
1015 memory_region_del_subregion(sysmem
, &xive
->tm_mmio
);
1018 xive
->tm_base
= val
& ~(CQ_TM_BAR_VALID
| CQ_TM_BAR_64K
);
1019 if (!(xive
->cq_regs
[reg
] & CQ_TM_BAR_VALID
)) {
1020 memory_region_add_subregion(sysmem
, xive
->tm_base
,
1027 xive
->esb_shift
= val
& CQ_BAR_64K
? 16 : 12;
1028 if (!(val
& CQ_BAR_VALID
)) {
1030 if (xive
->cq_regs
[reg
] & CQ_BAR_VALID
) {
1031 memory_region_del_subregion(sysmem
, &xive
->esb_mmio
);
1034 xive
->esb_base
= val
& CQ_BAR_ADDR
;
1035 if (!(xive
->cq_regs
[reg
] & CQ_BAR_VALID
)) {
1036 memory_region_set_size(&xive
->esb_mmio
,
1037 pnv_xive2_bar_size(val
));
1038 memory_region_add_subregion(sysmem
, xive
->esb_base
,
1045 xive
->end_shift
= val
& CQ_BAR_64K
? 16 : 12;
1046 if (!(val
& CQ_BAR_VALID
)) {
1048 if (xive
->cq_regs
[reg
] & CQ_BAR_VALID
) {
1049 memory_region_del_subregion(sysmem
, &xive
->end_mmio
);
1052 xive
->end_base
= val
& CQ_BAR_ADDR
;
1053 if (!(xive
->cq_regs
[reg
] & CQ_BAR_VALID
)) {
1054 memory_region_set_size(&xive
->end_mmio
,
1055 pnv_xive2_bar_size(val
));
1056 memory_region_add_subregion(sysmem
, xive
->end_base
,
1063 xive
->nvc_shift
= val
& CQ_BAR_64K
? 16 : 12;
1064 if (!(val
& CQ_BAR_VALID
)) {
1066 if (xive
->cq_regs
[reg
] & CQ_BAR_VALID
) {
1067 memory_region_del_subregion(sysmem
, &xive
->nvc_mmio
);
1070 xive
->nvc_base
= val
& CQ_BAR_ADDR
;
1071 if (!(xive
->cq_regs
[reg
] & CQ_BAR_VALID
)) {
1072 memory_region_set_size(&xive
->nvc_mmio
,
1073 pnv_xive2_bar_size(val
));
1074 memory_region_add_subregion(sysmem
, xive
->nvc_base
,
1081 xive
->nvpg_shift
= val
& CQ_BAR_64K
? 16 : 12;
1082 if (!(val
& CQ_BAR_VALID
)) {
1083 xive
->nvpg_base
= 0;
1084 if (xive
->cq_regs
[reg
] & CQ_BAR_VALID
) {
1085 memory_region_del_subregion(sysmem
, &xive
->nvpg_mmio
);
1088 xive
->nvpg_base
= val
& CQ_BAR_ADDR
;
1089 if (!(xive
->cq_regs
[reg
] & CQ_BAR_VALID
)) {
1090 memory_region_set_size(&xive
->nvpg_mmio
,
1091 pnv_xive2_bar_size(val
));
1092 memory_region_add_subregion(sysmem
, xive
->nvpg_base
,
1098 case CQ_TAR
: /* Set Translation Table Address */
1100 case CQ_TDR
: /* Set Translation Table Data */
1101 pnv_xive2_stt_set_data(xive
, val
);
1103 case CQ_FIRMASK_OR
: /* FIR error reporting */
1106 xive2_error(xive
, "CQ: invalid write 0x%"HWADDR_PRIx
, offset
);
1110 xive
->cq_regs
[reg
] = val
;
1113 static const MemoryRegionOps pnv_xive2_ic_cq_ops
= {
1114 .read
= pnv_xive2_ic_cq_read
,
1115 .write
= pnv_xive2_ic_cq_write
,
1116 .endianness
= DEVICE_BIG_ENDIAN
,
1118 .min_access_size
= 8,
1119 .max_access_size
= 8,
1122 .min_access_size
= 8,
1123 .max_access_size
= 8,
1127 static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask
,
1133 for (i
= 3; i
>= 0; i
--) {
1134 if (BIT(i
) & engine_mask
) {
1135 if (!(BIT(i
) & *state
)) {
/*
 * Mark a cache watch engine as free again in the busy-state bitmap.
 * Engine 0 maps to the most-significant of the 4 tracked bits
 * (bit 3 - watch_engine).
 */
static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
{
    uint8_t engine_bit = 3 - watch_engine;
    uint64_t mask = BIT(engine_bit);

    if (*state & mask) {
        *state &= ~mask;
    }
}
1154 static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2
*xive
)
1156 uint64_t engine_mask
= GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN
,
1157 xive
->vc_regs
[VC_ENDC_CFG
>> 3]);
1158 uint64_t state
= xive
->vc_regs
[VC_ENDC_WATCH_ASSIGN
>> 3];
1162 * We keep track of which engines are currently busy in the
1163 * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
1164 * the register, we don't return its value but the ID of an engine
1166 * There are 4 engines. 0xFF means no engine is available.
1168 val
= pnv_xive2_cache_watch_assign(engine_mask
, &state
);
1170 xive
->vc_regs
[VC_ENDC_WATCH_ASSIGN
>> 3] = state
;
1175 static void pnv_xive2_endc_cache_watch_release(PnvXive2
*xive
,
1176 uint8_t watch_engine
)
1178 uint64_t state
= xive
->vc_regs
[VC_ENDC_WATCH_ASSIGN
>> 3];
1180 pnv_xive2_cache_watch_release(&state
, watch_engine
);
1181 xive
->vc_regs
[VC_ENDC_WATCH_ASSIGN
>> 3] = state
;
1184 static uint64_t pnv_xive2_ic_vc_read(void *opaque
, hwaddr offset
,
1187 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1189 uint32_t reg
= offset
>> 3;
1190 uint8_t watch_engine
;
1194 * VSD table settings.
1196 case VC_VSD_TABLE_ADDR
:
1197 case VC_VSD_TABLE_DATA
:
1198 val
= xive
->vc_regs
[reg
];
1202 * ESB cache updates (not modeled)
1204 case VC_ESBC_FLUSH_CTRL
:
1205 xive
->vc_regs
[reg
] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID
;
1206 val
= xive
->vc_regs
[reg
];
1210 val
= xive
->vc_regs
[reg
];
1214 * EAS cache updates (not modeled)
1216 case VC_EASC_FLUSH_CTRL
:
1217 xive
->vc_regs
[reg
] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID
;
1218 val
= xive
->vc_regs
[reg
];
1221 case VC_ENDC_WATCH_ASSIGN
:
1222 val
= pnv_xive2_endc_cache_watch_assign(xive
);
1226 val
= xive
->vc_regs
[reg
];
1232 case VC_ENDC_WATCH0_SPEC
:
1233 case VC_ENDC_WATCH1_SPEC
:
1234 case VC_ENDC_WATCH2_SPEC
:
1235 case VC_ENDC_WATCH3_SPEC
:
1236 watch_engine
= (offset
- VC_ENDC_WATCH0_SPEC
) >> 6;
1237 xive
->vc_regs
[reg
] &= ~(VC_ENDC_WATCH_FULL
| VC_ENDC_WATCH_CONFLICT
);
1238 pnv_xive2_endc_cache_watch_release(xive
, watch_engine
);
1239 val
= xive
->vc_regs
[reg
];
1242 case VC_ENDC_WATCH0_DATA0
:
1243 case VC_ENDC_WATCH1_DATA0
:
1244 case VC_ENDC_WATCH2_DATA0
:
1245 case VC_ENDC_WATCH3_DATA0
:
1247 * Load DATA registers from cache with data requested by the
1250 watch_engine
= (offset
- VC_ENDC_WATCH0_DATA0
) >> 6;
1251 pnv_xive2_end_cache_load(xive
, watch_engine
);
1252 val
= xive
->vc_regs
[reg
];
1255 case VC_ENDC_WATCH0_DATA1
... VC_ENDC_WATCH0_DATA3
:
1256 case VC_ENDC_WATCH1_DATA1
... VC_ENDC_WATCH1_DATA3
:
1257 case VC_ENDC_WATCH2_DATA1
... VC_ENDC_WATCH2_DATA3
:
1258 case VC_ENDC_WATCH3_DATA1
... VC_ENDC_WATCH3_DATA3
:
1259 val
= xive
->vc_regs
[reg
];
1262 case VC_ENDC_FLUSH_CTRL
:
1263 xive
->vc_regs
[reg
] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID
;
1264 val
= xive
->vc_regs
[reg
];
1268 * Indirect invalidation
1270 case VC_AT_MACRO_KILL_MASK
:
1271 val
= xive
->vc_regs
[reg
];
1274 case VC_AT_MACRO_KILL
:
1275 xive
->vc_regs
[reg
] &= ~VC_AT_MACRO_KILL_VALID
;
1276 val
= xive
->vc_regs
[reg
];
1280 * Interrupt fifo overflow in memory backing store (Not modeled)
1282 case VC_QUEUES_CFG_REM0
... VC_QUEUES_CFG_REM6
:
1283 val
= xive
->vc_regs
[reg
];
1289 case VC_ENDC_SYNC_DONE
:
1290 val
= VC_ENDC_SYNC_POLL_DONE
;
1293 xive2_error(xive
, "VC: invalid read @%"HWADDR_PRIx
, offset
);
1299 static void pnv_xive2_ic_vc_write(void *opaque
, hwaddr offset
,
1300 uint64_t val
, unsigned size
)
1302 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1303 uint32_t reg
= offset
>> 3;
1304 uint8_t watch_engine
;
1308 * VSD table settings.
1310 case VC_VSD_TABLE_ADDR
:
1312 case VC_VSD_TABLE_DATA
:
1313 pnv_xive2_vc_vst_set_data(xive
, val
);
1317 * ESB cache updates (not modeled)
1319 /* case VC_ESBC_FLUSH_CTRL: */
1320 case VC_ESBC_FLUSH_POLL
:
1321 xive
->vc_regs
[VC_ESBC_FLUSH_CTRL
>> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID
;
1325 case VC_ESBC_FLUSH_INJECT
:
1326 pnv_xive2_inject_notify(xive
, PNV_XIVE2_CACHE_ESBC
);
1333 * EAS cache updates (not modeled)
1335 /* case VC_EASC_FLUSH_CTRL: */
1336 case VC_EASC_FLUSH_POLL
:
1337 xive
->vc_regs
[VC_EASC_FLUSH_CTRL
>> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID
;
1341 case VC_EASC_FLUSH_INJECT
:
1342 pnv_xive2_inject_notify(xive
, PNV_XIVE2_CACHE_EASC
);
1351 case VC_ENDC_WATCH0_SPEC
:
1352 case VC_ENDC_WATCH1_SPEC
:
1353 case VC_ENDC_WATCH2_SPEC
:
1354 case VC_ENDC_WATCH3_SPEC
:
1355 val
&= ~VC_ENDC_WATCH_CONFLICT
; /* HW will set this bit */
1358 case VC_ENDC_WATCH0_DATA1
... VC_ENDC_WATCH0_DATA3
:
1359 case VC_ENDC_WATCH1_DATA1
... VC_ENDC_WATCH1_DATA3
:
1360 case VC_ENDC_WATCH2_DATA1
... VC_ENDC_WATCH2_DATA3
:
1361 case VC_ENDC_WATCH3_DATA1
... VC_ENDC_WATCH3_DATA3
:
1363 case VC_ENDC_WATCH0_DATA0
:
1364 case VC_ENDC_WATCH1_DATA0
:
1365 case VC_ENDC_WATCH2_DATA0
:
1366 case VC_ENDC_WATCH3_DATA0
:
1367 /* writing to DATA0 triggers the cache write */
1368 watch_engine
= (offset
- VC_ENDC_WATCH0_DATA0
) >> 6;
1369 xive
->vc_regs
[reg
] = val
;
1370 pnv_xive2_end_update(xive
, watch_engine
);
1374 /* case VC_ENDC_FLUSH_CTRL: */
1375 case VC_ENDC_FLUSH_POLL
:
1376 xive
->vc_regs
[VC_ENDC_FLUSH_CTRL
>> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID
;
1379 case VC_ENDC_FLUSH_INJECT
:
1380 pnv_xive2_inject_notify(xive
, PNV_XIVE2_CACHE_ENDC
);
1384 * Indirect invalidation
1386 case VC_AT_MACRO_KILL
:
1387 case VC_AT_MACRO_KILL_MASK
:
1391 * Interrupt fifo overflow in memory backing store (Not modeled)
1393 case VC_QUEUES_CFG_REM0
... VC_QUEUES_CFG_REM6
:
1399 case VC_ENDC_SYNC_DONE
:
1403 xive2_error(xive
, "VC: invalid write @%"HWADDR_PRIx
, offset
);
1407 xive
->vc_regs
[reg
] = val
;
1410 static const MemoryRegionOps pnv_xive2_ic_vc_ops
= {
1411 .read
= pnv_xive2_ic_vc_read
,
1412 .write
= pnv_xive2_ic_vc_write
,
1413 .endianness
= DEVICE_BIG_ENDIAN
,
1415 .min_access_size
= 8,
1416 .max_access_size
= 8,
1419 .min_access_size
= 8,
1420 .max_access_size
= 8,
1424 static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2
*xive
)
1426 uint64_t engine_mask
= GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN
,
1427 xive
->pc_regs
[PC_NXC_PROC_CONFIG
>> 3]);
1428 uint64_t state
= xive
->pc_regs
[PC_NXC_WATCH_ASSIGN
>> 3];
1432 * We keep track of which engines are currently busy in the
1433 * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
1434 * the register, we don't return its value but the ID of an engine
1436 * There are 4 engines. 0xFF means no engine is available.
1438 val
= pnv_xive2_cache_watch_assign(engine_mask
, &state
);
1440 xive
->pc_regs
[PC_NXC_WATCH_ASSIGN
>> 3] = state
;
1445 static void pnv_xive2_nxc_cache_watch_release(PnvXive2
*xive
,
1446 uint8_t watch_engine
)
1448 uint64_t state
= xive
->pc_regs
[PC_NXC_WATCH_ASSIGN
>> 3];
1450 pnv_xive2_cache_watch_release(&state
, watch_engine
);
1451 xive
->pc_regs
[PC_NXC_WATCH_ASSIGN
>> 3] = state
;
1454 static uint64_t pnv_xive2_ic_pc_read(void *opaque
, hwaddr offset
,
1457 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1459 uint32_t reg
= offset
>> 3;
1460 uint8_t watch_engine
;
1464 * VSD table settings.
1466 case PC_VSD_TABLE_ADDR
:
1467 case PC_VSD_TABLE_DATA
:
1468 val
= xive
->pc_regs
[reg
];
1471 case PC_NXC_WATCH_ASSIGN
:
1472 val
= pnv_xive2_nxc_cache_watch_assign(xive
);
1475 case PC_NXC_PROC_CONFIG
:
1476 val
= xive
->pc_regs
[reg
];
1482 case PC_NXC_WATCH0_SPEC
:
1483 case PC_NXC_WATCH1_SPEC
:
1484 case PC_NXC_WATCH2_SPEC
:
1485 case PC_NXC_WATCH3_SPEC
:
1486 watch_engine
= (offset
- PC_NXC_WATCH0_SPEC
) >> 6;
1487 xive
->pc_regs
[reg
] &= ~(PC_NXC_WATCH_FULL
| PC_NXC_WATCH_CONFLICT
);
1488 pnv_xive2_nxc_cache_watch_release(xive
, watch_engine
);
1489 val
= xive
->pc_regs
[reg
];
1492 case PC_NXC_WATCH0_DATA0
:
1493 case PC_NXC_WATCH1_DATA0
:
1494 case PC_NXC_WATCH2_DATA0
:
1495 case PC_NXC_WATCH3_DATA0
:
1497 * Load DATA registers from cache with data requested by the
1500 watch_engine
= (offset
- PC_NXC_WATCH0_DATA0
) >> 6;
1501 pnv_xive2_nxc_cache_load(xive
, watch_engine
);
1502 val
= xive
->pc_regs
[reg
];
1505 case PC_NXC_WATCH0_DATA1
... PC_NXC_WATCH0_DATA3
:
1506 case PC_NXC_WATCH1_DATA1
... PC_NXC_WATCH1_DATA3
:
1507 case PC_NXC_WATCH2_DATA1
... PC_NXC_WATCH2_DATA3
:
1508 case PC_NXC_WATCH3_DATA1
... PC_NXC_WATCH3_DATA3
:
1509 val
= xive
->pc_regs
[reg
];
1512 case PC_NXC_FLUSH_CTRL
:
1513 xive
->pc_regs
[reg
] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID
;
1514 val
= xive
->pc_regs
[reg
];
1518 * Indirect invalidation
1521 xive
->pc_regs
[reg
] &= ~PC_AT_KILL_VALID
;
1522 val
= xive
->pc_regs
[reg
];
1526 xive2_error(xive
, "PC: invalid read @%"HWADDR_PRIx
, offset
);
1532 static void pnv_xive2_pc_vst_set_data(PnvXive2
*xive
, uint64_t vsd
)
1534 uint8_t type
= GETFIELD(PC_VSD_TABLE_SELECT
,
1535 xive
->pc_regs
[PC_VSD_TABLE_ADDR
>> 3]);
1536 uint8_t blk
= GETFIELD(PC_VSD_TABLE_ADDRESS
,
1537 xive
->pc_regs
[PC_VSD_TABLE_ADDR
>> 3]);
1539 pnv_xive2_vst_set_data(xive
, vsd
, type
, blk
);
1542 static void pnv_xive2_ic_pc_write(void *opaque
, hwaddr offset
,
1543 uint64_t val
, unsigned size
)
1545 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1546 uint32_t reg
= offset
>> 3;
1547 uint8_t watch_engine
;
1552 * VSD table settings.
1553 * The Xive2Router model combines both VC and PC sub-engines. We
1554 * allow to configure the tables through both, for the rare cases
1555 * where a table only really needs to be configured for one of
1556 * them (e.g. the NVG table for the presenter). It assumes that
1557 * firmware passes the same address to the VC and PC when tables
1558 * are defined for both, which seems acceptable.
1560 case PC_VSD_TABLE_ADDR
:
1562 case PC_VSD_TABLE_DATA
:
1563 pnv_xive2_pc_vst_set_data(xive
, val
);
1566 case PC_NXC_PROC_CONFIG
:
1572 case PC_NXC_WATCH0_SPEC
:
1573 case PC_NXC_WATCH1_SPEC
:
1574 case PC_NXC_WATCH2_SPEC
:
1575 case PC_NXC_WATCH3_SPEC
:
1576 val
&= ~PC_NXC_WATCH_CONFLICT
; /* HW will set this bit */
1579 case PC_NXC_WATCH0_DATA1
... PC_NXC_WATCH0_DATA3
:
1580 case PC_NXC_WATCH1_DATA1
... PC_NXC_WATCH1_DATA3
:
1581 case PC_NXC_WATCH2_DATA1
... PC_NXC_WATCH2_DATA3
:
1582 case PC_NXC_WATCH3_DATA1
... PC_NXC_WATCH3_DATA3
:
1584 case PC_NXC_WATCH0_DATA0
:
1585 case PC_NXC_WATCH1_DATA0
:
1586 case PC_NXC_WATCH2_DATA0
:
1587 case PC_NXC_WATCH3_DATA0
:
1588 /* writing to DATA0 triggers the cache write */
1589 watch_engine
= (offset
- PC_NXC_WATCH0_DATA0
) >> 6;
1590 xive
->pc_regs
[reg
] = val
;
1591 pnv_xive2_nxc_update(xive
, watch_engine
);
1594 /* case PC_NXC_FLUSH_CTRL: */
1595 case PC_NXC_FLUSH_POLL
:
1596 xive
->pc_regs
[PC_NXC_FLUSH_CTRL
>> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID
;
1599 case PC_NXC_FLUSH_INJECT
:
1600 pnv_xive2_inject_notify(xive
, PNV_XIVE2_CACHE_NXC
);
1604 * Indirect invalidation
1607 case PC_AT_KILL_MASK
:
1611 xive2_error(xive
, "PC: invalid write @%"HWADDR_PRIx
, offset
);
1615 xive
->pc_regs
[reg
] = val
;
1618 static const MemoryRegionOps pnv_xive2_ic_pc_ops
= {
1619 .read
= pnv_xive2_ic_pc_read
,
1620 .write
= pnv_xive2_ic_pc_write
,
1621 .endianness
= DEVICE_BIG_ENDIAN
,
1623 .min_access_size
= 8,
1624 .max_access_size
= 8,
1627 .min_access_size
= 8,
1628 .max_access_size
= 8,
1633 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque
, hwaddr offset
,
1636 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1638 uint32_t reg
= offset
>> 3;
1642 * XIVE2 hardware thread enablement
1646 val
= xive
->tctxt_regs
[reg
];
1650 case TCTXT_EN0_RESET
:
1651 val
= xive
->tctxt_regs
[TCTXT_EN0
>> 3];
1654 case TCTXT_EN1_RESET
:
1655 val
= xive
->tctxt_regs
[TCTXT_EN1
>> 3];
1658 val
= xive
->tctxt_regs
[reg
];
1661 xive2_error(xive
, "TCTXT: invalid read @%"HWADDR_PRIx
, offset
);
1667 static void pnv_xive2_ic_tctxt_write(void *opaque
, hwaddr offset
,
1668 uint64_t val
, unsigned size
)
1670 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1671 uint32_t reg
= offset
>> 3;
1675 * XIVE2 hardware thread enablement
1677 case TCTXT_EN0
: /* Physical Thread Enable */
1678 case TCTXT_EN1
: /* Physical Thread Enable (fused core) */
1679 xive
->tctxt_regs
[reg
] = val
;
1683 xive
->tctxt_regs
[TCTXT_EN0
>> 3] |= val
;
1686 xive
->tctxt_regs
[TCTXT_EN1
>> 3] |= val
;
1688 case TCTXT_EN0_RESET
:
1689 xive
->tctxt_regs
[TCTXT_EN0
>> 3] &= ~val
;
1691 case TCTXT_EN1_RESET
:
1692 xive
->tctxt_regs
[TCTXT_EN1
>> 3] &= ~val
;
1695 xive
->tctxt_regs
[reg
] = val
;
1698 xive2_error(xive
, "TCTXT: invalid write @%"HWADDR_PRIx
, offset
);
1703 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops
= {
1704 .read
= pnv_xive2_ic_tctxt_read
,
1705 .write
= pnv_xive2_ic_tctxt_write
,
1706 .endianness
= DEVICE_BIG_ENDIAN
,
1708 .min_access_size
= 8,
1709 .max_access_size
= 8,
1712 .min_access_size
= 8,
1713 .max_access_size
= 8,
1718 * Redirect XSCOM to MMIO handlers
1720 static uint64_t pnv_xive2_xscom_read(void *opaque
, hwaddr offset
,
1723 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1725 uint32_t xscom_reg
= offset
>> 3;
1726 uint32_t mmio_offset
= (xscom_reg
& 0xFF) << 3;
1728 switch (xscom_reg
) {
1729 case 0x000 ... 0x0FF:
1730 val
= pnv_xive2_ic_cq_read(opaque
, mmio_offset
, size
);
1732 case 0x100 ... 0x1FF:
1733 val
= pnv_xive2_ic_vc_read(opaque
, mmio_offset
, size
);
1735 case 0x200 ... 0x2FF:
1736 val
= pnv_xive2_ic_pc_read(opaque
, mmio_offset
, size
);
1738 case 0x300 ... 0x3FF:
1739 val
= pnv_xive2_ic_tctxt_read(opaque
, mmio_offset
, size
);
1742 xive2_error(xive
, "XSCOM: invalid read @%"HWADDR_PRIx
, offset
);
1748 static void pnv_xive2_xscom_write(void *opaque
, hwaddr offset
,
1749 uint64_t val
, unsigned size
)
1751 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1752 uint32_t xscom_reg
= offset
>> 3;
1753 uint32_t mmio_offset
= (xscom_reg
& 0xFF) << 3;
1755 switch (xscom_reg
) {
1756 case 0x000 ... 0x0FF:
1757 pnv_xive2_ic_cq_write(opaque
, mmio_offset
, val
, size
);
1759 case 0x100 ... 0x1FF:
1760 pnv_xive2_ic_vc_write(opaque
, mmio_offset
, val
, size
);
1762 case 0x200 ... 0x2FF:
1763 pnv_xive2_ic_pc_write(opaque
, mmio_offset
, val
, size
);
1765 case 0x300 ... 0x3FF:
1766 pnv_xive2_ic_tctxt_write(opaque
, mmio_offset
, val
, size
);
1769 xive2_error(xive
, "XSCOM: invalid write @%"HWADDR_PRIx
, offset
);
1773 static const MemoryRegionOps pnv_xive2_xscom_ops
= {
1774 .read
= pnv_xive2_xscom_read
,
1775 .write
= pnv_xive2_xscom_write
,
1776 .endianness
= DEVICE_BIG_ENDIAN
,
1778 .min_access_size
= 8,
1779 .max_access_size
= 8,
1782 .min_access_size
= 8,
1783 .max_access_size
= 8,
1788 * Notify port page. The layout is compatible between 4K and 64K pages :
1790 * Page 1 Notify page (writes only)
1791 * 0x000 - 0x7FF IPI interrupt (NPU)
1792 * 0x800 - 0xFFF HW interrupt triggers (PSI, PHB)
1795 static void pnv_xive2_ic_hw_trigger(PnvXive2
*xive
, hwaddr addr
,
1801 if (val
& XIVE_TRIGGER_END
) {
1802 xive2_error(xive
, "IC: END trigger at @0x%"HWADDR_PRIx
" data 0x%"PRIx64
,
1808 * Forward the source event notification directly to the Router.
1809 * The source interrupt number should already be correctly encoded
1810 * with the chip block id by the sending device (PHB, PSI).
1812 blk
= XIVE_EAS_BLOCK(val
);
1813 idx
= XIVE_EAS_INDEX(val
);
1815 xive2_router_notify(XIVE_NOTIFIER(xive
), XIVE_EAS(blk
, idx
),
1816 !!(val
& XIVE_TRIGGER_PQ
));
1819 static void pnv_xive2_ic_notify_write(void *opaque
, hwaddr offset
,
1820 uint64_t val
, unsigned size
)
1822 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1824 /* VC: IPI triggers */
1826 case 0x000 ... 0x7FF:
1827 /* TODO: check IPI notify sub-page routing */
1828 pnv_xive2_ic_hw_trigger(opaque
, offset
, val
);
1831 /* VC: HW triggers */
1832 case 0x800 ... 0xFFF:
1833 pnv_xive2_ic_hw_trigger(opaque
, offset
, val
);
1837 xive2_error(xive
, "NOTIFY: invalid write @%"HWADDR_PRIx
, offset
);
1841 static uint64_t pnv_xive2_ic_notify_read(void *opaque
, hwaddr offset
,
1844 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1846 /* loads are invalid */
1847 xive2_error(xive
, "NOTIFY: invalid read @%"HWADDR_PRIx
, offset
);
1851 static const MemoryRegionOps pnv_xive2_ic_notify_ops
= {
1852 .read
= pnv_xive2_ic_notify_read
,
1853 .write
= pnv_xive2_ic_notify_write
,
1854 .endianness
= DEVICE_BIG_ENDIAN
,
1856 .min_access_size
= 8,
1857 .max_access_size
= 8,
1860 .min_access_size
= 8,
1861 .max_access_size
= 8,
1865 static uint64_t pnv_xive2_ic_lsi_read(void *opaque
, hwaddr offset
,
1868 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1870 xive2_error(xive
, "LSI: invalid read @%"HWADDR_PRIx
, offset
);
1874 static void pnv_xive2_ic_lsi_write(void *opaque
, hwaddr offset
,
1875 uint64_t val
, unsigned size
)
1877 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1879 xive2_error(xive
, "LSI: invalid write @%"HWADDR_PRIx
, offset
);
1882 static const MemoryRegionOps pnv_xive2_ic_lsi_ops
= {
1883 .read
= pnv_xive2_ic_lsi_read
,
1884 .write
= pnv_xive2_ic_lsi_write
,
1885 .endianness
= DEVICE_BIG_ENDIAN
,
1887 .min_access_size
= 8,
1888 .max_access_size
= 8,
1891 .min_access_size
= 8,
1892 .max_access_size
= 8,
1897 * Sync MMIO page (write only)
1899 #define PNV_XIVE2_SYNC_IPI 0x000
1900 #define PNV_XIVE2_SYNC_HW 0x080
1901 #define PNV_XIVE2_SYNC_NxC 0x100
1902 #define PNV_XIVE2_SYNC_INT 0x180
1903 #define PNV_XIVE2_SYNC_OS_ESC 0x200
1904 #define PNV_XIVE2_SYNC_POOL_ESC 0x280
1905 #define PNV_XIVE2_SYNC_HARD_ESC 0x300
1906 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800
1907 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880
1908 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900
1909 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980
1910 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00
1911 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80
1913 static uint64_t pnv_xive2_ic_sync_read(void *opaque
, hwaddr offset
,
1916 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1918 /* loads are invalid */
1919 xive2_error(xive
, "SYNC: invalid read @%"HWADDR_PRIx
, offset
);
1924 * The sync MMIO space spans two pages. The lower page is use for
1925 * queue sync "poll" requests while the upper page is used for queue
1926 * sync "inject" requests. Inject requests require the HW to write
1927 * a byte of all 1's to a predetermined location in memory in order
1928 * to signal completion of the request. Both pages have the same
1929 * layout, so it is easiest to handle both with a single function.
1931 static void pnv_xive2_ic_sync_write(void *opaque
, hwaddr offset
,
1932 uint64_t val
, unsigned size
)
1934 PnvXive2
*xive
= PNV_XIVE2(opaque
);
1936 hwaddr pg_offset_mask
= (1ull << xive
->ic_shift
) - 1;
1938 /* adjust offset for inject page */
1939 hwaddr adj_offset
= offset
& pg_offset_mask
;
1941 switch (adj_offset
) {
1942 case PNV_XIVE2_SYNC_IPI
:
1943 inject_type
= PNV_XIVE2_QUEUE_IPI
;
1945 case PNV_XIVE2_SYNC_HW
:
1946 inject_type
= PNV_XIVE2_QUEUE_HW
;
1948 case PNV_XIVE2_SYNC_NxC
:
1949 inject_type
= PNV_XIVE2_QUEUE_NXC
;
1951 case PNV_XIVE2_SYNC_INT
:
1952 inject_type
= PNV_XIVE2_QUEUE_INT
;
1954 case PNV_XIVE2_SYNC_OS_ESC
:
1955 inject_type
= PNV_XIVE2_QUEUE_OS
;
1957 case PNV_XIVE2_SYNC_POOL_ESC
:
1958 inject_type
= PNV_XIVE2_QUEUE_POOL
;
1960 case PNV_XIVE2_SYNC_HARD_ESC
:
1961 inject_type
= PNV_XIVE2_QUEUE_HARD
;
1963 case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO
:
1964 inject_type
= PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO
;
1966 case PNV_XIVE2_SYNC_NXC_LD_LCL_CO
:
1967 inject_type
= PNV_XIVE2_QUEUE_NXC_LD_LCL_CO
;
1969 case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI
:
1970 inject_type
= PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI
;
1972 case PNV_XIVE2_SYNC_NXC_ST_LCL_CI
:
1973 inject_type
= PNV_XIVE2_QUEUE_NXC_ST_LCL_CI
;
1975 case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI
:
1976 inject_type
= PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI
;
1978 case PNV_XIVE2_SYNC_NXC_ST_RMT_CI
:
1979 inject_type
= PNV_XIVE2_QUEUE_NXC_ST_RMT_CI
;
1982 xive2_error(xive
, "SYNC: invalid write @%"HWADDR_PRIx
, offset
);
1986 /* Write Queue Sync notification byte if writing to sync inject page */
1987 if ((offset
& ~pg_offset_mask
) != 0) {
1988 pnv_xive2_inject_notify(xive
, inject_type
);
1992 static const MemoryRegionOps pnv_xive2_ic_sync_ops
= {
1993 .read
= pnv_xive2_ic_sync_read
,
1994 .write
= pnv_xive2_ic_sync_write
,
1995 .endianness
= DEVICE_BIG_ENDIAN
,
1997 .min_access_size
= 8,
1998 .max_access_size
= 8,
2001 .min_access_size
= 8,
2002 .max_access_size
= 8,
2007 * When the TM direct pages of the IC controller are accessed, the
2008 * target HW thread is deduced from the page offset.
2010 static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2
*xive
, hwaddr offset
)
2012 /* On P10, the node ID shift in the PIR register is 8 bits */
2013 return xive
->chip
->chip_id
<< 8 | offset
>> xive
->ic_shift
;
2016 static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2
*xive
,
2020 * Indirect TIMA accesses are similar to direct accesses for
2021 * privilege ring 0. So remove any traces of the hw thread ID from
2022 * the offset in the IC BAR as it could be interpreted as the ring
2023 * privilege when calling the underlying direct access functions.
2025 return offset
& ((1ull << xive
->ic_shift
) - 1);
2028 static XiveTCTX
*pnv_xive2_get_indirect_tctx(PnvXive2
*xive
, uint32_t pir
)
2030 PnvChip
*chip
= xive
->chip
;
2031 PowerPCCPU
*cpu
= NULL
;
2033 cpu
= pnv_chip_find_cpu(chip
, pir
);
2035 xive2_error(xive
, "IC: invalid PIR %x for indirect access", pir
);
2039 if (!pnv_xive2_is_cpu_enabled(xive
, cpu
)) {
2040 xive2_error(xive
, "IC: CPU %x is not enabled", pir
);
2043 return XIVE_TCTX(pnv_cpu_state(cpu
)->intc
);
2046 static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque
, hwaddr offset
,
2049 PnvXive2
*xive
= PNV_XIVE2(opaque
);
2050 XivePresenter
*xptr
= XIVE_PRESENTER(xive
);
2051 hwaddr hw_page_offset
;
2056 pir
= pnv_xive2_ic_tm_get_pir(xive
, offset
);
2057 hw_page_offset
= pnv_xive2_ic_tm_get_hw_page_offset(xive
, offset
);
2058 tctx
= pnv_xive2_get_indirect_tctx(xive
, pir
);
2060 val
= xive_tctx_tm_read(xptr
, tctx
, hw_page_offset
, size
);
2066 static void pnv_xive2_ic_tm_indirect_write(void *opaque
, hwaddr offset
,
2067 uint64_t val
, unsigned size
)
2069 PnvXive2
*xive
= PNV_XIVE2(opaque
);
2070 XivePresenter
*xptr
= XIVE_PRESENTER(xive
);
2071 hwaddr hw_page_offset
;
2075 pir
= pnv_xive2_ic_tm_get_pir(xive
, offset
);
2076 hw_page_offset
= pnv_xive2_ic_tm_get_hw_page_offset(xive
, offset
);
2077 tctx
= pnv_xive2_get_indirect_tctx(xive
, pir
);
2079 xive_tctx_tm_write(xptr
, tctx
, hw_page_offset
, val
, size
);
2083 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops
= {
2084 .read
= pnv_xive2_ic_tm_indirect_read
,
2085 .write
= pnv_xive2_ic_tm_indirect_write
,
2086 .endianness
= DEVICE_BIG_ENDIAN
,
2088 .min_access_size
= 1,
2089 .max_access_size
= 8,
2092 .min_access_size
= 1,
2093 .max_access_size
= 8,
2100 static void pnv_xive2_tm_write(void *opaque
, hwaddr offset
,
2101 uint64_t value
, unsigned size
)
2103 PowerPCCPU
*cpu
= POWERPC_CPU(current_cpu
);
2104 PnvXive2
*xive
= pnv_xive2_tm_get_xive(cpu
);
2105 XiveTCTX
*tctx
= XIVE_TCTX(pnv_cpu_state(cpu
)->intc
);
2106 XivePresenter
*xptr
= XIVE_PRESENTER(xive
);
2108 xive_tctx_tm_write(xptr
, tctx
, offset
, value
, size
);
2111 static uint64_t pnv_xive2_tm_read(void *opaque
, hwaddr offset
, unsigned size
)
2113 PowerPCCPU
*cpu
= POWERPC_CPU(current_cpu
);
2114 PnvXive2
*xive
= pnv_xive2_tm_get_xive(cpu
);
2115 XiveTCTX
*tctx
= XIVE_TCTX(pnv_cpu_state(cpu
)->intc
);
2116 XivePresenter
*xptr
= XIVE_PRESENTER(xive
);
2118 return xive_tctx_tm_read(xptr
, tctx
, offset
, size
);
2121 static const MemoryRegionOps pnv_xive2_tm_ops
= {
2122 .read
= pnv_xive2_tm_read
,
2123 .write
= pnv_xive2_tm_write
,
2124 .endianness
= DEVICE_BIG_ENDIAN
,
2126 .min_access_size
= 1,
2127 .max_access_size
= 8,
2130 .min_access_size
= 1,
2131 .max_access_size
= 8,
2135 static uint64_t pnv_xive2_nvc_read(void *opaque
, hwaddr offset
,
2138 PnvXive2
*xive
= PNV_XIVE2(opaque
);
2140 xive2_error(xive
, "NVC: invalid read @%"HWADDR_PRIx
, offset
);
2144 static void pnv_xive2_nvc_write(void *opaque
, hwaddr offset
,
2145 uint64_t val
, unsigned size
)
2147 PnvXive2
*xive
= PNV_XIVE2(opaque
);
2149 xive2_error(xive
, "NVC: invalid write @%"HWADDR_PRIx
, offset
);
2152 static const MemoryRegionOps pnv_xive2_nvc_ops
= {
2153 .read
= pnv_xive2_nvc_read
,
2154 .write
= pnv_xive2_nvc_write
,
2155 .endianness
= DEVICE_BIG_ENDIAN
,
2157 .min_access_size
= 8,
2158 .max_access_size
= 8,
2161 .min_access_size
= 8,
2162 .max_access_size
= 8,
2166 static uint64_t pnv_xive2_nvpg_read(void *opaque
, hwaddr offset
,
2169 PnvXive2
*xive
= PNV_XIVE2(opaque
);
2171 xive2_error(xive
, "NVPG: invalid read @%"HWADDR_PRIx
, offset
);
2175 static void pnv_xive2_nvpg_write(void *opaque
, hwaddr offset
,
2176 uint64_t val
, unsigned size
)
2178 PnvXive2
*xive
= PNV_XIVE2(opaque
);
2180 xive2_error(xive
, "NVPG: invalid write @%"HWADDR_PRIx
, offset
);
2183 static const MemoryRegionOps pnv_xive2_nvpg_ops
= {
2184 .read
= pnv_xive2_nvpg_read
,
2185 .write
= pnv_xive2_nvpg_write
,
2186 .endianness
= DEVICE_BIG_ENDIAN
,
2188 .min_access_size
= 8,
2189 .max_access_size
= 8,
2192 .min_access_size
= 8,
2193 .max_access_size
= 8,
2198 * POWER10 default capabilities: 0x2000120076f000FC
2200 #define PNV_XIVE2_CAPABILITIES 0x2000120076f000FC
2203 * POWER10 default configuration: 0x0030000033000000
2205 * 8bits thread id was dropped for P10
2207 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2209 static void pnv_xive2_reset(void *dev
)
2211 PnvXive2
*xive
= PNV_XIVE2(dev
);
2212 XiveSource
*xsrc
= &xive
->ipi_source
;
2213 Xive2EndSource
*end_xsrc
= &xive
->end_source
;
2215 xive
->cq_regs
[CQ_XIVE_CAP
>> 3] = xive
->capabilities
;
2216 xive
->cq_regs
[CQ_XIVE_CFG
>> 3] = xive
->config
;
2218 /* HW hardwires the #Topology of the chip in the block field */
2219 xive
->cq_regs
[CQ_XIVE_CFG
>> 3] |=
2220 SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID
, 0ull, xive
->chip
->chip_id
);
2222 /* VC and PC cache watch assign mechanism */
2223 xive
->vc_regs
[VC_ENDC_CFG
>> 3] =
2224 SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN
, 0ull, 0b0111);
2225 xive
->pc_regs
[PC_NXC_PROC_CONFIG
>> 3] =
2226 SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN
, 0ull, 0b0111);
2228 /* Set default page size to 64k */
2229 xive
->ic_shift
= xive
->esb_shift
= xive
->end_shift
= 16;
2230 xive
->nvc_shift
= xive
->nvpg_shift
= xive
->tm_shift
= 16;
2232 /* Clear source MMIOs */
2233 if (memory_region_is_mapped(&xsrc
->esb_mmio
)) {
2234 memory_region_del_subregion(&xive
->esb_mmio
, &xsrc
->esb_mmio
);
2237 if (memory_region_is_mapped(&end_xsrc
->esb_mmio
)) {
2238 memory_region_del_subregion(&xive
->end_mmio
, &end_xsrc
->esb_mmio
);
2243 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2246 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2247 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2249 static void pnv_xive2_realize(DeviceState
*dev
, Error
**errp
)
2251 PnvXive2
*xive
= PNV_XIVE2(dev
);
2252 PnvXive2Class
*pxc
= PNV_XIVE2_GET_CLASS(dev
);
2253 XiveSource
*xsrc
= &xive
->ipi_source
;
2254 Xive2EndSource
*end_xsrc
= &xive
->end_source
;
2255 Error
*local_err
= NULL
;
2258 pxc
->parent_realize(dev
, &local_err
);
2260 error_propagate(errp
, local_err
);
2267 * The XiveSource and Xive2EndSource objects are realized with the
2268 * maximum allowed HW configuration. The ESB MMIO regions will be
2269 * resized dynamically when the controller is configured by the FW
2270 * to limit accesses to resources not provisioned.
2272 object_property_set_int(OBJECT(xsrc
), "flags", XIVE_SRC_STORE_EOI
,
2274 object_property_set_int(OBJECT(xsrc
), "nr-irqs", PNV_XIVE2_NR_IRQS
,
2276 object_property_set_link(OBJECT(xsrc
), "xive", OBJECT(xive
),
2278 qdev_realize(DEVICE(xsrc
), NULL
, &local_err
);
2280 error_propagate(errp
, local_err
);
2284 object_property_set_int(OBJECT(end_xsrc
), "nr-ends", PNV_XIVE2_NR_ENDS
,
2286 object_property_set_link(OBJECT(end_xsrc
), "xive", OBJECT(xive
),
2288 qdev_realize(DEVICE(end_xsrc
), NULL
, &local_err
);
2290 error_propagate(errp
, local_err
);
2294 /* XSCOM region, used for initial configuration of the BARs */
2295 memory_region_init_io(&xive
->xscom_regs
, OBJECT(dev
),
2296 &pnv_xive2_xscom_ops
, xive
, "xscom-xive",
2297 PNV10_XSCOM_XIVE2_SIZE
<< 3);
2299 /* Interrupt controller MMIO regions */
2300 xive
->ic_shift
= 16;
2301 memory_region_init(&xive
->ic_mmio
, OBJECT(dev
), "xive-ic",
2302 PNV10_XIVE2_IC_SIZE
);
2304 for (i
= 0; i
< ARRAY_SIZE(xive
->ic_mmios
); i
++) {
2305 memory_region_init_io(&xive
->ic_mmios
[i
], OBJECT(dev
),
2306 pnv_xive2_ic_regions
[i
].ops
, xive
,
2307 pnv_xive2_ic_regions
[i
].name
,
2308 pnv_xive2_ic_regions
[i
].pgsize
<< xive
->ic_shift
);
2314 xive
->esb_shift
= 16;
2315 xive
->end_shift
= 16;
2316 memory_region_init(&xive
->esb_mmio
, OBJECT(xive
), "xive-esb",
2317 PNV10_XIVE2_ESB_SIZE
);
2318 memory_region_init(&xive
->end_mmio
, OBJECT(xive
), "xive-end",
2319 PNV10_XIVE2_END_SIZE
);
2321 /* Presenter Controller MMIO region (not modeled) */
2322 xive
->nvc_shift
= 16;
2323 xive
->nvpg_shift
= 16;
2324 memory_region_init_io(&xive
->nvc_mmio
, OBJECT(dev
),
2325 &pnv_xive2_nvc_ops
, xive
,
2326 "xive-nvc", PNV10_XIVE2_NVC_SIZE
);
2328 memory_region_init_io(&xive
->nvpg_mmio
, OBJECT(dev
),
2329 &pnv_xive2_nvpg_ops
, xive
,
2330 "xive-nvpg", PNV10_XIVE2_NVPG_SIZE
);
2332 /* Thread Interrupt Management Area (Direct) */
2333 xive
->tm_shift
= 16;
2334 memory_region_init_io(&xive
->tm_mmio
, OBJECT(dev
), &pnv_xive2_tm_ops
,
2335 xive
, "xive-tima", PNV10_XIVE2_TM_SIZE
);
2337 qemu_register_reset(pnv_xive2_reset
, dev
);
2340 static Property pnv_xive2_properties
[] = {
2341 DEFINE_PROP_UINT64("ic-bar", PnvXive2
, ic_base
, 0),
2342 DEFINE_PROP_UINT64("esb-bar", PnvXive2
, esb_base
, 0),
2343 DEFINE_PROP_UINT64("end-bar", PnvXive2
, end_base
, 0),
2344 DEFINE_PROP_UINT64("nvc-bar", PnvXive2
, nvc_base
, 0),
2345 DEFINE_PROP_UINT64("nvpg-bar", PnvXive2
, nvpg_base
, 0),
2346 DEFINE_PROP_UINT64("tm-bar", PnvXive2
, tm_base
, 0),
2347 DEFINE_PROP_UINT64("capabilities", PnvXive2
, capabilities
,
2348 PNV_XIVE2_CAPABILITIES
),
2349 DEFINE_PROP_UINT64("config", PnvXive2
, config
,
2350 PNV_XIVE2_CONFIGURATION
),
2351 DEFINE_PROP_LINK("chip", PnvXive2
, chip
, TYPE_PNV_CHIP
, PnvChip
*),
2352 DEFINE_PROP_END_OF_LIST(),
2355 static void pnv_xive2_instance_init(Object
*obj
)
2357 PnvXive2
*xive
= PNV_XIVE2(obj
);
2359 object_initialize_child(obj
, "ipi_source", &xive
->ipi_source
,
2361 object_initialize_child(obj
, "end_source", &xive
->end_source
,
2362 TYPE_XIVE2_END_SOURCE
);
2365 static int pnv_xive2_dt_xscom(PnvXScomInterface
*dev
, void *fdt
,
2368 const char compat_p10
[] = "ibm,power10-xive-x";
2372 cpu_to_be32(PNV10_XSCOM_XIVE2_BASE
),
2373 cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE
)
2376 name
= g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE
);
2377 offset
= fdt_add_subnode(fdt
, xscom_offset
, name
);
2381 _FDT((fdt_setprop(fdt
, offset
, "reg", reg
, sizeof(reg
))));
2382 _FDT(fdt_setprop(fdt
, offset
, "compatible", compat_p10
,
2383 sizeof(compat_p10
)));
2387 static void pnv_xive2_class_init(ObjectClass
*klass
, void *data
)
2389 DeviceClass
*dc
= DEVICE_CLASS(klass
);
2390 PnvXScomInterfaceClass
*xdc
= PNV_XSCOM_INTERFACE_CLASS(klass
);
2391 Xive2RouterClass
*xrc
= XIVE2_ROUTER_CLASS(klass
);
2392 XiveNotifierClass
*xnc
= XIVE_NOTIFIER_CLASS(klass
);
2393 XivePresenterClass
*xpc
= XIVE_PRESENTER_CLASS(klass
);
2394 PnvXive2Class
*pxc
= PNV_XIVE2_CLASS(klass
);
2396 xdc
->dt_xscom
= pnv_xive2_dt_xscom
;
2398 dc
->desc
= "PowerNV XIVE2 Interrupt Controller (POWER10)";
2399 device_class_set_parent_realize(dc
, pnv_xive2_realize
,
2400 &pxc
->parent_realize
);
2401 device_class_set_props(dc
, pnv_xive2_properties
);
2403 xrc
->get_eas
= pnv_xive2_get_eas
;
2404 xrc
->get_pq
= pnv_xive2_get_pq
;
2405 xrc
->set_pq
= pnv_xive2_set_pq
;
2406 xrc
->get_end
= pnv_xive2_get_end
;
2407 xrc
->write_end
= pnv_xive2_write_end
;
2408 xrc
->get_nvp
= pnv_xive2_get_nvp
;
2409 xrc
->write_nvp
= pnv_xive2_write_nvp
;
2410 xrc
->get_config
= pnv_xive2_get_config
;
2411 xrc
->get_block_id
= pnv_xive2_get_block_id
;
2413 xnc
->notify
= pnv_xive2_notify
;
2415 xpc
->match_nvt
= pnv_xive2_match_nvt
;
2416 xpc
->get_config
= pnv_xive2_presenter_get_config
;
2419 static const TypeInfo pnv_xive2_info
= {
2420 .name
= TYPE_PNV_XIVE2
,
2421 .parent
= TYPE_XIVE2_ROUTER
,
2422 .instance_init
= pnv_xive2_instance_init
,
2423 .instance_size
= sizeof(PnvXive2
),
2424 .class_init
= pnv_xive2_class_init
,
2425 .class_size
= sizeof(PnvXive2Class
),
2426 .interfaces
= (InterfaceInfo
[]) {
2427 { TYPE_PNV_XSCOM_INTERFACE
},
2432 static void pnv_xive2_register_types(void)
2434 type_register_static(&pnv_xive2_info
);
2437 type_init(pnv_xive2_register_types
)
2440 * If the table is direct, we can compute the number of PQ entries
2441 * provisioned by FW.
2443 static uint32_t pnv_xive2_nr_esbs(PnvXive2
*xive
)
2445 uint8_t blk
= pnv_xive2_block_id(xive
);
2446 uint64_t vsd
= xive
->vsds
[VST_ESB
][blk
];
2447 uint64_t vst_tsize
= 1ull << (GETFIELD(VSD_TSIZE
, vsd
) + 12);
2449 return VSD_INDIRECT
& vsd
? 0 : vst_tsize
* SBE_PER_BYTE
;
2453 * Compute the number of entries per indirect subpage.
2455 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2
*xive
, uint32_t type
)
2457 uint8_t blk
= pnv_xive2_block_id(xive
);
2458 uint64_t vsd
= xive
->vsds
[type
][blk
];
2459 const XiveVstInfo
*info
= &vst_infos
[type
];
2461 uint32_t page_shift
;
2463 /* For direct tables, fake a valid value */
2464 if (!(VSD_INDIRECT
& vsd
)) {
2468 /* Get the page size of the indirect table. */
2469 vsd_addr
= vsd
& VSD_ADDRESS_MASK
;
2470 ldq_be_dma(&address_space_memory
, vsd_addr
, &vsd
, MEMTXATTRS_UNSPECIFIED
);
2472 if (!(vsd
& VSD_ADDRESS_MASK
)) {
2474 xive2_error(xive
, "VST: invalid %s entry!?", info
->name
);
2479 page_shift
= GETFIELD(VSD_TSIZE
, vsd
) + 12;
2481 if (!pnv_xive2_vst_page_size_allowed(page_shift
)) {
2482 xive2_error(xive
, "VST: invalid %s page shift %d", info
->name
,
2487 return (1ull << page_shift
) / info
->size
;
2490 void pnv_xive2_pic_print_info(PnvXive2
*xive
, GString
*buf
)
2492 Xive2Router
*xrtr
= XIVE2_ROUTER(xive
);
2493 uint8_t blk
= pnv_xive2_block_id(xive
);
2494 uint8_t chip_id
= xive
->chip
->chip_id
;
2495 uint32_t srcno0
= XIVE_EAS(blk
, 0);
2496 uint32_t nr_esbs
= pnv_xive2_nr_esbs(xive
);
2501 uint64_t xive_nvp_per_subpage
;
2503 g_string_append_printf(buf
, "XIVE[%x] Source %08x .. %08x\n",
2504 blk
, srcno0
, srcno0
+ nr_esbs
- 1);
2505 xive_source_pic_print_info(&xive
->ipi_source
, srcno0
, buf
);
2507 g_string_append_printf(buf
, "XIVE[%x] EAT %08x .. %08x\n",
2508 blk
, srcno0
, srcno0
+ nr_esbs
- 1);
2509 for (i
= 0; i
< nr_esbs
; i
++) {
2510 if (xive2_router_get_eas(xrtr
, blk
, i
, &eas
)) {
2513 if (!xive2_eas_is_masked(&eas
)) {
2514 xive2_eas_pic_print_info(&eas
, i
, buf
);
2518 g_string_append_printf(buf
, "XIVE[%x] #%d END Escalation EAT\n",
2521 while (!xive2_router_get_end(xrtr
, blk
, i
, &end
)) {
2522 xive2_end_eas_pic_print_info(&end
, i
++, buf
);
2525 g_string_append_printf(buf
, "XIVE[%x] #%d ENDT\n", chip_id
, blk
);
2527 while (!xive2_router_get_end(xrtr
, blk
, i
, &end
)) {
2528 xive2_end_pic_print_info(&end
, i
++, buf
);
2531 g_string_append_printf(buf
, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2532 chip_id
, blk
, 0, XIVE2_NVP_COUNT
- 1);
2533 xive_nvp_per_subpage
= pnv_xive2_vst_per_subpage(xive
, VST_NVP
);
2534 for (i
= 0; i
< XIVE2_NVP_COUNT
; i
+= xive_nvp_per_subpage
) {
2535 while (!xive2_router_get_nvp(xrtr
, blk
, i
, &nvp
)) {
2536 xive2_nvp_pic_print_info(&nvp
, i
++, buf
);