#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    /* Device Scoped Memory Affinity Structure */
    dsmas = g_malloc(sizeof(*dsmas));
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = CDAT_DSMAS_FLAG_NV,
        .DPA_base = 0,
        .DPA_length = int128_get64(mr->size),
    };

    /* For now, no memory side cache, plausible-ish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    /* Device Scoped EFI Memory Type Structure */
    dsemts = g_malloc(sizeof(*dsemts));
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /* Reserved - the non-volatile flag from DSMAS matters */
        .EFI_memory_type_attr = 2,
        .DPA_offset = 0,
        .DPA_length = int128_get64(mr->size),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}
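
/*
 * Worked example of the DSLBIS encoding above (the numbers themselves are
 * illustrative placeholders, as the comment in the function notes): latency
 * entries resolve to entry[0] * entry_base_unit in picoseconds, so
 * 15 * 10000 ps = 150 ns for reads and 25 * 10000 ps = 250 ns for writes.
 * Bandwidth entries use MB/s as the base unit, so 16 * 1000 MB/s = 16 GB/s
 * in each direction.
 */
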
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    MemoryRegion *nonvolatile_mr;
    CXLType3Dev *ct3d = priv;
    int dsmad_handle = 0;
    int rc;

    if (!ct3d->hostmem) {
        return 0;
    }

    nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!nonvolatile_mr) {
        return -EINVAL;
    }

    table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table));

    rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr);
    if (rc < 0) {
        return rc;
    }

    *cdat_table = g_steal_pointer(&table);

    return CT3_CDAT_NUM_ENTRIES;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
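
/*
 * Sketch of the resulting DOE exchange (a restatement of the flow above,
 * with illustrative handle values): the host writes a CDAT read request
 * carrying entry_handle N into the write mailbox; the device answers with
 * entry N's bytes and advertises N + 1 as the next handle. Once the final
 * entry has been returned, entry_handle is set to CXL_DOE_TAB_ENT_MAX,
 * telling the host the table walk is complete.
 */
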
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)
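
/*
 * UI64_NULL doubles as the default for the "sn" property below: a device
 * instantiated without an explicit serial number skips creation of the
 * PCIe Device Serial Number capability in ct3_realize().
 */
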
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .range1_size_hi = ct3d->hostmem->size >> 32,
        .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                          (ct3d->hostmem->size & 0xF0000000),
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}
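
/*
 * Decoding the range1_size_lo literal above, with field positions assumed
 * from the CXL 2.0 device DVSEC range register layout: the low byte packs
 * a memory-class field ((2 << 5)), a media-type field ((2 << 2)), and 0x3
 * for Memory_Info_Valid | Memory_Active, while bits [31:28] carry the low
 * part of the range size - hence the 0xF0000000 mask, which also implies
 * the backing memory size is expected to be 256 MiB aligned.
 */
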
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;

    assert(which == 0);

    /* TODO: Sanity checks that the decoder is possible */
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);

    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
}
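
/*
 * This models the commit handshake from the CXL 2.0 HDM decoder
 * programming flow: software writes COMMIT = 1, and the device responds by
 * clearing COMMIT and ERR and latching COMMITTED = 1. A real device could
 * instead set ERR when the requested configuration is impossible, which is
 * what the sanity-check TODO above refers to.
 */
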
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        which_hdm = 0;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If single bit written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using wrong flow for multiple header recording.
                 * Following behavior in PCIe r6.0 and assuming multiple
                 * header support. Implementation defined choice to clear all
                 * matching records if more than one bit set - which
                 * corresponds closest to behavior of hardware not capable of
                 * multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log.
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }

        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        /* The status register is RW1C: writing 1 clears the written bits */
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);

        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    }
}

static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);
    MemoryRegion *mr;
    char *name;

    if (!ct3d->hostmem) {
        error_setg(errp, "memdev property must be set");
        return false;
    }

    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        error_setg(errp, "memdev property must be set");
        return false;
    }
    memory_region_set_nonvolatile(mr, true);
    memory_region_set_enabled(mr, true);
    host_memory_backend_set_mapped(ct3d->hostmem, true);

    if (ds->id) {
        name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id);
    } else {
        name = g_strdup("cxl-type3-dpa-space");
    }
    address_space_init(&ct3d->hostmem_as, mr, name);
    g_free(name);

    ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size;

    if (!ct3d->lsa) {
        error_setg(errp, "lsa property must be set");
        return false;
    }

    return true;
}

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 1;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);
    pci_register_bar(pci_dev, CXL_COMPONENT_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
err_address_space_free:
    address_space_destroy(&ct3d->hostmem_as);
}
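
/*
 * Resulting config space layout, from the offsets used above: the PCIe
 * endpoint capability sits at 0x80, the optional serial number capability
 * at 0x100 (with the DVSECs following at 0x100 + 0x0c, or at 0x100
 * directly when no serial number is set), the CDAT DOE capability at
 * 0x190, and AER at 0x200.
 */
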
static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    address_space_destroy(&ct3d->hostmem_as);
}

/* TODO: Support multiple HDM decoders and DPA skip */
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint64_t decoder_base, decoder_size, hpa_offset;
    uint32_t hdm0_ctrl;
    int ig, iw;

    decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
                    cache_mem[R_CXL_HDM_DECODER0_BASE_LO]);
    if ((uint64_t)host_addr < decoder_base) {
        return false;
    }

    hpa_offset = (uint64_t)host_addr - decoder_base;

    decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
                   cache_mem[R_CXL_HDM_DECODER0_SIZE_LO];
    if (hpa_offset >= decoder_size) {
        return false;
    }

    hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW);
    ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG);

    *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
           ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
            >> iw);

    return true;
}
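
/*
 * Example decode (illustrative numbers): with IG = 0 (256 B interleave
 * granularity, i.e. 256 << IG bytes) and IW = 1 (2-way interleave, shift
 * by IW), an hpa_offset of 0x300 keeps its low 8 bits (0x00) and drops
 * bit 8, which selected this device's position in the interleave set,
 * giving (0x300 & ~0x1ff) >> 1 = 0x100. Offsets 0x200-0x2ff land on the
 * other device in the set; this decoder assumes routing already chose us.
 */
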
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset;
    MemoryRegion *mr;

    /* TODO support volatile region */
    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        return MEMTX_ERROR;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
        return MEMTX_ERROR;
    }

    if (dpa_offset >= int128_get64(mr->size)) {
        return MEMTX_ERROR;
    }

    return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data,
                              size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset;
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        return MEMTX_OK;
    }

    /* Writes that do not decode to a valid DPA are silently dropped */
    if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
        return MEMTX_OK;
    }

    if (dpa_offset >= int128_get64(mr->size)) {
        return MEMTX_OK;
    }

    return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs,
                               &data, size);
}

static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk,
                                       CXL2_TYPE3_DEVICE);
    cxl_device_register_init_common(&ct3d->cxl_dstate);
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
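
/*
 * Plausible command line wiring for these properties, in the spirit of
 * docs/system/devices/cxl.rst (bus, slot and id values are examples only):
 *
 *  -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M
 *  -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M
 *  -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1
 *  -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2
 *  -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0
 */
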
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM, if the guest is not allowed to exit gracefully,
     * label updates will get lost.
     */
}

/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header) {
            /* The loop above stopped before consuming every DWORD */
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
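
/*
 * Example QMP invocation (the command and enum spellings are assumed to
 * follow the QAPI definitions generated from qapi/cxl.json, with the usual
 * underscore-to-hyphen convention):
 *
 * { "execute": "cxl-inject-uncorrectable-errors",
 *   "arguments": {
 *     "path": "/machine/peripheral/cxl-pmem0",
 *     "errors": [ { "type": "cache-data-parity", "header": [ 3, 4 ] } ] } }
 */
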
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
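
/*
 * Example QMP invocation (again assuming the qapi/cxl.json spellings):
 *
 * { "execute": "cxl-inject-correctable-error",
 *   "arguments": { "path": "/machine/peripheral/cxl-pmem0",
 *                  "type": "mem-data-ecc" } }
 */
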
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL PMEM Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);