/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

/* CXL r3.1 Section 8.2.4.20.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
    switch (count) {
    case 1: return 0;
    case 2: return 1;
    case 4: return 2;
    case 6: return 3;
    case 8: return 4;
    case 10: return 5;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 12: return 6;
    case 14: return 7;
    case 16: return 8;
    case 20: return 9;
    case 24: return 10;
    case 28: return 11;
    case 32: return 12;
    }
    return 0;
}

int cxl_decoder_count_dec(int enc_cnt)
{
    switch (enc_cnt) {
    case 0: return 1;
    case 1: return 2;
    case 2: return 4;
    case 3: return 6;
    case 4: return 8;
    case 5: return 10;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 6: return 12;
    case 7: return 14;
    case 8: return 16;
    case 9: return 20;
    case 10: return 24;
    case 11: return 28;
    case 12: return 32;
    }
    return 0;
}

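/*
 * Decode the Interleave Granularity (IG) field of an HDM decoder control
 * register (CXL r3.1 Section 8.2.4.20.7): the granularity in bytes is
 * 2^(ig + 8), so ig = 0 means 256 bytes and ig = 6 means 16 KiB.
 */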
hwaddr cxl_decode_ig(int ig)
{
    return 1ULL << (ig + 8);
}

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    switch (size) {
    case 4:
        if (cregs->special_ops && cregs->special_ops->read) {
            return cregs->special_ops->read(cxl_cstate, offset, 4);
        } else {
            QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
            return cregs->cache_mem_registers[offset / 4];
        }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}

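/*
 * Minimal model of the HDM decoder commit flow: a write that sets or clears
 * COMMIT takes effect immediately, so COMMITTED is updated and ERR cleared
 * in the same register write rather than after any delay.
 */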
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
    case A_CXL_HDM_DECODER1_CTRL:
    case A_CXL_HDM_DECODER2_CTRL:
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset,
                                    uint64_t value, unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    switch (size) {
    case 4: {
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_regs_write_mask) != 4);
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
        mask = cregs->cache_mem_regs_write_mask[offset / 4];
        value &= mask;
        /* RO bits should remain constant. Done by reading existing value */
        value |= ~mask & cregs->cache_mem_registers[offset / 4];
        if (cregs->special_ops && cregs->special_ops->write) {
            cregs->special_ops->write(cxl_cstate, offset, value, size);
            return;
        }

        if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
            offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
            dumb_hdm_handler(cxl_cstate, offset, value);
        } else {
            cregs->cache_mem_registers[offset / 4] = value;
        }
        return;
    }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}

/*
 * CXL r3.1 Section 8.2.3: Component Register Layout and Definition
 * The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 * Component Registers.
 *
 * CXL r3.1 Section 8.2.2: Accessing Component Registers
 * • A 32 bit register shall be accessed as a 4 Byte quantity. Partial
 *   reads are not permitted.
 * • A 64 bit register shall be accessed as an 8 Byte quantity. Partial
 *   reads are not permitted.
 *
 * As of the spec defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The io registers control the link, which we don't care about in QEMU */
    memory_region_init_io(&cregs->io, obj, NULL, NULL, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cxl_cstate,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}

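/*
 * Initial values and write masks for the RAS capability registers. Bits not
 * set in write_msk are presented read-only by cxl_cache_mem_write_reg().
 */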
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but given bits are not yet set, it can
     * be handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}

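/*
 * Advertise a fixed set of HDM decoder capabilities and mark the per-decoder
 * base/size/control/target-list registers writable for each decoder.
 */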
static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = CXL_HDM_DECODER_COUNT;
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     UIO_DECODER_COUNT, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     SUPPORTED_COHERENCY_MODEL, 0); /* Unknown */
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}

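/*
 * Build the component register state for a given component type: the
 * capability headers first, then each capability's register block and
 * write masks.
 */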
void cxl_component_register_init_common(uint32_t *reg_state,
                                        uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
    case CXL2_RC:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION,
                     CXL_CAPABILITY_VERSION);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

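/*
 * Fill in one capability header register: the capability ID, its version,
 * and the pointer (offset) to that capability's register block.
 */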
#define init_cap_reg(reg, id, version)                                        \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    switch (type) {
    case CXL2_DEVICE:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
    case CXL2_ROOT_PORT:
    case CXL2_UPSTREAM_PORT:
    case CXL2_DOWNSTREAM_PORT:
        init_cap_reg(RAS, 2, CXL_RAS_CAPABILITY_VERSION);
        ras_init_common(reg_state, write_msk);
        break;
    default:
        break;
    }

    init_cap_reg(LINK, 4, CXL_LINK_CAPABILITY_VERSION);

    if (caps < 3) {
        return;
    }

    if (type != CXL2_ROOT_PORT) {
        init_cap_reg(HDM, 5, CXL_HDM_CAPABILITY_VERSION);
        hdm_init_common(reg_state, write_msk, type);
    }

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, CXL_EXTSEC_CAP_VERSION);
    init_cap_reg(SNOOP, 8, CXL_SNOOP_CAP_VERSION);

#undef init_cap_reg
}

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Cntrl RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExt, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExt, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high)] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 2] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 3] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high)] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 2] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 3] =
            0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are RW1CS bits in the status register, but they are never set */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

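/*
 * Illustrative use (the length/revision values are the caller's, not fixed
 * by this helper): a Type 3 device might register its device DVSEC with
 *     cxl_component_create_dvsec(cxl, CXL2_TYPE3_DEVICE, len,
 *                                PCIE_CXL_DEVICE_DVSEC, rev,
 *                                (uint8_t *)&dvsec);
 * where dvsec is a fully populated CXLDVSECDevice.
 */
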
/* CXL r3.1 Section 8.2.4.20.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp)
{
    switch (iw_enc) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 8;
    case 0x4: return 16;
    case 0x8: return 3;
    case 0x9: return 6;
    case 0xa: return 12;
    default:
        error_setg(errp, "Encoded interleave ways: %d not supported", iw_enc);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}
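
/*
 * The inverse of the granularity encoding above is cxl_decode_ig():
 * gran = 1ULL << (ig + 8).
 */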