/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h" /* needed for qemu_log_mask() below */
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0
/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *  2. Implement the handler
 *    static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                  CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 * Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and
 *    the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), setting the length, and returning a valid
 *    return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out
 * of a register interface that already deals with it.
 */
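/*
 * Illustration only (not a command wired up in this file): a minimal
 * handler with no output payload, following the conventions above,
 * could look like:
 *
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *     {
 *         *len_out = 0;
 *         return CXL_MBOX_SUCCESS;
 *     }
 */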
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS   0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};
/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
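/*
 * Note: pl_length is a 24-bit little-endian byte count of 'payload';
 * see the manual decode and the st24_le_p() encode in
 * cmd_tunnel_management_cmd() below.
 */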
/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * Target of a tunnel unfortunately depends on the type of CCI reading
     * the message.
     * If in a switch, then it's the port number.
     * If in an MLD it is the ld number.
     * If in an MHD the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
                in->ccimessage.pl_length[1] << 8 |
                in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. Rest of CCI header needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
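/*
 * Wire layout sketch for the tunnel request above: a small tunnel header
 * (port or LD id, target type, embedded message size) immediately followed
 * by the embedded CXLCCIMessage. The response wraps the inner response
 * payload the same way, prefixed by resp_len.
 */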
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
               CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}
static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}
static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}
static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}
/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Not yet support multiple VCS - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsvd1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsvd;
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports >
        cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}
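/*
 * Status byte layout used above: bit 0 is 'background operation in
 * progress' and bits [7:1] carry the percentage complete, matching the
 * complete_pct << 1 encoding.
 */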
#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */
/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT   128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4

static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
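/*
 * Typical partitioned flow implied by the checks above: INIT at offset 0,
 * one or more CONTINUE parts at increasing 128-byte-aligned offsets, then
 * END selecting the destination slot. FULL transfers the whole package in
 * one action and ignores the offset field.
 */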
static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}
/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};
/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}
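/*
 * Each CEL entry is four bytes (a 16-bit opcode plus a 16-bit Command
 * Effect field), hence the 4 * cci->cel_size log size reported above.
 */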
/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->offset + get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)
enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/* Get Feature CXL 3.1 Spec 8.2.9.6.2 */
/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};
/* Set Feature CXL 3.1 Spec 8.2.9.6.3 */
/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK   0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)
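/*
 * Example (illustrative only): a single-shot Set Feature whose data should
 * persist across reset would set 'flags' to
 * CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER |
 * CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET.
 */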
/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = CXL_ECS_NUM_MEDIA_FRUS *
                                     sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = CXL_ECS_NUM_MEDIA_FRUS *
                                     sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= CXL_ECS_NUM_MEDIA_FRUS *
                                   sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = CXL_ECS_NUM_MEDIA_FRUS *
                        sizeof(CXLMemECSReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid,
                                  &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;
        memcpy((uint8_t *)ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs[count].ecs_log_cap =
                    ct3d->ecs_wr_attrs[count].ecs_log_cap;
                ct3d->ecs_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}
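/*
 * The transfer flags above form a small state machine mirroring Transfer
 * FW: INITIATE latches the UUID in set_feat_info, CONTINUE appends data at
 * increasing offsets, and FULL/FINISH/ABORT clear the saved state again.
 */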
/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
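/*
 * Note: the capacity fields above are reported in units of
 * CXL_CAPACITY_MULTIPLIER (256 MiB), hence the divisions.
 */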
/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (!len_in) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}
/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}
/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media
 * Disabled error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* EBUSY other bg cmds as of now */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}
static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length,
                 (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    if (scan_media_running(cci)) {
        out->flags |= (1 << 2);
    }

    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
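/*
 * Record encoding used above: the low 3 bits of 'addr' carry the poison
 * source type and 'length' is in units of 64-byte cache lines, so a single
 * poisoned line at DPA 0x1000 reads back as addr = 0x1000 | type,
 * length = 1.
 */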
/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);
    p->start = dpa;
    p->length = CXL_CACHE_LINE_SIZE;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
                                    ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for contained in entry. Simpler than general case
         * as clearing 64 bytes and entries 64 byte aligned
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added, free original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}
static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}
/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
        cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits, all entries are within the same address range of the
     * last scan media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length,
                 (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returning entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
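
/*
 * Worked payload-size example (illustrative, assuming the reserved-field
 * sizes above): the fixed header is 8 bytes and each region record 40
 * bytes, so a two-region response occupies 8 + 2 * 40 = 88 bytes, with the
 * 16-byte extent/tag trailer (extra_out) appended immediately after the
 * records for a total out_pl_len of 104 bytes.
 */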
/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);
            out_rec++;

            record_done++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
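
/*
 * Illustrative host-side flow (an assumption about usage): the host pages
 * through the list by advancing start_extent_id by the returned count, and
 * compares generation_num across calls; a change indicates the extent list
 * was modified mid-walk, so the walk should restart from extent 0.
 */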
/*
 * Check whether any bit between addr[nr, nr+size) is set,
 * return true if any bit is set, otherwise return false
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}
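
/*
 * Example (illustrative): with a bitmap in which only bit 5 is set,
 * test_any_bits_set(map, 0, 5) is false since bits 0..4 are clear, while
 * test_any_bits_set(map, 4, 2) is true because the window covering bits
 * 4..5 includes the set bit.
 */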
CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}
void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}
/*
 * Add a new extent to the extent "group" if group exists;
 * otherwise, create a new group
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}
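
/*
 * Note on ordering (an interpretation of the flow above): each group holds
 * the extents offered by one Add Capacity event, and groups are queued and
 * retired strictly front-first, so host responses are matched against
 * offers in FIFO order.
 */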
/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;
/*
 * For the extents in the extent list to be operated on, check whether they
 * are valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 * size of the region;
 * 4. The address range of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* the dpa range already covered by some other extents in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}
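
/*
 * Worked example of the overlap check above (illustrative): with one region
 * at base 0 and a 2 MiB block size, an input list of [0, len 8 MiB)
 * followed by [4 MiB, len 4 MiB) sets bitmap bits 0-3 for the first extent,
 * then finds bits 2-3 already set for the second, so the list is rejected
 * with CXL_MBOX_INVALID_EXTENT_LIST.
 */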
static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    /* Adding the extents would exceed the device's extent tracking ability */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}
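
/*
 * Note on the num_entries_updated == 0 path above (an interpretation of
 * the spec flow): the host may decline all extents offered by an event;
 * the device still retires the first pending extent group so that
 * subsequent offers are not blocked behind the declined one.
 */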
/*
 * Copy extent list from src to dst
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}
static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can occur */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}
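
/*
 * Worked example of the split logic above (illustrative): releasing
 * [1 MiB, len 2 MiB) out of an accepted extent [0, len 4 MiB) removes the
 * original extent (cnt_delta - 1) and re-inserts the two remainders
 * [0, len 1 MiB) and [3 MiB, len 1 MiB) (cnt_delta + 2), a net gain of one
 * extent; the dry run only fails if the resulting count would exceed
 * CXL_NUM_EXTENTS_SUPPORTED.
 */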
/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run release passes, the returned updated_list is the
     * post-release extent list: clear the accepted list, copy the extents
     * from updated_list into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
                         8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
                              0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
                                  cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
                                cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
                                cmd_features_set_feature,
                                ~0,
                                (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
                                 CXL_MBOX_IMMEDIATE_DATA_CHANGE |
                                 CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
                                 CXL_MBOX_IMMEDIATE_LOG_CHANGE |
                                 CXL_MBOX_SECURITY_STATE_CHANGE) },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION) },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};
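
/*
 * Each table entry is { name, handler, in_len, effect }: in_len is the
 * exact input payload size enforced by cxl_process_cci_message() (~0 means
 * any length, left for the handler to validate) and effect carries the CEL
 * effect flags, e.g. CXL_MBOX_BACKGROUND_OPERATION for commands that
 * complete asynchronously via bg_timercb().
 */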
static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};
static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
                         CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};
/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL
int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
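
/*
 * For background-capable commands the immediate return value is
 * CXL_MBOX_BG_STARTED; the final status is published later by bg_timercb()
 * through bg.ret_code / bg.complete_pct and, when MSI/MSI-X is enabled, an
 * interrupt on completion.
 */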
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
            break;
        }
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        cci->bg.complete_pct = 100 * now / total_time;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}
static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}
void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
}
static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}
static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}