// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"
static bool cxl_raw_allow_all;
/*
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */
#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &cxl_mem_commands[0];                                     \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)
#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
	.info =	{                                                              \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
	.opcode = CXL_MBOX_OP_##_id,                                           \
	.flags = _flags,                                                       \
	}

#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
	CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
	CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
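/*
 * For reference, a sketch of what one table entry above expands to once
 * CXL_CMD() is applied (illustrative only; the layout follows the macro
 * definition above):
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	},
 */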
/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
	CXL_MBOX_OP_GET_POISON,
	CXL_MBOX_OP_INJECT_POISON,
	CXL_MBOX_OP_CLEAR_POISON,
};
/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};
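/*
 * Mailbox opcodes encode their command set in the high byte, so membership in
 * a security command set is determined by comparing (opcode >> 8) against the
 * table above. For example, the Get Security State opcode (0x4500 per the CXL
 * spec) maps to command set 0x45.
 */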
static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
					 u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_SANITIZE:
		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_UNLOCK:
		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
			security->enabled_cmds);
		break;
	default:
		break;
	}
}
static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
		return true;

	return false;
}
static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
				       u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_POISON:
		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
		break;
	default:
		break;
	}
}
static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}
static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}
/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxl_mbox: CXL mailbox context
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully even though the device itself
 * reported an error. While this distinction can be useful for commands from
 * userspace, the kernel will only be able to use results when both are
 * successful.
 */
int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxl_mbox->payload_size ||
	    mbox_cmd->size_out > cxl_mbox->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	/*
	 * EIO is reserved for a payload size mismatch and mbox_send()
	 * may not return this error.
	 */
	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
		return -ENXIO;
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
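/*
 * A minimal usage sketch for the helper above, mirroring its internal callers
 * later in this file (cxl_dev_state_identify(), cxl_set_timestamp(), etc.);
 * the identifiers are those callers' own, not a new API:
 *
 *	struct cxl_mbox_identify id;
 *	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.size_out = sizeof(id),
 *		.payload_out = &id,
 *	};
 *	int rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 */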
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}
/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	case CXL_MBOX_OP_CLEAR_LOG: {
		const uuid_t *uuid = (uuid_t *)payload_in;

		/*
		 * Restrict the 'Clear log' action to only apply to
		 * Vendor debug logs.
		 */
		return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
	}
	default:
		break;
	}
	return true;
}
static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_memdev_state *mds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = cxl_mbox->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}
static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxl_mbox->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}
static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_memdev_state *mds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, mds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, mds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}
/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @mds: The driver data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_memdev_state *mds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxl_mbox->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}
int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		struct cxl_command_info info = cmd->info;

		if (test_bit(info.id, mds->enabled_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
		if (test_bit(info.id, mds->exclusive_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @mds: The driver data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct device *dev = mds->cxlds.dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this it will have to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
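/*
 * For context, a sketch of the expected userspace flow against the two ioctls
 * serviced above, assuming a memdev character device node such as
 * /dev/cxl/mem0 (illustrative only, not part of this file):
 *
 *	struct cxl_mem_query_commands *q;	// for CXL_MEM_QUERY_COMMANDS
 *	struct cxl_send_command send = { ... };	// for CXL_MEM_SEND_COMMAND
 *
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// discover enabled commands
 *	ioctl(fd, CXL_MEM_SEND_COMMAND, &send);	// dispatch one of them
 */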
static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
			u32 *size, u8 *out)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	u32 remaining = *size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);

		/*
		 * The output payload length that indicates the number
		 * of valid bytes can be smaller than the Log buffer
		 * size.
		 */
		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
			offset += mbox_cmd.size_out;
			break;
		}

		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	*size = offset;

	return 0;
}
/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	struct device *dev = mds->cxlds.dev;
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
		int enabled = 0;

		if (cmd) {
			set_bit(cmd->info.id, mds->enabled_cmds);
			enabled++;
		}

		if (cxl_is_poison_command(opcode)) {
			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
			enabled++;
		}

		if (cxl_is_security_command(opcode)) {
			cxl_set_security_cmd_enabled(&mds->security, opcode);
			enabled++;
		}

		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
			enabled ? "enabled" : "unsupported by driver");
	}
}
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxl_mbox->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}
/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};
/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = mds->cxlds.dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(mds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(mds, &uuid, &size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(mds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, mds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
			    enum cxl_event_log_type type,
			    enum cxl_event_type event_type,
			    const uuid_t *uuid, union cxl_event *evt)
{
	if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
		return;
	}
	if (event_type == CXL_CPER_EVENT_GENERIC) {
		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
		return;
	}

	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
		u64 dpa, hpa = ULLONG_MAX;
		struct cxl_region *cxlr;

		/*
		 * These trace points are annotated with HPA and region
		 * translations. Take topology mutation locks and lookup
		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
		 */
		guard(rwsem_read)(&cxl_region_rwsem);
		guard(rwsem_read)(&cxl_dpa_rwsem);

		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
		cxlr = cxl_dpa_to_region(cxlmd, dpa);
		if (cxlr)
			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);

		if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
						&evt->gen_media);
		else if (event_type == CXL_CPER_EVENT_DRAM)
			trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
				     enum cxl_event_log_type type,
				     struct cxl_event_record_raw *record)
{
	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
	const uuid_t *uuid = &record->id;

	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
		ev_type = CXL_CPER_EVENT_DRAM;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
		ev_type = CXL_CPER_EVENT_MEM_MODULE;

	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
static int cxl_clear_event_record(struct cxl_memdev_state *mds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > cxl_mbox->payload_size) {
		max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
				sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
		struct cxl_event_generic *gen = &raw->event.generic;

		payload->handles[i++] = gen->hdr.handle;
		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}
static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
				    enum cxl_event_log_type type)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
	struct device *dev = mds->cxlds.dev;
	struct cxl_get_event_payload *payload;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&mds->event.log_lock);
	payload = mds->event.buf;

	do {
		int i, rc;
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
			.payload_in = &log_type,
			.size_in = sizeof(log_type),
			.payload_out = payload,
			.size_out = cxl_mbox->payload_size,
			.min_out = struct_size(payload, records, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			__cxl_event_trace_record(cxlmd, type,
						 &payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlmd, type, payload);

		rc = cxl_clear_event_record(mds, type, payload);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&mds->event.log_lock);
}
/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc)
		return rc;

	mds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}
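/*
 * Note: the Get Partition Info payload expresses capacities in multiples of
 * CXL_CAPACITY_MULTIPLIER (256MB per the CXL spec), so a returned
 * active_volatile_cap of 4, for example, corresponds to 4 * 256MB = 1GB of
 * volatile capacity after the conversion above.
 */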
/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	u32 val;
	int rc;

	if (!mds->cxlds.media_ready)
		return 0;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	mds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(mds->firmware_version, id.fw_revision,
	       sizeof(id.fw_revision));

	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
		val = get_unaligned_le24(id.poison_list_max_mer);
		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	int rc;
	u32 sec_out = 0;
	struct cxl_get_security_output {
		__le32 flags;
	} out;
	struct cxl_mbox_cmd sec_cmd = {
		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
		.payload_out = &out,
		.size_out = sizeof(out),
	};
	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };

	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
		return rc;
	}

	/*
	 * Prior to using these commands, any security applied to
	 * the user data areas of the device shall be DISABLED (or
	 * UNLOCKED for secure erase case).
	 */
	sec_out = le32_to_cpu(out.flags);
	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
		return -EINVAL;

	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
		return rc;
	}

	return 0;
}
/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or for invalid contexts, or
 * -EBUSY if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *endpoint;
	int rc;

	/* synchronize with cxl_mem_probe() and decoder write operations */
	guard(device)(&cxlmd->dev);
	endpoint = cxlmd->endpoint;
	down_read(&cxl_region_rwsem);
	/*
	 * Require an endpoint to be safe otherwise the driver can not
	 * be sure that the device is unmapped.
	 */
	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
		rc = __cxl_mem_sanitize(mds, cmd);
	else
		rc = -EBUSY;
	up_read(&cxl_region_rwsem);

	return rc;
}
static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}
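/*
 * A small worked example for the helper above, under assumed capacities:
 * with a 1GB volatile-only partition and a 1GB persistent-only partition,
 * cxl_mem_create_range_info() below ends up carving dpa_res as
 *
 *	dpa_res:  [0x00000000 - 0x7fffffff]
 *	ram_res:  [0x00000000 - 0x3fffffff]	("ram",  first 1GB)
 *	pmem_res: [0x40000000 - 0x7fffffff]	("pmem", next 1GB)
 *
 * i.e. "ram" always starts at DPA 0 and "pmem" starts where "ram" ends.
 */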
int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	int rc;

	if (!cxlds->media_ready) {
		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
		cxlds->ram_res = DEFINE_RES_MEM(0, 0);
		cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
		return 0;
	}

	cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);

	if (mds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 mds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   mds->volatile_only_bytes,
				   mds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(mds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 mds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   mds->active_volatile_bytes,
			   mds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_timestamp_in pi;
	int rc;

	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
		.size_in = sizeof(pi),
		.payload_in = &pi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	/*
	 * Command is optional. Devices may have another way of providing
	 * a timestamp, or may return all 0s in timestamp fields.
	 * Don't report an error if this command isn't supported.
	 */
	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
		return rc;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
		       struct cxl_region *cxlr)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_mbox_poison_out *po;
	struct cxl_mbox_poison_in pi;
	int nr_records = 0;
	int rc;

	rc = mutex_lock_interruptible(&mds->poison.lock);
	if (rc)
		return rc;

	po = mds->poison.list_out;
	pi.offset = cpu_to_le64(offset);
	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

	do {
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
			.opcode = CXL_MBOX_OP_GET_POISON,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = cxl_mbox->payload_size,
			.payload_out = po,
			.min_out = struct_size(po, record, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			break;

		for (int i = 0; i < le16_to_cpu(po->count); i++)
			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
					 po->flags, po->overflow_ts,
					 CXL_POISON_TRACE_LIST);

		/* Protect against an uncleared _FLAG_MORE */
		nr_records = nr_records + le16_to_cpu(po->count);
		if (nr_records >= mds->poison.max_errors) {
			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
				nr_records);
			break;
		}
	} while (po->flags & CXL_POISON_FLAG_MORE);

	mutex_unlock(&mds->poison.lock);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
static void free_poison_buf(void *buf)
{
	kvfree(buf);
}
/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mds->poison.list_out)
		return -ENOMEM;

	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
					mds->poison.list_out);
}
int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
	int rc;

	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
		return 0;

	rc = cxl_poison_alloc_buf(mds);
	if (rc) {
		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
		return rc;
	}

	mutex_init(&mds->poison.lock);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
	if (!cxl_mbox || !host)
		return -EINVAL;

	cxl_mbox->host = host;
	mutex_init(&cxl_mbox->mbox_mutex);
	rcuwait_init(&cxl_mbox->mbox_wait);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL);
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
	struct cxl_memdev_state *mds;

	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
	if (!mds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&mds->event.log_lock);
	mds->cxlds.dev = dev;
	mds->cxlds.reg_map.host = dev;
	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;

	return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}
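/*
 * Note: with CONFIG_CXL_MEM_RAW_COMMANDS enabled, the knob registered above is
 * expected to appear under debugfs (typically
 * /sys/kernel/debug/cxl/mbox/raw_allow_all, assuming the default debugfs mount
 * point). Writing 1 to it bypasses the cxl_disabled_raw_commands[] list and the
 * security-command filter in cxl_mem_raw_command_allowed(), but not the kernel
 * lockdown check.
 */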