1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <cxl/mailbox.h>
12 #include <linux/unaligned.h>
13 #include <crypto/sha2.h>
18 #define LSA_SIZE SZ_128K
19 #define FW_SIZE SZ_64M
21 #define DEV_SIZE SZ_2G
22 #define EFFECT(x) (1U << x)
24 #define MOCK_INJECT_DEV_MAX 8
25 #define MOCK_INJECT_TEST_MAX 128
27 static unsigned int poison_inject_dev_max
= MOCK_INJECT_DEV_MAX
;
29 enum cxl_command_effects
{
30 CONF_CHANGE_COLD_RESET
= 0,
31 CONF_CHANGE_IMMEDIATE
,
32 DATA_CHANGE_IMMEDIATE
,
33 POLICY_CHANGE_IMMEDIATE
,
35 SECURITY_CHANGE_IMMEDIATE
,
37 SECONDARY_MBOX_SUPPORTED
,
40 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
42 static struct cxl_cel_entry mock_cel
[] = {
44 .opcode
= cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS
),
45 .effect
= CXL_CMD_EFFECT_NONE
,
48 .opcode
= cpu_to_le16(CXL_MBOX_OP_IDENTIFY
),
49 .effect
= CXL_CMD_EFFECT_NONE
,
52 .opcode
= cpu_to_le16(CXL_MBOX_OP_GET_LSA
),
53 .effect
= CXL_CMD_EFFECT_NONE
,
56 .opcode
= cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO
),
57 .effect
= CXL_CMD_EFFECT_NONE
,
60 .opcode
= cpu_to_le16(CXL_MBOX_OP_SET_LSA
),
61 .effect
= cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE
) |
62 EFFECT(DATA_CHANGE_IMMEDIATE
)),
65 .opcode
= cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO
),
66 .effect
= CXL_CMD_EFFECT_NONE
,
69 .opcode
= cpu_to_le16(CXL_MBOX_OP_GET_POISON
),
70 .effect
= CXL_CMD_EFFECT_NONE
,
73 .opcode
= cpu_to_le16(CXL_MBOX_OP_INJECT_POISON
),
74 .effect
= cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE
)),
77 .opcode
= cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON
),
78 .effect
= cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE
)),
81 .opcode
= cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO
),
82 .effect
= CXL_CMD_EFFECT_NONE
,
85 .opcode
= cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW
),
86 .effect
= cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET
) |
87 EFFECT(BACKGROUND_OP
)),
90 .opcode
= cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW
),
91 .effect
= cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET
) |
92 EFFECT(CONF_CHANGE_IMMEDIATE
)),
95 .opcode
= cpu_to_le16(CXL_MBOX_OP_SANITIZE
),
96 .effect
= cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE
) |
97 EFFECT(SECURITY_CHANGE_IMMEDIATE
) |
98 EFFECT(BACKGROUND_OP
)),
102 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
103 struct cxl_mbox_health_info
{
109 __le32 dirty_shutdowns
;
110 __le32 volatile_errors
;
115 struct cxl_mbox_get_supported_logs gsl
;
116 struct cxl_gsl_entry entry
;
117 } mock_gsl_payload
= {
119 .entries
= cpu_to_le16(1),
122 .uuid
= DEFINE_CXL_CEL_UUID
,
123 .size
= cpu_to_le32(sizeof(mock_cel
)),
127 #define PASS_TRY_LIMIT 3
129 #define CXL_TEST_EVENT_CNT_MAX 15
131 /* Set a number of events to return at a time for simulation. */
132 #define CXL_TEST_EVENT_RET_MAX 4
134 struct mock_event_log
{
140 struct cxl_event_record_raw
*events
[CXL_TEST_EVENT_CNT_MAX
];
143 struct mock_event_store
{
144 struct mock_event_log mock_logs
[CXL_EVENT_TYPE_MAX
];
148 struct cxl_mockmem_data
{
155 u8 user_pass
[NVDIMM_PASSPHRASE_LEN
];
156 u8 master_pass
[NVDIMM_PASSPHRASE_LEN
];
159 struct mock_event_store mes
;
160 struct cxl_memdev_state
*mds
;
163 unsigned long sanitize_timeout
;
/*
 * Look up the mock event log for @log_type in the device's drvdata.
 * Rejects out-of-range types; the rejection branch (presumably
 * "return NULL") is truncated in this extract -- verify against the
 * full file.
 */
166 static struct mock_event_log
*event_find_log(struct device
*dev
, int log_type
)
168 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
170 if (log_type
>= CXL_EVENT_TYPE_MAX
)
172 return &mdata
->mes
.mock_logs
[log_type
];
/* Return the event record at the log's current read cursor (cur_idx). */
175 static struct cxl_event_record_raw
*event_get_current(struct mock_event_log
*log
)
177 return log
->events
[log
->cur_idx
];
/*
 * Rewind a mock log for re-reading: restore the overflow count to its
 * snapshot taken at overflow time. NOTE(review): cursor resets
 * (cur_idx/clear_idx) appear to be in lines missing from this extract
 * -- confirm against the full file.
 */
180 static void event_reset_log(struct mock_event_log
*log
)
184 log
->nr_overflow
= log
->overflow_reset
;
/* 1-based handle of the next record eligible for clearing. */
187 /* Handle can never be 0 use 1 based indexing for handle */
188 static u16
event_get_clear_handle(struct mock_event_log
*log
)
190 return log
->clear_idx
+ 1;
/* 1-based handle of the record at the read cursor, little-endian encoded. */
193 /* Handle can never be 0 use 1 based indexing for handle */
194 static __le16
event_get_cur_event_handle(struct mock_event_log
*log
)
196 u16 cur_handle
= log
->cur_idx
+ 1;
198 return cpu_to_le16(cur_handle
);
/* True when the read cursor has consumed every stored event. */
201 static bool event_log_empty(struct mock_event_log
*log
)
203 return log
->cur_idx
== log
->nr_events
;
/*
 * Append @event to the @log_type log in @mes. When the log is already
 * at CXL_TEST_EVENT_CNT_MAX the event is accounted as an overflow
 * instead of stored (overflow bookkeeping lines are partially
 * truncated in this extract).
 */
206 static void mes_add_event(struct mock_event_store
*mes
,
207 enum cxl_event_log_type log_type
,
208 struct cxl_event_record_raw
*event
)
210 struct mock_event_log
*log
;
212 if (WARN_ON(log_type
>= CXL_EVENT_TYPE_MAX
))
215 log
= &mes
->mock_logs
[log_type
];
/* Overflow path: snapshot the pre-increment overflow count so the log
 * can be reset for a later re-read. */
217 if ((log
->nr_events
+ 1) > CXL_TEST_EVENT_CNT_MAX
) {
219 log
->overflow_reset
= log
->nr_overflow
;
223 log
->events
[log
->nr_events
] = event
;
228 * Vary the number of events returned to simulate events occurring while the
229 * logs are being read.
231 static int ret_limit
= 0;
/*
 * Mock CXL Get Event Records (opcode 0x0100). Copies up to ret_limit
 * records from the requested log into cmd->payload_out, sets the MORE
 * and OVERFLOW flags, and fabricates overflow timestamps (5s/1s ago).
 * ret_limit cycles 1..CXL_TEST_EVENT_RET_MAX-1 across calls to
 * simulate events arriving while logs are read.
 */
233 static int mock_get_event(struct device
*dev
, struct cxl_mbox_cmd
*cmd
)
235 struct cxl_get_event_payload
*pl
;
236 struct mock_event_log
*log
;
/* Input payload is a single u8 log type selector. */
241 if (cmd
->size_in
!= sizeof(log_type
))
244 ret_limit
= (ret_limit
+ 1) % CXL_TEST_EVENT_RET_MAX
;
/* Caller's output buffer must hold ret_limit records. */
248 if (cmd
->size_out
< struct_size(pl
, records
, ret_limit
))
251 log_type
= *((u8
*)cmd
->payload_in
);
252 if (log_type
>= CXL_EVENT_TYPE_MAX
)
/* Zero the fixed-size header before filling in records. */
255 memset(cmd
->payload_out
, 0, struct_size(pl
, records
, 0));
257 log
= event_find_log(dev
, log_type
);
258 if (!log
|| event_log_empty(log
))
261 pl
= cmd
->payload_out
;
/* Copy records and stamp each with its 1-based handle. */
263 for (i
= 0; i
< ret_limit
&& !event_log_empty(log
); i
++) {
264 memcpy(&pl
->records
[i
], event_get_current(log
),
265 sizeof(pl
->records
[i
]));
266 pl
->records
[i
].event
.generic
.hdr
.handle
=
267 event_get_cur_event_handle(log
);
271 cmd
->size_out
= struct_size(pl
, records
, i
);
272 pl
->record_count
= cpu_to_le16(i
);
273 if (!event_log_empty(log
))
274 pl
->flags
|= CXL_GET_EVENT_FLAG_MORE_RECORDS
;
/* Report overflow state with synthetic first/last timestamps. */
276 if (log
->nr_overflow
) {
279 pl
->flags
|= CXL_GET_EVENT_FLAG_OVERFLOW
;
/* NOTE(review): no local "nr_overflow" is visible in this extract;
 * this looks like it should be log->nr_overflow -- confirm against
 * the full file. */
280 pl
->overflow_err_count
= cpu_to_le16(nr_overflow
);
281 ns
= ktime_get_real_ns();
282 ns
-= 5000000000; /* 5s ago */
283 pl
->first_overflow_timestamp
= cpu_to_le64(ns
);
284 ns
= ktime_get_real_ns();
285 ns
-= 1000000000; /* 1s ago */
286 pl
->last_overflow_timestamp
= cpu_to_le64(ns
);
/*
 * Mock CXL Clear Event Records (opcode 0x0101). Validates that the
 * host clears only handles it has been given, and in order, then
 * advances clear_idx and drops any pending overflow count.
 */
292 static int mock_clear_event(struct device
*dev
, struct cxl_mbox_cmd
*cmd
)
294 struct cxl_mbox_clear_event_payload
*pl
= cmd
->payload_in
;
295 struct mock_event_log
*log
;
296 u8 log_type
= pl
->event_log
;
300 if (log_type
>= CXL_EVENT_TYPE_MAX
)
303 log
= event_find_log(dev
, log_type
);
305 return 0; /* No mock data in this log */
308 * This check is technically not invalid per the specification AFAICS.
309 * (The host could 'guess' handles and clear them in order).
310 * However, this is not good behavior for the host so test it.
/* Reject clearing more records than have been handed out. */
312 if (log
->clear_idx
+ pl
->nr_recs
> log
->cur_idx
) {
314 "Attempting to clear more events than returned!\n");
318 /* Check handle order prior to clearing events */
319 for (nr
= 0, handle
= event_get_clear_handle(log
);
322 if (handle
!= le16_to_cpu(pl
->handles
[nr
])) {
323 dev_err(dev
, "Clearing events out of order\n");
/* A successful clear also retires any recorded overflow. */
328 if (log
->nr_overflow
)
329 log
->nr_overflow
= 0;
332 log
->clear_idx
+= pl
->nr_recs
;
/*
 * Rewind every mock event log on @dev and kick the core event-record
 * fetch path, re-delivering all stored events to the driver.
 */
336 static void cxl_mock_event_trigger(struct device
*dev
)
338 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
339 struct mock_event_store
*mes
= &mdata
->mes
;
342 for (i
= CXL_EVENT_TYPE_INFO
; i
< CXL_EVENT_TYPE_MAX
; i
++) {
343 struct mock_event_log
*log
;
345 log
= event_find_log(dev
, i
);
347 event_reset_log(log
);
350 cxl_mem_get_event_records(mdata
->mds
, mes
->ev_status
);
353 struct cxl_event_record_raw maint_needed
= {
354 .id
= UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
355 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
358 .length
= sizeof(struct cxl_event_record_raw
),
359 .flags
[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED
,
360 /* .handle = Set dynamically */
361 .related_handle
= cpu_to_le16(0xa5b6),
363 .data
= { 0xDE, 0xAD, 0xBE, 0xEF },
367 struct cxl_event_record_raw hardware_replace
= {
368 .id
= UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
369 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
372 .length
= sizeof(struct cxl_event_record_raw
),
373 .flags
[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE
,
374 /* .handle = Set dynamically */
375 .related_handle
= cpu_to_le16(0xb6a5),
377 .data
= { 0xDE, 0xAD, 0xBE, 0xEF },
381 struct cxl_test_gen_media
{
383 struct cxl_event_gen_media rec
;
386 struct cxl_test_gen_media gen_media
= {
387 .id
= CXL_EVENT_GEN_MEDIA_UUID
,
391 .length
= sizeof(struct cxl_test_gen_media
),
392 .flags
[0] = CXL_EVENT_RECORD_FLAG_PERMANENT
,
393 /* .handle = Set dynamically */
394 .related_handle
= cpu_to_le16(0),
396 .phys_addr
= cpu_to_le64(0x2000),
397 .descriptor
= CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT
,
398 .type
= CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR
,
399 .transaction_type
= CXL_GMER_TRANS_HOST_WRITE
,
400 /* .validity_flags = <set below> */
407 struct cxl_test_dram
{
409 struct cxl_event_dram rec
;
412 struct cxl_test_dram dram
= {
413 .id
= CXL_EVENT_DRAM_UUID
,
417 .length
= sizeof(struct cxl_test_dram
),
418 .flags
[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED
,
419 /* .handle = Set dynamically */
420 .related_handle
= cpu_to_le16(0),
422 .phys_addr
= cpu_to_le64(0x8000),
423 .descriptor
= CXL_GMER_EVT_DESC_THRESHOLD_EVENT
,
424 .type
= CXL_GMER_MEM_EVT_TYPE_INV_ADDR
,
425 .transaction_type
= CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB
,
426 /* .validity_flags = <set below> */
431 .column
= {0xDE, 0xAD},
435 struct cxl_test_mem_module
{
437 struct cxl_event_mem_module rec
;
440 struct cxl_test_mem_module mem_module
= {
441 .id
= CXL_EVENT_MEM_MODULE_UUID
,
444 .length
= sizeof(struct cxl_test_mem_module
),
445 /* .handle = Set dynamically */
446 .related_handle
= cpu_to_le16(0),
448 .event_type
= CXL_MMER_TEMP_CHANGE
,
450 .health_status
= CXL_DHI_HS_PERFORMANCE_DEGRADED
,
451 .media_status
= CXL_DHI_MS_ALL_DATA_LOST
,
452 .add_status
= (CXL_DHI_AS_CRITICAL
<< 2) |
453 (CXL_DHI_AS_WARNING
<< 4) |
454 (CXL_DHI_AS_WARNING
<< 5),
455 .device_temp
= { 0xDE, 0xAD},
456 .dirty_shutdown_cnt
= { 0xde, 0xad, 0xbe, 0xef },
457 .cor_vol_err_cnt
= { 0xde, 0xad, 0xbe, 0xef },
458 .cor_per_err_cnt
= { 0xde, 0xad, 0xbe, 0xef },
/*
 * Mock CXL Set Timestamp: validate payload sizes (input must be
 * exactly the timestamp struct, no output) and record the host's
 * timestamp in the mock device state.
 */
463 static int mock_set_timestamp(struct cxl_dev_state
*cxlds
,
464 struct cxl_mbox_cmd
*cmd
)
466 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(cxlds
->dev
);
467 struct cxl_mbox_set_timestamp_in
*ts
= cmd
->payload_in
;
469 if (cmd
->size_in
!= sizeof(*ts
))
472 if (cmd
->size_out
!= 0)
475 mdata
->timestamp
= le64_to_cpu(ts
->timestamp
);
479 static void cxl_mock_add_event_logs(struct mock_event_store
*mes
)
481 put_unaligned_le16(CXL_GMER_VALID_CHANNEL
| CXL_GMER_VALID_RANK
,
482 &gen_media
.rec
.media_hdr
.validity_flags
);
484 put_unaligned_le16(CXL_DER_VALID_CHANNEL
| CXL_DER_VALID_BANK_GROUP
|
485 CXL_DER_VALID_BANK
| CXL_DER_VALID_COLUMN
,
486 &dram
.rec
.media_hdr
.validity_flags
);
488 mes_add_event(mes
, CXL_EVENT_TYPE_INFO
, &maint_needed
);
489 mes_add_event(mes
, CXL_EVENT_TYPE_INFO
,
490 (struct cxl_event_record_raw
*)&gen_media
);
491 mes_add_event(mes
, CXL_EVENT_TYPE_INFO
,
492 (struct cxl_event_record_raw
*)&mem_module
);
493 mes
->ev_status
|= CXLDEV_EVENT_STATUS_INFO
;
495 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &maint_needed
);
496 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
497 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
,
498 (struct cxl_event_record_raw
*)&dram
);
499 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
,
500 (struct cxl_event_record_raw
*)&gen_media
);
501 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
,
502 (struct cxl_event_record_raw
*)&mem_module
);
503 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
504 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
,
505 (struct cxl_event_record_raw
*)&dram
);
506 /* Overflow this log */
507 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
508 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
509 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
510 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
511 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
512 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
513 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
514 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
515 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
516 mes_add_event(mes
, CXL_EVENT_TYPE_FAIL
, &hardware_replace
);
517 mes
->ev_status
|= CXLDEV_EVENT_STATUS_FAIL
;
519 mes_add_event(mes
, CXL_EVENT_TYPE_FATAL
, &hardware_replace
);
520 mes_add_event(mes
, CXL_EVENT_TYPE_FATAL
,
521 (struct cxl_event_record_raw
*)&dram
);
522 mes
->ev_status
|= CXLDEV_EVENT_STATUS_FATAL
;
/*
 * Mock Get Supported Logs: copy the static single-entry (CEL) payload
 * after verifying the caller's buffer is large enough.
 */
525 static int mock_gsl(struct cxl_mbox_cmd
*cmd
)
527 if (cmd
->size_out
< sizeof(mock_gsl_payload
))
530 memcpy(cmd
->payload_out
, &mock_gsl_payload
, sizeof(mock_gsl_payload
));
531 cmd
->size_out
= sizeof(mock_gsl_payload
);
/*
 * Mock Get Log: serve a window of the static CEL (mock_cel) after
 * validating the request -- input size, requested length vs. mailbox
 * payload size and output buffer, window bounds within mock_cel, and
 * that the requested UUID is the CEL UUID.
 */
536 static int mock_get_log(struct cxl_memdev_state
*mds
, struct cxl_mbox_cmd
*cmd
)
538 struct cxl_mailbox
*cxl_mbox
= &mds
->cxlds
.cxl_mbox
;
539 struct cxl_mbox_get_log
*gl
= cmd
->payload_in
;
540 u32 offset
= le32_to_cpu(gl
->offset
);
541 u32 length
= le32_to_cpu(gl
->length
);
542 uuid_t uuid
= DEFINE_CXL_CEL_UUID
;
543 void *data
= &mock_cel
;
545 if (cmd
->size_in
< sizeof(*gl
))
547 if (length
> cxl_mbox
->payload_size
)
/* NOTE(review): offset + length is u32 arithmetic; a wrap here would
 * bypass the bounds check -- confirm callers constrain these. */
549 if (offset
+ length
> sizeof(mock_cel
))
551 if (!uuid_equal(&gl
->uuid
, &uuid
))
553 if (length
> cmd
->size_out
)
556 memcpy(cmd
->payload_out
, data
+ offset
, length
);
561 static int mock_rcd_id(struct cxl_mbox_cmd
*cmd
)
563 struct cxl_mbox_identify id
= {
564 .fw_revision
= { "mock fw v1 " },
566 cpu_to_le64(DEV_SIZE
/ CXL_CAPACITY_MULTIPLIER
),
568 cpu_to_le64(DEV_SIZE
/ CXL_CAPACITY_MULTIPLIER
),
571 if (cmd
->size_out
< sizeof(id
))
574 memcpy(cmd
->payload_out
, &id
, sizeof(id
));
579 static int mock_id(struct cxl_mbox_cmd
*cmd
)
581 struct cxl_mbox_identify id
= {
582 .fw_revision
= { "mock fw v1 " },
583 .lsa_size
= cpu_to_le32(LSA_SIZE
),
585 cpu_to_le64(SZ_256M
/ CXL_CAPACITY_MULTIPLIER
),
587 cpu_to_le64(DEV_SIZE
/ CXL_CAPACITY_MULTIPLIER
),
588 .inject_poison_limit
= cpu_to_le16(MOCK_INJECT_TEST_MAX
),
591 put_unaligned_le24(CXL_POISON_LIST_MAX
, id
.poison_list_max_mer
);
593 if (cmd
->size_out
< sizeof(id
))
596 memcpy(cmd
->payload_out
, &id
, sizeof(id
));
/*
 * Mock Get Partition Info: report the device split 50/50 between
 * active volatile and active persistent capacity.
 */
601 static int mock_partition_info(struct cxl_mbox_cmd
*cmd
)
603 struct cxl_mbox_get_partition_info pi
= {
604 .active_volatile_cap
=
605 cpu_to_le64(DEV_SIZE
/ 2 / CXL_CAPACITY_MULTIPLIER
),
606 .active_persistent_cap
=
607 cpu_to_le64(DEV_SIZE
/ 2 / CXL_CAPACITY_MULTIPLIER
),
610 if (cmd
->size_out
< sizeof(pi
))
613 memcpy(cmd
->payload_out
, &pi
, sizeof(pi
));
618 void cxl_mockmem_sanitize_work(struct work_struct
*work
)
620 struct cxl_memdev_state
*mds
=
621 container_of(work
, typeof(*mds
), security
.poll_dwork
.work
);
622 struct cxl_mailbox
*cxl_mbox
= &mds
->cxlds
.cxl_mbox
;
624 mutex_lock(&cxl_mbox
->mbox_mutex
);
625 if (mds
->security
.sanitize_node
)
626 sysfs_notify_dirent(mds
->security
.sanitize_node
);
627 mds
->security
.sanitize_active
= false;
628 mutex_unlock(&cxl_mbox
->mbox_mutex
);
630 dev_dbg(mds
->cxlds
.dev
, "sanitize complete\n");
633 static int mock_sanitize(struct cxl_mockmem_data
*mdata
,
634 struct cxl_mbox_cmd
*cmd
)
636 struct cxl_memdev_state
*mds
= mdata
->mds
;
637 struct cxl_mailbox
*cxl_mbox
= &mds
->cxlds
.cxl_mbox
;
640 if (cmd
->size_in
!= 0)
643 if (cmd
->size_out
!= 0)
646 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
) {
647 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
650 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_LOCKED
) {
651 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
655 mutex_lock(&cxl_mbox
->mbox_mutex
);
656 if (schedule_delayed_work(&mds
->security
.poll_dwork
,
657 msecs_to_jiffies(mdata
->sanitize_timeout
))) {
658 mds
->security
.sanitize_active
= true;
659 dev_dbg(mds
->cxlds
.dev
, "sanitize issued\n");
662 mutex_unlock(&cxl_mbox
->mbox_mutex
);
667 static int mock_secure_erase(struct cxl_mockmem_data
*mdata
,
668 struct cxl_mbox_cmd
*cmd
)
670 if (cmd
->size_in
!= 0)
673 if (cmd
->size_out
!= 0)
676 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
) {
677 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
681 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_LOCKED
) {
682 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
689 static int mock_get_security_state(struct cxl_mockmem_data
*mdata
,
690 struct cxl_mbox_cmd
*cmd
)
695 if (cmd
->size_out
!= sizeof(u32
))
698 memcpy(cmd
->payload_out
, &mdata
->security_state
, sizeof(u32
));
/*
 * Count a failed master-passphrase attempt; once PASS_TRY_LIMIT is
 * reached, latch the master passphrase-limit security state. Already
 * at the limit, the counter is left untouched.
 */
703 static void master_plimit_check(struct cxl_mockmem_data
*mdata
)
705 if (mdata
->master_limit
== PASS_TRY_LIMIT
)
707 mdata
->master_limit
++;
708 if (mdata
->master_limit
== PASS_TRY_LIMIT
)
709 mdata
->security_state
|= CXL_PMEM_SEC_STATE_MASTER_PLIMIT
;
/*
 * User-passphrase counterpart of master_plimit_check().
 * NOTE(review): the user_limit increment between the two checks
 * appears to be on a line missing from this extract -- confirm against
 * the full file.
 */
712 static void user_plimit_check(struct cxl_mockmem_data
*mdata
)
714 if (mdata
->user_limit
== PASS_TRY_LIMIT
)
717 if (mdata
->user_limit
== PASS_TRY_LIMIT
)
718 mdata
->security_state
|= CXL_PMEM_SEC_STATE_USER_PLIMIT
;
721 static int mock_set_passphrase(struct cxl_mockmem_data
*mdata
,
722 struct cxl_mbox_cmd
*cmd
)
724 struct cxl_set_pass
*set_pass
;
726 if (cmd
->size_in
!= sizeof(*set_pass
))
729 if (cmd
->size_out
!= 0)
732 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_FROZEN
) {
733 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
737 set_pass
= cmd
->payload_in
;
738 switch (set_pass
->type
) {
739 case CXL_PMEM_SEC_PASS_MASTER
:
740 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_MASTER_PLIMIT
) {
741 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
745 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
746 * the security disabled state when the user passphrase is not set.
748 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
) {
749 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
752 if (memcmp(mdata
->master_pass
, set_pass
->old_pass
, NVDIMM_PASSPHRASE_LEN
)) {
753 master_plimit_check(mdata
);
754 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
757 memcpy(mdata
->master_pass
, set_pass
->new_pass
, NVDIMM_PASSPHRASE_LEN
);
758 mdata
->security_state
|= CXL_PMEM_SEC_STATE_MASTER_PASS_SET
;
761 case CXL_PMEM_SEC_PASS_USER
:
762 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PLIMIT
) {
763 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
766 if (memcmp(mdata
->user_pass
, set_pass
->old_pass
, NVDIMM_PASSPHRASE_LEN
)) {
767 user_plimit_check(mdata
);
768 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
771 memcpy(mdata
->user_pass
, set_pass
->new_pass
, NVDIMM_PASSPHRASE_LEN
);
772 mdata
->security_state
|= CXL_PMEM_SEC_STATE_USER_PASS_SET
;
776 cmd
->return_code
= CXL_MBOX_CMD_RC_INPUT
;
781 static int mock_disable_passphrase(struct cxl_mockmem_data
*mdata
,
782 struct cxl_mbox_cmd
*cmd
)
784 struct cxl_disable_pass
*dis_pass
;
786 if (cmd
->size_in
!= sizeof(*dis_pass
))
789 if (cmd
->size_out
!= 0)
792 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_FROZEN
) {
793 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
797 dis_pass
= cmd
->payload_in
;
798 switch (dis_pass
->type
) {
799 case CXL_PMEM_SEC_PASS_MASTER
:
800 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_MASTER_PLIMIT
) {
801 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
805 if (!(mdata
->security_state
& CXL_PMEM_SEC_STATE_MASTER_PASS_SET
)) {
806 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
810 if (memcmp(dis_pass
->pass
, mdata
->master_pass
, NVDIMM_PASSPHRASE_LEN
)) {
811 master_plimit_check(mdata
);
812 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
816 mdata
->master_limit
= 0;
817 memset(mdata
->master_pass
, 0, NVDIMM_PASSPHRASE_LEN
);
818 mdata
->security_state
&= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET
;
821 case CXL_PMEM_SEC_PASS_USER
:
822 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PLIMIT
) {
823 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
827 if (!(mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
)) {
828 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
832 if (memcmp(dis_pass
->pass
, mdata
->user_pass
, NVDIMM_PASSPHRASE_LEN
)) {
833 user_plimit_check(mdata
);
834 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
838 mdata
->user_limit
= 0;
839 memset(mdata
->user_pass
, 0, NVDIMM_PASSPHRASE_LEN
);
840 mdata
->security_state
&= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET
|
841 CXL_PMEM_SEC_STATE_LOCKED
);
845 cmd
->return_code
= CXL_MBOX_CMD_RC_INPUT
;
852 static int mock_freeze_security(struct cxl_mockmem_data
*mdata
,
853 struct cxl_mbox_cmd
*cmd
)
855 if (cmd
->size_in
!= 0)
858 if (cmd
->size_out
!= 0)
861 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_FROZEN
)
864 mdata
->security_state
|= CXL_PMEM_SEC_STATE_FROZEN
;
868 static int mock_unlock_security(struct cxl_mockmem_data
*mdata
,
869 struct cxl_mbox_cmd
*cmd
)
871 if (cmd
->size_in
!= NVDIMM_PASSPHRASE_LEN
)
874 if (cmd
->size_out
!= 0)
877 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_FROZEN
) {
878 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
882 if (!(mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
)) {
883 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
887 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PLIMIT
) {
888 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
892 if (!(mdata
->security_state
& CXL_PMEM_SEC_STATE_LOCKED
)) {
893 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
897 if (memcmp(cmd
->payload_in
, mdata
->user_pass
, NVDIMM_PASSPHRASE_LEN
)) {
898 if (++mdata
->user_limit
== PASS_TRY_LIMIT
)
899 mdata
->security_state
|= CXL_PMEM_SEC_STATE_USER_PLIMIT
;
900 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
904 mdata
->user_limit
= 0;
905 mdata
->security_state
&= ~CXL_PMEM_SEC_STATE_LOCKED
;
909 static int mock_passphrase_secure_erase(struct cxl_mockmem_data
*mdata
,
910 struct cxl_mbox_cmd
*cmd
)
912 struct cxl_pass_erase
*erase
;
914 if (cmd
->size_in
!= sizeof(*erase
))
917 if (cmd
->size_out
!= 0)
920 erase
= cmd
->payload_in
;
921 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_FROZEN
) {
922 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
926 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PLIMIT
&&
927 erase
->type
== CXL_PMEM_SEC_PASS_USER
) {
928 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
932 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_MASTER_PLIMIT
&&
933 erase
->type
== CXL_PMEM_SEC_PASS_MASTER
) {
934 cmd
->return_code
= CXL_MBOX_CMD_RC_SECURITY
;
938 switch (erase
->type
) {
939 case CXL_PMEM_SEC_PASS_MASTER
:
941 * The spec does not clearly define the behavior of the scenario
942 * where a master passphrase is passed in while the master
943 * passphrase is not set and user passphrase is not set. The
944 * code will take the assumption that it will behave the same
945 * as a CXL secure erase command without passphrase (0x4401).
947 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_MASTER_PASS_SET
) {
948 if (memcmp(mdata
->master_pass
, erase
->pass
,
949 NVDIMM_PASSPHRASE_LEN
)) {
950 master_plimit_check(mdata
);
951 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
954 mdata
->master_limit
= 0;
955 mdata
->user_limit
= 0;
956 mdata
->security_state
&= ~CXL_PMEM_SEC_STATE_USER_PASS_SET
;
957 memset(mdata
->user_pass
, 0, NVDIMM_PASSPHRASE_LEN
);
958 mdata
->security_state
&= ~CXL_PMEM_SEC_STATE_LOCKED
;
961 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
962 * When master passphrase is disabled, the device shall
963 * return Invalid Input for the Passphrase Secure Erase
964 * command with master passphrase.
968 /* Scramble encryption keys so that data is effectively erased */
970 case CXL_PMEM_SEC_PASS_USER
:
972 * The spec does not clearly define the behavior of the scenario
973 * where a user passphrase is passed in while the user
974 * passphrase is not set. The code will take the assumption that
975 * it will behave the same as a CXL secure erase command without
976 * passphrase (0x4401).
978 if (mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
) {
979 if (memcmp(mdata
->user_pass
, erase
->pass
,
980 NVDIMM_PASSPHRASE_LEN
)) {
981 user_plimit_check(mdata
);
982 cmd
->return_code
= CXL_MBOX_CMD_RC_PASSPHRASE
;
985 mdata
->user_limit
= 0;
986 mdata
->security_state
&= ~CXL_PMEM_SEC_STATE_USER_PASS_SET
;
987 memset(mdata
->user_pass
, 0, NVDIMM_PASSPHRASE_LEN
);
991 * CXL rev3 Table 8-118
992 * If user passphrase is not set or supported by device, current
993 * passphrase value is ignored. Will make the assumption that
994 * the operation will proceed as secure erase w/o passphrase
995 * since spec is not explicit.
998 /* Scramble encryption keys so that data is effectively erased */
/*
 * Mock Get LSA: copy a requested window of the label storage area into
 * the output payload, bounds-checked against LSA_SIZE and the caller's
 * output buffer.
 */
1007 static int mock_get_lsa(struct cxl_mockmem_data
*mdata
,
1008 struct cxl_mbox_cmd
*cmd
)
1010 struct cxl_mbox_get_lsa
*get_lsa
= cmd
->payload_in
;
1011 void *lsa
= mdata
->lsa
;
1014 if (sizeof(*get_lsa
) > cmd
->size_in
)
1016 offset
= le32_to_cpu(get_lsa
->offset
);
1017 length
= le32_to_cpu(get_lsa
->length
);
1018 if (offset
+ length
> LSA_SIZE
)
1020 if (length
> cmd
->size_out
)
1023 memcpy(cmd
->payload_out
, lsa
+ offset
, length
);
/*
 * Mock Set LSA: write the trailing data of the input payload into the
 * label storage area at the requested offset; the write length is
 * implied by size_in minus the fixed header.
 */
1027 static int mock_set_lsa(struct cxl_mockmem_data
*mdata
,
1028 struct cxl_mbox_cmd
*cmd
)
1030 struct cxl_mbox_set_lsa
*set_lsa
= cmd
->payload_in
;
1031 void *lsa
= mdata
->lsa
;
1034 if (sizeof(*set_lsa
) > cmd
->size_in
)
1036 offset
= le32_to_cpu(set_lsa
->offset
);
1037 length
= cmd
->size_in
- sizeof(*set_lsa
);
1038 if (offset
+ length
> LSA_SIZE
)
1041 memcpy(lsa
+ offset
, &set_lsa
->data
[0], length
);
1045 static int mock_health_info(struct cxl_mbox_cmd
*cmd
)
1047 struct cxl_mbox_health_info health_info
= {
1048 /* set flags for maint needed, perf degraded, hw replacement */
1049 .health_status
= 0x7,
1050 /* set media status to "All Data Lost" */
1051 .media_status
= 0x3,
1053 * set ext_status flags for:
1054 * ext_life_used: normal,
1055 * ext_temperature: critical,
1056 * ext_corrected_volatile: warning,
1057 * ext_corrected_persistent: normal,
1061 .temperature
= cpu_to_le16(25),
1062 .dirty_shutdowns
= cpu_to_le32(10),
1063 .volatile_errors
= cpu_to_le32(20),
1064 .pmem_errors
= cpu_to_le32(30),
1067 if (cmd
->size_out
< sizeof(health_info
))
1070 memcpy(cmd
->payload_out
, &health_info
, sizeof(health_info
));
1074 static struct mock_poison
{
1075 struct cxl_dev_state
*cxlds
;
1077 } mock_poison_list
[MOCK_INJECT_TEST_MAX
];
1079 static struct cxl_mbox_poison_out
*
1080 cxl_get_injected_po(struct cxl_dev_state
*cxlds
, u64 offset
, u64 length
)
1082 struct cxl_mbox_poison_out
*po
;
1086 po
= kzalloc(struct_size(po
, record
, poison_inject_dev_max
), GFP_KERNEL
);
1090 for (int i
= 0; i
< MOCK_INJECT_TEST_MAX
; i
++) {
1091 if (mock_poison_list
[i
].cxlds
!= cxlds
)
1093 if (mock_poison_list
[i
].dpa
< offset
||
1094 mock_poison_list
[i
].dpa
> offset
+ length
- 1)
1097 dpa
= mock_poison_list
[i
].dpa
+ CXL_POISON_SOURCE_INJECTED
;
1098 po
->record
[nr_records
].address
= cpu_to_le64(dpa
);
1099 po
->record
[nr_records
].length
= cpu_to_le32(1);
1101 if (nr_records
== poison_inject_dev_max
)
1105 /* Always return count, even when zero */
1106 po
->count
= cpu_to_le16(nr_records
);
1111 static int mock_get_poison(struct cxl_dev_state
*cxlds
,
1112 struct cxl_mbox_cmd
*cmd
)
1114 struct cxl_mbox_poison_in
*pi
= cmd
->payload_in
;
1115 struct cxl_mbox_poison_out
*po
;
1116 u64 offset
= le64_to_cpu(pi
->offset
);
1117 u64 length
= le64_to_cpu(pi
->length
);
1120 po
= cxl_get_injected_po(cxlds
, offset
, length
);
1123 nr_records
= le16_to_cpu(po
->count
);
1124 memcpy(cmd
->payload_out
, po
, struct_size(po
, record
, nr_records
));
1125 cmd
->size_out
= struct_size(po
, record
, nr_records
);
/*
 * True when @cxlds already owns poison_inject_dev_max (or more)
 * entries in the shared mock poison list.
 */
1131 static bool mock_poison_dev_max_injected(struct cxl_dev_state
*cxlds
)
1135 for (int i
= 0; i
< MOCK_INJECT_TEST_MAX
; i
++) {
1136 if (mock_poison_list
[i
].cxlds
== cxlds
)
1139 return (count
>= poison_inject_dev_max
);
1142 static int mock_poison_add(struct cxl_dev_state
*cxlds
, u64 dpa
)
1144 /* Return EBUSY to match the CXL driver handling */
1145 if (mock_poison_dev_max_injected(cxlds
)) {
1147 "Device poison injection limit has been reached: %d\n",
1148 poison_inject_dev_max
);
1152 for (int i
= 0; i
< MOCK_INJECT_TEST_MAX
; i
++) {
1153 if (!mock_poison_list
[i
].cxlds
) {
1154 mock_poison_list
[i
].cxlds
= cxlds
;
1155 mock_poison_list
[i
].dpa
= dpa
;
1160 "Mock test poison injection limit has been reached: %d\n",
1161 MOCK_INJECT_TEST_MAX
);
/* True when (@cxlds, @dpa) is already present in the mock poison list. */
1166 static bool mock_poison_found(struct cxl_dev_state
*cxlds
, u64 dpa
)
1168 for (int i
= 0; i
< MOCK_INJECT_TEST_MAX
; i
++) {
1169 if (mock_poison_list
[i
].cxlds
== cxlds
&&
1170 mock_poison_list
[i
].dpa
== dpa
)
1176 static int mock_inject_poison(struct cxl_dev_state
*cxlds
,
1177 struct cxl_mbox_cmd
*cmd
)
1179 struct cxl_mbox_inject_poison
*pi
= cmd
->payload_in
;
1180 u64 dpa
= le64_to_cpu(pi
->address
);
1182 if (mock_poison_found(cxlds
, dpa
)) {
1183 /* Not an error to inject poison if already poisoned */
1184 dev_dbg(cxlds
->dev
, "DPA: 0x%llx already poisoned\n", dpa
);
1188 return mock_poison_add(cxlds
, dpa
);
/*
 * Remove the (@cxlds, @dpa) entry from the mock poison list by
 * NULLing its owner slot; returns whether a matching entry was found.
 */
1191 static bool mock_poison_del(struct cxl_dev_state
*cxlds
, u64 dpa
)
1193 for (int i
= 0; i
< MOCK_INJECT_TEST_MAX
; i
++) {
1194 if (mock_poison_list
[i
].cxlds
== cxlds
&&
1195 mock_poison_list
[i
].dpa
== dpa
) {
1196 mock_poison_list
[i
].cxlds
= NULL
;
1203 static int mock_clear_poison(struct cxl_dev_state
*cxlds
,
1204 struct cxl_mbox_cmd
*cmd
)
1206 struct cxl_mbox_clear_poison
*pi
= cmd
->payload_in
;
1207 u64 dpa
= le64_to_cpu(pi
->address
);
1210 * A real CXL device will write pi->write_data to the address
1211 * being cleared. In this mock, just delete this address from
1212 * the mock poison list.
1214 if (!mock_poison_del(cxlds
, dpa
))
1215 dev_dbg(cxlds
->dev
, "DPA: 0x%llx not in poison list\n", dpa
);
/* True when no device currently owns any mock poison list slot. */
1220 static bool mock_poison_list_empty(void)
1222 for (int i
= 0; i
< MOCK_INJECT_TEST_MAX
; i
++) {
1223 if (mock_poison_list
[i
].cxlds
)
/* sysfs read of the per-device poison injection limit. */
1229 static ssize_t
poison_inject_max_show(struct device_driver
*drv
, char *buf
)
1231 return sysfs_emit(buf
, "%u\n", poison_inject_dev_max
);
/*
 * sysfs write of the per-device poison injection limit. Only allowed
 * while the mock poison list is empty, and capped at
 * MOCK_INJECT_TEST_MAX. NOTE(review): val is parsed as signed via
 * kstrtoint; a negative value would pass the <= check and be assigned
 * to the unsigned limit -- confirm against the full file.
 */
1234 static ssize_t
poison_inject_max_store(struct device_driver
*drv
,
1235 const char *buf
, size_t len
)
1239 if (kstrtoint(buf
, 0, &val
) < 0)
1242 if (!mock_poison_list_empty())
1245 if (val
<= MOCK_INJECT_TEST_MAX
)
1246 poison_inject_dev_max
= val
;
1253 static DRIVER_ATTR_RW(poison_inject_max
);
1255 static struct attribute
*cxl_mock_mem_core_attrs
[] = {
1256 &driver_attr_poison_inject_max
.attr
,
1259 ATTRIBUTE_GROUPS(cxl_mock_mem_core
);
1261 static int mock_fw_info(struct cxl_mockmem_data
*mdata
,
1262 struct cxl_mbox_cmd
*cmd
)
1264 struct cxl_mbox_get_fw_info fw_info
= {
1265 .num_slots
= FW_SLOTS
,
1266 .slot_info
= (mdata
->fw_slot
& 0x7) |
1267 ((mdata
->fw_staged
& 0x7) << 3),
1268 .activation_cap
= 0,
1271 strcpy(fw_info
.slot_1_revision
, "cxl_test_fw_001");
1272 strcpy(fw_info
.slot_2_revision
, "cxl_test_fw_002");
1273 strcpy(fw_info
.slot_3_revision
, "cxl_test_fw_003");
1274 strcpy(fw_info
.slot_4_revision
, "");
1276 if (cmd
->size_out
< sizeof(fw_info
))
1279 memcpy(cmd
->payload_out
, &fw_info
, sizeof(fw_info
));
1283 static int mock_transfer_fw(struct cxl_mockmem_data
*mdata
,
1284 struct cxl_mbox_cmd
*cmd
)
1286 struct cxl_mbox_transfer_fw
*transfer
= cmd
->payload_in
;
1287 void *fw
= mdata
->fw
;
1288 size_t offset
, length
;
1290 offset
= le32_to_cpu(transfer
->offset
) * CXL_FW_TRANSFER_ALIGNMENT
;
1291 length
= cmd
->size_in
- sizeof(*transfer
);
1292 if (offset
+ length
> FW_SIZE
)
1295 switch (transfer
->action
) {
1296 case CXL_FW_TRANSFER_ACTION_FULL
:
1300 case CXL_FW_TRANSFER_ACTION_END
:
1301 if (transfer
->slot
== 0 || transfer
->slot
> FW_SLOTS
)
1303 mdata
->fw_size
= offset
+ length
;
1305 case CXL_FW_TRANSFER_ACTION_INITIATE
:
1306 case CXL_FW_TRANSFER_ACTION_CONTINUE
:
1308 case CXL_FW_TRANSFER_ACTION_ABORT
:
1314 memcpy(fw
+ offset
, transfer
->data
, length
);
1315 usleep_range(1500, 2000);
1319 static int mock_activate_fw(struct cxl_mockmem_data
*mdata
,
1320 struct cxl_mbox_cmd
*cmd
)
1322 struct cxl_mbox_activate_fw
*activate
= cmd
->payload_in
;
1324 if (activate
->slot
== 0 || activate
->slot
> FW_SLOTS
)
1327 switch (activate
->action
) {
1328 case CXL_FW_ACTIVATE_ONLINE
:
1329 mdata
->fw_slot
= activate
->slot
;
1330 mdata
->fw_staged
= 0;
1332 case CXL_FW_ACTIVATE_OFFLINE
:
1333 mdata
->fw_staged
= activate
->slot
;
1340 static int cxl_mock_mbox_send(struct cxl_mailbox
*cxl_mbox
,
1341 struct cxl_mbox_cmd
*cmd
)
1343 struct device
*dev
= cxl_mbox
->host
;
1344 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
1345 struct cxl_memdev_state
*mds
= mdata
->mds
;
1346 struct cxl_dev_state
*cxlds
= &mds
->cxlds
;
1349 switch (cmd
->opcode
) {
1350 case CXL_MBOX_OP_SET_TIMESTAMP
:
1351 rc
= mock_set_timestamp(cxlds
, cmd
);
1353 case CXL_MBOX_OP_GET_SUPPORTED_LOGS
:
1356 case CXL_MBOX_OP_GET_LOG
:
1357 rc
= mock_get_log(mds
, cmd
);
1359 case CXL_MBOX_OP_IDENTIFY
:
1361 rc
= mock_rcd_id(cmd
);
1365 case CXL_MBOX_OP_GET_LSA
:
1366 rc
= mock_get_lsa(mdata
, cmd
);
1368 case CXL_MBOX_OP_GET_PARTITION_INFO
:
1369 rc
= mock_partition_info(cmd
);
1371 case CXL_MBOX_OP_GET_EVENT_RECORD
:
1372 rc
= mock_get_event(dev
, cmd
);
1374 case CXL_MBOX_OP_CLEAR_EVENT_RECORD
:
1375 rc
= mock_clear_event(dev
, cmd
);
1377 case CXL_MBOX_OP_SET_LSA
:
1378 rc
= mock_set_lsa(mdata
, cmd
);
1380 case CXL_MBOX_OP_GET_HEALTH_INFO
:
1381 rc
= mock_health_info(cmd
);
1383 case CXL_MBOX_OP_SANITIZE
:
1384 rc
= mock_sanitize(mdata
, cmd
);
1386 case CXL_MBOX_OP_SECURE_ERASE
:
1387 rc
= mock_secure_erase(mdata
, cmd
);
1389 case CXL_MBOX_OP_GET_SECURITY_STATE
:
1390 rc
= mock_get_security_state(mdata
, cmd
);
1392 case CXL_MBOX_OP_SET_PASSPHRASE
:
1393 rc
= mock_set_passphrase(mdata
, cmd
);
1395 case CXL_MBOX_OP_DISABLE_PASSPHRASE
:
1396 rc
= mock_disable_passphrase(mdata
, cmd
);
1398 case CXL_MBOX_OP_FREEZE_SECURITY
:
1399 rc
= mock_freeze_security(mdata
, cmd
);
1401 case CXL_MBOX_OP_UNLOCK
:
1402 rc
= mock_unlock_security(mdata
, cmd
);
1404 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE
:
1405 rc
= mock_passphrase_secure_erase(mdata
, cmd
);
1407 case CXL_MBOX_OP_GET_POISON
:
1408 rc
= mock_get_poison(cxlds
, cmd
);
1410 case CXL_MBOX_OP_INJECT_POISON
:
1411 rc
= mock_inject_poison(cxlds
, cmd
);
1413 case CXL_MBOX_OP_CLEAR_POISON
:
1414 rc
= mock_clear_poison(cxlds
, cmd
);
1416 case CXL_MBOX_OP_GET_FW_INFO
:
1417 rc
= mock_fw_info(mdata
, cmd
);
1419 case CXL_MBOX_OP_TRANSFER_FW
:
1420 rc
= mock_transfer_fw(mdata
, cmd
);
1422 case CXL_MBOX_OP_ACTIVATE_FW
:
1423 rc
= mock_activate_fw(mdata
, cmd
);
1429 dev_dbg(dev
, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd
->opcode
,
1430 cmd
->size_in
, cmd
->size_out
, rc
);
/* devm action: free the vmalloc()'d label storage area */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
/* devm action: free the vmalloc()'d mock firmware buffer */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1445 static bool is_rcd(struct platform_device
*pdev
)
1447 const struct platform_device_id
*id
= platform_get_device_id(pdev
);
1449 return !!id
->driver_data
;
1452 static ssize_t
event_trigger_store(struct device
*dev
,
1453 struct device_attribute
*attr
,
1454 const char *buf
, size_t count
)
1456 cxl_mock_event_trigger(dev
);
1459 static DEVICE_ATTR_WO(event_trigger
);
1461 static int cxl_mock_mailbox_create(struct cxl_dev_state
*cxlds
)
1465 rc
= cxl_mailbox_init(&cxlds
->cxl_mbox
, cxlds
->dev
);
1472 static int cxl_mock_mem_probe(struct platform_device
*pdev
)
1474 struct device
*dev
= &pdev
->dev
;
1475 struct cxl_memdev
*cxlmd
;
1476 struct cxl_memdev_state
*mds
;
1477 struct cxl_dev_state
*cxlds
;
1478 struct cxl_mockmem_data
*mdata
;
1479 struct cxl_mailbox
*cxl_mbox
;
1482 mdata
= devm_kzalloc(dev
, sizeof(*mdata
), GFP_KERNEL
);
1485 dev_set_drvdata(dev
, mdata
);
1487 mdata
->lsa
= vmalloc(LSA_SIZE
);
1490 mdata
->fw
= vmalloc(FW_SIZE
);
1495 rc
= devm_add_action_or_reset(dev
, label_area_release
, mdata
->lsa
);
1499 rc
= devm_add_action_or_reset(dev
, fw_buf_release
, mdata
->fw
);
1503 mds
= cxl_memdev_state_create(dev
);
1505 return PTR_ERR(mds
);
1507 cxlds
= &mds
->cxlds
;
1508 rc
= cxl_mock_mailbox_create(cxlds
);
1512 cxl_mbox
= &mds
->cxlds
.cxl_mbox
;
1514 cxl_mbox
->mbox_send
= cxl_mock_mbox_send
;
1515 cxl_mbox
->payload_size
= SZ_4K
;
1516 mds
->event
.buf
= (struct cxl_get_event_payload
*) mdata
->event_buf
;
1517 INIT_DELAYED_WORK(&mds
->security
.poll_dwork
, cxl_mockmem_sanitize_work
);
1519 cxlds
->serial
= pdev
->id
;
1523 rc
= cxl_enumerate_cmds(mds
);
1527 rc
= cxl_poison_state_init(mds
);
1531 rc
= cxl_set_timestamp(mds
);
1535 cxlds
->media_ready
= true;
1536 rc
= cxl_dev_state_identify(mds
);
1540 rc
= cxl_mem_create_range_info(mds
);
1544 cxl_mock_add_event_logs(&mdata
->mes
);
1546 cxlmd
= devm_cxl_add_memdev(&pdev
->dev
, cxlds
);
1548 return PTR_ERR(cxlmd
);
1550 rc
= devm_cxl_setup_fw_upload(&pdev
->dev
, mds
);
1554 rc
= devm_cxl_sanitize_setup_notifier(&pdev
->dev
, cxlmd
);
1558 cxl_mem_get_event_records(mds
, CXLDEV_EVENT_STATUS_ALL
);
1563 static ssize_t
security_lock_show(struct device
*dev
,
1564 struct device_attribute
*attr
, char *buf
)
1566 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
1568 return sysfs_emit(buf
, "%u\n",
1569 !!(mdata
->security_state
& CXL_PMEM_SEC_STATE_LOCKED
));
1572 static ssize_t
security_lock_store(struct device
*dev
, struct device_attribute
*attr
,
1573 const char *buf
, size_t count
)
1575 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
1576 u32 mask
= CXL_PMEM_SEC_STATE_FROZEN
| CXL_PMEM_SEC_STATE_USER_PLIMIT
|
1577 CXL_PMEM_SEC_STATE_MASTER_PLIMIT
;
1580 if (kstrtoint(buf
, 0, &val
) < 0)
1584 if (!(mdata
->security_state
& CXL_PMEM_SEC_STATE_USER_PASS_SET
))
1586 mdata
->security_state
|= CXL_PMEM_SEC_STATE_LOCKED
;
1587 mdata
->security_state
&= ~mask
;
1594 static DEVICE_ATTR_RW(security_lock
);
1596 static ssize_t
fw_buf_checksum_show(struct device
*dev
,
1597 struct device_attribute
*attr
, char *buf
)
1599 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
1600 u8 hash
[SHA256_DIGEST_SIZE
];
1601 unsigned char *hstr
, *hptr
;
1602 struct sha256_state sctx
;
1603 ssize_t written
= 0;
1607 sha256_update(&sctx
, mdata
->fw
, mdata
->fw_size
);
1608 sha256_final(&sctx
, hash
);
1610 hstr
= kzalloc((SHA256_DIGEST_SIZE
* 2) + 1, GFP_KERNEL
);
1615 for (i
= 0; i
< SHA256_DIGEST_SIZE
; i
++)
1616 hptr
+= sprintf(hptr
, "%02x", hash
[i
]);
1618 written
= sysfs_emit(buf
, "%s\n", hstr
);
1624 static DEVICE_ATTR_RO(fw_buf_checksum
);
1626 static ssize_t
sanitize_timeout_show(struct device
*dev
,
1627 struct device_attribute
*attr
, char *buf
)
1629 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
1631 return sysfs_emit(buf
, "%lu\n", mdata
->sanitize_timeout
);
1634 static ssize_t
sanitize_timeout_store(struct device
*dev
,
1635 struct device_attribute
*attr
,
1636 const char *buf
, size_t count
)
1638 struct cxl_mockmem_data
*mdata
= dev_get_drvdata(dev
);
1642 rc
= kstrtoul(buf
, 0, &val
);
1646 mdata
->sanitize_timeout
= val
;
1651 static DEVICE_ATTR_RW(sanitize_timeout
);
1653 static struct attribute
*cxl_mock_mem_attrs
[] = {
1654 &dev_attr_security_lock
.attr
,
1655 &dev_attr_event_trigger
.attr
,
1656 &dev_attr_fw_buf_checksum
.attr
,
1657 &dev_attr_sanitize_timeout
.attr
,
1660 ATTRIBUTE_GROUPS(cxl_mock_mem
);
1662 static const struct platform_device_id cxl_mock_mem_ids
[] = {
1663 { .name
= "cxl_mem", 0 },
1664 { .name
= "cxl_rcd", 1 },
1667 MODULE_DEVICE_TABLE(platform
, cxl_mock_mem_ids
);
1669 static struct platform_driver cxl_mock_mem_driver
= {
1670 .probe
= cxl_mock_mem_probe
,
1671 .id_table
= cxl_mock_mem_ids
,
1673 .name
= KBUILD_MODNAME
,
1674 .dev_groups
= cxl_mock_mem_groups
,
1675 .groups
= cxl_mock_mem_core_groups
,
1676 .probe_type
= PROBE_PREFER_ASYNCHRONOUS
,
1680 module_platform_driver(cxl_mock_mem_driver
);
1681 MODULE_LICENSE("GPL v2");
1682 MODULE_IMPORT_NS("CXL");