// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

#define NVDIMM_BASE_KEY	0
#define NVDIMM_NEW_KEY	1
static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");

static const char zero_key[NVDIMM_PASSPHRASE_LEN];
static void *key_data(struct key *key)
{
	struct encrypted_key_payload *epayload = dereference_key_locked(key);

	lockdep_assert_held_read(&key->sem);

	return epayload->decrypted_data;
}
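/*
 * Drop a key reference obtained from nvdimm_request_key() or
 * nvdimm_lookup_user_key(): release the read-held semaphore and put the key.
 */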
static void nvdimm_put_key(struct key *key)
{
	if (!key)
		return;

	up_read(&key->sem);
	key_put(key);
}
/*
 * Retrieve kernel key for DIMM and request from user space if
 * necessary. Returns a key held for read and must be put by
 * nvdimm_put_key() before the usage goes out of scope.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
	struct key *key = NULL;
	static const char NVDIMM_PREFIX[] = "nvdimm:";
	char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
	struct device *dev = &nvdimm->dev;

	sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
	key = request_key(&key_type_encrypted, desc, "");
	if (IS_ERR(key)) {
		if (PTR_ERR(key) == -ENOKEY)
			dev_dbg(dev, "request_key() found no key\n");
		else
			dev_dbg(dev, "request_key() upcall failed\n");
		key = NULL;
	} else {
		struct encrypted_key_payload *epayload;

		down_read(&key->sem);
		epayload = dereference_key_locked(key);
		if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
			up_read(&key->sem);
			key_put(key);
			key = NULL;
		}
	}

	return key;
}
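/*
 * Return the kernel key payload for this DIMM, or the all-zeroes passphrase
 * if no key is registered. *key is set so the caller can nvdimm_put_key() it.
 */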
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
		struct key **key)
{
	*key = nvdimm_request_key(nvdimm);
	if (!*key)
		return zero_key;

	return key_data(*key);
}
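/*
 * Resolve a user-supplied key id to an encrypted-type key whose payload is
 * exactly NVDIMM_PASSPHRASE_LEN bytes; the key is returned held for read.
 */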
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
		key_serial_t id, int subclass)
{
	key_ref_t keyref;
	struct key *key;
	struct encrypted_key_payload *epayload;
	struct device *dev = &nvdimm->dev;

	keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyref))
		return NULL;

	key = key_ref_to_ptr(keyref);
	if (key->type != &key_type_encrypted) {
		key_put(key);
		return NULL;
	}

	dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

	down_read_nested(&key->sem, subclass);
	epayload = dereference_key_locked(key);
	if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
		up_read(&key->sem);
		key_put(key);
		key = NULL;
	}
	return key;
}
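/*
 * Fetch the payload for a user-provided key id. A zero id selects the default
 * all-zeroes passphrase for the base key slot.
 */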
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
		key_serial_t id, int subclass, struct key **key)
{
	*key = NULL;
	if (!id) {
		if (subclass == NVDIMM_BASE_KEY)
			return zero_key;
		else
			return NULL;
	}

	*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
	if (!*key)
		return NULL;

	return key_data(*key);
}
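/*
 * Prove that the key obtained from the kernel keyring still matches what the
 * hardware expects by issuing a no-op passphrase change (old == new).
 */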
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
	struct key *key;
	int rc;
	const void *data;

	if (!nvdimm->sec.ops->change_key)
		return -EOPNOTSUPP;

	data = nvdimm_get_key_payload(nvdimm, &key);

	/*
	 * Send the same key to the hardware as new and old key to
	 * verify that the key is good.
	 */
	rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
	if (rc < 0) {
		nvdimm_put_key(key);
		return rc;
	}

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return 0;
}
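/*
 * Unlock the DIMM with the kernel key registered for it, or, if the pre-OS
 * already unlocked it, revalidate that key when key_revalidate is set.
 */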
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	const void *data;
	int rc;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
			|| !nvdimm->sec.flags)
		return -EIO;

	/* No need to go further if security is disabled */
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return 0;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	/*
	 * If the pre-OS has unlocked the DIMM, attempt to send the key
	 * from request_key() to the hardware for verification. Failure
	 * to revalidate the key against the hardware results in a
	 * freeze of the security configuration. I.e. if the OS does not
	 * have the key, security is being managed pre-OS.
	 */
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
		if (!key_revalidate)
			return 0;

		return nvdimm_key_revalidate(nvdimm);
	} else
		data = nvdimm_get_key_payload(nvdimm, &key);

	rc = nvdimm->sec.ops->unlock(nvdimm, data);
	dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}
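/*
 * Bus-locked entry point: takes the nvdimm bus lock around
 * __nvdimm_security_unlock().
 */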
int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}
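/*
 * Common precondition check for security operations: reject the request if
 * the DIMM security state is frozen or an overwrite is already in flight.
 */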
static int check_security_state(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;

	if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
		dev_dbg(dev, "Incorrect security state: %#lx\n",
				nvdimm->sec.flags);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	return 0;
}
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->disable(nvdimm, data);
	dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}
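/*
 * Change the user or master passphrase: keyid identifies the current
 * passphrase key, new_keyid the replacement.
 */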
static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key, *newkey;
	int rc;
	const void *data, *newdata;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
			NVDIMM_NEW_KEY, &newkey);
	if (!newdata) {
		nvdimm_put_key(key);
		return -ENOKEY;
	}

	rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
	dev_dbg(dev, "key: %d %d update%s: %s\n",
			key_serial(key), key_serial(newkey),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(newkey);
	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
				NVDIMM_MASTER);
	else
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
				NVDIMM_USER);
	return rc;
}
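/*
 * Secure-erase the DIMM with the passphrase identified by keyid. A master
 * erase is rejected unless the master security state reports unlocked.
 */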
static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
			&& pass_type == NVDIMM_MASTER) {
		dev_dbg(dev,
			"Attempt to secure erase in wrong master state.\n");
		return -EOPNOTSUPP;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
	dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}
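/*
 * Kick off an asynchronous overwrite of the DIMM. Progress is polled by the
 * delayed work queued below; the device is pinned until the work completes.
 */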
static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	if (dev->driver == NULL) {
		dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
		return -EINVAL;
	}

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->overwrite(nvdimm, data);
	dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	if (rc == 0) {
		set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
		/*
		 * Make sure we don't lose device while doing overwrite
		 * query.
		 */
		get_device(dev);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}

	nvdimm_put_key(key);
	return rc;
}
void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
	int rc;
	unsigned int tmo;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	/*
	 * Abort and release device if we no longer have the overwrite
	 * flag set. It means the work has been canceled.
	 */
	if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
		return;

	tmo = nvdimm->sec.overwrite_tmo;

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
			|| !nvdimm->sec.flags)
		return;

	rc = nvdimm->sec.ops->query_overwrite(nvdimm);
	if (rc == -EBUSY) {
		/* setup delayed work again */
		tmo += 10;
		queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
		nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
		return;
	}

	if (rc < 0)
		dev_dbg(&nvdimm->dev, "overwrite failed\n");
	else
		dev_dbg(&nvdimm->dev, "overwrite completed\n");

	/*
	 * Mark the overwrite work done and update dimm security flags,
	 * then send a sysfs event notification to wake up userspace
	 * poll threads to pick up the changed state.
	 */
	nvdimm->sec.overwrite_tmo = 0;
	clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
	clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	if (nvdimm->sec.overwrite_state)
		sysfs_notify_dirent(nvdimm->sec.overwrite_state);
	put_device(&nvdimm->dev);
}
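/*
 * Delayed-work callback: re-check overwrite progress under the bus lock.
 */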
void nvdimm_security_overwrite_query(struct work_struct *work)
{
	struct nvdimm *nvdimm =
		container_of(work, typeof(*nvdimm), dwork.work);

	nvdimm_bus_lock(&nvdimm->dev);
	__nvdimm_security_overwrite_query(nvdimm);
	nvdimm_bus_unlock(&nvdimm->dev);
}
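/*
 * Command table for the security store interface. Each C() entry expands once
 * to an op id for enum nvdimmsec_op_ids and once to a { name, args } pair for
 * the ops[] lookup table; args is the number of whitespace-separated fields
 * the command takes, including the command name itself.
 */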
#define OPS \
	C( OP_FREEZE, "freeze", 1), \
	C( OP_DISABLE, "disable", 2), \
	C( OP_UPDATE, "update", 3), \
	C( OP_ERASE, "erase", 2), \
	C( OP_OVERWRITE, "overwrite", 2), \
	C( OP_MASTER_UPDATE, "master_update", 3), \
	C( OP_MASTER_ERASE, "master_erase", 2)

#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C

#define C(a, b, c) { b, c }
static struct {
	const char *name;
	int args;
} ops[] = { OPS };
#undef C
#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10
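/*
 * Parse and dispatch a command written to the DIMM security sysfs attribute.
 * Accepted input, per the ops[] table above (key ids name encrypted keys in
 * the user keyring):
 *
 *	freeze
 *	disable <keyid>
 *	update <keyid> <new_keyid>
 *	erase <keyid>
 *	overwrite <keyid>
 *	master_update <keyid> <new_keyid>
 *	master_erase <keyid>
 */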
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	ssize_t rc;
	char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
		nkeystr[KEY_ID_SIZE+1];
	unsigned int key, newkey;
	int i;

	rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s",
			cmd, keystr, nkeystr);
	if (rc < 1)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		if (sysfs_streq(cmd, ops[i].name))
			break;
	if (i >= ARRAY_SIZE(ops))
		return -EINVAL;
	if (ops[i].args > 1)
		rc = kstrtouint(keystr, 0, &key);
	if (rc >= 0 && ops[i].args > 2)
		rc = kstrtouint(nkeystr, 0, &newkey);
	if (rc < 0)
		return rc;

	if (i == OP_FREEZE) {
		dev_dbg(dev, "freeze\n");
		rc = nvdimm_security_freeze(nvdimm);
	} else if (i == OP_DISABLE) {
		dev_dbg(dev, "disable %u\n", key);
		rc = security_disable(nvdimm, key);
	} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
		dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
		rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
		dev_dbg(dev, "%s %u\n", ops[i].name, key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_erase(nvdimm, key, i == OP_ERASE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_OVERWRITE) {
		dev_dbg(dev, "overwrite %u\n", key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_overwrite(nvdimm, key);
);