// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_shash *shash_tfm;
	struct crypto_kpp *dh_tfm;
	struct nvme_dhchap_key *transformed_key;
	void *buf;
	int qid;
	int error;
	u32 s1;
	u32 s2;
	bool bi_directional;
	u16 transaction;
	u8 status;
	size_t hash_len;
	u8 hash_id;
	u8 dhgroup_id;
	u8 c1[64];
	u8 c2[64];
	u8 response[64];
	u8 *host_key;
	size_t host_key_len;
	u8 *ctrl_key;
	size_t ctrl_key_len;
	u8 *sess_key;
	size_t sess_key_len;
};

static struct workqueue_struct *nvme_auth_wq;
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
			ctrl->opts->nr_poll_queues + 1;
}
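
/*
 * Send or receive a DH-HMAC-CHAP message on @qid using the fabrics
 * Authentication Send/Receive commands; @auth_send selects the direction,
 * @data/@data_len describe the message buffer.
 */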
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
		void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
	struct request_queue *q = ctrl->fabrics_q;
	int ret;

	if (qid != 0) {
		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
		q = ctrl->connect_q;
	}

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
			"qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_warn(ctrl->device,
			"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		return data->rescode_exp;
	}
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	if (le16_to_cpu(data->t_id) != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, le16_to_cpu(data->t_id));
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	return 0;
}
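
/*
 * Build the AUTH_Negotiate message: advertise the supported hash functions
 * (SHA-256/384/512) and DH groups (NULL plus ffdhe2048..ffdhe8192).
 * Returns the number of bytes to transfer.
 */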
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	data->sc_c = 0; /* No secure channel concatenation */
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	return size;
}
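
/*
 * Parse the AUTH_Challenge message: select (or reuse) the HMAC and KPP
 * transforms chosen by the controller, then store the challenge value,
 * sequence number and, for non-NULL DH groups, the controller public key.
 */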
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			(int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}
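
/*
 * Build the AUTH_Reply message carrying the host response and, if
 * bi-directional authentication is configured, a challenge for the
 * controller, plus the host public key for non-NULL DH groups.
 * Returns the number of bytes to transfer.
 */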
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key) {
		chap->bi_directional = true;
		get_random_bytes(chap->c2, chap->hash_len);
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}
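
/*
 * Parse the AUTH_Success1 message and, for bi-directional authentication,
 * verify the controller response against the expected value.
 */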
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}
static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
	data->t_id = cpu_to_le16(chap->transaction);

	return size;
}
static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
	data->t_id = cpu_to_le16(chap->transaction);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = chap->status;

	return size;
}
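
/*
 * Compute the host response: HMAC over the (possibly DH-augmented)
 * challenge, sequence number, transaction ID, "HostHost", host NQN and
 * subsystem NQN, keyed with the transformed host secret.
 */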
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->transformed_key->key, chap->transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}
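
/*
 * Compute the expected controller response: HMAC over the (possibly
 * DH-augmented) controller challenge, sequence number, transaction ID,
 * "Controller", subsystem NQN and host NQN, keyed with the transformed
 * controller secret.
 */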
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}
	ret = crypto_shash_setkey(chap->shash_tfm,
			transformed_key->key, transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c2)
		kfree(challenge);
	nvme_auth_free_key(transformed_key);
	return ret;
}
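
/*
 * Run the DH exchange: generate (or reuse) the host key pair and derive
 * the shared session secret from the controller public key.
 */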
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}
static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	if (chap->shash_tfm)
		crypto_free_shash(chap->shash_tfm);
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}
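
/*
 * Per-queue authentication work item: drive the DH-HMAC-CHAP state machine
 * (negotiate, challenge, reply, success1 and, if bi-directional, success2),
 * and send a failure2 message if any step fails.
 */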
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough to ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}
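
/*
 * Kick off authentication for a single queue; the result is collected
 * by nvme_auth_wait().
 */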
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;

	if (!ctrl->host_key) {
		dev_warn(ctrl->device, "qid %d: no key\n", qid);
		return -ENOKEY;
	}

	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
		return -ENOKEY;
	}

	chap = &ctrl->dhchap_ctxs[qid];
	cancel_work_sync(&chap->auth_work);
	queue_work(nvme_auth_wq, &chap->auth_work);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;
	int ret;

	chap = &ctrl->dhchap_ctxs[qid];
	flush_work(&chap->auth_work);
	ret = chap->error;
	/* clear sensitive info */
	nvme_auth_reset_dhchap(chap);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);
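
/*
 * Controller-wide (re-)authentication: authenticate the admin queue first,
 * then all I/O queues.
 */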
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}

	for (q = 1; q < ctrl->queue_count; q++) {
		ret = nvme_auth_negotiate(ctrl, q);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: error %d setting up authentication\n",
				 q, ret);
			break;
		}
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		ret = nvme_auth_wait(ctrl, q);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
			&ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
				sizeof(*chap), GFP_KERNEL);
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);
void nvme_auth_free(struct nvme_ctrl *ctrl)
{
	int i;

	if (ctrl->dhchap_ctxs) {
		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
		/* allocated with kvcalloc(), so release with kvfree() */
		kvfree(ctrl->dhchap_ctxs);
	}
	if (ctrl->host_key) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	if (ctrl->ctrl_key) {
		nvme_auth_free_key(ctrl->ctrl_key);
		ctrl->ctrl_key = NULL;
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);
int __init nvme_init_auth(void)
{
	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_auth_wq)
		return -ENOMEM;

	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		goto err_destroy_workqueue;

	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (!nvme_chap_buf_pool)
		goto err_destroy_chap_buf_cache;

	return 0;
err_destroy_chap_buf_cache:
	kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(nvme_auth_wq);
	return -ENOMEM;
}
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}