// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>
#include <trace/events/sock.h>
struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
/*
 * Use an unbound workqueue for nvme_tcp_wq so that the cpu affinity of the
 * I/O context can still be controlled.
 */
static bool wq_unbound;
module_param(wq_unbound, bool, 0644);
MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
/*
 * TLS handshake timeout
 */
static int tls_handshake_timeout = 10;
#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
		"nvme TLS handshake timeout in seconds (default 10)");
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					      &nvme_tcp_slock_key[0],
					      "sk_lock-AF_INET-NVME",
					      &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					      &nvme_tcp_slock_key[1],
					      "sk_lock-AF_INET6-NVME",
					      &nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif
enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	struct nvme_tcp_queue	*queue;

	struct list_head	entry;
	struct llist_node	lentry;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	enum nvme_tcp_send_state state;
};
enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU	= 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};
struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct work_struct	io_work;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;

	size_t			data_remaining;
	size_t			ddgst_remaining;

	struct nvme_tcp_request	*request;

	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;

	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;

	struct completion	tls_complete;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};
struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage	addr;
	struct sockaddr_storage	src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request	async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}
/*
 * Check if the queue is TLS encrypted
 */
static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
		return false;

	return queue->tls_enabled;
}

/*
 * Check if TLS is configured for the controller.
 */
static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
		return false;

	return ctrl->opts->tls;
}
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
{
	return req->pdu;
}

static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
{
	/* use the pdu space in the back for the data pdu */
	return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
		sizeof(struct nvme_tcp_data_pdu);
}
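
/*
 * In-capsule (inline) data budget: fabrics commands are limited to the fixed
 * admin capsule size, while I/O commands may use whatever is left of the
 * command capsule (ioccsz * 16, see nvme_tcp_alloc_queue) after the SQE
 * itself.
 */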
static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
	if (nvme_is_fabrics(req->req.cmd))
		return NVME_TCP_ADMIN_CCSZ;
	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}
static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}
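
/*
 * Account for sent bytes and advance the data iterator; when the current bio
 * is fully consumed but the request still has data left, move on to the next
 * chained bio and rebuild the iterator.
 */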
static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, ITER_SOURCE);
	}
}
static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !nvme_tcp_queue_tls(queue) &&
		nvme_tcp_queue_has_pending(queue);
}
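
/*
 * Request submission is lockless: producers push onto the req_list llist.
 * If the submitter runs on the queue's io_cpu, nothing else is pending and
 * the send_mutex is uncontended, the request is sent inline; otherwise
 * io_work is scheduled to pick it up.
 */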
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list and we can try to send
	 * directly, do so; otherwise queue io_work. Also, only do that if we
	 * are on the same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_send_all(queue);
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_has_pending(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
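
/*
 * The send path drains req_list into send_list: llist_del_all() hands the
 * nodes back newest-first and list_add() prepends each one, so send_list
 * ends up in submission order with the oldest request at the head.
 */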
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}
static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}
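
/*
 * Digest helpers: both header and data digests are crc32c (see
 * nvme_tcp_alloc_crypto). nvme_tcp_hdgst() places the header digest directly
 * after the PDU header, while the data digest is accumulated incrementally
 * with nvme_tcp_ddgst_update() and read out with nvme_tcp_ddgst_final().
 */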
444 static inline void nvme_tcp_ddgst_final(struct ahash_request
*hash
,
447 ahash_request_set_crypt(hash
, NULL
, (u8
*)dgst
, 0);
448 crypto_ahash_final(hash
);
451 static inline void nvme_tcp_ddgst_update(struct ahash_request
*hash
,
452 struct page
*page
, off_t off
, size_t len
)
454 struct scatterlist sg
;
456 sg_init_table(&sg
, 1);
457 sg_set_page(&sg
, page
, len
, off
);
458 ahash_request_set_crypt(hash
, &sg
, NULL
, len
);
459 crypto_ahash_update(hash
);
462 static inline void nvme_tcp_hdgst(struct ahash_request
*hash
,
463 void *pdu
, size_t len
)
465 struct scatterlist sg
;
467 sg_init_one(&sg
, pdu
, len
);
468 ahash_request_set_crypt(hash
, &sg
, pdu
+ len
, len
);
469 crypto_ahash_digest(hash
);
472 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue
*queue
,
473 void *pdu
, size_t pdu_len
)
475 struct nvme_tcp_hdr
*hdr
= pdu
;
479 if (unlikely(!(hdr
->flags
& NVME_TCP_F_HDGST
))) {
480 dev_err(queue
->ctrl
->ctrl
.device
,
481 "queue %d: header digest flag is cleared\n",
482 nvme_tcp_queue_id(queue
));
486 recv_digest
= *(__le32
*)(pdu
+ hdr
->hlen
);
487 nvme_tcp_hdgst(queue
->rcv_hash
, pdu
, pdu_len
);
488 exp_digest
= *(__le32
*)(pdu
+ hdr
->hlen
);
489 if (recv_digest
!= exp_digest
) {
490 dev_err(queue
->ctrl
->ctrl
.device
,
491 "header digest error: recv %#x expected %#x\n",
492 le32_to_cpu(recv_digest
), le32_to_cpu(exp_digest
));
499 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue
*queue
, void *pdu
)
501 struct nvme_tcp_hdr
*hdr
= pdu
;
502 u8 digest_len
= nvme_tcp_hdgst_len(queue
);
505 len
= le32_to_cpu(hdr
->plen
) - hdr
->hlen
-
506 ((hdr
->flags
& NVME_TCP_F_HDGST
) ? digest_len
: 0);
508 if (unlikely(len
&& !(hdr
->flags
& NVME_TCP_F_DDGST
))) {
509 dev_err(queue
->ctrl
->ctrl
.device
,
510 "queue %d: data digest flag is cleared\n",
511 nvme_tcp_queue_id(queue
));
514 crypto_ahash_init(queue
->rcv_hash
);
519 static void nvme_tcp_exit_request(struct blk_mq_tag_set
*set
,
520 struct request
*rq
, unsigned int hctx_idx
)
522 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
524 page_frag_free(req
->pdu
);
527 static int nvme_tcp_init_request(struct blk_mq_tag_set
*set
,
528 struct request
*rq
, unsigned int hctx_idx
,
529 unsigned int numa_node
)
531 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(set
->driver_data
);
532 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
533 struct nvme_tcp_cmd_pdu
*pdu
;
534 int queue_idx
= (set
== &ctrl
->tag_set
) ? hctx_idx
+ 1 : 0;
535 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[queue_idx
];
536 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
538 req
->pdu
= page_frag_alloc(&queue
->pf_cache
,
539 sizeof(struct nvme_tcp_cmd_pdu
) + hdgst
,
540 GFP_KERNEL
| __GFP_ZERO
);
546 nvme_req(rq
)->ctrl
= &ctrl
->ctrl
;
547 nvme_req(rq
)->cmd
= &pdu
->cmd
;
552 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx
*hctx
, void *data
,
553 unsigned int hctx_idx
)
555 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(data
);
556 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[hctx_idx
+ 1];
558 hctx
->driver_data
= queue
;
562 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx
*hctx
, void *data
,
563 unsigned int hctx_idx
)
565 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(data
);
566 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[0];
568 hctx
->driver_data
= queue
;
572 static enum nvme_tcp_recv_state
573 nvme_tcp_recv_state(struct nvme_tcp_queue
*queue
)
575 return (queue
->pdu_remaining
) ? NVME_TCP_RECV_PDU
:
576 (queue
->ddgst_remaining
) ? NVME_TCP_RECV_DDGST
:
580 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue
*queue
)
582 queue
->pdu_remaining
= sizeof(struct nvme_tcp_rsp_pdu
) +
583 nvme_tcp_hdgst_len(queue
);
584 queue
->pdu_offset
= 0;
585 queue
->data_remaining
= -1;
586 queue
->ddgst_remaining
= 0;
589 static void nvme_tcp_error_recovery(struct nvme_ctrl
*ctrl
)
591 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_RESETTING
))
594 dev_warn(ctrl
->device
, "starting error recovery\n");
595 queue_work(nvme_reset_wq
, &to_tcp_ctrl(ctrl
)->err_work
);
598 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue
*queue
,
599 struct nvme_completion
*cqe
)
601 struct nvme_tcp_request
*req
;
604 rq
= nvme_find_rq(nvme_tcp_tagset(queue
), cqe
->command_id
);
606 dev_err(queue
->ctrl
->ctrl
.device
,
607 "got bad cqe.command_id %#x on queue %d\n",
608 cqe
->command_id
, nvme_tcp_queue_id(queue
));
609 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
613 req
= blk_mq_rq_to_pdu(rq
);
614 if (req
->status
== cpu_to_le16(NVME_SC_SUCCESS
))
615 req
->status
= cqe
->status
;
617 if (!nvme_try_complete_req(rq
, req
->status
, cqe
->result
))
618 nvme_complete_rq(rq
);
624 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue
*queue
,
625 struct nvme_tcp_data_pdu
*pdu
)
629 rq
= nvme_find_rq(nvme_tcp_tagset(queue
), pdu
->command_id
);
631 dev_err(queue
->ctrl
->ctrl
.device
,
632 "got bad c2hdata.command_id %#x on queue %d\n",
633 pdu
->command_id
, nvme_tcp_queue_id(queue
));
637 if (!blk_rq_payload_bytes(rq
)) {
638 dev_err(queue
->ctrl
->ctrl
.device
,
639 "queue %d tag %#x unexpected data\n",
640 nvme_tcp_queue_id(queue
), rq
->tag
);
644 queue
->data_remaining
= le32_to_cpu(pdu
->data_length
);
646 if (pdu
->hdr
.flags
& NVME_TCP_F_DATA_SUCCESS
&&
647 unlikely(!(pdu
->hdr
.flags
& NVME_TCP_F_DATA_LAST
))) {
648 dev_err(queue
->ctrl
->ctrl
.device
,
649 "queue %d tag %#x SUCCESS set but not last PDU\n",
650 nvme_tcp_queue_id(queue
), rq
->tag
);
651 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
658 static int nvme_tcp_handle_comp(struct nvme_tcp_queue
*queue
,
659 struct nvme_tcp_rsp_pdu
*pdu
)
661 struct nvme_completion
*cqe
= &pdu
->cqe
;
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
670 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue
),
672 nvme_complete_async_event(&queue
->ctrl
->ctrl
, cqe
->status
,
675 ret
= nvme_tcp_process_nvme_cqe(queue
, cqe
);
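
/*
 * Build the next H2CData PDU for a write. The transfer requested by the
 * controller is sent in chunks of at most queue->maxh2cdata (negotiated via
 * the ICResp maxdata field); NVME_TCP_F_DATA_LAST is only set on the final
 * chunk.
 */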
680 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request
*req
)
682 struct nvme_tcp_data_pdu
*data
= nvme_tcp_req_data_pdu(req
);
683 struct nvme_tcp_queue
*queue
= req
->queue
;
684 struct request
*rq
= blk_mq_rq_from_pdu(req
);
685 u32 h2cdata_sent
= req
->pdu_len
;
686 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
687 u8 ddgst
= nvme_tcp_ddgst_len(queue
);
689 req
->state
= NVME_TCP_SEND_H2C_PDU
;
691 req
->pdu_len
= min(req
->h2cdata_left
, queue
->maxh2cdata
);
693 req
->h2cdata_left
-= req
->pdu_len
;
694 req
->h2cdata_offset
+= h2cdata_sent
;
696 memset(data
, 0, sizeof(*data
));
697 data
->hdr
.type
= nvme_tcp_h2c_data
;
698 if (!req
->h2cdata_left
)
699 data
->hdr
.flags
= NVME_TCP_F_DATA_LAST
;
700 if (queue
->hdr_digest
)
701 data
->hdr
.flags
|= NVME_TCP_F_HDGST
;
702 if (queue
->data_digest
)
703 data
->hdr
.flags
|= NVME_TCP_F_DDGST
;
704 data
->hdr
.hlen
= sizeof(*data
);
705 data
->hdr
.pdo
= data
->hdr
.hlen
+ hdgst
;
707 cpu_to_le32(data
->hdr
.hlen
+ hdgst
+ req
->pdu_len
+ ddgst
);
708 data
->ttag
= req
->ttag
;
709 data
->command_id
= nvme_cid(rq
);
710 data
->data_offset
= cpu_to_le32(req
->h2cdata_offset
);
711 data
->data_length
= cpu_to_le32(req
->pdu_len
);
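
/*
 * R2T handling: validate that the controller-requested transfer stays within
 * the request (non-zero length, not past data_len, offset not before what was
 * already sent), then set up the first H2CData PDU and queue it for
 * transmission.
 */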
714 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue
*queue
,
715 struct nvme_tcp_r2t_pdu
*pdu
)
717 struct nvme_tcp_request
*req
;
719 u32 r2t_length
= le32_to_cpu(pdu
->r2t_length
);
720 u32 r2t_offset
= le32_to_cpu(pdu
->r2t_offset
);
722 rq
= nvme_find_rq(nvme_tcp_tagset(queue
), pdu
->command_id
);
724 dev_err(queue
->ctrl
->ctrl
.device
,
725 "got bad r2t.command_id %#x on queue %d\n",
726 pdu
->command_id
, nvme_tcp_queue_id(queue
));
729 req
= blk_mq_rq_to_pdu(rq
);
731 if (unlikely(!r2t_length
)) {
732 dev_err(queue
->ctrl
->ctrl
.device
,
733 "req %d r2t len is %u, probably a bug...\n",
734 rq
->tag
, r2t_length
);
738 if (unlikely(req
->data_sent
+ r2t_length
> req
->data_len
)) {
739 dev_err(queue
->ctrl
->ctrl
.device
,
740 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
741 rq
->tag
, r2t_length
, req
->data_len
, req
->data_sent
);
745 if (unlikely(r2t_offset
< req
->data_sent
)) {
746 dev_err(queue
->ctrl
->ctrl
.device
,
747 "req %d unexpected r2t offset %u (expected %zu)\n",
748 rq
->tag
, r2t_offset
, req
->data_sent
);
753 req
->h2cdata_left
= r2t_length
;
754 req
->h2cdata_offset
= r2t_offset
;
755 req
->ttag
= pdu
->ttag
;
757 nvme_tcp_setup_h2c_data_pdu(req
);
758 nvme_tcp_queue_request(req
, false, true);
763 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue
*queue
, struct sk_buff
*skb
,
764 unsigned int *offset
, size_t *len
)
766 struct nvme_tcp_hdr
*hdr
;
767 char *pdu
= queue
->pdu
;
768 size_t rcv_len
= min_t(size_t, *len
, queue
->pdu_remaining
);
771 ret
= skb_copy_bits(skb
, *offset
,
772 &pdu
[queue
->pdu_offset
], rcv_len
);
776 queue
->pdu_remaining
-= rcv_len
;
777 queue
->pdu_offset
+= rcv_len
;
780 if (queue
->pdu_remaining
)
784 if (queue
->hdr_digest
) {
785 ret
= nvme_tcp_verify_hdgst(queue
, queue
->pdu
, hdr
->hlen
);
791 if (queue
->data_digest
) {
792 ret
= nvme_tcp_check_ddgst(queue
, queue
->pdu
);
798 case nvme_tcp_c2h_data
:
799 return nvme_tcp_handle_c2h_data(queue
, (void *)queue
->pdu
);
801 nvme_tcp_init_recv_ctx(queue
);
802 return nvme_tcp_handle_comp(queue
, (void *)queue
->pdu
);
804 nvme_tcp_init_recv_ctx(queue
);
805 return nvme_tcp_handle_r2t(queue
, (void *)queue
->pdu
);
807 dev_err(queue
->ctrl
->ctrl
.device
,
808 "unsupported pdu type (%d)\n", hdr
->type
);
813 static inline void nvme_tcp_end_request(struct request
*rq
, u16 status
)
815 union nvme_result res
= {};
817 if (!nvme_try_complete_req(rq
, cpu_to_le16(status
<< 1), res
))
818 nvme_complete_rq(rq
);
821 static int nvme_tcp_recv_data(struct nvme_tcp_queue
*queue
, struct sk_buff
*skb
,
822 unsigned int *offset
, size_t *len
)
824 struct nvme_tcp_data_pdu
*pdu
= (void *)queue
->pdu
;
826 nvme_cid_to_rq(nvme_tcp_tagset(queue
), pdu
->command_id
);
827 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
832 recv_len
= min_t(size_t, *len
, queue
->data_remaining
);
836 if (!iov_iter_count(&req
->iter
)) {
837 req
->curr_bio
= req
->curr_bio
->bi_next
;
			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
843 if (!req
->curr_bio
) {
844 dev_err(queue
->ctrl
->ctrl
.device
,
845 "queue %d no space in request %#x",
846 nvme_tcp_queue_id(queue
), rq
->tag
);
847 nvme_tcp_init_recv_ctx(queue
);
850 nvme_tcp_init_iter(req
, ITER_DEST
);
853 /* we can read only from what is left in this bio */
854 recv_len
= min_t(size_t, recv_len
,
855 iov_iter_count(&req
->iter
));
857 if (queue
->data_digest
)
858 ret
= skb_copy_and_hash_datagram_iter(skb
, *offset
,
859 &req
->iter
, recv_len
, queue
->rcv_hash
);
861 ret
= skb_copy_datagram_iter(skb
, *offset
,
862 &req
->iter
, recv_len
);
864 dev_err(queue
->ctrl
->ctrl
.device
,
865 "queue %d failed to copy request %#x data",
866 nvme_tcp_queue_id(queue
), rq
->tag
);
872 queue
->data_remaining
-= recv_len
;
875 if (!queue
->data_remaining
) {
876 if (queue
->data_digest
) {
877 nvme_tcp_ddgst_final(queue
->rcv_hash
, &queue
->exp_ddgst
);
878 queue
->ddgst_remaining
= NVME_TCP_DIGEST_LENGTH
;
880 if (pdu
->hdr
.flags
& NVME_TCP_F_DATA_SUCCESS
) {
881 nvme_tcp_end_request(rq
,
882 le16_to_cpu(req
->status
));
885 nvme_tcp_init_recv_ctx(queue
);
892 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue
*queue
,
893 struct sk_buff
*skb
, unsigned int *offset
, size_t *len
)
895 struct nvme_tcp_data_pdu
*pdu
= (void *)queue
->pdu
;
896 char *ddgst
= (char *)&queue
->recv_ddgst
;
897 size_t recv_len
= min_t(size_t, *len
, queue
->ddgst_remaining
);
898 off_t off
= NVME_TCP_DIGEST_LENGTH
- queue
->ddgst_remaining
;
901 ret
= skb_copy_bits(skb
, *offset
, &ddgst
[off
], recv_len
);
905 queue
->ddgst_remaining
-= recv_len
;
908 if (queue
->ddgst_remaining
)
911 if (queue
->recv_ddgst
!= queue
->exp_ddgst
) {
912 struct request
*rq
= nvme_cid_to_rq(nvme_tcp_tagset(queue
),
914 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
916 req
->status
= cpu_to_le16(NVME_SC_DATA_XFER_ERROR
);
918 dev_err(queue
->ctrl
->ctrl
.device
,
919 "data digest error: recv %#x expected %#x\n",
920 le32_to_cpu(queue
->recv_ddgst
),
921 le32_to_cpu(queue
->exp_ddgst
));
924 if (pdu
->hdr
.flags
& NVME_TCP_F_DATA_SUCCESS
) {
925 struct request
*rq
= nvme_cid_to_rq(nvme_tcp_tagset(queue
),
927 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
929 nvme_tcp_end_request(rq
, le16_to_cpu(req
->status
));
933 nvme_tcp_init_recv_ctx(queue
);
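
/*
 * Receive state machine, driven from ->read_sock(): a PDU header is consumed
 * first, then any C2H data, then the trailing data digest, as tracked by
 * pdu_remaining, data_remaining and ddgst_remaining (see
 * nvme_tcp_recv_state()).
 */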
937 static int nvme_tcp_recv_skb(read_descriptor_t
*desc
, struct sk_buff
*skb
,
938 unsigned int offset
, size_t len
)
940 struct nvme_tcp_queue
*queue
= desc
->arg
.data
;
941 size_t consumed
= len
;
944 if (unlikely(!queue
->rd_enabled
))
948 switch (nvme_tcp_recv_state(queue
)) {
949 case NVME_TCP_RECV_PDU
:
950 result
= nvme_tcp_recv_pdu(queue
, skb
, &offset
, &len
);
952 case NVME_TCP_RECV_DATA
:
953 result
= nvme_tcp_recv_data(queue
, skb
, &offset
, &len
);
955 case NVME_TCP_RECV_DDGST
:
956 result
= nvme_tcp_recv_ddgst(queue
, skb
, &offset
, &len
);
962 dev_err(queue
->ctrl
->ctrl
.device
,
963 "receive failed: %d\n", result
);
964 queue
->rd_enabled
= false;
965 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
973 static void nvme_tcp_data_ready(struct sock
*sk
)
975 struct nvme_tcp_queue
*queue
;
977 trace_sk_data_ready(sk
);
979 read_lock_bh(&sk
->sk_callback_lock
);
980 queue
= sk
->sk_user_data
;
981 if (likely(queue
&& queue
->rd_enabled
) &&
982 !test_bit(NVME_TCP_Q_POLLING
, &queue
->flags
))
983 queue_work_on(queue
->io_cpu
, nvme_tcp_wq
, &queue
->io_work
);
984 read_unlock_bh(&sk
->sk_callback_lock
);
987 static void nvme_tcp_write_space(struct sock
*sk
)
989 struct nvme_tcp_queue
*queue
;
991 read_lock_bh(&sk
->sk_callback_lock
);
992 queue
= sk
->sk_user_data
;
993 if (likely(queue
&& sk_stream_is_writeable(sk
))) {
994 clear_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
995 queue_work_on(queue
->io_cpu
, nvme_tcp_wq
, &queue
->io_work
);
997 read_unlock_bh(&sk
->sk_callback_lock
);
1000 static void nvme_tcp_state_change(struct sock
*sk
)
1002 struct nvme_tcp_queue
*queue
;
1004 read_lock_bh(&sk
->sk_callback_lock
);
1005 queue
= sk
->sk_user_data
;
1009 switch (sk
->sk_state
) {
1011 case TCP_CLOSE_WAIT
:
1015 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
1018 dev_info(queue
->ctrl
->ctrl
.device
,
1019 "queue %d socket state %d\n",
1020 nvme_tcp_queue_id(queue
), sk
->sk_state
);
1023 queue
->state_change(sk
);
1025 read_unlock_bh(&sk
->sk_callback_lock
);
1028 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue
*queue
)
1030 queue
->request
= NULL
;
1033 static void nvme_tcp_fail_request(struct nvme_tcp_request
*req
)
1035 if (nvme_tcp_async_req(req
)) {
1036 union nvme_result res
= {};
1038 nvme_complete_async_event(&req
->queue
->ctrl
->ctrl
,
1039 cpu_to_le16(NVME_SC_HOST_PATH_ERROR
), &res
);
1041 nvme_tcp_end_request(blk_mq_rq_from_pdu(req
),
1042 NVME_SC_HOST_PATH_ERROR
);
1046 static int nvme_tcp_try_send_data(struct nvme_tcp_request
*req
)
1048 struct nvme_tcp_queue
*queue
= req
->queue
;
1049 int req_data_len
= req
->data_len
;
1050 u32 h2cdata_left
= req
->h2cdata_left
;
1053 struct bio_vec bvec
;
1054 struct msghdr msg
= {
1055 .msg_flags
= MSG_DONTWAIT
| MSG_SPLICE_PAGES
,
1057 struct page
*page
= nvme_tcp_req_cur_page(req
);
1058 size_t offset
= nvme_tcp_req_cur_offset(req
);
1059 size_t len
= nvme_tcp_req_cur_length(req
);
1060 bool last
= nvme_tcp_pdu_last_send(req
, len
);
1061 int req_data_sent
= req
->data_sent
;
1064 if (last
&& !queue
->data_digest
&& !nvme_tcp_queue_more(queue
))
1065 msg
.msg_flags
|= MSG_EOR
;
1067 msg
.msg_flags
|= MSG_MORE
;
1069 if (!sendpages_ok(page
, len
, offset
))
1070 msg
.msg_flags
&= ~MSG_SPLICE_PAGES
;
1072 bvec_set_page(&bvec
, page
, len
, offset
);
1073 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1, len
);
1074 ret
= sock_sendmsg(queue
->sock
, &msg
);
1078 if (queue
->data_digest
)
1079 nvme_tcp_ddgst_update(queue
->snd_hash
, page
,
	/*
	 * Update the request iterator except for the last payload send
	 * in the request where we don't want to modify it as we may
	 * compete with the RX path completing the request.
	 */
1087 if (req_data_sent
+ ret
< req_data_len
)
1088 nvme_tcp_advance_req(req
, ret
);
1090 /* fully successful last send in current PDU */
1091 if (last
&& ret
== len
) {
1092 if (queue
->data_digest
) {
1093 nvme_tcp_ddgst_final(queue
->snd_hash
,
1095 req
->state
= NVME_TCP_SEND_DDGST
;
1099 nvme_tcp_setup_h2c_data_pdu(req
);
1101 nvme_tcp_done_send_req(queue
);
1109 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request
*req
)
1111 struct nvme_tcp_queue
*queue
= req
->queue
;
1112 struct nvme_tcp_cmd_pdu
*pdu
= nvme_tcp_req_cmd_pdu(req
);
1113 struct bio_vec bvec
;
1114 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
| MSG_SPLICE_PAGES
, };
1115 bool inline_data
= nvme_tcp_has_inline_data(req
);
1116 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
1117 int len
= sizeof(*pdu
) + hdgst
- req
->offset
;
1120 if (inline_data
|| nvme_tcp_queue_more(queue
))
1121 msg
.msg_flags
|= MSG_MORE
;
1123 msg
.msg_flags
|= MSG_EOR
;
1125 if (queue
->hdr_digest
&& !req
->offset
)
1126 nvme_tcp_hdgst(queue
->snd_hash
, pdu
, sizeof(*pdu
));
1128 bvec_set_virt(&bvec
, (void *)pdu
+ req
->offset
, len
);
1129 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1, len
);
1130 ret
= sock_sendmsg(queue
->sock
, &msg
);
1131 if (unlikely(ret
<= 0))
1137 req
->state
= NVME_TCP_SEND_DATA
;
1138 if (queue
->data_digest
)
1139 crypto_ahash_init(queue
->snd_hash
);
1141 nvme_tcp_done_send_req(queue
);
1150 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request
*req
)
1152 struct nvme_tcp_queue
*queue
= req
->queue
;
1153 struct nvme_tcp_data_pdu
*pdu
= nvme_tcp_req_data_pdu(req
);
1154 struct bio_vec bvec
;
1155 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
| MSG_MORE
, };
1156 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
1157 int len
= sizeof(*pdu
) - req
->offset
+ hdgst
;
1160 if (queue
->hdr_digest
&& !req
->offset
)
1161 nvme_tcp_hdgst(queue
->snd_hash
, pdu
, sizeof(*pdu
));
1163 if (!req
->h2cdata_left
)
1164 msg
.msg_flags
|= MSG_SPLICE_PAGES
;
1166 bvec_set_virt(&bvec
, (void *)pdu
+ req
->offset
, len
);
1167 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1, len
);
1168 ret
= sock_sendmsg(queue
->sock
, &msg
);
1169 if (unlikely(ret
<= 0))
1174 req
->state
= NVME_TCP_SEND_DATA
;
1175 if (queue
->data_digest
)
1176 crypto_ahash_init(queue
->snd_hash
);
1184 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request
*req
)
1186 struct nvme_tcp_queue
*queue
= req
->queue
;
1187 size_t offset
= req
->offset
;
1188 u32 h2cdata_left
= req
->h2cdata_left
;
1190 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
};
1192 .iov_base
= (u8
*)&req
->ddgst
+ req
->offset
,
1193 .iov_len
= NVME_TCP_DIGEST_LENGTH
- req
->offset
1196 if (nvme_tcp_queue_more(queue
))
1197 msg
.msg_flags
|= MSG_MORE
;
1199 msg
.msg_flags
|= MSG_EOR
;
1201 ret
= kernel_sendmsg(queue
->sock
, &msg
, &iov
, 1, iov
.iov_len
);
1202 if (unlikely(ret
<= 0))
1205 if (offset
+ ret
== NVME_TCP_DIGEST_LENGTH
) {
1207 nvme_tcp_setup_h2c_data_pdu(req
);
1209 nvme_tcp_done_send_req(queue
);
1217 static int nvme_tcp_try_send(struct nvme_tcp_queue
*queue
)
1219 struct nvme_tcp_request
*req
;
1220 unsigned int noreclaim_flag
;
1223 if (!queue
->request
) {
1224 queue
->request
= nvme_tcp_fetch_request(queue
);
1225 if (!queue
->request
)
1228 req
= queue
->request
;
1230 noreclaim_flag
= memalloc_noreclaim_save();
1231 if (req
->state
== NVME_TCP_SEND_CMD_PDU
) {
1232 ret
= nvme_tcp_try_send_cmd_pdu(req
);
1235 if (!nvme_tcp_has_inline_data(req
))
1239 if (req
->state
== NVME_TCP_SEND_H2C_PDU
) {
1240 ret
= nvme_tcp_try_send_data_pdu(req
);
1245 if (req
->state
== NVME_TCP_SEND_DATA
) {
1246 ret
= nvme_tcp_try_send_data(req
);
1251 if (req
->state
== NVME_TCP_SEND_DDGST
)
1252 ret
= nvme_tcp_try_send_ddgst(req
);
1254 if (ret
== -EAGAIN
) {
1256 } else if (ret
< 0) {
1257 dev_err(queue
->ctrl
->ctrl
.device
,
1258 "failed to send request %d\n", ret
);
1259 nvme_tcp_fail_request(queue
->request
);
1260 nvme_tcp_done_send_req(queue
);
1263 memalloc_noreclaim_restore(noreclaim_flag
);
1267 static int nvme_tcp_try_recv(struct nvme_tcp_queue
*queue
)
1269 struct socket
*sock
= queue
->sock
;
1270 struct sock
*sk
= sock
->sk
;
1271 read_descriptor_t rd_desc
;
1274 rd_desc
.arg
.data
= queue
;
1278 consumed
= sock
->ops
->read_sock(sk
, &rd_desc
, nvme_tcp_recv_skb
);
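
/*
 * Per-queue I/O context: alternate between sending (under a send_mutex
 * trylock) and receiving for roughly one millisecond, then requeue the work
 * on queue->io_cpu if anything is still pending.
 */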
1283 static void nvme_tcp_io_work(struct work_struct
*w
)
1285 struct nvme_tcp_queue
*queue
=
1286 container_of(w
, struct nvme_tcp_queue
, io_work
);
1287 unsigned long deadline
= jiffies
+ msecs_to_jiffies(1);
1290 bool pending
= false;
1293 if (mutex_trylock(&queue
->send_mutex
)) {
1294 result
= nvme_tcp_try_send(queue
);
1295 mutex_unlock(&queue
->send_mutex
);
1298 else if (unlikely(result
< 0))
1302 result
= nvme_tcp_try_recv(queue
);
1305 else if (unlikely(result
< 0))
1308 if (!pending
|| !queue
->rd_enabled
)
1311 } while (!time_after(jiffies
, deadline
)); /* quota is exhausted */
1313 queue_work_on(queue
->io_cpu
, nvme_tcp_wq
, &queue
->io_work
);
1316 static void nvme_tcp_free_crypto(struct nvme_tcp_queue
*queue
)
1318 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(queue
->rcv_hash
);
1320 ahash_request_free(queue
->rcv_hash
);
1321 ahash_request_free(queue
->snd_hash
);
1322 crypto_free_ahash(tfm
);
1325 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue
*queue
)
1327 struct crypto_ahash
*tfm
;
1329 tfm
= crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC
);
1331 return PTR_ERR(tfm
);
1333 queue
->snd_hash
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1334 if (!queue
->snd_hash
)
1336 ahash_request_set_callback(queue
->snd_hash
, 0, NULL
, NULL
);
1338 queue
->rcv_hash
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1339 if (!queue
->rcv_hash
)
1341 ahash_request_set_callback(queue
->rcv_hash
, 0, NULL
, NULL
);
1345 ahash_request_free(queue
->snd_hash
);
1347 crypto_free_ahash(tfm
);
1351 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl
*ctrl
)
1353 struct nvme_tcp_request
*async
= &ctrl
->async_req
;
1355 page_frag_free(async
->pdu
);
1358 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl
*ctrl
)
1360 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[0];
1361 struct nvme_tcp_request
*async
= &ctrl
->async_req
;
1362 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
1364 async
->pdu
= page_frag_alloc(&queue
->pf_cache
,
1365 sizeof(struct nvme_tcp_cmd_pdu
) + hdgst
,
1366 GFP_KERNEL
| __GFP_ZERO
);
1370 async
->queue
= &ctrl
->queues
[0];
1374 static void nvme_tcp_free_queue(struct nvme_ctrl
*nctrl
, int qid
)
1376 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1377 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[qid
];
1378 unsigned int noreclaim_flag
;
1380 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
))
1383 if (queue
->hdr_digest
|| queue
->data_digest
)
1384 nvme_tcp_free_crypto(queue
);
1386 page_frag_cache_drain(&queue
->pf_cache
);
1388 noreclaim_flag
= memalloc_noreclaim_save();
1389 /* ->sock will be released by fput() */
1390 fput(queue
->sock
->file
);
1392 memalloc_noreclaim_restore(noreclaim_flag
);
1395 mutex_destroy(&queue
->send_mutex
);
1396 mutex_destroy(&queue
->queue_lock
);
1399 static int nvme_tcp_init_connection(struct nvme_tcp_queue
*queue
)
1401 struct nvme_tcp_icreq_pdu
*icreq
;
1402 struct nvme_tcp_icresp_pdu
*icresp
;
1403 char cbuf
[CMSG_LEN(sizeof(char))] = {};
1405 struct msghdr msg
= {};
1407 bool ctrl_hdgst
, ctrl_ddgst
;
1411 icreq
= kzalloc(sizeof(*icreq
), GFP_KERNEL
);
1415 icresp
= kzalloc(sizeof(*icresp
), GFP_KERNEL
);
1421 icreq
->hdr
.type
= nvme_tcp_icreq
;
1422 icreq
->hdr
.hlen
= sizeof(*icreq
);
1424 icreq
->hdr
.plen
= cpu_to_le32(icreq
->hdr
.hlen
);
1425 icreq
->pfv
= cpu_to_le16(NVME_TCP_PFV_1_0
);
1426 icreq
->maxr2t
= 0; /* single inflight r2t supported */
1427 icreq
->hpda
= 0; /* no alignment constraint */
1428 if (queue
->hdr_digest
)
1429 icreq
->digest
|= NVME_TCP_HDR_DIGEST_ENABLE
;
1430 if (queue
->data_digest
)
1431 icreq
->digest
|= NVME_TCP_DATA_DIGEST_ENABLE
;
1433 iov
.iov_base
= icreq
;
1434 iov
.iov_len
= sizeof(*icreq
);
1435 ret
= kernel_sendmsg(queue
->sock
, &msg
, &iov
, 1, iov
.iov_len
);
1437 pr_warn("queue %d: failed to send icreq, error %d\n",
1438 nvme_tcp_queue_id(queue
), ret
);
1442 memset(&msg
, 0, sizeof(msg
));
1443 iov
.iov_base
= icresp
;
1444 iov
.iov_len
= sizeof(*icresp
);
1445 if (nvme_tcp_queue_tls(queue
)) {
1446 msg
.msg_control
= cbuf
;
1447 msg
.msg_controllen
= sizeof(cbuf
);
1449 ret
= kernel_recvmsg(queue
->sock
, &msg
, &iov
, 1,
1450 iov
.iov_len
, msg
.msg_flags
);
1452 pr_warn("queue %d: failed to receive icresp, error %d\n",
1453 nvme_tcp_queue_id(queue
), ret
);
1457 if (nvme_tcp_queue_tls(queue
)) {
1458 ctype
= tls_get_record_type(queue
->sock
->sk
,
1459 (struct cmsghdr
*)cbuf
);
1460 if (ctype
!= TLS_RECORD_TYPE_DATA
) {
1461 pr_err("queue %d: unhandled TLS record %d\n",
1462 nvme_tcp_queue_id(queue
), ctype
);
1467 if (icresp
->hdr
.type
!= nvme_tcp_icresp
) {
1468 pr_err("queue %d: bad type returned %d\n",
1469 nvme_tcp_queue_id(queue
), icresp
->hdr
.type
);
1473 if (le32_to_cpu(icresp
->hdr
.plen
) != sizeof(*icresp
)) {
1474 pr_err("queue %d: bad pdu length returned %d\n",
1475 nvme_tcp_queue_id(queue
), icresp
->hdr
.plen
);
1479 if (icresp
->pfv
!= NVME_TCP_PFV_1_0
) {
1480 pr_err("queue %d: bad pfv returned %d\n",
1481 nvme_tcp_queue_id(queue
), icresp
->pfv
);
1485 ctrl_ddgst
= !!(icresp
->digest
& NVME_TCP_DATA_DIGEST_ENABLE
);
1486 if ((queue
->data_digest
&& !ctrl_ddgst
) ||
1487 (!queue
->data_digest
&& ctrl_ddgst
)) {
1488 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1489 nvme_tcp_queue_id(queue
),
1490 queue
->data_digest
? "enabled" : "disabled",
1491 ctrl_ddgst
? "enabled" : "disabled");
1495 ctrl_hdgst
= !!(icresp
->digest
& NVME_TCP_HDR_DIGEST_ENABLE
);
1496 if ((queue
->hdr_digest
&& !ctrl_hdgst
) ||
1497 (!queue
->hdr_digest
&& ctrl_hdgst
)) {
1498 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1499 nvme_tcp_queue_id(queue
),
1500 queue
->hdr_digest
? "enabled" : "disabled",
1501 ctrl_hdgst
? "enabled" : "disabled");
1505 if (icresp
->cpda
!= 0) {
1506 pr_err("queue %d: unsupported cpda returned %d\n",
1507 nvme_tcp_queue_id(queue
), icresp
->cpda
);
1511 maxh2cdata
= le32_to_cpu(icresp
->maxdata
);
1512 if ((maxh2cdata
% 4) || (maxh2cdata
< NVME_TCP_MIN_MAXH2CDATA
)) {
1513 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1514 nvme_tcp_queue_id(queue
), maxh2cdata
);
1517 queue
->maxh2cdata
= maxh2cdata
;
1527 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue
*queue
)
1529 return nvme_tcp_queue_id(queue
) == 0;
1532 static bool nvme_tcp_default_queue(struct nvme_tcp_queue
*queue
)
1534 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1535 int qid
= nvme_tcp_queue_id(queue
);
1537 return !nvme_tcp_admin_queue(queue
) &&
1538 qid
< 1 + ctrl
->io_queues
[HCTX_TYPE_DEFAULT
];
1541 static bool nvme_tcp_read_queue(struct nvme_tcp_queue
*queue
)
1543 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1544 int qid
= nvme_tcp_queue_id(queue
);
1546 return !nvme_tcp_admin_queue(queue
) &&
1547 !nvme_tcp_default_queue(queue
) &&
1548 qid
< 1 + ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] +
1549 ctrl
->io_queues
[HCTX_TYPE_READ
];
1552 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue
*queue
)
1554 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1555 int qid
= nvme_tcp_queue_id(queue
);
1557 return !nvme_tcp_admin_queue(queue
) &&
1558 !nvme_tcp_default_queue(queue
) &&
1559 !nvme_tcp_read_queue(queue
) &&
1560 qid
< 1 + ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] +
1561 ctrl
->io_queues
[HCTX_TYPE_READ
] +
1562 ctrl
->io_queues
[HCTX_TYPE_POLL
];
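
/*
 * Map a queue onto a CPU based on its index within its class (default, read
 * or poll) so that queues of different classes spread across the online CPUs;
 * WORK_CPU_UNBOUND leaves the placement to the workqueue instead.
 */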
1565 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue
*queue
)
1567 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1568 int qid
= nvme_tcp_queue_id(queue
);
1571 if (nvme_tcp_default_queue(queue
))
1573 else if (nvme_tcp_read_queue(queue
))
1574 n
= qid
- ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] - 1;
1575 else if (nvme_tcp_poll_queue(queue
))
1576 n
= qid
- ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] -
1577 ctrl
->io_queues
[HCTX_TYPE_READ
] - 1;
1579 queue
->io_cpu
= WORK_CPU_UNBOUND
;
1581 queue
->io_cpu
= cpumask_next_wrap(n
- 1, cpu_online_mask
, -1, false);
1584 static void nvme_tcp_tls_done(void *data
, int status
, key_serial_t pskid
)
1586 struct nvme_tcp_queue
*queue
= data
;
1587 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1588 int qid
= nvme_tcp_queue_id(queue
);
1589 struct key
*tls_key
;
1591 dev_dbg(ctrl
->ctrl
.device
, "queue %d: TLS handshake done, key %x, status %d\n",
1592 qid
, pskid
, status
);
1595 queue
->tls_err
= -status
;
1599 tls_key
= nvme_tls_key_lookup(pskid
);
1600 if (IS_ERR(tls_key
)) {
1601 dev_warn(ctrl
->ctrl
.device
, "queue %d: Invalid key %x\n",
1603 queue
->tls_err
= -ENOKEY
;
1605 queue
->tls_enabled
= true;
1607 ctrl
->ctrl
.tls_pskid
= key_serial(tls_key
);
1613 complete(&queue
->tls_complete
);
1616 static int nvme_tcp_start_tls(struct nvme_ctrl
*nctrl
,
1617 struct nvme_tcp_queue
*queue
,
1620 int qid
= nvme_tcp_queue_id(queue
);
1622 struct tls_handshake_args args
;
1623 unsigned long tmo
= tls_handshake_timeout
* HZ
;
1624 key_serial_t keyring
= nvme_keyring_id();
1626 dev_dbg(nctrl
->device
, "queue %d: start TLS with key %x\n",
1628 memset(&args
, 0, sizeof(args
));
1629 args
.ta_sock
= queue
->sock
;
1630 args
.ta_done
= nvme_tcp_tls_done
;
1631 args
.ta_data
= queue
;
1632 args
.ta_my_peerids
[0] = pskid
;
1633 args
.ta_num_peerids
= 1;
1634 if (nctrl
->opts
->keyring
)
1635 keyring
= key_serial(nctrl
->opts
->keyring
);
1636 args
.ta_keyring
= keyring
;
1637 args
.ta_timeout_ms
= tls_handshake_timeout
* 1000;
1638 queue
->tls_err
= -EOPNOTSUPP
;
1639 init_completion(&queue
->tls_complete
);
1640 ret
= tls_client_hello_psk(&args
, GFP_KERNEL
);
1642 dev_err(nctrl
->device
, "queue %d: failed to start TLS: %d\n",
1646 ret
= wait_for_completion_interruptible_timeout(&queue
->tls_complete
, tmo
);
1651 dev_err(nctrl
->device
,
1652 "queue %d: TLS handshake failed, error %d\n",
1654 tls_handshake_cancel(queue
->sock
->sk
);
1656 dev_dbg(nctrl
->device
,
1657 "queue %d: TLS handshake complete, error %d\n",
1658 qid
, queue
->tls_err
);
1659 ret
= queue
->tls_err
;
1664 static int nvme_tcp_alloc_queue(struct nvme_ctrl
*nctrl
, int qid
,
1667 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1668 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[qid
];
1669 int ret
, rcv_pdu_size
;
1670 struct file
*sock_file
;
1672 mutex_init(&queue
->queue_lock
);
1674 init_llist_head(&queue
->req_list
);
1675 INIT_LIST_HEAD(&queue
->send_list
);
1676 mutex_init(&queue
->send_mutex
);
1677 INIT_WORK(&queue
->io_work
, nvme_tcp_io_work
);
1680 queue
->cmnd_capsule_len
= nctrl
->ioccsz
* 16;
1682 queue
->cmnd_capsule_len
= sizeof(struct nvme_command
) +
1683 NVME_TCP_ADMIN_CCSZ
;
1685 ret
= sock_create(ctrl
->addr
.ss_family
, SOCK_STREAM
,
1686 IPPROTO_TCP
, &queue
->sock
);
1688 dev_err(nctrl
->device
,
1689 "failed to create socket: %d\n", ret
);
1690 goto err_destroy_mutex
;
1693 sock_file
= sock_alloc_file(queue
->sock
, O_CLOEXEC
, NULL
);
1694 if (IS_ERR(sock_file
)) {
1695 ret
= PTR_ERR(sock_file
);
1696 goto err_destroy_mutex
;
1698 nvme_tcp_reclassify_socket(queue
->sock
);
1700 /* Single syn retry */
1701 tcp_sock_set_syncnt(queue
->sock
->sk
, 1);
1703 /* Set TCP no delay */
1704 tcp_sock_set_nodelay(queue
->sock
->sk
);
	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
1711 sock_no_linger(queue
->sock
->sk
);
1713 if (so_priority
> 0)
1714 sock_set_priority(queue
->sock
->sk
, so_priority
);
1716 /* Set socket type of service */
1717 if (nctrl
->opts
->tos
>= 0)
1718 ip_sock_set_tos(queue
->sock
->sk
, nctrl
->opts
->tos
);
1720 /* Set 10 seconds timeout for icresp recvmsg */
1721 queue
->sock
->sk
->sk_rcvtimeo
= 10 * HZ
;
1723 queue
->sock
->sk
->sk_allocation
= GFP_ATOMIC
;
1724 queue
->sock
->sk
->sk_use_task_frag
= false;
1725 nvme_tcp_set_queue_io_cpu(queue
);
1726 queue
->request
= NULL
;
1727 queue
->data_remaining
= 0;
1728 queue
->ddgst_remaining
= 0;
1729 queue
->pdu_remaining
= 0;
1730 queue
->pdu_offset
= 0;
1731 sk_set_memalloc(queue
->sock
->sk
);
1733 if (nctrl
->opts
->mask
& NVMF_OPT_HOST_TRADDR
) {
1734 ret
= kernel_bind(queue
->sock
, (struct sockaddr
*)&ctrl
->src_addr
,
1735 sizeof(ctrl
->src_addr
));
1737 dev_err(nctrl
->device
,
1738 "failed to bind queue %d socket %d\n",
1744 if (nctrl
->opts
->mask
& NVMF_OPT_HOST_IFACE
) {
1745 char *iface
= nctrl
->opts
->host_iface
;
1746 sockptr_t optval
= KERNEL_SOCKPTR(iface
);
1748 ret
= sock_setsockopt(queue
->sock
, SOL_SOCKET
, SO_BINDTODEVICE
,
1749 optval
, strlen(iface
));
1751 dev_err(nctrl
->device
,
1752 "failed to bind to interface %s queue %d err %d\n",
1758 queue
->hdr_digest
= nctrl
->opts
->hdr_digest
;
1759 queue
->data_digest
= nctrl
->opts
->data_digest
;
1760 if (queue
->hdr_digest
|| queue
->data_digest
) {
1761 ret
= nvme_tcp_alloc_crypto(queue
);
1763 dev_err(nctrl
->device
,
1764 "failed to allocate queue %d crypto\n", qid
);
1769 rcv_pdu_size
= sizeof(struct nvme_tcp_rsp_pdu
) +
1770 nvme_tcp_hdgst_len(queue
);
1771 queue
->pdu
= kmalloc(rcv_pdu_size
, GFP_KERNEL
);
1777 dev_dbg(nctrl
->device
, "connecting queue %d\n",
1778 nvme_tcp_queue_id(queue
));
1780 ret
= kernel_connect(queue
->sock
, (struct sockaddr
*)&ctrl
->addr
,
1781 sizeof(ctrl
->addr
), 0);
1783 dev_err(nctrl
->device
,
1784 "failed to connect socket: %d\n", ret
);
1788 /* If PSKs are configured try to start TLS */
1789 if (nvme_tcp_tls_configured(nctrl
) && pskid
) {
1790 ret
= nvme_tcp_start_tls(nctrl
, queue
, pskid
);
1792 goto err_init_connect
;
1795 ret
= nvme_tcp_init_connection(queue
);
1797 goto err_init_connect
;
1799 set_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
);
1804 kernel_sock_shutdown(queue
->sock
, SHUT_RDWR
);
1808 if (queue
->hdr_digest
|| queue
->data_digest
)
1809 nvme_tcp_free_crypto(queue
);
1811 /* ->sock will be released by fput() */
1812 fput(queue
->sock
->file
);
1815 mutex_destroy(&queue
->send_mutex
);
1816 mutex_destroy(&queue
->queue_lock
);
1820 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue
*queue
)
1822 struct socket
*sock
= queue
->sock
;
1824 write_lock_bh(&sock
->sk
->sk_callback_lock
);
1825 sock
->sk
->sk_user_data
= NULL
;
1826 sock
->sk
->sk_data_ready
= queue
->data_ready
;
1827 sock
->sk
->sk_state_change
= queue
->state_change
;
1828 sock
->sk
->sk_write_space
= queue
->write_space
;
1829 write_unlock_bh(&sock
->sk
->sk_callback_lock
);
1832 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue
*queue
)
1834 kernel_sock_shutdown(queue
->sock
, SHUT_RDWR
);
1835 nvme_tcp_restore_sock_ops(queue
);
1836 cancel_work_sync(&queue
->io_work
);
1839 static void nvme_tcp_stop_queue(struct nvme_ctrl
*nctrl
, int qid
)
1841 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1842 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[qid
];
1844 if (!test_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
))
1847 mutex_lock(&queue
->queue_lock
);
1848 if (test_and_clear_bit(NVME_TCP_Q_LIVE
, &queue
->flags
))
1849 __nvme_tcp_stop_queue(queue
);
1850 /* Stopping the queue will disable TLS */
1851 queue
->tls_enabled
= false;
1852 mutex_unlock(&queue
->queue_lock
);
1855 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue
*queue
)
1857 write_lock_bh(&queue
->sock
->sk
->sk_callback_lock
);
1858 queue
->sock
->sk
->sk_user_data
= queue
;
1859 queue
->state_change
= queue
->sock
->sk
->sk_state_change
;
1860 queue
->data_ready
= queue
->sock
->sk
->sk_data_ready
;
1861 queue
->write_space
= queue
->sock
->sk
->sk_write_space
;
1862 queue
->sock
->sk
->sk_data_ready
= nvme_tcp_data_ready
;
1863 queue
->sock
->sk
->sk_state_change
= nvme_tcp_state_change
;
1864 queue
->sock
->sk
->sk_write_space
= nvme_tcp_write_space
;
1865 #ifdef CONFIG_NET_RX_BUSY_POLL
1866 queue
->sock
->sk
->sk_ll_usec
= 1;
1868 write_unlock_bh(&queue
->sock
->sk
->sk_callback_lock
);
1871 static int nvme_tcp_start_queue(struct nvme_ctrl
*nctrl
, int idx
)
1873 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1874 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[idx
];
1877 queue
->rd_enabled
= true;
1878 nvme_tcp_init_recv_ctx(queue
);
1879 nvme_tcp_setup_sock_ops(queue
);
1882 ret
= nvmf_connect_io_queue(nctrl
, idx
);
1884 ret
= nvmf_connect_admin_queue(nctrl
);
1887 set_bit(NVME_TCP_Q_LIVE
, &queue
->flags
);
1889 if (test_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
))
1890 __nvme_tcp_stop_queue(queue
);
1891 dev_err(nctrl
->device
,
1892 "failed to connect queue: %d ret=%d\n", idx
, ret
);
1897 static void nvme_tcp_free_admin_queue(struct nvme_ctrl
*ctrl
)
1899 if (to_tcp_ctrl(ctrl
)->async_req
.pdu
) {
1900 cancel_work_sync(&ctrl
->async_event_work
);
1901 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl
));
1902 to_tcp_ctrl(ctrl
)->async_req
.pdu
= NULL
;
1905 nvme_tcp_free_queue(ctrl
, 0);
1908 static void nvme_tcp_free_io_queues(struct nvme_ctrl
*ctrl
)
1912 for (i
= 1; i
< ctrl
->queue_count
; i
++)
1913 nvme_tcp_free_queue(ctrl
, i
);
1916 static void nvme_tcp_stop_io_queues(struct nvme_ctrl
*ctrl
)
1920 for (i
= 1; i
< ctrl
->queue_count
; i
++)
1921 nvme_tcp_stop_queue(ctrl
, i
);
1924 static int nvme_tcp_start_io_queues(struct nvme_ctrl
*ctrl
,
1925 int first
, int last
)
1929 for (i
= first
; i
< last
; i
++) {
1930 ret
= nvme_tcp_start_queue(ctrl
, i
);
1932 goto out_stop_queues
;
1938 for (i
--; i
>= first
; i
--)
1939 nvme_tcp_stop_queue(ctrl
, i
);
1943 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl
*ctrl
)
1946 key_serial_t pskid
= 0;
1948 if (nvme_tcp_tls_configured(ctrl
)) {
1949 if (ctrl
->opts
->tls_key
)
1950 pskid
= key_serial(ctrl
->opts
->tls_key
);
1952 pskid
= nvme_tls_psk_default(ctrl
->opts
->keyring
,
1953 ctrl
->opts
->host
->nqn
,
1954 ctrl
->opts
->subsysnqn
);
1956 dev_err(ctrl
->device
, "no valid PSK found\n");
1962 ret
= nvme_tcp_alloc_queue(ctrl
, 0, pskid
);
1966 ret
= nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl
));
1968 goto out_free_queue
;
1973 nvme_tcp_free_queue(ctrl
, 0);
1977 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl
*ctrl
)
1981 if (nvme_tcp_tls_configured(ctrl
) && !ctrl
->tls_pskid
) {
1982 dev_err(ctrl
->device
, "no PSK negotiated\n");
1986 for (i
= 1; i
< ctrl
->queue_count
; i
++) {
1987 ret
= nvme_tcp_alloc_queue(ctrl
, i
,
1990 goto out_free_queues
;
1996 for (i
--; i
>= 1; i
--)
1997 nvme_tcp_free_queue(ctrl
, i
);
2002 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl
*ctrl
)
2004 unsigned int nr_io_queues
;
2007 nr_io_queues
= nvmf_nr_io_queues(ctrl
->opts
);
2008 ret
= nvme_set_queue_count(ctrl
, &nr_io_queues
);
2012 if (nr_io_queues
== 0) {
2013 dev_err(ctrl
->device
,
2014 "unable to set any I/O queues\n");
2018 ctrl
->queue_count
= nr_io_queues
+ 1;
2019 dev_info(ctrl
->device
,
2020 "creating %d I/O queues.\n", nr_io_queues
);
2022 nvmf_set_io_queues(ctrl
->opts
, nr_io_queues
,
2023 to_tcp_ctrl(ctrl
)->io_queues
);
2024 return __nvme_tcp_alloc_io_queues(ctrl
);
2027 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl
*ctrl
, bool remove
)
2029 nvme_tcp_stop_io_queues(ctrl
);
2031 nvme_remove_io_tag_set(ctrl
);
2032 nvme_tcp_free_io_queues(ctrl
);
2035 static int nvme_tcp_configure_io_queues(struct nvme_ctrl
*ctrl
, bool new)
2039 ret
= nvme_tcp_alloc_io_queues(ctrl
);
2044 ret
= nvme_alloc_io_tag_set(ctrl
, &to_tcp_ctrl(ctrl
)->tag_set
,
2046 ctrl
->opts
->nr_poll_queues
? HCTX_MAX_TYPES
: 2,
2047 sizeof(struct nvme_tcp_request
));
2049 goto out_free_io_queues
;
	/*
	 * Only start IO queues for which we have allocated the tagset
	 * and limited it to the available queues. On reconnects, the
	 * queue number might have changed.
	 */
2057 nr_queues
= min(ctrl
->tagset
->nr_hw_queues
+ 1, ctrl
->queue_count
);
2058 ret
= nvme_tcp_start_io_queues(ctrl
, 1, nr_queues
);
2060 goto out_cleanup_connect_q
;
2063 nvme_start_freeze(ctrl
);
2064 nvme_unquiesce_io_queues(ctrl
);
2065 if (!nvme_wait_freeze_timeout(ctrl
, NVME_IO_TIMEOUT
)) {
2067 * If we timed out waiting for freeze we are likely to
2068 * be stuck. Fail the controller initialization just
2072 nvme_unfreeze(ctrl
);
2073 goto out_wait_freeze_timed_out
;
2075 blk_mq_update_nr_hw_queues(ctrl
->tagset
,
2076 ctrl
->queue_count
- 1);
2077 nvme_unfreeze(ctrl
);
	/*
	 * If the number of queues has increased (reconnect case),
	 * start all new queues now.
	 */
2084 ret
= nvme_tcp_start_io_queues(ctrl
, nr_queues
,
2085 ctrl
->tagset
->nr_hw_queues
+ 1);
2087 goto out_wait_freeze_timed_out
;
2091 out_wait_freeze_timed_out
:
2092 nvme_quiesce_io_queues(ctrl
);
2093 nvme_sync_io_queues(ctrl
);
2094 nvme_tcp_stop_io_queues(ctrl
);
2095 out_cleanup_connect_q
:
2096 nvme_cancel_tagset(ctrl
);
2098 nvme_remove_io_tag_set(ctrl
);
2100 nvme_tcp_free_io_queues(ctrl
);
2104 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl
*ctrl
, bool new)
2108 error
= nvme_tcp_alloc_admin_queue(ctrl
);
2113 error
= nvme_alloc_admin_tag_set(ctrl
,
2114 &to_tcp_ctrl(ctrl
)->admin_tag_set
,
2115 &nvme_tcp_admin_mq_ops
,
2116 sizeof(struct nvme_tcp_request
));
2118 goto out_free_queue
;
2121 error
= nvme_tcp_start_queue(ctrl
, 0);
2123 goto out_cleanup_tagset
;
2125 error
= nvme_enable_ctrl(ctrl
);
2127 goto out_stop_queue
;
2129 nvme_unquiesce_admin_queue(ctrl
);
2131 error
= nvme_init_ctrl_finish(ctrl
, false);
2133 goto out_quiesce_queue
;
2138 nvme_quiesce_admin_queue(ctrl
);
2139 blk_sync_queue(ctrl
->admin_q
);
2141 nvme_tcp_stop_queue(ctrl
, 0);
2142 nvme_cancel_admin_tagset(ctrl
);
2145 nvme_remove_admin_tag_set(ctrl
);
2147 nvme_tcp_free_admin_queue(ctrl
);
2151 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl
*ctrl
,
2154 nvme_quiesce_admin_queue(ctrl
);
2155 blk_sync_queue(ctrl
->admin_q
);
2156 nvme_tcp_stop_queue(ctrl
, 0);
2157 nvme_cancel_admin_tagset(ctrl
);
2159 nvme_unquiesce_admin_queue(ctrl
);
2160 nvme_remove_admin_tag_set(ctrl
);
2162 nvme_tcp_free_admin_queue(ctrl
);
2163 if (ctrl
->tls_pskid
) {
2164 dev_dbg(ctrl
->device
, "Wipe negotiated TLS_PSK %08x\n",
2166 ctrl
->tls_pskid
= 0;
2170 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl
*ctrl
,
2173 if (ctrl
->queue_count
<= 1)
2175 nvme_quiesce_io_queues(ctrl
);
2176 nvme_sync_io_queues(ctrl
);
2177 nvme_tcp_stop_io_queues(ctrl
);
2178 nvme_cancel_tagset(ctrl
);
2180 nvme_unquiesce_io_queues(ctrl
);
2181 nvme_tcp_destroy_io_queues(ctrl
, remove
);
2184 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl
*ctrl
,
2187 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2189 /* If we are resetting/deleting then do nothing */
2190 if (state
!= NVME_CTRL_CONNECTING
) {
2191 WARN_ON_ONCE(state
== NVME_CTRL_NEW
|| state
== NVME_CTRL_LIVE
);
2195 if (nvmf_should_reconnect(ctrl
, status
)) {
2196 dev_info(ctrl
->device
, "Reconnecting in %d seconds...\n",
2197 ctrl
->opts
->reconnect_delay
);
2198 queue_delayed_work(nvme_wq
, &to_tcp_ctrl(ctrl
)->connect_work
,
2199 ctrl
->opts
->reconnect_delay
* HZ
);
2201 dev_info(ctrl
->device
, "Removing controller (%d)...\n",
2203 nvme_delete_ctrl(ctrl
);
2207 static int nvme_tcp_setup_ctrl(struct nvme_ctrl
*ctrl
, bool new)
2209 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
2212 ret
= nvme_tcp_configure_admin_queue(ctrl
, new);
2218 dev_err(ctrl
->device
, "icdoff is not supported!\n");
2222 if (!nvme_ctrl_sgl_supported(ctrl
)) {
2224 dev_err(ctrl
->device
, "Mandatory sgls are not supported!\n");
2228 if (opts
->queue_size
> ctrl
->sqsize
+ 1)
2229 dev_warn(ctrl
->device
,
2230 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2231 opts
->queue_size
, ctrl
->sqsize
+ 1);
2233 if (ctrl
->sqsize
+ 1 > ctrl
->maxcmd
) {
2234 dev_warn(ctrl
->device
,
2235 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2236 ctrl
->sqsize
+ 1, ctrl
->maxcmd
);
2237 ctrl
->sqsize
= ctrl
->maxcmd
- 1;
2240 if (ctrl
->queue_count
> 1) {
2241 ret
= nvme_tcp_configure_io_queues(ctrl
, new);
2246 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_LIVE
)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we're in the middle of creating a new controller,
		 * where we must avoid races with the teardown flow.
		 */
2252 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2254 WARN_ON_ONCE(state
!= NVME_CTRL_DELETING
&&
2255 state
!= NVME_CTRL_DELETING_NOIO
);
2261 nvme_start_ctrl(ctrl
);
2265 if (ctrl
->queue_count
> 1) {
2266 nvme_quiesce_io_queues(ctrl
);
2267 nvme_sync_io_queues(ctrl
);
2268 nvme_tcp_stop_io_queues(ctrl
);
2269 nvme_cancel_tagset(ctrl
);
2270 nvme_tcp_destroy_io_queues(ctrl
, new);
2273 nvme_stop_keep_alive(ctrl
);
2274 nvme_tcp_teardown_admin_queue(ctrl
, new);
2278 static void nvme_tcp_reconnect_ctrl_work(struct work_struct
*work
)
2280 struct nvme_tcp_ctrl
*tcp_ctrl
= container_of(to_delayed_work(work
),
2281 struct nvme_tcp_ctrl
, connect_work
);
2282 struct nvme_ctrl
*ctrl
= &tcp_ctrl
->ctrl
;
2285 ++ctrl
->nr_reconnects
;
2287 ret
= nvme_tcp_setup_ctrl(ctrl
, false);
2291 dev_info(ctrl
->device
, "Successfully reconnected (attempt %d/%d)\n",
2292 ctrl
->nr_reconnects
, ctrl
->opts
->max_reconnects
);
2294 ctrl
->nr_reconnects
= 0;
2299 dev_info(ctrl
->device
, "Failed reconnect attempt %d/%d\n",
2300 ctrl
->nr_reconnects
, ctrl
->opts
->max_reconnects
);
2301 nvme_tcp_reconnect_or_remove(ctrl
, ret
);
2304 static void nvme_tcp_error_recovery_work(struct work_struct
*work
)
2306 struct nvme_tcp_ctrl
*tcp_ctrl
= container_of(work
,
2307 struct nvme_tcp_ctrl
, err_work
);
2308 struct nvme_ctrl
*ctrl
= &tcp_ctrl
->ctrl
;
2310 nvme_stop_keep_alive(ctrl
);
2311 flush_work(&ctrl
->async_event_work
);
2312 nvme_tcp_teardown_io_queues(ctrl
, false);
2313 /* unquiesce to fail fast pending requests */
2314 nvme_unquiesce_io_queues(ctrl
);
2315 nvme_tcp_teardown_admin_queue(ctrl
, false);
2316 nvme_unquiesce_admin_queue(ctrl
);
2317 nvme_auth_stop(ctrl
);
2319 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_CONNECTING
)) {
2320 /* state change failure is ok if we started ctrl delete */
2321 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2323 WARN_ON_ONCE(state
!= NVME_CTRL_DELETING
&&
2324 state
!= NVME_CTRL_DELETING_NOIO
);
2328 nvme_tcp_reconnect_or_remove(ctrl
, 0);
2331 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl
*ctrl
, bool shutdown
)
2333 nvme_tcp_teardown_io_queues(ctrl
, shutdown
);
2334 nvme_quiesce_admin_queue(ctrl
);
2335 nvme_disable_ctrl(ctrl
, shutdown
);
2336 nvme_tcp_teardown_admin_queue(ctrl
, shutdown
);
static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);
	int ret;

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

		WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
			     state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	ret = nvme_tcp_setup_ctrl(ctrl, false);
	if (ret)
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl, ret);
}

static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

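/*
 * SGL helpers: NVMe/TCP uses a single transport SGL descriptor per command.
 * The "null" form carries no data, the "inline" form places the data in the
 * command capsule at the controller's ICDOFF, and the "host data" form tells
 * the controller that the data is transferred via separate data PDUs.
 */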
static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

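/*
 * Submit the asynchronous event request on the admin queue. It uses the
 * preallocated async_req slot and the fixed command id NVME_AQ_BLK_MQ_DEPTH,
 * which is reserved outside the blk-mq tag space.
 */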
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	nvmf_complete_timed_out_request(rq);
}

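/*
 * blk-mq timeout handler: if the controller is not LIVE the request is
 * completed immediately so it cannot block the teardown/setup sequence;
 * otherwise error recovery is kicked and the timer is reset.
 */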
static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_command *cmd = &pdu->cmd;
	int qid = nvme_tcp_queue_id(req->queue);

	dev_warn(ctrl->device,
		 "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
		 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
		 nvme_fabrics_opcode_str(qid, cmd), qid);

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

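/*
 * Build the command capsule PDU for a request: initialize the per-request
 * send state, decide whether the write payload is sent inline, and fill in
 * the header/data digest flags and the PDU length fields.
 */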
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->h2cdata_left = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

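/*
 * blk-mq ->queue_rq: fail fast if the queue is not live, otherwise set up
 * the command PDU and hand the request to the queue's io_work context.
 */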
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	nvme_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);

	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
}

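/*
 * blk-mq ->poll for polled queues: busy-poll the socket when possible and
 * reap completions directly from the receive path.
 */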
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
	struct sockaddr_storage src_addr;
	int ret, len;

	len = nvmf_get_address(ctrl, buf, size);

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return len;

	mutex_lock(&queue->queue_lock);

	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
	if (ret > 0) {
		if (len > 0)
			len--; /* strip trailing newline */
		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
				(len) ? "," : "", &src_addr);
	}

	mutex_unlock(&queue->queue_lock);

	return len;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.subsystem_reset	= nvmf_subsystem_reset,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvme_tcp_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

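/*
 * Allocate and initialize a controller from the connect options: work items,
 * target/source address parsing, optional interface validation, the
 * duplicate-connection check and the per-queue array.
 */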
static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	return ctrl;
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = nvme_tcp_alloc_ctrl(dev, opts);
	if (IS_ERR(ctrl))
		return ERR_CAST(ctrl);

	ret = nvme_add_ctrl(&ctrl->ctrl);
	if (ret)
		goto out_put_ctrl;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
			  NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

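/*
 * Module init: sanity-check the on-the-wire PDU sizes at build time,
 * allocate the nvme_tcp workqueue (optionally unbound, see the wq_unbound
 * module parameter) and register the "tcp" fabrics transport.
 */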
static int __init nvme_tcp_init_module(void)
{
	unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;

	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);

	if (wq_unbound)
		wq_flags |= WQ_UNBOUND;

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");