/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
/* *************************** Data Structures/Defines ****************** */

#define NVMET_LS_CTX_COUNT		256

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048
struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};
struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};
struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};
struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};
struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};
struct nvmet_fc_tgt_queue {
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	u16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};
static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}
/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 *   in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
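
/*
 * Worked example (illustrative values): with BYTES_FOR_QID == 2 the mask
 * covers the low 16 bits, so an Association ID of 0x1122334455660000 and
 * qid 3 combine (in nvmet_fc_makeconnid() below) into the Connection ID
 * 0x1122334455660003; nvmet_fc_getassociationid() and nvmet_fc_getqueueid()
 * split that value back into 0x1122334455660000 and 3.
 */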
static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}
/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), no-op them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}
/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}
static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
/* *********************** FC-NVME Port Management ************************ */
337 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport
*tgtport
)
339 struct nvmet_fc_ls_iod
*iod
;
342 iod
= kcalloc(NVMET_LS_CTX_COUNT
, sizeof(struct nvmet_fc_ls_iod
),
349 for (i
= 0; i
< NVMET_LS_CTX_COUNT
; iod
++, i
++) {
350 INIT_WORK(&iod
->work
, nvmet_fc_handle_ls_rqst_work
);
351 iod
->tgtport
= tgtport
;
352 list_add_tail(&iod
->ls_list
, &tgtport
->ls_list
);
354 iod
->rqstbuf
= kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE
,
359 iod
->rspbuf
= iod
->rqstbuf
+ NVME_FC_MAX_LS_BUFFER_SIZE
;
361 iod
->rspdma
= fc_dma_map_single(tgtport
->dev
, iod
->rspbuf
,
362 NVME_FC_MAX_LS_BUFFER_SIZE
,
364 if (fc_dma_mapping_error(tgtport
->dev
, iod
->rspdma
))
372 list_del(&iod
->ls_list
);
373 for (iod
--, i
--; i
>= 0; iod
--, i
--) {
374 fc_dma_unmap_single(tgtport
->dev
, iod
->rspdma
,
375 NVME_FC_MAX_LS_BUFFER_SIZE
, DMA_TO_DEVICE
);
377 list_del(&iod
->ls_list
);
386 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport
*tgtport
)
388 struct nvmet_fc_ls_iod
*iod
= tgtport
->iod
;
391 for (i
= 0; i
< NVMET_LS_CTX_COUNT
; iod
++, i
++) {
392 fc_dma_unmap_single(tgtport
->dev
,
393 iod
->rspdma
, NVME_FC_MAX_LS_BUFFER_SIZE
,
396 list_del(&iod
->ls_list
);
401 static struct nvmet_fc_ls_iod
*
402 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport
*tgtport
)
404 struct nvmet_fc_ls_iod
*iod
;
407 spin_lock_irqsave(&tgtport
->lock
, flags
);
408 iod
= list_first_entry_or_null(&tgtport
->ls_list
,
409 struct nvmet_fc_ls_iod
, ls_list
);
411 list_move_tail(&iod
->ls_list
, &tgtport
->ls_busylist
);
412 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
418 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport
*tgtport
,
419 struct nvmet_fc_ls_iod
*iod
)
423 spin_lock_irqsave(&tgtport
->lock
, flags
);
424 list_move(&iod
->ls_list
, &tgtport
->ls_list
);
425 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
429 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport
*tgtport
,
430 struct nvmet_fc_tgt_queue
*queue
)
432 struct nvmet_fc_fcp_iod
*fod
= queue
->fod
;
435 for (i
= 0; i
< queue
->sqsize
; fod
++, i
++) {
436 INIT_WORK(&fod
->defer_work
, nvmet_fc_fcp_rqst_op_defer_work
);
437 fod
->tgtport
= tgtport
;
441 fod
->aborted
= false;
443 list_add_tail(&fod
->fcp_list
, &queue
->fod_list
);
444 spin_lock_init(&fod
->flock
);
446 fod
->rspdma
= fc_dma_map_single(tgtport
->dev
, &fod
->rspiubuf
,
447 sizeof(fod
->rspiubuf
), DMA_TO_DEVICE
);
448 if (fc_dma_mapping_error(tgtport
->dev
, fod
->rspdma
)) {
449 list_del(&fod
->fcp_list
);
450 for (fod
--, i
--; i
>= 0; fod
--, i
--) {
451 fc_dma_unmap_single(tgtport
->dev
, fod
->rspdma
,
452 sizeof(fod
->rspiubuf
),
455 list_del(&fod
->fcp_list
);
464 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport
*tgtport
,
465 struct nvmet_fc_tgt_queue
*queue
)
467 struct nvmet_fc_fcp_iod
*fod
= queue
->fod
;
470 for (i
= 0; i
< queue
->sqsize
; fod
++, i
++) {
472 fc_dma_unmap_single(tgtport
->dev
, fod
->rspdma
,
473 sizeof(fod
->rspiubuf
), DMA_TO_DEVICE
);
477 static struct nvmet_fc_fcp_iod
*
478 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue
*queue
)
480 struct nvmet_fc_fcp_iod
*fod
;
482 lockdep_assert_held(&queue
->qlock
);
484 fod
= list_first_entry_or_null(&queue
->fod_list
,
485 struct nvmet_fc_fcp_iod
, fcp_list
);
487 list_del(&fod
->fcp_list
);
490 * no queue reference is taken, as it was taken by the
491 * queue lookup just prior to the allocation. The iod
492 * will "inherit" that reference.
500 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport
*tgtport
,
501 struct nvmet_fc_tgt_queue
*queue
,
502 struct nvmefc_tgt_fcp_req
*fcpreq
)
504 struct nvmet_fc_fcp_iod
*fod
= fcpreq
->nvmet_fc_private
;
507 * put all admin cmds on hw queue id 0. All io commands go to
508 * the respective hw queue based on a modulo basis
510 fcpreq
->hwqid
= queue
->qid
?
511 ((queue
->qid
- 1) % tgtport
->ops
->max_hw_queues
) : 0;
513 nvmet_fc_handle_fcp_rqst(tgtport
, fod
);
517 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct
*work
)
519 struct nvmet_fc_fcp_iod
*fod
=
520 container_of(work
, struct nvmet_fc_fcp_iod
, defer_work
);
522 /* Submit deferred IO for processing */
523 nvmet_fc_queue_fcp_req(fod
->tgtport
, fod
->queue
, fod
->fcpreq
);
528 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue
*queue
,
529 struct nvmet_fc_fcp_iod
*fod
)
531 struct nvmefc_tgt_fcp_req
*fcpreq
= fod
->fcpreq
;
532 struct nvmet_fc_tgtport
*tgtport
= fod
->tgtport
;
533 struct nvmet_fc_defer_fcp_req
*deferfcp
;
536 fc_dma_sync_single_for_cpu(tgtport
->dev
, fod
->rspdma
,
537 sizeof(fod
->rspiubuf
), DMA_TO_DEVICE
);
539 fcpreq
->nvmet_fc_private
= NULL
;
543 fod
->aborted
= false;
544 fod
->writedataactive
= false;
547 tgtport
->ops
->fcp_req_release(&tgtport
->fc_target_port
, fcpreq
);
549 /* release the queue lookup reference on the completed IO */
550 nvmet_fc_tgt_q_put(queue
);
552 spin_lock_irqsave(&queue
->qlock
, flags
);
553 deferfcp
= list_first_entry_or_null(&queue
->pending_cmd_list
,
554 struct nvmet_fc_defer_fcp_req
, req_list
);
556 list_add_tail(&fod
->fcp_list
, &fod
->queue
->fod_list
);
557 spin_unlock_irqrestore(&queue
->qlock
, flags
);
561 /* Re-use the fod for the next pending cmd that was deferred */
562 list_del(&deferfcp
->req_list
);
564 fcpreq
= deferfcp
->fcp_req
;
566 /* deferfcp can be reused for another IO at a later date */
567 list_add_tail(&deferfcp
->req_list
, &queue
->avail_defer_list
);
569 spin_unlock_irqrestore(&queue
->qlock
, flags
);
571 /* Save NVME CMD IO in fod */
572 memcpy(&fod
->cmdiubuf
, fcpreq
->rspaddr
, fcpreq
->rsplen
);
574 /* Setup new fcpreq to be processed */
575 fcpreq
->rspaddr
= NULL
;
577 fcpreq
->nvmet_fc_private
= fod
;
578 fod
->fcpreq
= fcpreq
;
581 /* inform LLDD IO is now being processed */
582 tgtport
->ops
->defer_rcv(&tgtport
->fc_target_port
, fcpreq
);
585 * Leave the queue lookup get reference taken when
586 * fod was originally allocated.
589 queue_work(queue
->work_q
, &fod
->defer_work
);
592 static struct nvmet_fc_tgt_queue
*
593 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc
*assoc
,
596 struct nvmet_fc_tgt_queue
*queue
;
600 if (qid
> NVMET_NR_QUEUES
)
603 queue
= kzalloc((sizeof(*queue
) +
604 (sizeof(struct nvmet_fc_fcp_iod
) * sqsize
)),
609 if (!nvmet_fc_tgt_a_get(assoc
))
612 queue
->work_q
= alloc_workqueue("ntfc%d.%d.%d", 0, 0,
613 assoc
->tgtport
->fc_target_port
.port_num
,
618 queue
->fod
= (struct nvmet_fc_fcp_iod
*)&queue
[1];
620 queue
->sqsize
= sqsize
;
621 queue
->assoc
= assoc
;
622 INIT_LIST_HEAD(&queue
->fod_list
);
623 INIT_LIST_HEAD(&queue
->avail_defer_list
);
624 INIT_LIST_HEAD(&queue
->pending_cmd_list
);
625 atomic_set(&queue
->connected
, 0);
626 atomic_set(&queue
->sqtail
, 0);
627 atomic_set(&queue
->rsn
, 1);
628 atomic_set(&queue
->zrspcnt
, 0);
629 spin_lock_init(&queue
->qlock
);
630 kref_init(&queue
->ref
);
632 nvmet_fc_prep_fcp_iodlist(assoc
->tgtport
, queue
);
634 ret
= nvmet_sq_init(&queue
->nvme_sq
);
636 goto out_fail_iodlist
;
638 WARN_ON(assoc
->queues
[qid
]);
639 spin_lock_irqsave(&assoc
->tgtport
->lock
, flags
);
640 assoc
->queues
[qid
] = queue
;
641 spin_unlock_irqrestore(&assoc
->tgtport
->lock
, flags
);
646 nvmet_fc_destroy_fcp_iodlist(assoc
->tgtport
, queue
);
647 destroy_workqueue(queue
->work_q
);
649 nvmet_fc_tgt_a_put(assoc
);
657 nvmet_fc_tgt_queue_free(struct kref
*ref
)
659 struct nvmet_fc_tgt_queue
*queue
=
660 container_of(ref
, struct nvmet_fc_tgt_queue
, ref
);
663 spin_lock_irqsave(&queue
->assoc
->tgtport
->lock
, flags
);
664 queue
->assoc
->queues
[queue
->qid
] = NULL
;
665 spin_unlock_irqrestore(&queue
->assoc
->tgtport
->lock
, flags
);
667 nvmet_fc_destroy_fcp_iodlist(queue
->assoc
->tgtport
, queue
);
669 nvmet_fc_tgt_a_put(queue
->assoc
);
671 destroy_workqueue(queue
->work_q
);
677 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue
*queue
)
679 kref_put(&queue
->ref
, nvmet_fc_tgt_queue_free
);
683 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue
*queue
)
685 return kref_get_unless_zero(&queue
->ref
);
690 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue
*queue
)
692 struct nvmet_fc_tgtport
*tgtport
= queue
->assoc
->tgtport
;
693 struct nvmet_fc_fcp_iod
*fod
= queue
->fod
;
694 struct nvmet_fc_defer_fcp_req
*deferfcp
, *tempptr
;
696 int i
, writedataactive
;
699 disconnect
= atomic_xchg(&queue
->connected
, 0);
701 spin_lock_irqsave(&queue
->qlock
, flags
);
	/* abort outstanding io's */
703 for (i
= 0; i
< queue
->sqsize
; fod
++, i
++) {
705 spin_lock(&fod
->flock
);
707 writedataactive
= fod
->writedataactive
;
708 spin_unlock(&fod
->flock
);
710 * only call lldd abort routine if waiting for
711 * writedata. other outstanding ops should finish
714 if (writedataactive
) {
715 spin_lock(&fod
->flock
);
717 spin_unlock(&fod
->flock
);
718 tgtport
->ops
->fcp_abort(
719 &tgtport
->fc_target_port
, fod
->fcpreq
);
724 /* Cleanup defer'ed IOs in queue */
725 list_for_each_entry_safe(deferfcp
, tempptr
, &queue
->avail_defer_list
,
727 list_del(&deferfcp
->req_list
);
732 deferfcp
= list_first_entry_or_null(&queue
->pending_cmd_list
,
733 struct nvmet_fc_defer_fcp_req
, req_list
);
737 list_del(&deferfcp
->req_list
);
738 spin_unlock_irqrestore(&queue
->qlock
, flags
);
740 tgtport
->ops
->defer_rcv(&tgtport
->fc_target_port
,
743 tgtport
->ops
->fcp_abort(&tgtport
->fc_target_port
,
746 tgtport
->ops
->fcp_req_release(&tgtport
->fc_target_port
,
749 /* release the queue lookup reference */
750 nvmet_fc_tgt_q_put(queue
);
754 spin_lock_irqsave(&queue
->qlock
, flags
);
756 spin_unlock_irqrestore(&queue
->qlock
, flags
);
758 flush_workqueue(queue
->work_q
);
761 nvmet_sq_destroy(&queue
->nvme_sq
);
763 nvmet_fc_tgt_q_put(queue
);
766 static struct nvmet_fc_tgt_queue
*
767 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport
*tgtport
,
770 struct nvmet_fc_tgt_assoc
*assoc
;
771 struct nvmet_fc_tgt_queue
*queue
;
772 u64 association_id
= nvmet_fc_getassociationid(connection_id
);
773 u16 qid
= nvmet_fc_getqueueid(connection_id
);
776 if (qid
> NVMET_NR_QUEUES
)
779 spin_lock_irqsave(&tgtport
->lock
, flags
);
780 list_for_each_entry(assoc
, &tgtport
->assoc_list
, a_list
) {
781 if (association_id
== assoc
->association_id
) {
782 queue
= assoc
->queues
[qid
];
784 (!atomic_read(&queue
->connected
) ||
785 !nvmet_fc_tgt_q_get(queue
)))
787 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
791 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
796 nvmet_fc_delete_assoc(struct work_struct
*work
)
798 struct nvmet_fc_tgt_assoc
*assoc
=
799 container_of(work
, struct nvmet_fc_tgt_assoc
, del_work
);
801 nvmet_fc_delete_target_assoc(assoc
);
802 nvmet_fc_tgt_a_put(assoc
);
805 static struct nvmet_fc_tgt_assoc
*
806 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport
*tgtport
)
808 struct nvmet_fc_tgt_assoc
*assoc
, *tmpassoc
;
812 bool needrandom
= true;
814 assoc
= kzalloc(sizeof(*assoc
), GFP_KERNEL
);
818 idx
= ida_simple_get(&tgtport
->assoc_cnt
, 0, 0, GFP_KERNEL
);
822 if (!nvmet_fc_tgtport_get(tgtport
))
825 assoc
->tgtport
= tgtport
;
827 INIT_LIST_HEAD(&assoc
->a_list
);
828 kref_init(&assoc
->ref
);
829 INIT_WORK(&assoc
->del_work
, nvmet_fc_delete_assoc
);
832 get_random_bytes(&ran
, sizeof(ran
) - BYTES_FOR_QID
);
833 ran
= ran
<< BYTES_FOR_QID_SHIFT
;
835 spin_lock_irqsave(&tgtport
->lock
, flags
);
837 list_for_each_entry(tmpassoc
, &tgtport
->assoc_list
, a_list
)
838 if (ran
== tmpassoc
->association_id
) {
843 assoc
->association_id
= ran
;
844 list_add_tail(&assoc
->a_list
, &tgtport
->assoc_list
);
846 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
852 ida_simple_remove(&tgtport
->assoc_cnt
, idx
);
859 nvmet_fc_target_assoc_free(struct kref
*ref
)
861 struct nvmet_fc_tgt_assoc
*assoc
=
862 container_of(ref
, struct nvmet_fc_tgt_assoc
, ref
);
863 struct nvmet_fc_tgtport
*tgtport
= assoc
->tgtport
;
866 spin_lock_irqsave(&tgtport
->lock
, flags
);
867 list_del(&assoc
->a_list
);
868 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
869 ida_simple_remove(&tgtport
->assoc_cnt
, assoc
->a_id
);
871 nvmet_fc_tgtport_put(tgtport
);
875 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc
*assoc
)
877 kref_put(&assoc
->ref
, nvmet_fc_target_assoc_free
);
881 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc
*assoc
)
883 return kref_get_unless_zero(&assoc
->ref
);
887 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc
*assoc
)
889 struct nvmet_fc_tgtport
*tgtport
= assoc
->tgtport
;
890 struct nvmet_fc_tgt_queue
*queue
;
894 spin_lock_irqsave(&tgtport
->lock
, flags
);
895 for (i
= NVMET_NR_QUEUES
; i
>= 0; i
--) {
896 queue
= assoc
->queues
[i
];
898 if (!nvmet_fc_tgt_q_get(queue
))
900 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
901 nvmet_fc_delete_target_queue(queue
);
902 nvmet_fc_tgt_q_put(queue
);
903 spin_lock_irqsave(&tgtport
->lock
, flags
);
906 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
908 nvmet_fc_tgt_a_put(assoc
);
911 static struct nvmet_fc_tgt_assoc
*
912 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport
*tgtport
,
915 struct nvmet_fc_tgt_assoc
*assoc
;
916 struct nvmet_fc_tgt_assoc
*ret
= NULL
;
919 spin_lock_irqsave(&tgtport
->lock
, flags
);
920 list_for_each_entry(assoc
, &tgtport
->assoc_list
, a_list
) {
921 if (association_id
== assoc
->association_id
) {
923 nvmet_fc_tgt_a_get(assoc
);
927 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
933 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport
*tgtport
,
934 struct nvmet_fc_port_entry
*pe
,
935 struct nvmet_port
*port
)
937 lockdep_assert_held(&nvmet_fc_tgtlock
);
939 pe
->tgtport
= tgtport
;
945 pe
->node_name
= tgtport
->fc_target_port
.node_name
;
946 pe
->port_name
= tgtport
->fc_target_port
.port_name
;
947 INIT_LIST_HEAD(&pe
->pe_list
);
949 list_add_tail(&pe
->pe_list
, &nvmet_fc_portentry_list
);
953 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry
*pe
)
957 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
959 pe
->tgtport
->pe
= NULL
;
960 list_del(&pe
->pe_list
);
961 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
965 * called when a targetport deregisters. Breaks the relationship
966 * with the nvmet port, but leaves the port_entry in place so that
967 * re-registration can resume operation.
970 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport
*tgtport
)
972 struct nvmet_fc_port_entry
*pe
;
975 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
980 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
984 * called when a new targetport is registered. Looks in the
985 * existing nvmet port_entries to see if the nvmet layer is
986 * configured for the targetport's wwn's. (the targetport existed,
987 * nvmet configured, the lldd unregistered the tgtport, and is now
988 * reregistering the same targetport). If so, set the nvmet port
989 * port entry on the targetport.
992 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport
*tgtport
)
994 struct nvmet_fc_port_entry
*pe
;
997 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
998 list_for_each_entry(pe
, &nvmet_fc_portentry_list
, pe_list
) {
999 if (tgtport
->fc_target_port
.node_name
== pe
->node_name
&&
1000 tgtport
->fc_target_port
.port_name
== pe
->port_name
) {
1001 WARN_ON(pe
->tgtport
);
1003 pe
->tgtport
= tgtport
;
1007 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
 * nvmet_fc_register_targetport - transport entry point called by an
 *                                LLDD to register the existence of a local
 *                                NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node the port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate an nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, the local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
1028 nvmet_fc_register_targetport(struct nvmet_fc_port_info
*pinfo
,
1029 struct nvmet_fc_target_template
*template,
1031 struct nvmet_fc_target_port
**portptr
)
1033 struct nvmet_fc_tgtport
*newrec
;
1034 unsigned long flags
;
1037 if (!template->xmt_ls_rsp
|| !template->fcp_op
||
1038 !template->fcp_abort
||
1039 !template->fcp_req_release
|| !template->targetport_delete
||
1040 !template->max_hw_queues
|| !template->max_sgl_segments
||
1041 !template->max_dif_sgl_segments
|| !template->dma_boundary
) {
1043 goto out_regtgt_failed
;
1046 newrec
= kzalloc((sizeof(*newrec
) + template->target_priv_sz
),
1050 goto out_regtgt_failed
;
1053 idx
= ida_simple_get(&nvmet_fc_tgtport_cnt
, 0, 0, GFP_KERNEL
);
1056 goto out_fail_kfree
;
1059 if (!get_device(dev
) && dev
) {
1064 newrec
->fc_target_port
.node_name
= pinfo
->node_name
;
1065 newrec
->fc_target_port
.port_name
= pinfo
->port_name
;
1066 newrec
->fc_target_port
.private = &newrec
[1];
1067 newrec
->fc_target_port
.port_id
= pinfo
->port_id
;
1068 newrec
->fc_target_port
.port_num
= idx
;
1069 INIT_LIST_HEAD(&newrec
->tgt_list
);
1071 newrec
->ops
= template;
1072 spin_lock_init(&newrec
->lock
);
1073 INIT_LIST_HEAD(&newrec
->ls_list
);
1074 INIT_LIST_HEAD(&newrec
->ls_busylist
);
1075 INIT_LIST_HEAD(&newrec
->assoc_list
);
1076 kref_init(&newrec
->ref
);
1077 ida_init(&newrec
->assoc_cnt
);
1078 newrec
->max_sg_cnt
= template->max_sgl_segments
;
1080 ret
= nvmet_fc_alloc_ls_iodlist(newrec
);
1083 goto out_free_newrec
;
1086 nvmet_fc_portentry_rebind_tgt(newrec
);
1088 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
1089 list_add_tail(&newrec
->tgt_list
, &nvmet_fc_target_list
);
1090 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
1092 *portptr
= &newrec
->fc_target_port
;
1098 ida_simple_remove(&nvmet_fc_tgtport_cnt
, idx
);
1105 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport
);
1109 nvmet_fc_free_tgtport(struct kref
*ref
)
1111 struct nvmet_fc_tgtport
*tgtport
=
1112 container_of(ref
, struct nvmet_fc_tgtport
, ref
);
1113 struct device
*dev
= tgtport
->dev
;
1114 unsigned long flags
;
1116 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
1117 list_del(&tgtport
->tgt_list
);
1118 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
1120 nvmet_fc_free_ls_iodlist(tgtport
);
1122 /* let the LLDD know we've finished tearing it down */
1123 tgtport
->ops
->targetport_delete(&tgtport
->fc_target_port
);
1125 ida_simple_remove(&nvmet_fc_tgtport_cnt
,
1126 tgtport
->fc_target_port
.port_num
);
1128 ida_destroy(&tgtport
->assoc_cnt
);
1136 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport
*tgtport
)
1138 kref_put(&tgtport
->ref
, nvmet_fc_free_tgtport
);
1142 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport
*tgtport
)
1144 return kref_get_unless_zero(&tgtport
->ref
);
1148 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport
*tgtport
)
1150 struct nvmet_fc_tgt_assoc
*assoc
, *next
;
1151 unsigned long flags
;
1153 spin_lock_irqsave(&tgtport
->lock
, flags
);
1154 list_for_each_entry_safe(assoc
, next
,
1155 &tgtport
->assoc_list
, a_list
) {
1156 if (!nvmet_fc_tgt_a_get(assoc
))
1158 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
1159 nvmet_fc_delete_target_assoc(assoc
);
1160 nvmet_fc_tgt_a_put(assoc
);
1161 spin_lock_irqsave(&tgtport
->lock
, flags
);
1163 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
1167 * nvmet layer has called to terminate an association
1170 nvmet_fc_delete_ctrl(struct nvmet_ctrl
*ctrl
)
1172 struct nvmet_fc_tgtport
*tgtport
, *next
;
1173 struct nvmet_fc_tgt_assoc
*assoc
;
1174 struct nvmet_fc_tgt_queue
*queue
;
1175 unsigned long flags
;
1176 bool found_ctrl
= false;
1178 /* this is a bit ugly, but don't want to make locks layered */
1179 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
1180 list_for_each_entry_safe(tgtport
, next
, &nvmet_fc_target_list
,
1182 if (!nvmet_fc_tgtport_get(tgtport
))
1184 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
1186 spin_lock_irqsave(&tgtport
->lock
, flags
);
1187 list_for_each_entry(assoc
, &tgtport
->assoc_list
, a_list
) {
1188 queue
= assoc
->queues
[0];
1189 if (queue
&& queue
->nvme_sq
.ctrl
== ctrl
) {
1190 if (nvmet_fc_tgt_a_get(assoc
))
1195 spin_unlock_irqrestore(&tgtport
->lock
, flags
);
1197 nvmet_fc_tgtport_put(tgtport
);
1200 schedule_work(&assoc
->del_work
);
1204 spin_lock_irqsave(&nvmet_fc_tgtlock
, flags
);
1206 spin_unlock_irqrestore(&nvmet_fc_tgtlock
, flags
);
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                                  LLDD to deregister/remove a previously
 *                                  registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
1221 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port
*target_port
)
1223 struct nvmet_fc_tgtport
*tgtport
= targetport_to_tgtport(target_port
);
1225 nvmet_fc_portentry_unbind_tgt(tgtport
);
1227 /* terminate any outstanding associations */
1228 __nvmet_fc_free_assocs(tgtport
);
1230 nvmet_fc_tgtport_put(tgtport
);
1234 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport
);
1237 /* *********************** FC-NVME LS Handling **************************** */
1241 nvmet_fc_format_rsp_hdr(void *buf
, u8 ls_cmd
, __be32 desc_len
, u8 rqst_ls_cmd
)
1243 struct fcnvme_ls_acc_hdr
*acc
= buf
;
1245 acc
->w0
.ls_cmd
= ls_cmd
;
1246 acc
->desc_list_len
= desc_len
;
1247 acc
->rqst
.desc_tag
= cpu_to_be32(FCNVME_LSDESC_RQST
);
1248 acc
->rqst
.desc_len
=
1249 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst
));
1250 acc
->rqst
.w0
.ls_cmd
= rqst_ls_cmd
;
1254 nvmet_fc_format_rjt(void *buf
, u16 buflen
, u8 ls_cmd
,
1255 u8 reason
, u8 explanation
, u8 vendor
)
1257 struct fcnvme_ls_rjt
*rjt
= buf
;
1259 nvmet_fc_format_rsp_hdr(buf
, FCNVME_LSDESC_RQST
,
1260 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt
)),
1262 rjt
->rjt
.desc_tag
= cpu_to_be32(FCNVME_LSDESC_RJT
);
1263 rjt
->rjt
.desc_len
= fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt
));
1264 rjt
->rjt
.reason_code
= reason
;
1265 rjt
->rjt
.reason_explanation
= explanation
;
1266 rjt
->rjt
.vendor
= vendor
;
1268 return sizeof(struct fcnvme_ls_rjt
);
1271 /* Validation Error indexes into the string table below */
1274 VERR_CR_ASSOC_LEN
= 1,
1275 VERR_CR_ASSOC_RQST_LEN
= 2,
1276 VERR_CR_ASSOC_CMD
= 3,
1277 VERR_CR_ASSOC_CMD_LEN
= 4,
1278 VERR_ERSP_RATIO
= 5,
1279 VERR_ASSOC_ALLOC_FAIL
= 6,
1280 VERR_QUEUE_ALLOC_FAIL
= 7,
1281 VERR_CR_CONN_LEN
= 8,
1282 VERR_CR_CONN_RQST_LEN
= 9,
1284 VERR_ASSOC_ID_LEN
= 11,
1287 VERR_CONN_ID_LEN
= 14,
1289 VERR_CR_CONN_CMD
= 16,
1290 VERR_CR_CONN_CMD_LEN
= 17,
1291 VERR_DISCONN_LEN
= 18,
1292 VERR_DISCONN_RQST_LEN
= 19,
1293 VERR_DISCONN_CMD
= 20,
1294 VERR_DISCONN_CMD_LEN
= 21,
1295 VERR_DISCONN_SCOPE
= 22,
1297 VERR_RS_RQST_LEN
= 24,
1299 VERR_RS_CMD_LEN
= 26,
1304 static char *validation_errors
[] = {
1306 "Bad CR_ASSOC Length",
1307 "Bad CR_ASSOC Rqst Length",
1309 "Bad CR_ASSOC Cmd Length",
1311 "Association Allocation Failed",
1312 "Queue Allocation Failed",
1313 "Bad CR_CONN Length",
1314 "Bad CR_CONN Rqst Length",
1315 "Not Association ID",
1316 "Bad Association ID Length",
1318 "Not Connection ID",
1319 "Bad Connection ID Length",
1322 "Bad CR_CONN Cmd Length",
1323 "Bad DISCONN Length",
1324 "Bad DISCONN Rqst Length",
1326 "Bad DISCONN Cmd Length",
1327 "Bad Disconnect Scope",
1329 "Bad RS Rqst Length",
1331 "Bad RS Cmd Length",
1333 "Bad RS Relative Offset",
1337 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport
*tgtport
,
1338 struct nvmet_fc_ls_iod
*iod
)
1340 struct fcnvme_ls_cr_assoc_rqst
*rqst
=
1341 (struct fcnvme_ls_cr_assoc_rqst
*)iod
->rqstbuf
;
1342 struct fcnvme_ls_cr_assoc_acc
*acc
=
1343 (struct fcnvme_ls_cr_assoc_acc
*)iod
->rspbuf
;
1344 struct nvmet_fc_tgt_queue
*queue
;
1347 memset(acc
, 0, sizeof(*acc
));
1350 * FC-NVME spec changes. There are initiators sending different
1351 * lengths as padding sizes for Create Association Cmd descriptor
1353 * Accept anything of "minimum" length. Assume format per 1.15
1354 * spec (with HOSTID reduced to 16 bytes), ignore how long the
1355 * trailing pad length is.
1357 if (iod
->rqstdatalen
< FCNVME_LSDESC_CRA_RQST_MINLEN
)
1358 ret
= VERR_CR_ASSOC_LEN
;
1359 else if (be32_to_cpu(rqst
->desc_list_len
) <
1360 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN
)
1361 ret
= VERR_CR_ASSOC_RQST_LEN
;
1362 else if (rqst
->assoc_cmd
.desc_tag
!=
1363 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD
))
1364 ret
= VERR_CR_ASSOC_CMD
;
1365 else if (be32_to_cpu(rqst
->assoc_cmd
.desc_len
) <
1366 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN
)
1367 ret
= VERR_CR_ASSOC_CMD_LEN
;
1368 else if (!rqst
->assoc_cmd
.ersp_ratio
||
1369 (be16_to_cpu(rqst
->assoc_cmd
.ersp_ratio
) >=
1370 be16_to_cpu(rqst
->assoc_cmd
.sqsize
)))
1371 ret
= VERR_ERSP_RATIO
;
1374 /* new association w/ admin queue */
1375 iod
->assoc
= nvmet_fc_alloc_target_assoc(tgtport
);
1377 ret
= VERR_ASSOC_ALLOC_FAIL
;
1379 queue
= nvmet_fc_alloc_target_queue(iod
->assoc
, 0,
1380 be16_to_cpu(rqst
->assoc_cmd
.sqsize
));
1382 ret
= VERR_QUEUE_ALLOC_FAIL
;
1387 dev_err(tgtport
->dev
,
1388 "Create Association LS failed: %s\n",
1389 validation_errors
[ret
]);
1390 iod
->lsreq
->rsplen
= nvmet_fc_format_rjt(acc
,
1391 NVME_FC_MAX_LS_BUFFER_SIZE
, rqst
->w0
.ls_cmd
,
1392 FCNVME_RJT_RC_LOGIC
,
1393 FCNVME_RJT_EXP_NONE
, 0);
1397 queue
->ersp_ratio
= be16_to_cpu(rqst
->assoc_cmd
.ersp_ratio
);
1398 atomic_set(&queue
->connected
, 1);
1399 queue
->sqhd
= 0; /* best place to init value */
1401 /* format a response */
1403 iod
->lsreq
->rsplen
= sizeof(*acc
);
1405 nvmet_fc_format_rsp_hdr(acc
, FCNVME_LS_ACC
,
1407 sizeof(struct fcnvme_ls_cr_assoc_acc
)),
1408 FCNVME_LS_CREATE_ASSOCIATION
);
1409 acc
->associd
.desc_tag
= cpu_to_be32(FCNVME_LSDESC_ASSOC_ID
);
1410 acc
->associd
.desc_len
=
1412 sizeof(struct fcnvme_lsdesc_assoc_id
));
1413 acc
->associd
.association_id
=
1414 cpu_to_be64(nvmet_fc_makeconnid(iod
->assoc
, 0));
1415 acc
->connectid
.desc_tag
= cpu_to_be32(FCNVME_LSDESC_CONN_ID
);
1416 acc
->connectid
.desc_len
=
1418 sizeof(struct fcnvme_lsdesc_conn_id
));
1419 acc
->connectid
.connection_id
= acc
->associd
.association_id
;
1423 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport
*tgtport
,
1424 struct nvmet_fc_ls_iod
*iod
)
1426 struct fcnvme_ls_cr_conn_rqst
*rqst
=
1427 (struct fcnvme_ls_cr_conn_rqst
*)iod
->rqstbuf
;
1428 struct fcnvme_ls_cr_conn_acc
*acc
=
1429 (struct fcnvme_ls_cr_conn_acc
*)iod
->rspbuf
;
1430 struct nvmet_fc_tgt_queue
*queue
;
1433 memset(acc
, 0, sizeof(*acc
));
1435 if (iod
->rqstdatalen
< sizeof(struct fcnvme_ls_cr_conn_rqst
))
1436 ret
= VERR_CR_CONN_LEN
;
1437 else if (rqst
->desc_list_len
!=
1439 sizeof(struct fcnvme_ls_cr_conn_rqst
)))
1440 ret
= VERR_CR_CONN_RQST_LEN
;
1441 else if (rqst
->associd
.desc_tag
!= cpu_to_be32(FCNVME_LSDESC_ASSOC_ID
))
1442 ret
= VERR_ASSOC_ID
;
1443 else if (rqst
->associd
.desc_len
!=
1445 sizeof(struct fcnvme_lsdesc_assoc_id
)))
1446 ret
= VERR_ASSOC_ID_LEN
;
1447 else if (rqst
->connect_cmd
.desc_tag
!=
1448 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD
))
1449 ret
= VERR_CR_CONN_CMD
;
1450 else if (rqst
->connect_cmd
.desc_len
!=
1452 sizeof(struct fcnvme_lsdesc_cr_conn_cmd
)))
1453 ret
= VERR_CR_CONN_CMD_LEN
;
1454 else if (!rqst
->connect_cmd
.ersp_ratio
||
1455 (be16_to_cpu(rqst
->connect_cmd
.ersp_ratio
) >=
1456 be16_to_cpu(rqst
->connect_cmd
.sqsize
)))
1457 ret
= VERR_ERSP_RATIO
;
1461 iod
->assoc
= nvmet_fc_find_target_assoc(tgtport
,
1462 be64_to_cpu(rqst
->associd
.association_id
));
1464 ret
= VERR_NO_ASSOC
;
1466 queue
= nvmet_fc_alloc_target_queue(iod
->assoc
,
1467 be16_to_cpu(rqst
->connect_cmd
.qid
),
1468 be16_to_cpu(rqst
->connect_cmd
.sqsize
));
1470 ret
= VERR_QUEUE_ALLOC_FAIL
;
1472 /* release get taken in nvmet_fc_find_target_assoc */
1473 nvmet_fc_tgt_a_put(iod
->assoc
);
1478 dev_err(tgtport
->dev
,
1479 "Create Connection LS failed: %s\n",
1480 validation_errors
[ret
]);
1481 iod
->lsreq
->rsplen
= nvmet_fc_format_rjt(acc
,
1482 NVME_FC_MAX_LS_BUFFER_SIZE
, rqst
->w0
.ls_cmd
,
1483 (ret
== VERR_NO_ASSOC
) ?
1484 FCNVME_RJT_RC_INV_ASSOC
:
1485 FCNVME_RJT_RC_LOGIC
,
1486 FCNVME_RJT_EXP_NONE
, 0);
1490 queue
->ersp_ratio
= be16_to_cpu(rqst
->connect_cmd
.ersp_ratio
);
1491 atomic_set(&queue
->connected
, 1);
1492 queue
->sqhd
= 0; /* best place to init value */
1494 /* format a response */
1496 iod
->lsreq
->rsplen
= sizeof(*acc
);
1498 nvmet_fc_format_rsp_hdr(acc
, FCNVME_LS_ACC
,
1499 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc
)),
1500 FCNVME_LS_CREATE_CONNECTION
);
1501 acc
->connectid
.desc_tag
= cpu_to_be32(FCNVME_LSDESC_CONN_ID
);
1502 acc
->connectid
.desc_len
=
1504 sizeof(struct fcnvme_lsdesc_conn_id
));
1505 acc
->connectid
.connection_id
=
1506 cpu_to_be64(nvmet_fc_makeconnid(iod
->assoc
,
1507 be16_to_cpu(rqst
->connect_cmd
.qid
)));
1511 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport
*tgtport
,
1512 struct nvmet_fc_ls_iod
*iod
)
1514 struct fcnvme_ls_disconnect_rqst
*rqst
=
1515 (struct fcnvme_ls_disconnect_rqst
*)iod
->rqstbuf
;
1516 struct fcnvme_ls_disconnect_acc
*acc
=
1517 (struct fcnvme_ls_disconnect_acc
*)iod
->rspbuf
;
1518 struct nvmet_fc_tgt_queue
*queue
= NULL
;
1519 struct nvmet_fc_tgt_assoc
*assoc
;
1521 bool del_assoc
= false;
1523 memset(acc
, 0, sizeof(*acc
));
1525 if (iod
->rqstdatalen
< sizeof(struct fcnvme_ls_disconnect_rqst
))
1526 ret
= VERR_DISCONN_LEN
;
1527 else if (rqst
->desc_list_len
!=
1529 sizeof(struct fcnvme_ls_disconnect_rqst
)))
1530 ret
= VERR_DISCONN_RQST_LEN
;
1531 else if (rqst
->associd
.desc_tag
!= cpu_to_be32(FCNVME_LSDESC_ASSOC_ID
))
1532 ret
= VERR_ASSOC_ID
;
1533 else if (rqst
->associd
.desc_len
!=
1535 sizeof(struct fcnvme_lsdesc_assoc_id
)))
1536 ret
= VERR_ASSOC_ID_LEN
;
1537 else if (rqst
->discon_cmd
.desc_tag
!=
1538 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD
))
1539 ret
= VERR_DISCONN_CMD
;
1540 else if (rqst
->discon_cmd
.desc_len
!=
1542 sizeof(struct fcnvme_lsdesc_disconn_cmd
)))
1543 ret
= VERR_DISCONN_CMD_LEN
;
1544 else if ((rqst
->discon_cmd
.scope
!= FCNVME_DISCONN_ASSOCIATION
) &&
1545 (rqst
->discon_cmd
.scope
!= FCNVME_DISCONN_CONNECTION
))
1546 ret
= VERR_DISCONN_SCOPE
;
1548 /* match an active association */
1549 assoc
= nvmet_fc_find_target_assoc(tgtport
,
1550 be64_to_cpu(rqst
->associd
.association_id
));
1553 if (rqst
->discon_cmd
.scope
==
1554 FCNVME_DISCONN_CONNECTION
) {
1555 queue
= nvmet_fc_find_target_queue(tgtport
,
1557 rqst
->discon_cmd
.id
));
1559 nvmet_fc_tgt_a_put(assoc
);
1564 ret
= VERR_NO_ASSOC
;
1568 dev_err(tgtport
->dev
,
1569 "Disconnect LS failed: %s\n",
1570 validation_errors
[ret
]);
1571 iod
->lsreq
->rsplen
= nvmet_fc_format_rjt(acc
,
1572 NVME_FC_MAX_LS_BUFFER_SIZE
, rqst
->w0
.ls_cmd
,
1573 (ret
== VERR_NO_ASSOC
) ?
1574 FCNVME_RJT_RC_INV_ASSOC
:
1575 (ret
== VERR_NO_CONN
) ?
1576 FCNVME_RJT_RC_INV_CONN
:
1577 FCNVME_RJT_RC_LOGIC
,
1578 FCNVME_RJT_EXP_NONE
, 0);
1582 /* format a response */
1584 iod
->lsreq
->rsplen
= sizeof(*acc
);
1586 nvmet_fc_format_rsp_hdr(acc
, FCNVME_LS_ACC
,
1588 sizeof(struct fcnvme_ls_disconnect_acc
)),
1589 FCNVME_LS_DISCONNECT
);
1592 /* are we to delete a Connection ID (queue) */
1594 int qid
= queue
->qid
;
1596 nvmet_fc_delete_target_queue(queue
);
1598 /* release the get taken by find_target_queue */
1599 nvmet_fc_tgt_q_put(queue
);
1601 /* tear association down if io queue terminated */
1606 /* release get taken in nvmet_fc_find_target_assoc */
1607 nvmet_fc_tgt_a_put(iod
->assoc
);
1610 nvmet_fc_delete_target_assoc(iod
->assoc
);
1614 /* *********************** NVME Ctrl Routines **************************** */
1617 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req
*nvme_req
);
1619 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops
;
1622 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req
*lsreq
)
1624 struct nvmet_fc_ls_iod
*iod
= lsreq
->nvmet_fc_private
;
1625 struct nvmet_fc_tgtport
*tgtport
= iod
->tgtport
;
1627 fc_dma_sync_single_for_cpu(tgtport
->dev
, iod
->rspdma
,
1628 NVME_FC_MAX_LS_BUFFER_SIZE
, DMA_TO_DEVICE
);
1629 nvmet_fc_free_ls_iod(tgtport
, iod
);
1630 nvmet_fc_tgtport_put(tgtport
);
1634 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport
*tgtport
,
1635 struct nvmet_fc_ls_iod
*iod
)
1639 fc_dma_sync_single_for_device(tgtport
->dev
, iod
->rspdma
,
1640 NVME_FC_MAX_LS_BUFFER_SIZE
, DMA_TO_DEVICE
);
1642 ret
= tgtport
->ops
->xmt_ls_rsp(&tgtport
->fc_target_port
, iod
->lsreq
);
1644 nvmet_fc_xmt_ls_rsp_done(iod
->lsreq
);
1648 * Actual processing routine for received FC-NVME LS Requests from the LLD
1651 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport
*tgtport
,
1652 struct nvmet_fc_ls_iod
*iod
)
1654 struct fcnvme_ls_rqst_w0
*w0
=
1655 (struct fcnvme_ls_rqst_w0
*)iod
->rqstbuf
;
1657 iod
->lsreq
->nvmet_fc_private
= iod
;
1658 iod
->lsreq
->rspbuf
= iod
->rspbuf
;
1659 iod
->lsreq
->rspdma
= iod
->rspdma
;
1660 iod
->lsreq
->done
= nvmet_fc_xmt_ls_rsp_done
;
1661 /* Be preventative. handlers will later set to valid length */
1662 iod
->lsreq
->rsplen
= 0;
1668 * parse request input, execute the request, and format the
1671 switch (w0
->ls_cmd
) {
1672 case FCNVME_LS_CREATE_ASSOCIATION
:
1673 /* Creates Association and initial Admin Queue/Connection */
1674 nvmet_fc_ls_create_association(tgtport
, iod
);
1676 case FCNVME_LS_CREATE_CONNECTION
:
1677 /* Creates an IO Queue/Connection */
1678 nvmet_fc_ls_create_connection(tgtport
, iod
);
1680 case FCNVME_LS_DISCONNECT
:
1681 /* Terminate a Queue/Connection or the Association */
1682 nvmet_fc_ls_disconnect(tgtport
, iod
);
1685 iod
->lsreq
->rsplen
= nvmet_fc_format_rjt(iod
->rspbuf
,
1686 NVME_FC_MAX_LS_BUFFER_SIZE
, w0
->ls_cmd
,
1687 FCNVME_RJT_RC_INVAL
, FCNVME_RJT_EXP_NONE
, 0);
1690 nvmet_fc_xmt_ls_rsp(tgtport
, iod
);
1694 * Actual processing routine for received FC-NVME LS Requests from the LLD
1697 nvmet_fc_handle_ls_rqst_work(struct work_struct
*work
)
1699 struct nvmet_fc_ls_iod
*iod
=
1700 container_of(work
, struct nvmet_fc_ls_iod
, work
);
1701 struct nvmet_fc_tgtport
*tgtport
= iod
->tgtport
;
1703 nvmet_fc_handle_ls_rqst(tgtport
, iod
);
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
1725 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port
*target_port
,
1726 struct nvmefc_tgt_ls_req
*lsreq
,
1727 void *lsreqbuf
, u32 lsreqbuf_len
)
1729 struct nvmet_fc_tgtport
*tgtport
= targetport_to_tgtport(target_port
);
1730 struct nvmet_fc_ls_iod
*iod
;
1732 if (lsreqbuf_len
> NVME_FC_MAX_LS_BUFFER_SIZE
)
1735 if (!nvmet_fc_tgtport_get(tgtport
))
1738 iod
= nvmet_fc_alloc_ls_iod(tgtport
);
1740 nvmet_fc_tgtport_put(tgtport
);
1746 memcpy(iod
->rqstbuf
, lsreqbuf
, lsreqbuf_len
);
1747 iod
->rqstdatalen
= lsreqbuf_len
;
1749 schedule_work(&iod
->work
);
1753 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req
);
/*
 * ***********************
 * Start of FCP handling
 * ***********************
 */
1763 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod
*fod
)
1765 struct scatterlist
*sg
;
1768 sg
= sgl_alloc(fod
->req
.transfer_len
, GFP_KERNEL
, &nent
);
1773 fod
->data_sg_cnt
= nent
;
1774 fod
->data_sg_cnt
= fc_dma_map_sg(fod
->tgtport
->dev
, sg
, nent
,
1775 ((fod
->io_dir
== NVMET_FCP_WRITE
) ?
1776 DMA_FROM_DEVICE
: DMA_TO_DEVICE
));
1777 /* note: write from initiator perspective */
1778 fod
->next_sg
= fod
->data_sg
;
1783 return NVME_SC_INTERNAL
;
1787 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod
*fod
)
1789 if (!fod
->data_sg
|| !fod
->data_sg_cnt
)
1792 fc_dma_unmap_sg(fod
->tgtport
->dev
, fod
->data_sg
, fod
->data_sg_cnt
,
1793 ((fod
->io_dir
== NVMET_FCP_WRITE
) ?
1794 DMA_FROM_DEVICE
: DMA_TO_DEVICE
));
1795 sgl_free(fod
->data_sg
);
1796 fod
->data_sg
= NULL
;
1797 fod
->data_sg_cnt
= 0;
1802 queue_90percent_full(struct nvmet_fc_tgt_queue
*q
, u32 sqhd
)
1806 /* egad, this is ugly. And sqtail is just a best guess */
1807 sqtail
= atomic_read(&q
->sqtail
) % q
->sqsize
;
1809 used
= (sqtail
< sqhd
) ? (sqtail
+ q
->sqsize
- sqhd
) : (sqtail
- sqhd
);
1810 return ((used
* 10) >= (((u32
)(q
->sqsize
- 1) * 9)));
1815 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1818 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport
*tgtport
,
1819 struct nvmet_fc_fcp_iod
*fod
)
1821 struct nvme_fc_ersp_iu
*ersp
= &fod
->rspiubuf
;
1822 struct nvme_common_command
*sqe
= &fod
->cmdiubuf
.sqe
.common
;
1823 struct nvme_completion
*cqe
= &ersp
->cqe
;
1824 u32
*cqewd
= (u32
*)cqe
;
1825 bool send_ersp
= false;
1826 u32 rsn
, rspcnt
, xfr_length
;
1828 if (fod
->fcpreq
->op
== NVMET_FCOP_READDATA_RSP
)
1829 xfr_length
= fod
->req
.transfer_len
;
1831 xfr_length
= fod
->offset
;
1834 * check to see if we can send a 0's rsp.
1835 * Note: to send a 0's response, the NVME-FC host transport will
1836 * recreate the CQE. The host transport knows: sq id, SQHD (last
1837 * seen in an ersp), and command_id. Thus it will create a
1838 * zero-filled CQE with those known fields filled in. Transport
1839 * must send an ersp for any condition where the cqe won't match
1842 * Here are the FC-NVME mandated cases where we must send an ersp:
1843 * every N responses, where N=ersp_ratio
	 *    force fabric commands to send ersp's (not in FC-NVME but good
	 *      practice)
1846 * normal cmds: any time status is non-zero, or status is zero
1847 * but words 0 or 1 are non-zero.
1848 * the SQ is 90% or more full
1849 * the cmd is a fused command
1850 * transferred data length not equal to cmd iu length
1852 rspcnt
= atomic_inc_return(&fod
->queue
->zrspcnt
);
1853 if (!(rspcnt
% fod
->queue
->ersp_ratio
) ||
1854 sqe
->opcode
== nvme_fabrics_command
||
1855 xfr_length
!= fod
->req
.transfer_len
||
1856 (le16_to_cpu(cqe
->status
) & 0xFFFE) || cqewd
[0] || cqewd
[1] ||
1857 (sqe
->flags
& (NVME_CMD_FUSE_FIRST
| NVME_CMD_FUSE_SECOND
)) ||
1858 queue_90percent_full(fod
->queue
, le16_to_cpu(cqe
->sq_head
)))
1861 /* re-set the fields */
1862 fod
->fcpreq
->rspaddr
= ersp
;
1863 fod
->fcpreq
->rspdma
= fod
->rspdma
;
1866 memset(ersp
, 0, NVME_FC_SIZEOF_ZEROS_RSP
);
1867 fod
->fcpreq
->rsplen
= NVME_FC_SIZEOF_ZEROS_RSP
;
1869 ersp
->iu_len
= cpu_to_be16(sizeof(*ersp
)/sizeof(u32
));
1870 rsn
= atomic_inc_return(&fod
->queue
->rsn
);
1871 ersp
->rsn
= cpu_to_be32(rsn
);
1872 ersp
->xfrd_len
= cpu_to_be32(xfr_length
);
1873 fod
->fcpreq
->rsplen
= sizeof(*ersp
);
1876 fc_dma_sync_single_for_device(tgtport
->dev
, fod
->rspdma
,
1877 sizeof(fod
->rspiubuf
), DMA_TO_DEVICE
);
1880 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req
*fcpreq
);
1883 nvmet_fc_abort_op(struct nvmet_fc_tgtport
*tgtport
,
1884 struct nvmet_fc_fcp_iod
*fod
)
1886 struct nvmefc_tgt_fcp_req
*fcpreq
= fod
->fcpreq
;
1888 /* data no longer needed */
1889 nvmet_fc_free_tgt_pgs(fod
);
1892 * if an ABTS was received or we issued the fcp_abort early
1893 * don't call abort routine again.
1895 /* no need to take lock - lock was taken earlier to get here */
1897 tgtport
->ops
->fcp_abort(&tgtport
->fc_target_port
, fcpreq
);
1899 nvmet_fc_free_fcp_iod(fod
->queue
, fod
);
1903 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport
*tgtport
,
1904 struct nvmet_fc_fcp_iod
*fod
)
1908 fod
->fcpreq
->op
= NVMET_FCOP_RSP
;
1909 fod
->fcpreq
->timeout
= 0;
1911 nvmet_fc_prep_fcp_rsp(tgtport
, fod
);
1913 ret
= tgtport
->ops
->fcp_op(&tgtport
->fc_target_port
, fod
->fcpreq
);
1915 nvmet_fc_abort_op(tgtport
, fod
);
1919 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport
*tgtport
,
1920 struct nvmet_fc_fcp_iod
*fod
, u8 op
)
1922 struct nvmefc_tgt_fcp_req
*fcpreq
= fod
->fcpreq
;
1923 struct scatterlist
*sg
= fod
->next_sg
;
1924 unsigned long flags
;
1925 u32 remaininglen
= fod
->req
.transfer_len
- fod
->offset
;
1930 fcpreq
->offset
= fod
->offset
;
1931 fcpreq
->timeout
= NVME_FC_TGTOP_TIMEOUT_SEC
;
1934 * for next sequence:
1935 * break at a sg element boundary
1936 * attempt to keep sequence length capped at
1937 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
1938 * be longer if a single sg element is larger
1939 * than that amount. This is done to avoid creating
1940 * a new sg list to use for the tgtport api.
1944 while (tlen
< remaininglen
&&
1945 fcpreq
->sg_cnt
< tgtport
->max_sg_cnt
&&
1946 tlen
+ sg_dma_len(sg
) < NVMET_FC_MAX_SEQ_LENGTH
) {
1948 tlen
+= sg_dma_len(sg
);
1951 if (tlen
< remaininglen
&& fcpreq
->sg_cnt
== 0) {
1953 tlen
+= min_t(u32
, sg_dma_len(sg
), remaininglen
);
1956 if (tlen
< remaininglen
)
1959 fod
->next_sg
= NULL
;
1961 fcpreq
->transfer_length
= tlen
;
1962 fcpreq
->transferred_length
= 0;
1963 fcpreq
->fcp_error
= 0;
1967 * If the last READDATA request: check if LLDD supports
1968 * combined xfr with response.
1970 if ((op
== NVMET_FCOP_READDATA
) &&
1971 ((fod
->offset
+ fcpreq
->transfer_length
) == fod
->req
.transfer_len
) &&
1972 (tgtport
->ops
->target_features
& NVMET_FCTGTFEAT_READDATA_RSP
)) {
1973 fcpreq
->op
= NVMET_FCOP_READDATA_RSP
;
1974 nvmet_fc_prep_fcp_rsp(tgtport
, fod
);
1977 ret
= tgtport
->ops
->fcp_op(&tgtport
->fc_target_port
, fod
->fcpreq
);
	 * should be ok to set w/o lock as it's in the thread of
	 * execution (not an async timer routine) and doesn't
	 * contend with any clearing action
1986 if (op
== NVMET_FCOP_WRITEDATA
) {
1987 spin_lock_irqsave(&fod
->flock
, flags
);
1988 fod
->writedataactive
= false;
1989 spin_unlock_irqrestore(&fod
->flock
, flags
);
1990 nvmet_req_complete(&fod
->req
, NVME_SC_INTERNAL
);
1991 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1992 fcpreq
->fcp_error
= ret
;
1993 fcpreq
->transferred_length
= 0;
1994 nvmet_fc_xmt_fcp_op_done(fod
->fcpreq
);
2000 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod
*fod
, bool abort
)
2002 struct nvmefc_tgt_fcp_req
*fcpreq
= fod
->fcpreq
;
2003 struct nvmet_fc_tgtport
*tgtport
= fod
->tgtport
;
2005 /* if in the middle of an io and we need to tear down */
2007 if (fcpreq
->op
== NVMET_FCOP_WRITEDATA
) {
2008 nvmet_req_complete(&fod
->req
, NVME_SC_INTERNAL
);
2012 nvmet_fc_abort_op(tgtport
, fod
);
2020 * actual done handler for FCP operations when completed by the lldd
2023 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod
*fod
)
2025 struct nvmefc_tgt_fcp_req
*fcpreq
= fod
->fcpreq
;
2026 struct nvmet_fc_tgtport
*tgtport
= fod
->tgtport
;
2027 unsigned long flags
;
2030 spin_lock_irqsave(&fod
->flock
, flags
);
2032 fod
->writedataactive
= false;
2033 spin_unlock_irqrestore(&fod
->flock
, flags
);
2035 switch (fcpreq
->op
) {
2037 case NVMET_FCOP_WRITEDATA
:
2038 if (__nvmet_fc_fod_op_abort(fod
, abort
))
2040 if (fcpreq
->fcp_error
||
2041 fcpreq
->transferred_length
!= fcpreq
->transfer_length
) {
2042 spin_lock(&fod
->flock
);
2044 spin_unlock(&fod
->flock
);
2046 nvmet_req_complete(&fod
->req
, NVME_SC_INTERNAL
);
2050 fod
->offset
+= fcpreq
->transferred_length
;
2051 if (fod
->offset
!= fod
->req
.transfer_len
) {
2052 spin_lock_irqsave(&fod
->flock
, flags
);
2053 fod
->writedataactive
= true;
2054 spin_unlock_irqrestore(&fod
->flock
, flags
);
2056 /* transfer the next chunk */
2057 nvmet_fc_transfer_fcp_data(tgtport
, fod
,
2058 NVMET_FCOP_WRITEDATA
);
2062 /* data transfer complete, resume with nvmet layer */
2063 nvmet_req_execute(&fod
->req
);
2066 case NVMET_FCOP_READDATA
:
2067 case NVMET_FCOP_READDATA_RSP
:
2068 if (__nvmet_fc_fod_op_abort(fod
, abort
))
2070 if (fcpreq
->fcp_error
||
2071 fcpreq
->transferred_length
!= fcpreq
->transfer_length
) {
2072 nvmet_fc_abort_op(tgtport
, fod
);
2078 if (fcpreq
->op
== NVMET_FCOP_READDATA_RSP
) {
2079 /* data no longer needed */
2080 nvmet_fc_free_tgt_pgs(fod
);
2081 nvmet_fc_free_fcp_iod(fod
->queue
, fod
);
2085 fod
->offset
+= fcpreq
->transferred_length
;
2086 if (fod
->offset
!= fod
->req
.transfer_len
) {
2087 /* transfer the next chunk */
2088 nvmet_fc_transfer_fcp_data(tgtport
, fod
,
2089 NVMET_FCOP_READDATA
);
2093 /* data transfer complete, send response */
2095 /* data no longer needed */
2096 nvmet_fc_free_tgt_pgs(fod
);
2098 nvmet_fc_xmt_fcp_rsp(tgtport
, fod
);
2102 case NVMET_FCOP_RSP
:
2103 if (__nvmet_fc_fod_op_abort(fod
, abort
))
2105 nvmet_fc_free_fcp_iod(fod
->queue
, fod
);
2114 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req
*fcpreq
)
2116 struct nvmet_fc_fcp_iod
*fod
= fcpreq
->nvmet_fc_private
;
2118 nvmet_fc_fod_op_done(fod
);
2122 * actual completion handler after execution by the nvmet layer
2125 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport
*tgtport
,
2126 struct nvmet_fc_fcp_iod
*fod
, int status
)
2128 struct nvme_common_command
*sqe
= &fod
->cmdiubuf
.sqe
.common
;
2129 struct nvme_completion
*cqe
= &fod
->rspiubuf
.cqe
;
2130 unsigned long flags
;
2133 spin_lock_irqsave(&fod
->flock
, flags
);
2135 spin_unlock_irqrestore(&fod
->flock
, flags
);
2137 /* if we have a CQE, snoop the last sq_head value */
2139 fod
->queue
->sqhd
= cqe
->sq_head
;
2142 nvmet_fc_abort_op(tgtport
, fod
);
2146 /* if an error handling the cmd post initial parsing */
2148 /* fudge up a failed CQE status for our transport error */
2149 memset(cqe
, 0, sizeof(*cqe
));
2150 cqe
->sq_head
= fod
->queue
->sqhd
; /* echo last cqe sqhd */
2151 cqe
->sq_id
= cpu_to_le16(fod
->queue
->qid
);
2152 cqe
->command_id
= sqe
->command_id
;
2153 cqe
->status
= cpu_to_le16(status
);
	 * try to push the data even if the SQE status is non-zero.
	 * There may be a status where data still was intended to
	 * be moved.
2161 if ((fod
->io_dir
== NVMET_FCP_READ
) && (fod
->data_sg_cnt
)) {
2162 /* push the data over before sending rsp */
2163 nvmet_fc_transfer_fcp_data(tgtport
, fod
,
2164 NVMET_FCOP_READDATA
);
2168 /* writes & no data - fall thru */
2171 /* data no longer needed */
2172 nvmet_fc_free_tgt_pgs(fod
);
2174 nvmet_fc_xmt_fcp_rsp(tgtport
, fod
);
2179 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req
*nvme_req
)
2181 struct nvmet_fc_fcp_iod
*fod
= nvmet_req_to_fod(nvme_req
);
2182 struct nvmet_fc_tgtport
*tgtport
= fod
->tgtport
;
2184 __nvmet_fc_fcp_nvme_cmd_done(tgtport
, fod
, 0);
2189 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2192 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport
*tgtport
,
2193 struct nvmet_fc_fcp_iod
*fod
)
2195 struct nvme_fc_cmd_iu
*cmdiu
= &fod
->cmdiubuf
;
2196 u32 xfrlen
= be32_to_cpu(cmdiu
->data_len
);
2200 * if there is no nvmet mapping to the targetport there
2201 * shouldn't be requests. just terminate them.
2204 goto transport_error
;
	 * Fused commands are currently not supported in the Linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
2215 fod
->fcpreq
->done
= nvmet_fc_xmt_fcp_op_done
;
2217 if (cmdiu
->flags
& FCNVME_CMD_FLAGS_WRITE
) {
2218 fod
->io_dir
= NVMET_FCP_WRITE
;
2219 if (!nvme_is_write(&cmdiu
->sqe
))
2220 goto transport_error
;
2221 } else if (cmdiu
->flags
& FCNVME_CMD_FLAGS_READ
) {
2222 fod
->io_dir
= NVMET_FCP_READ
;
2223 if (nvme_is_write(&cmdiu
->sqe
))
2224 goto transport_error
;
2226 fod
->io_dir
= NVMET_FCP_NODATA
;
2228 goto transport_error
;
2231 fod
->req
.cmd
= &fod
->cmdiubuf
.sqe
;
2232 fod
->req
.rsp
= &fod
->rspiubuf
.cqe
;
2233 fod
->req
.port
= tgtport
->pe
->port
;
2235 /* clear any response payload */
2236 memset(&fod
->rspiubuf
, 0, sizeof(fod
->rspiubuf
));
2238 fod
->data_sg
= NULL
;
2239 fod
->data_sg_cnt
= 0;
2241 ret
= nvmet_req_init(&fod
->req
,
2242 &fod
->queue
->nvme_cq
,
2243 &fod
->queue
->nvme_sq
,
2244 &nvmet_fc_tgt_fcp_ops
);
2246 /* bad SQE content or invalid ctrl state */
2247 /* nvmet layer has already called op done to send rsp. */
2251 fod
->req
.transfer_len
= xfrlen
;
2253 /* keep a running counter of tail position */
2254 atomic_inc(&fod
->queue
->sqtail
);
2256 if (fod
->req
.transfer_len
) {
2257 ret
= nvmet_fc_alloc_tgt_pgs(fod
);
2259 nvmet_req_complete(&fod
->req
, ret
);
2263 fod
->req
.sg
= fod
->data_sg
;
2264 fod
->req
.sg_cnt
= fod
->data_sg_cnt
;
2267 if (fod
->io_dir
== NVMET_FCP_WRITE
) {
2268 /* pull the data over before invoking nvmet layer */
2269 nvmet_fc_transfer_fcp_data(tgtport
, fod
, NVMET_FCOP_WRITEDATA
);
2276 * can invoke the nvmet_layer now. If read data, cmd completion will
2279 nvmet_req_execute(&fod
->req
);
2283 nvmet_fc_abort_op(tgtport
, fod
);
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of an NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, due to the packetized nature of FC and the api of the FC
 * LLDD - which may issue a hw command to send the response but not
 * receive the hw completion for that command before a new command is
 * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. It gives
 * the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD iu buffer information is remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen  = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
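
/*
 * Illustrative sketch only (not part of this driver): how a hypothetical
 * LLDD might hand a received FCP CMD IU to the transport and honor the
 * -EOVERFLOW / defer_rcv() contract described above. Every identifier
 * prefixed with "example_" is an assumption made for illustration; the
 * only transport entry point used is nvmet_fc_rcv_fcp_req().
 */
#if 0
static void
example_lldd_rcv_fcp_cmd(struct nvmet_fc_target_port *tport,
			 struct example_lldd_io *io,	/* hypothetical hw i/o context */
			 void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(tport, &io->tgt_fcp_req,
				   cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* transport copied the CMD IU; the buffer may be reused now */
		example_lldd_repost_cmd_buffer(io, cmdiubuf);
		break;
	case -EOVERFLOW:
		/*
		 * accepted but deferred: leave the CMD IU buffer untouched
		 * until the template's defer_rcv() callback is invoked for
		 * &io->tgt_fcp_req, then repost it from that callback.
		 */
		break;
	default:
		/* not accepted by the transport: abort the exchange */
		example_lldd_abort_exchange(io);
		break;
	}
}
#endif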

/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to the fcpreq request structure that corresponds
 *              to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
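
/*
 * Illustrative sketch only (not part of this driver): a hypothetical LLDD
 * ABTS handler built on the entry point above. Identifiers prefixed with
 * "example_" are assumptions made for illustration; the only transport
 * call used is nvmet_fc_rcv_fcp_abort().
 */
#if 0
static void
example_lldd_handle_abts(struct nvmet_fc_target_port *tport,
			 struct example_lldd_io *io)	/* hypothetical hw i/o context */
{
	/*
	 * Tell the transport the exchange was aborted. Any data/rsp op the
	 * transport still has outstanding with the LLDD should be completed
	 * with an error, after which the transport hands the i/o context
	 * back via the template's fcp_req_release() callback.
	 */
	nvmet_fc_rcv_fcp_abort(tport, &io->tgt_fcp_req);

	/*
	 * Hold off the BA_ACC until the i/o context has been returned
	 * (tracked here with a hypothetical flag checked in the LLDD's
	 * fcp_req_release() handler).
	 */
	io->send_ba_acc_on_release = true;
}
#endif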

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
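
/*
 * For reference: the two traddr spellings accepted by the parser above,
 * shown with made-up example WWNs (illustration only, not values taken
 * from any real configuration):
 *
 *   nn-0x20000090fa942779:pn-0x10000090fa942779    (long form, "0x" prefixed)
 *   nn-20000090fa942779:pn-10000090fa942779        (short form, no "0x")
 *
 * Either spelling would yield traddr->nn == 0x20000090fa942779 and
 * traddr->pn == 0x10000090fa942779.
 */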

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_port_entry *pe;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->pe) {
				nvmet_fc_portentry_bind(tgtport, pe, port);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (ret)
		kfree(pe);

	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;

	nvmet_fc_portentry_unbind(pe);

	kfree(pe);
}

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");