// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048
struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};
struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};
struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};
struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};
struct nvmet_fc_tgt_queue {
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};
static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}
/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 *   in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
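
/*
 * Illustrative example (not part of the original source): with
 * BYTES_FOR_QID = 2, BYTES_FOR_QID_SHIFT is 16 and NVMET_FC_QUEUEID_MASK
 * is 0xffff. For a random association id of 0x0123456789ab0000,
 * nvmet_fc_makeconnid(assoc, 3) below yields the connection id
 * 0x0123456789ab0003; nvmet_fc_getassociationid() and nvmet_fc_getqueueid()
 * recover 0x0123456789ab0000 and 3 from it. Queue 0's connection id is the
 * association id itself.
 */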
static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}
/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address, we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	kfree(tgtport->iod);

	return -EFAULT;
}
static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}
static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}
static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}
static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}
static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}
static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}
static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen  = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}
static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}
static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}
static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}
static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* about outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}
static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}
static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}
static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}
static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}
static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}
static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport)
		pe->tgtport->pe = NULL;
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport).  If so, set the nvmet port
 * port entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);

			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		if (!schedule_work(&assoc->del_work))
			/* already deleting - release local reference */
			nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			if (!schedule_work(&assoc->del_work))
				/* already deleting - release local reference */
				nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}
/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for Create Association Cmd descriptor
	 * list.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}
static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}
static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			(struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			(struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	/*
	 * As the standard changed on the LS, check if old format and scope
	 * something other than Association (e.g. 0).
	 */
	else if (rqst->discon_cmd.rsvd8[0])
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	nvmet_fc_delete_target_assoc(iod->assoc);
}
/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT_ASSOC:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	unsigned int nent;

	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
	if (!sg)
		goto out;

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */
	fod->next_sg = fod->data_sg;

	return 0;

out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	sgl_free(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}
static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
/*
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->req.transfer_len;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *     but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    nvme_is_fabrics((struct nvme_command *) sqe) ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	/*
	 * if an ABTS was received or we issued the fcp_abort early
	 * don't call abort routine again.
	 */
	/* no need to take lock - lock was taken earlier to get here */
	if (!fod->aborted)
		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);

	nvmet_fc_free_fcp_iod(fod->queue, fod);
}
static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod);
}
static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg = fod->next_sg;
	unsigned long flags;
	u32 remaininglen = fod->req.transfer_len - fod->offset;
	u32 tlen = 0;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	/*
	 * for next sequence:
	 *  break at a sg element boundary
	 *  attempt to keep sequence length capped at
	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
	 *    be longer if a single sg element is larger
	 *    than that amount. This is done to avoid creating
	 *    a new sg list to use for the tgtport api.
	 */
	fcpreq->sg = sg;
	fcpreq->sg_cnt = 0;
	while (tlen < remaininglen &&
	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
		fcpreq->sg_cnt++;
		tlen += sg_dma_len(sg);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
		fcpreq->sg_cnt++;
		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen)
		fod->next_sg = sg;
	else
		fod->next_sg = NULL;

	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as its in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}
static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	/* if in the middle of an io and we need to tear down */
	if (abort) {
		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return true;
		}

		nvmet_fc_abort_op(tgtport, fod);
		return true;
	}

	return false;
}

/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		fod->req.execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}
static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	nvmet_fc_fod_op_done(fod);
}
/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}


static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}
/*
 * Actual processing routine for received FC-NVME I/O Requests from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
	int ret;

	/*
	 * if there is no nvmet mapping to the targetport there
	 * shouldn't be requests. just terminate them.
	 */
	if (!tgtport->pe)
		goto transport_error;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (xfrlen)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.cqe = &fod->rspiubuf.cqe;
	fod->req.port = tgtport->pe->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	fod->req.transfer_len = xfrlen;

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	fod->req.execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}
/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of an NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, due to the packetized nature of FC and the api of the FC
 * LLDD - which may issue a hw command to send the response, yet not
 * receive the hw completion for that command before a new command is
 * asynchronously received - it's possible for a command to arrive
 * before the LLDD and nvmet_fc have recycled the job structure. This
 * gives the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD IU buffer information are remembered, and
 * the routine returns a -EOVERFLOW status. Subsequently, when a queue
 * job structure is freed, it is immediately reallocated for anything on
 * the pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If any other error occurs, the transport returns a non-zero status
 * indicating the error. In all cases other than -EOVERFLOW, the
 * transport has not accepted the request and the LLDD should abort the
 * exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to
 *               reference the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
			be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
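
/*
 * Illustrative sketch (not part of the transport): how an LLDD's
 * command-reception path might hand a received FCP CMD IU to nvmet-fc
 * and honor the -EOVERFLOW/defer_rcv() contract described above. The
 * lldd/ctx structures and the lldd_recycle_cmd_buf()/
 * lldd_abort_exchange() helpers are hypothetical and LLDD-specific.
 *
 *	ret = nvmet_fc_rcv_fcp_req(lldd->targetport, &ctx->tgt_fcp_req,
 *				   ctx->cmd_iu_buf, ctx->cmd_iu_len);
 *	switch (ret) {
 *	case 0:
 *		// accepted: the CMD IU buffer may be reused immediately
 *		lldd_recycle_cmd_buf(lldd, ctx);
 *		break;
 *	case -EOVERFLOW:
 *		// accepted but deferred: leave the CMD IU buffer untouched
 *		// until the template's defer_rcv() callback fires for
 *		// &ctx->tgt_fcp_req, then recycle it there.
 *		break;
 *	default:
 *		// not accepted: abort the FC exchange and clean up
 *		lldd_abort_exchange(lldd, ctx);
 *		break;
 *	}
 */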
/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the fcp_req_release() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure that corresponds
 *               to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
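
/*
 * Illustrative sketch (not part of the transport): how an LLDD might
 * report an ABTS for an outstanding FCP exchange, per the contract
 * described above. The lldd/ctx structures and the choice to defer the
 * BA_ACC until fcp_req_release() returns the i/o context are
 * hypothetical and LLDD-specific.
 *
 *	// in the LLDD's ABTS handler, after matching the ABTS to the
 *	// exchange it aborts:
 *	nvmet_fc_rcv_fcp_abort(lldd->targetport, &ctx->tgt_fcp_req);
 *
 *	// defer the BA_ACC until the transport has returned the i/o
 *	// context via the template's fcp_req_release() callback.
 *	ctx->send_ba_acc_on_release = true;
 */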
struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
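
/*
 * For reference, a minimal sketch of the two traddr string forms the
 * parser above accepts (the WWN values shown are arbitrary examples):
 *
 *	nn-0x20000090fa942779:pn-0x10000090fa942779	(0x-prefixed form)
 *	nn-20000090fa942779:pn-10000090fa942779		(bare hex form)
 *
 * i.e. a 16-hex-digit node name and port name, each optionally prefixed
 * with "0x", joined by a single separator character (conventionally ':').
 */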
static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_port_entry *pe;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	ret = -ENXIO;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->pe) {
				nvmet_fc_portentry_bind(tgtport, pe, port);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (ret)
		kfree(pe);

	return ret;
}
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;

	nvmet_fc_portentry_unbind(pe);

	kfree(pe);
}

static void
nvmet_fc_discovery_chg(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;
	struct nvmet_fc_tgtport *tgtport = pe->tgtport;

	if (tgtport && tgtport->ops->discovery_event)
		tgtport->ops->discovery_event(&tgtport->fc_target_port);
}
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
	.discovery_chg		= nvmet_fc_discovery_chg,
};
static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");