// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */
7 #include <target/target_core_base.h>
8 #include <target/target_core_fabric.h>
9 #include "efct_driver.h"
/*
 * lio_wq is used to call the LIO backend during creation or deletion of
 * sessions. This brings serialization to the session management as we create
 * single threaded work queue.
 */
static struct workqueue_struct *lio_wq;
20 efct_format_wwn(char *str
, size_t len
, const char *pre
, u64 wwn
)
24 put_unaligned_be64(wwn
, a
);
25 return snprintf(str
, len
, "%s%8phC", pre
, a
);
29 efct_lio_parse_wwn(const char *name
, u64
*wwp
, u8 npiv
)
36 "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
37 &b
[0], &b
[1], &b
[2], &b
[3], &b
[4], &b
[5], &b
[6],
41 "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
42 &b
[0], &b
[1], &b
[2], &b
[3], &b
[4], &b
[5], &b
[6],
49 *wwp
= get_unaligned_be64(b
);
54 efct_lio_parse_npiv_wwn(const char *name
, size_t size
, u64
*wwpn
, u64
*wwnn
)
56 unsigned int cnt
= size
;
60 if (name
[cnt
- 1] == '\n' || name
[cnt
- 1] == 0)
63 /* validate we have enough characters for WWPN */
64 if ((cnt
!= (16 + 1 + 16)) || (name
[16] != ':'))
67 rc
= efct_lio_parse_wwn(&name
[0], wwpn
, 1);
71 rc
= efct_lio_parse_wwn(&name
[17], wwnn
, 1);
79 efct_lio_tpg_enable_show(struct config_item
*item
, char *page
)
81 struct se_portal_group
*se_tpg
= to_tpg(item
);
82 struct efct_lio_tpg
*tpg
=
83 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
85 return snprintf(page
, PAGE_SIZE
, "%d\n", tpg
->enabled
);
89 efct_lio_tpg_enable_store(struct config_item
*item
, const char *page
,
92 struct se_portal_group
*se_tpg
= to_tpg(item
);
93 struct efct_lio_tpg
*tpg
=
94 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
99 if (!tpg
->nport
|| !tpg
->nport
->efct
) {
100 pr_err("%s: Unable to find EFCT device\n", __func__
);
104 efct
= tpg
->nport
->efct
;
107 if (kstrtoul(page
, 0, &op
) < 0)
114 efc_log_debug(efct
, "enable portal group %d\n", tpg
->tpgt
);
116 ret
= efct_xport_control(efct
->xport
, EFCT_XPORT_PORT_ONLINE
);
118 efct
->tgt_efct
.lio_nport
= NULL
;
119 efc_log_debug(efct
, "cannot bring port online\n");
122 } else if (op
== 0) {
123 efc_log_debug(efct
, "disable portal group %d\n", tpg
->tpgt
);
125 if (efc
->domain
&& efc
->domain
->nport
)
126 efct_scsi_tgt_del_nport(efc
, efc
->domain
->nport
);
128 tpg
->enabled
= false;
137 efct_lio_npiv_tpg_enable_show(struct config_item
*item
, char *page
)
139 struct se_portal_group
*se_tpg
= to_tpg(item
);
140 struct efct_lio_tpg
*tpg
=
141 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
143 return snprintf(page
, PAGE_SIZE
, "%d\n", tpg
->enabled
);
147 efct_lio_npiv_tpg_enable_store(struct config_item
*item
, const char *page
,
150 struct se_portal_group
*se_tpg
= to_tpg(item
);
151 struct efct_lio_tpg
*tpg
=
152 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
153 struct efct_lio_vport
*lio_vport
= tpg
->vport
;
158 if (kstrtoul(page
, 0, &op
) < 0)
162 pr_err("Unable to find vport\n");
166 efct
= lio_vport
->efct
;
171 efc_log_debug(efct
, "enable portal group %d\n", tpg
->tpgt
);
176 ret
= efc_nport_vport_new(efc
->domain
,
177 lio_vport
->npiv_wwpn
,
178 lio_vport
->npiv_wwnn
,
179 U32_MAX
, false, true,
182 efc_log_err(efct
, "Failed to create Vport\n");
188 if (!(efc_vport_create_spec(efc
, lio_vport
->npiv_wwnn
,
189 lio_vport
->npiv_wwpn
, U32_MAX
,
190 false, true, NULL
, NULL
)))
193 } else if (op
== 0) {
194 efc_log_debug(efct
, "disable portal group %d\n", tpg
->tpgt
);
196 tpg
->enabled
= false;
197 /* only physical nport should exist, free lio_nport
198 * allocated in efct_lio_make_nport
201 efc_nport_vport_del(efct
->efcport
, efc
->domain
,
202 lio_vport
->npiv_wwpn
,
203 lio_vport
->npiv_wwnn
);
212 static char *efct_lio_get_fabric_wwn(struct se_portal_group
*se_tpg
)
214 struct efct_lio_tpg
*tpg
=
215 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
217 return tpg
->nport
->wwpn_str
;
220 static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group
*se_tpg
)
222 struct efct_lio_tpg
*tpg
=
223 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
225 return tpg
->vport
->wwpn_str
;
228 static u16
efct_lio_get_tag(struct se_portal_group
*se_tpg
)
230 struct efct_lio_tpg
*tpg
=
231 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
236 static u16
efct_lio_get_npiv_tag(struct se_portal_group
*se_tpg
)
238 struct efct_lio_tpg
*tpg
=
239 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
/* Demo mode is always allowed for this fabric (constant policy). */
static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}
/* Demo-mode ACL caching is always enabled (constant policy). */
static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 1;
}
254 static int efct_lio_check_demo_write_protect(struct se_portal_group
*se_tpg
)
256 struct efct_lio_tpg
*tpg
=
257 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
259 return tpg
->tpg_attrib
.demo_mode_write_protect
;
263 efct_lio_npiv_check_demo_write_protect(struct se_portal_group
*se_tpg
)
265 struct efct_lio_tpg
*tpg
=
266 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
268 return tpg
->tpg_attrib
.demo_mode_write_protect
;
271 static int efct_lio_check_prod_write_protect(struct se_portal_group
*se_tpg
)
273 struct efct_lio_tpg
*tpg
=
274 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
276 return tpg
->tpg_attrib
.prod_mode_write_protect
;
280 efct_lio_npiv_check_prod_write_protect(struct se_portal_group
*se_tpg
)
282 struct efct_lio_tpg
*tpg
=
283 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
285 return tpg
->tpg_attrib
.prod_mode_write_protect
;
288 static int efct_lio_check_stop_free(struct se_cmd
*se_cmd
)
290 struct efct_scsi_tgt_io
*ocp
=
291 container_of(se_cmd
, struct efct_scsi_tgt_io
, cmd
);
292 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
294 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TFO_CHK_STOP_FREE
);
295 return target_put_sess_cmd(se_cmd
);
299 efct_lio_abort_tgt_cb(struct efct_io
*io
,
300 enum efct_scsi_io_status scsi_status
,
301 u32 flags
, void *arg
)
303 efct_lio_io_printf(io
, "Abort done, status:%d\n", scsi_status
);
308 efct_lio_aborted_task(struct se_cmd
*se_cmd
)
310 struct efct_scsi_tgt_io
*ocp
=
311 container_of(se_cmd
, struct efct_scsi_tgt_io
, cmd
);
312 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
314 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TFO_ABORTED_TASK
);
319 /* command has been aborted, cleanup here */
320 ocp
->aborting
= true;
321 ocp
->err
= EFCT_SCSI_STATUS_ABORTED
;
322 /* terminate the exchange */
323 efct_scsi_tgt_abort_io(io
, efct_lio_abort_tgt_cb
, NULL
);
326 static void efct_lio_release_cmd(struct se_cmd
*se_cmd
)
328 struct efct_scsi_tgt_io
*ocp
=
329 container_of(se_cmd
, struct efct_scsi_tgt_io
, cmd
);
330 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
331 struct efct
*efct
= io
->efct
;
333 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TFO_RELEASE_CMD
);
334 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_CMPL_CMD
);
335 efct_scsi_io_complete(io
);
336 atomic_sub_return(1, &efct
->tgt_efct
.ios_in_use
);
339 static void efct_lio_close_session(struct se_session
*se_sess
)
341 struct efc_node
*node
= se_sess
->fabric_sess_ptr
;
343 pr_debug("se_sess=%p node=%p", se_sess
, node
);
346 pr_debug("node is NULL");
350 efc_node_post_shutdown(node
, NULL
);
353 static int efct_lio_get_cmd_state(struct se_cmd
*cmd
)
355 struct efct_scsi_tgt_io
*ocp
=
356 container_of(cmd
, struct efct_scsi_tgt_io
, cmd
);
357 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
359 return io
->tgt_io
.state
;
363 efct_lio_sg_map(struct efct_io
*io
)
365 struct efct_scsi_tgt_io
*ocp
= &io
->tgt_io
;
366 struct se_cmd
*cmd
= &ocp
->cmd
;
368 ocp
->seg_map_cnt
= dma_map_sg(&io
->efct
->pci
->dev
, cmd
->t_data_sg
,
369 cmd
->t_data_nents
, cmd
->data_direction
);
370 if (ocp
->seg_map_cnt
== 0)
376 efct_lio_sg_unmap(struct efct_io
*io
)
378 struct efct_scsi_tgt_io
*ocp
= &io
->tgt_io
;
379 struct se_cmd
*cmd
= &ocp
->cmd
;
381 if (WARN_ON(!ocp
->seg_map_cnt
|| !cmd
->t_data_sg
))
384 dma_unmap_sg(&io
->efct
->pci
->dev
, cmd
->t_data_sg
,
385 ocp
->seg_map_cnt
, cmd
->data_direction
);
386 ocp
->seg_map_cnt
= 0;
390 efct_lio_status_done(struct efct_io
*io
,
391 enum efct_scsi_io_status scsi_status
,
392 u32 flags
, void *arg
)
394 struct efct_scsi_tgt_io
*ocp
= &io
->tgt_io
;
396 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_RSP_DONE
);
397 if (scsi_status
!= EFCT_SCSI_STATUS_GOOD
) {
398 efct_lio_io_printf(io
, "callback completed with error=%d\n",
400 ocp
->err
= scsi_status
;
402 if (ocp
->seg_map_cnt
)
403 efct_lio_sg_unmap(io
);
405 efct_lio_io_printf(io
, "status=%d, err=%d flags=0x%x, dir=%d\n",
406 scsi_status
, ocp
->err
, flags
, ocp
->ddir
);
408 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TGT_GENERIC_FREE
);
409 transport_generic_free_cmd(&io
->tgt_io
.cmd
, 0);
414 efct_lio_datamove_done(struct efct_io
*io
, enum efct_scsi_io_status scsi_status
,
415 u32 flags
, void *arg
);
418 efct_lio_write_pending(struct se_cmd
*cmd
)
420 struct efct_scsi_tgt_io
*ocp
=
421 container_of(cmd
, struct efct_scsi_tgt_io
, cmd
);
422 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
423 struct efct_scsi_sgl
*sgl
= io
->sgl
;
424 struct scatterlist
*sg
;
425 u32 flags
= 0, cnt
, curcnt
;
428 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TFO_WRITE_PENDING
);
429 efct_lio_io_printf(io
, "trans_state=0x%x se_cmd_flags=0x%x\n",
430 cmd
->transport_state
, cmd
->se_cmd_flags
);
432 if (ocp
->seg_cnt
== 0) {
433 ocp
->seg_cnt
= cmd
->t_data_nents
;
435 if (efct_lio_sg_map(io
)) {
436 efct_lio_io_printf(io
, "efct_lio_sg_map failed\n");
440 curcnt
= (ocp
->seg_map_cnt
- ocp
->cur_seg
);
441 curcnt
= (curcnt
< io
->sgl_allocated
) ? curcnt
: io
->sgl_allocated
;
442 /* find current sg */
443 for (cnt
= 0, sg
= cmd
->t_data_sg
; cnt
< ocp
->cur_seg
; cnt
++,
447 for (cnt
= 0; cnt
< curcnt
; cnt
++, sg
= sg_next(sg
)) {
448 sgl
[cnt
].addr
= sg_dma_address(sg
);
449 sgl
[cnt
].dif_addr
= 0;
450 sgl
[cnt
].len
= sg_dma_len(sg
);
451 length
+= sgl
[cnt
].len
;
455 if (ocp
->cur_seg
== ocp
->seg_cnt
)
456 flags
= EFCT_SCSI_LAST_DATAPHASE
;
458 return efct_scsi_recv_wr_data(io
, flags
, sgl
, curcnt
, length
,
459 efct_lio_datamove_done
, NULL
);
463 efct_lio_queue_data_in(struct se_cmd
*cmd
)
465 struct efct_scsi_tgt_io
*ocp
=
466 container_of(cmd
, struct efct_scsi_tgt_io
, cmd
);
467 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
468 struct efct_scsi_sgl
*sgl
= io
->sgl
;
469 struct scatterlist
*sg
= NULL
;
470 uint flags
= 0, cnt
= 0, curcnt
= 0;
473 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN
);
475 if (ocp
->seg_cnt
== 0) {
476 if (cmd
->data_length
) {
477 ocp
->seg_cnt
= cmd
->t_data_nents
;
479 if (efct_lio_sg_map(io
)) {
480 efct_lio_io_printf(io
,
481 "efct_lio_sg_map failed\n");
485 /* If command length is 0, send the response status */
486 struct efct_scsi_cmd_resp rsp
;
488 memset(&rsp
, 0, sizeof(rsp
));
489 efct_lio_io_printf(io
,
490 "cmd : %p length 0, send status\n",
492 return efct_scsi_send_resp(io
, 0, &rsp
,
493 efct_lio_status_done
, NULL
);
496 curcnt
= min(ocp
->seg_map_cnt
- ocp
->cur_seg
, io
->sgl_allocated
);
498 while (cnt
< curcnt
) {
499 sg
= &cmd
->t_data_sg
[ocp
->cur_seg
];
500 sgl
[cnt
].addr
= sg_dma_address(sg
);
501 sgl
[cnt
].dif_addr
= 0;
502 if (ocp
->transferred_len
+ sg_dma_len(sg
) >= cmd
->data_length
)
503 sgl
[cnt
].len
= cmd
->data_length
- ocp
->transferred_len
;
505 sgl
[cnt
].len
= sg_dma_len(sg
);
507 ocp
->transferred_len
+= sgl
[cnt
].len
;
508 length
+= sgl
[cnt
].len
;
511 if (ocp
->transferred_len
== cmd
->data_length
)
515 if (ocp
->transferred_len
== cmd
->data_length
) {
516 flags
= EFCT_SCSI_LAST_DATAPHASE
;
517 ocp
->seg_cnt
= ocp
->cur_seg
;
520 /* If there is residual, disable Auto Good Response */
521 if (cmd
->residual_count
)
522 flags
|= EFCT_SCSI_NO_AUTO_RESPONSE
;
524 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_SEND_RD_DATA
);
526 return efct_scsi_send_rd_data(io
, flags
, sgl
, curcnt
, length
,
527 efct_lio_datamove_done
, NULL
);
531 efct_lio_send_resp(struct efct_io
*io
, enum efct_scsi_io_status scsi_status
,
534 struct efct_scsi_cmd_resp rsp
;
535 struct efct_scsi_tgt_io
*ocp
= &io
->tgt_io
;
536 struct se_cmd
*cmd
= &io
->tgt_io
.cmd
;
539 if (flags
& EFCT_SCSI_IO_CMPL_RSP_SENT
) {
540 ocp
->rsp_sent
= true;
541 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TGT_GENERIC_FREE
);
542 transport_generic_free_cmd(&io
->tgt_io
.cmd
, 0);
546 /* send check condition if an error occurred */
547 memset(&rsp
, 0, sizeof(rsp
));
548 rsp
.scsi_status
= cmd
->scsi_status
;
549 rsp
.sense_data
= (uint8_t *)io
->tgt_io
.sense_buffer
;
550 rsp
.sense_data_length
= cmd
->scsi_sense_length
;
552 /* Check for residual underrun or overrun */
553 if (cmd
->se_cmd_flags
& SCF_OVERFLOW_BIT
)
554 rsp
.residual
= -cmd
->residual_count
;
555 else if (cmd
->se_cmd_flags
& SCF_UNDERFLOW_BIT
)
556 rsp
.residual
= cmd
->residual_count
;
558 rc
= efct_scsi_send_resp(io
, 0, &rsp
, efct_lio_status_done
, NULL
);
559 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_SEND_RSP
);
561 efct_lio_io_printf(io
, "Read done, send rsp failed %d\n", rc
);
562 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TGT_GENERIC_FREE
);
563 transport_generic_free_cmd(&io
->tgt_io
.cmd
, 0);
565 ocp
->rsp_sent
= true;
570 efct_lio_datamove_done(struct efct_io
*io
, enum efct_scsi_io_status scsi_status
,
571 u32 flags
, void *arg
)
573 struct efct_scsi_tgt_io
*ocp
= &io
->tgt_io
;
575 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_DATA_DONE
);
576 if (scsi_status
!= EFCT_SCSI_STATUS_GOOD
) {
577 efct_lio_io_printf(io
, "callback completed with error=%d\n",
579 ocp
->err
= scsi_status
;
581 efct_lio_io_printf(io
, "seg_map_cnt=%d\n", ocp
->seg_map_cnt
);
582 if (ocp
->seg_map_cnt
) {
583 if (ocp
->err
== EFCT_SCSI_STATUS_GOOD
&&
584 ocp
->cur_seg
< ocp
->seg_cnt
) {
587 efct_lio_io_printf(io
, "continuing cmd at segm=%d\n",
589 if (ocp
->ddir
== DMA_TO_DEVICE
)
590 rc
= efct_lio_write_pending(&ocp
->cmd
);
592 rc
= efct_lio_queue_data_in(&ocp
->cmd
);
596 ocp
->err
= EFCT_SCSI_STATUS_ERROR
;
597 efct_lio_io_printf(io
, "could not continue command\n");
599 efct_lio_sg_unmap(io
);
602 if (io
->tgt_io
.aborting
) {
603 efct_lio_io_printf(io
, "IO done aborted\n");
607 if (ocp
->ddir
== DMA_TO_DEVICE
) {
608 efct_lio_io_printf(io
, "Write done, trans_state=0x%x\n",
609 io
->tgt_io
.cmd
.transport_state
);
610 if (scsi_status
!= EFCT_SCSI_STATUS_GOOD
) {
611 transport_generic_request_failure(&io
->tgt_io
.cmd
,
612 TCM_CHECK_CONDITION_ABORT_CMD
);
613 efct_set_lio_io_state(io
,
614 EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE
);
616 efct_set_lio_io_state(io
,
617 EFCT_LIO_STATE_TGT_EXECUTE_CMD
);
618 target_execute_cmd(&io
->tgt_io
.cmd
);
621 efct_lio_send_resp(io
, scsi_status
, flags
);
627 efct_lio_tmf_done(struct efct_io
*io
, enum efct_scsi_io_status scsi_status
,
628 u32 flags
, void *arg
)
630 efct_lio_tmfio_printf(io
, "cmd=%p status=%d, flags=0x%x\n",
631 &io
->tgt_io
.cmd
, scsi_status
, flags
);
633 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TGT_GENERIC_FREE
);
634 transport_generic_free_cmd(&io
->tgt_io
.cmd
, 0);
639 efct_lio_null_tmf_done(struct efct_io
*tmfio
,
640 enum efct_scsi_io_status scsi_status
,
641 u32 flags
, void *arg
)
643 efct_lio_tmfio_printf(tmfio
, "cmd=%p status=%d, flags=0x%x\n",
644 &tmfio
->tgt_io
.cmd
, scsi_status
, flags
);
646 /* free struct efct_io only, no active se_cmd */
647 efct_scsi_io_complete(tmfio
);
652 efct_lio_queue_status(struct se_cmd
*cmd
)
654 struct efct_scsi_cmd_resp rsp
;
655 struct efct_scsi_tgt_io
*ocp
=
656 container_of(cmd
, struct efct_scsi_tgt_io
, cmd
);
657 struct efct_io
*io
= container_of(ocp
, struct efct_io
, tgt_io
);
660 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TFO_QUEUE_STATUS
);
661 efct_lio_io_printf(io
,
662 "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
663 cmd
->scsi_status
, cmd
->transport_state
, cmd
->se_cmd_flags
,
664 cmd
->scsi_sense_length
);
666 memset(&rsp
, 0, sizeof(rsp
));
667 rsp
.scsi_status
= cmd
->scsi_status
;
668 rsp
.sense_data
= (u8
*)io
->tgt_io
.sense_buffer
;
669 rsp
.sense_data_length
= cmd
->scsi_sense_length
;
671 /* Check for residual underrun or overrun, mark negitive value for
672 * underrun to recognize in HW
674 if (cmd
->se_cmd_flags
& SCF_OVERFLOW_BIT
)
675 rsp
.residual
= -cmd
->residual_count
;
676 else if (cmd
->se_cmd_flags
& SCF_UNDERFLOW_BIT
)
677 rsp
.residual
= cmd
->residual_count
;
679 rc
= efct_scsi_send_resp(io
, 0, &rsp
, efct_lio_status_done
, NULL
);
680 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_SEND_RSP
);
682 ocp
->rsp_sent
= true;
686 static void efct_lio_queue_tm_rsp(struct se_cmd
*cmd
)
688 struct efct_scsi_tgt_io
*ocp
=
689 container_of(cmd
, struct efct_scsi_tgt_io
, cmd
);
690 struct efct_io
*tmfio
= container_of(ocp
, struct efct_io
, tgt_io
);
691 struct se_tmr_req
*se_tmr
= cmd
->se_tmr_req
;
694 efct_lio_tmfio_printf(tmfio
, "cmd=%p function=0x%x tmr->response=%d\n",
695 cmd
, se_tmr
->function
, se_tmr
->response
);
696 switch (se_tmr
->response
) {
697 case TMR_FUNCTION_COMPLETE
:
698 rspcode
= EFCT_SCSI_TMF_FUNCTION_COMPLETE
;
700 case TMR_TASK_DOES_NOT_EXIST
:
701 rspcode
= EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND
;
703 case TMR_LUN_DOES_NOT_EXIST
:
704 rspcode
= EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER
;
706 case TMR_FUNCTION_REJECTED
:
708 rspcode
= EFCT_SCSI_TMF_FUNCTION_REJECTED
;
711 efct_scsi_send_tmf_resp(tmfio
, rspcode
, NULL
, efct_lio_tmf_done
, NULL
);
714 static struct efct
*efct_find_wwpn(u64 wwpn
)
718 /* Search for the HBA that has this WWPN */
719 list_for_each_entry(efct
, &efct_devices
, list_entry
) {
721 if (wwpn
== efct_get_wwpn(&efct
->hw
))
728 static struct se_wwn
*
729 efct_lio_make_nport(struct target_fabric_configfs
*tf
,
730 struct config_group
*group
, const char *name
)
732 struct efct_lio_nport
*lio_nport
;
737 ret
= efct_lio_parse_wwn(name
, &wwpn
, 0);
741 efct
= efct_find_wwpn(wwpn
);
743 pr_err("cannot find EFCT for base wwpn %s\n", name
);
744 return ERR_PTR(-ENXIO
);
747 lio_nport
= kzalloc(sizeof(*lio_nport
), GFP_KERNEL
);
749 return ERR_PTR(-ENOMEM
);
751 lio_nport
->efct
= efct
;
752 lio_nport
->wwpn
= wwpn
;
753 efct_format_wwn(lio_nport
->wwpn_str
, sizeof(lio_nport
->wwpn_str
),
755 efct
->tgt_efct
.lio_nport
= lio_nport
;
757 return &lio_nport
->nport_wwn
;
760 static struct se_wwn
*
761 efct_lio_npiv_make_nport(struct target_fabric_configfs
*tf
,
762 struct config_group
*group
, const char *name
)
764 struct efct_lio_vport
*lio_vport
;
767 u64 p_wwpn
, npiv_wwpn
, npiv_wwnn
;
768 char *p
, *pbuf
, tmp
[128];
769 struct efct_lio_vport_list_t
*vport_list
;
770 struct fc_vport
*new_fc_vport
;
771 struct fc_vport_identifiers vport_id
;
772 unsigned long flags
= 0;
774 snprintf(tmp
, sizeof(tmp
), "%s", name
);
777 p
= strsep(&pbuf
, "@");
780 pr_err("Unable to find separator operator(@)\n");
781 return ERR_PTR(-EINVAL
);
784 ret
= efct_lio_parse_wwn(p
, &p_wwpn
, 0);
788 ret
= efct_lio_parse_npiv_wwn(pbuf
, strlen(pbuf
), &npiv_wwpn
,
793 efct
= efct_find_wwpn(p_wwpn
);
795 pr_err("cannot find EFCT for base wwpn %s\n", name
);
796 return ERR_PTR(-ENXIO
);
799 lio_vport
= kzalloc(sizeof(*lio_vport
), GFP_KERNEL
);
801 return ERR_PTR(-ENOMEM
);
803 lio_vport
->efct
= efct
;
804 lio_vport
->wwpn
= p_wwpn
;
805 lio_vport
->npiv_wwpn
= npiv_wwpn
;
806 lio_vport
->npiv_wwnn
= npiv_wwnn
;
808 efct_format_wwn(lio_vport
->wwpn_str
, sizeof(lio_vport
->wwpn_str
),
811 vport_list
= kzalloc(sizeof(*vport_list
), GFP_KERNEL
);
814 return ERR_PTR(-ENOMEM
);
817 vport_list
->lio_vport
= lio_vport
;
819 memset(&vport_id
, 0, sizeof(vport_id
));
820 vport_id
.port_name
= npiv_wwpn
;
821 vport_id
.node_name
= npiv_wwnn
;
822 vport_id
.roles
= FC_PORT_ROLE_FCP_INITIATOR
;
823 vport_id
.vport_type
= FC_PORTTYPE_NPIV
;
824 vport_id
.disable
= false;
826 new_fc_vport
= fc_vport_create(efct
->shost
, 0, &vport_id
);
828 efc_log_err(efct
, "fc_vport_create failed\n");
831 return ERR_PTR(-ENOMEM
);
834 lio_vport
->fc_vport
= new_fc_vport
;
835 spin_lock_irqsave(&efct
->tgt_efct
.efct_lio_lock
, flags
);
836 INIT_LIST_HEAD(&vport_list
->list_entry
);
837 list_add_tail(&vport_list
->list_entry
, &efct
->tgt_efct
.vport_list
);
838 spin_unlock_irqrestore(&efct
->tgt_efct
.efct_lio_lock
, flags
);
840 return &lio_vport
->vport_wwn
;
844 efct_lio_drop_nport(struct se_wwn
*wwn
)
846 struct efct_lio_nport
*lio_nport
=
847 container_of(wwn
, struct efct_lio_nport
, nport_wwn
);
848 struct efct
*efct
= lio_nport
->efct
;
850 /* only physical nport should exist, free lio_nport allocated
851 * in efct_lio_make_nport.
853 kfree(efct
->tgt_efct
.lio_nport
);
854 efct
->tgt_efct
.lio_nport
= NULL
;
858 efct_lio_npiv_drop_nport(struct se_wwn
*wwn
)
860 struct efct_lio_vport
*lio_vport
=
861 container_of(wwn
, struct efct_lio_vport
, vport_wwn
);
862 struct efct_lio_vport_list_t
*vport
, *next_vport
;
863 struct efct
*efct
= lio_vport
->efct
;
864 unsigned long flags
= 0;
866 if (lio_vport
->fc_vport
)
867 fc_vport_terminate(lio_vport
->fc_vport
);
869 spin_lock_irqsave(&efct
->tgt_efct
.efct_lio_lock
, flags
);
871 list_for_each_entry_safe(vport
, next_vport
, &efct
->tgt_efct
.vport_list
,
873 if (vport
->lio_vport
== lio_vport
) {
874 list_del(&vport
->list_entry
);
875 kfree(vport
->lio_vport
);
880 spin_unlock_irqrestore(&efct
->tgt_efct
.efct_lio_lock
, flags
);
883 static struct se_portal_group
*
884 efct_lio_make_tpg(struct se_wwn
*wwn
, const char *name
)
886 struct efct_lio_nport
*lio_nport
=
887 container_of(wwn
, struct efct_lio_nport
, nport_wwn
);
888 struct efct_lio_tpg
*tpg
;
893 if (strstr(name
, "tpgt_") != name
)
894 return ERR_PTR(-EINVAL
);
895 if (kstrtoul(name
+ 5, 10, &n
) || n
> USHRT_MAX
)
896 return ERR_PTR(-EINVAL
);
898 tpg
= kzalloc(sizeof(*tpg
), GFP_KERNEL
);
900 return ERR_PTR(-ENOMEM
);
902 tpg
->nport
= lio_nport
;
904 tpg
->enabled
= false;
906 tpg
->tpg_attrib
.generate_node_acls
= 1;
907 tpg
->tpg_attrib
.demo_mode_write_protect
= 1;
908 tpg
->tpg_attrib
.cache_dynamic_acls
= 1;
909 tpg
->tpg_attrib
.demo_mode_login_only
= 1;
910 tpg
->tpg_attrib
.session_deletion_wait
= 1;
912 ret
= core_tpg_register(wwn
, &tpg
->tpg
, SCSI_PROTOCOL_FCP
);
917 efct
= lio_nport
->efct
;
918 efct
->tgt_efct
.tpg
= tpg
;
919 efc_log_debug(efct
, "create portal group %d\n", tpg
->tpgt
);
921 xa_init(&efct
->lookup
);
926 efct_lio_drop_tpg(struct se_portal_group
*se_tpg
)
928 struct efct_lio_tpg
*tpg
=
929 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
931 struct efct
*efct
= tpg
->nport
->efct
;
933 efc_log_debug(efct
, "drop portal group %d\n", tpg
->tpgt
);
934 tpg
->nport
->efct
->tgt_efct
.tpg
= NULL
;
935 core_tpg_deregister(se_tpg
);
936 xa_destroy(&efct
->lookup
);
940 static struct se_portal_group
*
941 efct_lio_npiv_make_tpg(struct se_wwn
*wwn
, const char *name
)
943 struct efct_lio_vport
*lio_vport
=
944 container_of(wwn
, struct efct_lio_vport
, vport_wwn
);
945 struct efct_lio_tpg
*tpg
;
950 efct
= lio_vport
->efct
;
951 if (strstr(name
, "tpgt_") != name
)
952 return ERR_PTR(-EINVAL
);
953 if (kstrtoul(name
+ 5, 10, &n
) || n
> USHRT_MAX
)
954 return ERR_PTR(-EINVAL
);
957 efc_log_err(efct
, "Invalid tpgt index: %ld provided\n", n
);
958 return ERR_PTR(-EINVAL
);
961 tpg
= kzalloc(sizeof(*tpg
), GFP_KERNEL
);
963 return ERR_PTR(-ENOMEM
);
965 tpg
->vport
= lio_vport
;
967 tpg
->enabled
= false;
969 tpg
->tpg_attrib
.generate_node_acls
= 1;
970 tpg
->tpg_attrib
.demo_mode_write_protect
= 1;
971 tpg
->tpg_attrib
.cache_dynamic_acls
= 1;
972 tpg
->tpg_attrib
.demo_mode_login_only
= 1;
973 tpg
->tpg_attrib
.session_deletion_wait
= 1;
975 ret
= core_tpg_register(wwn
, &tpg
->tpg
, SCSI_PROTOCOL_FCP
);
981 lio_vport
->tpg
= tpg
;
982 efc_log_debug(efct
, "create vport portal group %d\n", tpg
->tpgt
);
988 efct_lio_npiv_drop_tpg(struct se_portal_group
*se_tpg
)
990 struct efct_lio_tpg
*tpg
=
991 container_of(se_tpg
, struct efct_lio_tpg
, tpg
);
993 efc_log_debug(tpg
->vport
->efct
, "drop npiv portal group %d\n",
995 core_tpg_deregister(se_tpg
);
1000 efct_lio_init_nodeacl(struct se_node_acl
*se_nacl
, const char *name
)
1002 struct efct_lio_nacl
*nacl
;
1005 if (efct_lio_parse_wwn(name
, &wwnn
, 0) < 0)
1008 nacl
= container_of(se_nacl
, struct efct_lio_nacl
, se_node_acl
);
1009 nacl
->nport_wwnn
= wwnn
;
1011 efct_format_wwn(nacl
->nport_name
, sizeof(nacl
->nport_name
), "", wwnn
);
1015 static int efct_lio_check_demo_mode_login_only(struct se_portal_group
*stpg
)
1017 struct efct_lio_tpg
*tpg
= container_of(stpg
, struct efct_lio_tpg
, tpg
);
1019 return tpg
->tpg_attrib
.demo_mode_login_only
;
1023 efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group
*stpg
)
1025 struct efct_lio_tpg
*tpg
= container_of(stpg
, struct efct_lio_tpg
, tpg
);
1027 return tpg
->tpg_attrib
.demo_mode_login_only
;
1030 static struct efct_lio_tpg
*
1031 efct_get_vport_tpg(struct efc_node
*node
)
1034 u64 wwpn
= node
->nport
->wwpn
;
1035 struct efct_lio_vport_list_t
*vport
, *next
;
1036 struct efct_lio_vport
*lio_vport
= NULL
;
1037 struct efct_lio_tpg
*tpg
= NULL
;
1038 unsigned long flags
= 0;
1040 efct
= node
->efc
->base
;
1041 spin_lock_irqsave(&efct
->tgt_efct
.efct_lio_lock
, flags
);
1042 list_for_each_entry_safe(vport
, next
, &efct
->tgt_efct
.vport_list
,
1044 lio_vport
= vport
->lio_vport
;
1045 if (wwpn
&& lio_vport
&& lio_vport
->npiv_wwpn
== wwpn
) {
1046 efc_log_debug(efct
, "found tpg on vport\n");
1047 tpg
= lio_vport
->tpg
;
1051 spin_unlock_irqrestore(&efct
->tgt_efct
.efct_lio_lock
, flags
);
1056 _efct_tgt_node_free(struct kref
*arg
)
1058 struct efct_node
*tgt_node
= container_of(arg
, struct efct_node
, ref
);
1059 struct efc_node
*node
= tgt_node
->node
;
1061 efc_scsi_del_initiator_complete(node
->efc
, node
);
1065 static int efct_session_cb(struct se_portal_group
*se_tpg
,
1066 struct se_session
*se_sess
, void *private)
1068 struct efc_node
*node
= private;
1069 struct efct_node
*tgt_node
;
1070 struct efct
*efct
= node
->efc
->base
;
1072 tgt_node
= kzalloc(sizeof(*tgt_node
), GFP_KERNEL
);
1076 kref_init(&tgt_node
->ref
);
1077 tgt_node
->release
= _efct_tgt_node_free
;
1079 tgt_node
->session
= se_sess
;
1080 node
->tgt_node
= tgt_node
;
1081 tgt_node
->efct
= efct
;
1083 tgt_node
->node
= node
;
1085 tgt_node
->node_fc_id
= node
->rnode
.fc_id
;
1086 tgt_node
->port_fc_id
= node
->nport
->fc_id
;
1087 tgt_node
->vpi
= node
->nport
->indicator
;
1088 tgt_node
->rpi
= node
->rnode
.indicator
;
1090 spin_lock_init(&tgt_node
->active_ios_lock
);
1091 INIT_LIST_HEAD(&tgt_node
->active_ios
);
1096 int efct_scsi_tgt_new_device(struct efct
*efct
)
1100 /* Get the max settings */
1101 efct
->tgt_efct
.max_sge
= sli_get_max_sge(&efct
->hw
.sli
);
1102 efct
->tgt_efct
.max_sgl
= sli_get_max_sgl(&efct
->hw
.sli
);
1104 /* initialize IO watermark fields */
1105 atomic_set(&efct
->tgt_efct
.ios_in_use
, 0);
1106 total_ios
= efct
->hw
.config
.n_io
;
1107 efc_log_debug(efct
, "total_ios=%d\n", total_ios
);
1108 efct
->tgt_efct
.watermark_min
=
1109 (total_ios
* EFCT_WATERMARK_LOW_PCT
) / 100;
1110 efct
->tgt_efct
.watermark_max
=
1111 (total_ios
* EFCT_WATERMARK_HIGH_PCT
) / 100;
1112 atomic_set(&efct
->tgt_efct
.io_high_watermark
,
1113 efct
->tgt_efct
.watermark_max
);
1114 atomic_set(&efct
->tgt_efct
.watermark_hit
, 0);
1115 atomic_set(&efct
->tgt_efct
.initiator_count
, 0);
1117 lio_wq
= alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM
,
1120 efc_log_err(efct
, "workqueue create failed\n");
1124 spin_lock_init(&efct
->tgt_efct
.efct_lio_lock
);
1125 INIT_LIST_HEAD(&efct
->tgt_efct
.vport_list
);
1130 int efct_scsi_tgt_del_device(struct efct
*efct
)
1132 flush_workqueue(lio_wq
);
1138 efct_scsi_tgt_new_nport(struct efc
*efc
, struct efc_nport
*nport
)
1140 struct efct
*efct
= nport
->efc
->base
;
1142 efc_log_debug(efct
, "New SPORT: %s bound to %s\n", nport
->display_name
,
1143 efct
->tgt_efct
.lio_nport
->wwpn_str
);
1149 efct_scsi_tgt_del_nport(struct efc
*efc
, struct efc_nport
*nport
)
1151 efc_log_debug(efc
, "Del SPORT: %s\n", nport
->display_name
);
1154 static void efct_lio_setup_session(struct work_struct
*work
)
1156 struct efct_lio_wq_data
*wq_data
=
1157 container_of(work
, struct efct_lio_wq_data
, work
);
1158 struct efct
*efct
= wq_data
->efct
;
1159 struct efc_node
*node
= wq_data
->ptr
;
1160 char wwpn
[WWN_NAME_LEN
];
1161 struct efct_lio_tpg
*tpg
;
1162 struct efct_node
*tgt_node
;
1163 struct se_portal_group
*se_tpg
;
1164 struct se_session
*se_sess
;
1169 /* Check to see if it's belongs to vport,
1170 * if not get physical port
1172 tpg
= efct_get_vport_tpg(node
);
1175 } else if (efct
->tgt_efct
.tpg
) {
1176 tpg
= efct
->tgt_efct
.tpg
;
1179 efc_log_err(efct
, "failed to init session\n");
1184 * Format the FCP Initiator port_name into colon
1185 * separated values to match the format by our explicit
1186 * ConfigFS NodeACLs.
1188 efct_format_wwn(wwpn
, sizeof(wwpn
), "", efc_node_get_wwpn(node
));
1190 se_sess
= target_setup_session(se_tpg
, 0, 0, TARGET_PROT_NORMAL
, wwpn
,
1191 node
, efct_session_cb
);
1192 if (IS_ERR(se_sess
)) {
1193 efc_log_err(efct
, "failed to setup session\n");
1195 efc_scsi_sess_reg_complete(node
, -EIO
);
1199 tgt_node
= node
->tgt_node
;
1200 id
= (u64
) tgt_node
->port_fc_id
<< 32 | tgt_node
->node_fc_id
;
1202 efc_log_debug(efct
, "new initiator sess=%p node=%p id: %llx\n",
1205 if (xa_err(xa_store(&efct
->lookup
, id
, tgt_node
, GFP_KERNEL
)))
1206 efc_log_err(efct
, "Node lookup store failed\n");
1208 efc_scsi_sess_reg_complete(node
, 0);
1210 /* update IO watermark: increment initiator count */
1211 ini_count
= atomic_add_return(1, &efct
->tgt_efct
.initiator_count
);
1212 watermark
= efct
->tgt_efct
.watermark_max
-
1213 ini_count
* EFCT_IO_WATERMARK_PER_INITIATOR
;
1214 watermark
= (efct
->tgt_efct
.watermark_min
> watermark
) ?
1215 efct
->tgt_efct
.watermark_min
: watermark
;
1216 atomic_set(&efct
->tgt_efct
.io_high_watermark
, watermark
);
1221 int efct_scsi_new_initiator(struct efc
*efc
, struct efc_node
*node
)
1223 struct efct
*efct
= node
->efc
->base
;
1224 struct efct_lio_wq_data
*wq_data
;
1227 * Since LIO only supports initiator validation at thread level,
1228 * we are open minded and accept all callers.
1230 wq_data
= kzalloc(sizeof(*wq_data
), GFP_ATOMIC
);
1234 wq_data
->ptr
= node
;
1235 wq_data
->efct
= efct
;
1236 INIT_WORK(&wq_data
->work
, efct_lio_setup_session
);
1237 queue_work(lio_wq
, &wq_data
->work
);
1238 return EFC_SCSI_CALL_ASYNC
;
1241 static void efct_lio_remove_session(struct work_struct
*work
)
1243 struct efct_lio_wq_data
*wq_data
=
1244 container_of(work
, struct efct_lio_wq_data
, work
);
1245 struct efct
*efct
= wq_data
->efct
;
1246 struct efc_node
*node
= wq_data
->ptr
;
1247 struct efct_node
*tgt_node
;
1248 struct se_session
*se_sess
;
1250 tgt_node
= node
->tgt_node
;
1252 /* base driver has sent back-to-back requests
1253 * to unreg session with no intervening
1256 efc_log_err(efct
, "unreg session for NULL session\n");
1257 efc_scsi_del_initiator_complete(node
->efc
, node
);
1261 se_sess
= tgt_node
->session
;
1262 efc_log_debug(efct
, "unreg session se_sess=%p node=%p\n",
1265 /* first flag all session commands to complete */
1266 target_stop_session(se_sess
);
1268 /* now wait for session commands to complete */
1269 target_wait_for_sess_cmds(se_sess
);
1270 target_remove_session(se_sess
);
1271 tgt_node
->session
= NULL
;
1272 node
->tgt_node
= NULL
;
1273 kref_put(&tgt_node
->ref
, tgt_node
->release
);
1278 int efct_scsi_del_initiator(struct efc
*efc
, struct efc_node
*node
, int reason
)
1280 struct efct
*efct
= node
->efc
->base
;
1281 struct efct_node
*tgt_node
= node
->tgt_node
;
1282 struct efct_lio_wq_data
*wq_data
;
1287 if (reason
== EFCT_SCSI_INITIATOR_MISSING
)
1288 return EFC_SCSI_CALL_COMPLETE
;
1291 efc_log_err(efct
, "tgt_node is NULL\n");
1295 wq_data
= kzalloc(sizeof(*wq_data
), GFP_ATOMIC
);
1299 id
= (u64
) tgt_node
->port_fc_id
<< 32 | tgt_node
->node_fc_id
;
1300 xa_erase(&efct
->lookup
, id
);
1302 wq_data
->ptr
= node
;
1303 wq_data
->efct
= efct
;
1304 INIT_WORK(&wq_data
->work
, efct_lio_remove_session
);
1305 queue_work(lio_wq
, &wq_data
->work
);
1308 * update IO watermark: decrement initiator count
1310 ini_count
= atomic_sub_return(1, &efct
->tgt_efct
.initiator_count
);
1312 watermark
= efct
->tgt_efct
.watermark_max
-
1313 ini_count
* EFCT_IO_WATERMARK_PER_INITIATOR
;
1314 watermark
= (efct
->tgt_efct
.watermark_min
> watermark
) ?
1315 efct
->tgt_efct
.watermark_min
: watermark
;
1316 atomic_set(&efct
->tgt_efct
.io_high_watermark
, watermark
);
1318 return EFC_SCSI_CALL_ASYNC
;
1321 void efct_scsi_recv_cmd(struct efct_io
*io
, uint64_t lun
, u8
*cdb
,
1322 u32 cdb_len
, u32 flags
)
1324 struct efct_scsi_tgt_io
*ocp
= &io
->tgt_io
;
1325 struct se_cmd
*se_cmd
= &io
->tgt_io
.cmd
;
1326 struct efct
*efct
= io
->efct
;
1328 struct efct_node
*tgt_node
;
1329 struct se_session
*se_sess
;
1332 memset(ocp
, 0, sizeof(struct efct_scsi_tgt_io
));
1333 efct_set_lio_io_state(io
, EFCT_LIO_STATE_SCSI_RECV_CMD
);
1334 atomic_add_return(1, &efct
->tgt_efct
.ios_in_use
);
1336 /* set target timeout */
1337 io
->timeout
= efct
->target_io_timer_sec
;
1339 if (flags
& EFCT_SCSI_CMD_SIMPLE
)
1340 ocp
->task_attr
= TCM_SIMPLE_TAG
;
1341 else if (flags
& EFCT_SCSI_CMD_HEAD_OF_QUEUE
)
1342 ocp
->task_attr
= TCM_HEAD_TAG
;
1343 else if (flags
& EFCT_SCSI_CMD_ORDERED
)
1344 ocp
->task_attr
= TCM_ORDERED_TAG
;
1345 else if (flags
& EFCT_SCSI_CMD_ACA
)
1346 ocp
->task_attr
= TCM_ACA_TAG
;
1348 switch (flags
& (EFCT_SCSI_CMD_DIR_IN
| EFCT_SCSI_CMD_DIR_OUT
)) {
1349 case EFCT_SCSI_CMD_DIR_IN
:
1350 ddir
= "FROM_INITIATOR";
1351 ocp
->ddir
= DMA_TO_DEVICE
;
1353 case EFCT_SCSI_CMD_DIR_OUT
:
1354 ddir
= "TO_INITIATOR";
1355 ocp
->ddir
= DMA_FROM_DEVICE
;
1357 case EFCT_SCSI_CMD_DIR_IN
| EFCT_SCSI_CMD_DIR_OUT
:
1359 ocp
->ddir
= DMA_BIDIRECTIONAL
;
1363 ocp
->ddir
= DMA_NONE
;
1368 efct_lio_io_printf(io
, "new cmd=0x%x ddir=%s dl=%u\n",
1369 cdb
[0], ddir
, io
->exp_xfer_len
);
1371 tgt_node
= io
->node
;
1372 se_sess
= tgt_node
->session
;
1374 efc_log_err(efct
, "No session found to submit IO se_cmd: %p\n",
1376 efct_scsi_io_free(io
);
1380 efct_set_lio_io_state(io
, EFCT_LIO_STATE_TGT_SUBMIT_CMD
);
1381 rc
= target_init_cmd(se_cmd
, se_sess
, &io
->tgt_io
.sense_buffer
[0],
1382 ocp
->lun
, io
->exp_xfer_len
, ocp
->task_attr
,
1383 ocp
->ddir
, TARGET_SCF_ACK_KREF
);
1385 efc_log_err(efct
, "failed to init cmd se_cmd: %p\n", se_cmd
);
1386 efct_scsi_io_free(io
);
1390 if (target_submit_prep(se_cmd
, cdb
, NULL
, 0, NULL
, 0,
1391 NULL
, 0, GFP_ATOMIC
))
1394 target_submit(se_cmd
);
1398 efct_scsi_recv_tmf(struct efct_io
*tmfio
, u32 lun
, enum efct_scsi_tmf_cmd cmd
,
1399 struct efct_io
*io_to_abort
, u32 flags
)
1401 unsigned char tmr_func
;
1402 struct efct
*efct
= tmfio
->efct
;
1403 struct efct_scsi_tgt_io
*ocp
= &tmfio
->tgt_io
;
1404 struct efct_node
*tgt_node
;
1405 struct se_session
*se_sess
;
1408 memset(ocp
, 0, sizeof(struct efct_scsi_tgt_io
));
1409 efct_set_lio_io_state(tmfio
, EFCT_LIO_STATE_SCSI_RECV_TMF
);
1410 atomic_add_return(1, &efct
->tgt_efct
.ios_in_use
);
1411 efct_lio_tmfio_printf(tmfio
, "%s: new tmf %x lun=%u\n",
1412 tmfio
->display_name
, cmd
, lun
);
1415 case EFCT_SCSI_TMF_ABORT_TASK
:
1416 tmr_func
= TMR_ABORT_TASK
;
1418 case EFCT_SCSI_TMF_ABORT_TASK_SET
:
1419 tmr_func
= TMR_ABORT_TASK_SET
;
1421 case EFCT_SCSI_TMF_CLEAR_TASK_SET
:
1422 tmr_func
= TMR_CLEAR_TASK_SET
;
1424 case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET
:
1425 tmr_func
= TMR_LUN_RESET
;
1427 case EFCT_SCSI_TMF_CLEAR_ACA
:
1428 tmr_func
= TMR_CLEAR_ACA
;
1430 case EFCT_SCSI_TMF_TARGET_RESET
:
1431 tmr_func
= TMR_TARGET_WARM_RESET
;
1433 case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT
:
1434 case EFCT_SCSI_TMF_QUERY_TASK_SET
:
1439 tmfio
->tgt_io
.tmf
= tmr_func
;
1440 tmfio
->tgt_io
.lun
= lun
;
1441 tmfio
->tgt_io
.io_to_abort
= io_to_abort
;
1443 tgt_node
= tmfio
->node
;
1445 se_sess
= tgt_node
->session
;
1449 rc
= target_submit_tmr(&ocp
->cmd
, se_sess
, NULL
, lun
, ocp
, tmr_func
,
1450 GFP_ATOMIC
, tmfio
->init_task_tag
, TARGET_SCF_ACK_KREF
);
1452 efct_set_lio_io_state(tmfio
, EFCT_LIO_STATE_TGT_SUBMIT_TMR
);
1459 efct_scsi_send_tmf_resp(tmfio
, EFCT_SCSI_TMF_FUNCTION_REJECTED
,
1460 NULL
, efct_lio_null_tmf_done
, NULL
);
1464 /* Start items for efct_lio_tpg_attrib_cit */
1466 #define DEF_EFCT_TPG_ATTRIB(name) \
1468 static ssize_t efct_lio_tpg_attrib_##name##_show( \
1469 struct config_item *item, char *page) \
1471 struct se_portal_group *se_tpg = to_tpg(item); \
1472 struct efct_lio_tpg *tpg = container_of(se_tpg, \
1473 struct efct_lio_tpg, tpg); \
1475 return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
1478 static ssize_t efct_lio_tpg_attrib_##name##_store( \
1479 struct config_item *item, const char *page, size_t count) \
1481 struct se_portal_group *se_tpg = to_tpg(item); \
1482 struct efct_lio_tpg *tpg = container_of(se_tpg, \
1483 struct efct_lio_tpg, tpg); \
1484 struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
1485 unsigned long val; \
1488 ret = kstrtoul(page, 0, &val); \
1490 pr_err("kstrtoul() failed with ret: %d\n", ret); \
1494 if (val != 0 && val != 1) { \
1495 pr_err("Illegal boolean value %lu\n", val); \
1503 CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)
1505 DEF_EFCT_TPG_ATTRIB(generate_node_acls
);
1506 DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls
);
1507 DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect
);
1508 DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect
);
1509 DEF_EFCT_TPG_ATTRIB(demo_mode_login_only
);
1510 DEF_EFCT_TPG_ATTRIB(session_deletion_wait
);
1512 static struct configfs_attribute
*efct_lio_tpg_attrib_attrs
[] = {
1513 &efct_lio_tpg_attrib_attr_generate_node_acls
,
1514 &efct_lio_tpg_attrib_attr_cache_dynamic_acls
,
1515 &efct_lio_tpg_attrib_attr_demo_mode_write_protect
,
1516 &efct_lio_tpg_attrib_attr_prod_mode_write_protect
,
1517 &efct_lio_tpg_attrib_attr_demo_mode_login_only
,
1518 &efct_lio_tpg_attrib_attr_session_deletion_wait
,
1522 #define DEF_EFCT_NPIV_TPG_ATTRIB(name) \
1524 static ssize_t efct_lio_npiv_tpg_attrib_##name##_show( \
1525 struct config_item *item, char *page) \
1527 struct se_portal_group *se_tpg = to_tpg(item); \
1528 struct efct_lio_tpg *tpg = container_of(se_tpg, \
1529 struct efct_lio_tpg, tpg); \
1531 return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
1534 static ssize_t efct_lio_npiv_tpg_attrib_##name##_store( \
1535 struct config_item *item, const char *page, size_t count) \
1537 struct se_portal_group *se_tpg = to_tpg(item); \
1538 struct efct_lio_tpg *tpg = container_of(se_tpg, \
1539 struct efct_lio_tpg, tpg); \
1540 struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
1541 unsigned long val; \
1544 ret = kstrtoul(page, 0, &val); \
1546 pr_err("kstrtoul() failed with ret: %d\n", ret); \
1550 if (val != 0 && val != 1) { \
1551 pr_err("Illegal boolean value %lu\n", val); \
1559 CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)
1561 DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls
);
1562 DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls
);
1563 DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect
);
1564 DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect
);
1565 DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only
);
1566 DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait
);
1568 static struct configfs_attribute
*efct_lio_npiv_tpg_attrib_attrs
[] = {
1569 &efct_lio_npiv_tpg_attrib_attr_generate_node_acls
,
1570 &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls
,
1571 &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect
,
1572 &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect
,
1573 &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only
,
1574 &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait
,
1578 CONFIGFS_ATTR(efct_lio_tpg_
, enable
);
1579 static struct configfs_attribute
*efct_lio_tpg_attrs
[] = {
1580 &efct_lio_tpg_attr_enable
, NULL
};
1581 CONFIGFS_ATTR(efct_lio_npiv_tpg_
, enable
);
1582 static struct configfs_attribute
*efct_lio_npiv_tpg_attrs
[] = {
1583 &efct_lio_npiv_tpg_attr_enable
, NULL
};
1585 static const struct target_core_fabric_ops efct_lio_ops
= {
1586 .module
= THIS_MODULE
,
1587 .fabric_name
= "efct",
1588 .node_acl_size
= sizeof(struct efct_lio_nacl
),
1589 .max_data_sg_nents
= 65535,
1590 .tpg_get_wwn
= efct_lio_get_fabric_wwn
,
1591 .tpg_get_tag
= efct_lio_get_tag
,
1592 .fabric_init_nodeacl
= efct_lio_init_nodeacl
,
1593 .tpg_check_demo_mode
= efct_lio_check_demo_mode
,
1594 .tpg_check_demo_mode_cache
= efct_lio_check_demo_mode_cache
,
1595 .tpg_check_demo_mode_write_protect
= efct_lio_check_demo_write_protect
,
1596 .tpg_check_prod_mode_write_protect
= efct_lio_check_prod_write_protect
,
1597 .check_stop_free
= efct_lio_check_stop_free
,
1598 .aborted_task
= efct_lio_aborted_task
,
1599 .release_cmd
= efct_lio_release_cmd
,
1600 .close_session
= efct_lio_close_session
,
1601 .write_pending
= efct_lio_write_pending
,
1602 .get_cmd_state
= efct_lio_get_cmd_state
,
1603 .queue_data_in
= efct_lio_queue_data_in
,
1604 .queue_status
= efct_lio_queue_status
,
1605 .queue_tm_rsp
= efct_lio_queue_tm_rsp
,
1606 .fabric_make_wwn
= efct_lio_make_nport
,
1607 .fabric_drop_wwn
= efct_lio_drop_nport
,
1608 .fabric_make_tpg
= efct_lio_make_tpg
,
1609 .fabric_drop_tpg
= efct_lio_drop_tpg
,
1610 .tpg_check_demo_mode_login_only
= efct_lio_check_demo_mode_login_only
,
1611 .tpg_check_prot_fabric_only
= NULL
,
1612 .sess_get_initiator_sid
= NULL
,
1613 .tfc_tpg_base_attrs
= efct_lio_tpg_attrs
,
1614 .tfc_tpg_attrib_attrs
= efct_lio_tpg_attrib_attrs
,
1615 .default_submit_type
= TARGET_DIRECT_SUBMIT
,
1616 .direct_submit_supp
= 1,
1619 static const struct target_core_fabric_ops efct_lio_npiv_ops
= {
1620 .module
= THIS_MODULE
,
1621 .fabric_name
= "efct_npiv",
1622 .node_acl_size
= sizeof(struct efct_lio_nacl
),
1623 .max_data_sg_nents
= 65535,
1624 .tpg_get_wwn
= efct_lio_get_npiv_fabric_wwn
,
1625 .tpg_get_tag
= efct_lio_get_npiv_tag
,
1626 .fabric_init_nodeacl
= efct_lio_init_nodeacl
,
1627 .tpg_check_demo_mode
= efct_lio_check_demo_mode
,
1628 .tpg_check_demo_mode_cache
= efct_lio_check_demo_mode_cache
,
1629 .tpg_check_demo_mode_write_protect
=
1630 efct_lio_npiv_check_demo_write_protect
,
1631 .tpg_check_prod_mode_write_protect
=
1632 efct_lio_npiv_check_prod_write_protect
,
1633 .check_stop_free
= efct_lio_check_stop_free
,
1634 .aborted_task
= efct_lio_aborted_task
,
1635 .release_cmd
= efct_lio_release_cmd
,
1636 .close_session
= efct_lio_close_session
,
1637 .write_pending
= efct_lio_write_pending
,
1638 .get_cmd_state
= efct_lio_get_cmd_state
,
1639 .queue_data_in
= efct_lio_queue_data_in
,
1640 .queue_status
= efct_lio_queue_status
,
1641 .queue_tm_rsp
= efct_lio_queue_tm_rsp
,
1642 .fabric_make_wwn
= efct_lio_npiv_make_nport
,
1643 .fabric_drop_wwn
= efct_lio_npiv_drop_nport
,
1644 .fabric_make_tpg
= efct_lio_npiv_make_tpg
,
1645 .fabric_drop_tpg
= efct_lio_npiv_drop_tpg
,
1646 .tpg_check_demo_mode_login_only
=
1647 efct_lio_npiv_check_demo_mode_login_only
,
1648 .tpg_check_prot_fabric_only
= NULL
,
1649 .sess_get_initiator_sid
= NULL
,
1650 .tfc_tpg_base_attrs
= efct_lio_npiv_tpg_attrs
,
1651 .tfc_tpg_attrib_attrs
= efct_lio_npiv_tpg_attrib_attrs
,
1653 .default_submit_type
= TARGET_DIRECT_SUBMIT
,
1654 .direct_submit_supp
= 1,
1657 int efct_scsi_tgt_driver_init(void)
1661 /* Register the top level struct config_item_type with TCM core */
1662 rc
= target_register_template(&efct_lio_ops
);
1664 pr_err("target_fabric_configfs_register failed with %d\n", rc
);
1667 rc
= target_register_template(&efct_lio_npiv_ops
);
1669 pr_err("target_fabric_configfs_register failed with %d\n", rc
);
1670 target_unregister_template(&efct_lio_ops
);
1676 int efct_scsi_tgt_driver_exit(void)
1678 target_unregister_template(&efct_lio_ops
);
1679 target_unregister_template(&efct_lio_npiv_ops
);