/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"
#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);
/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Do not release struct se_cmd's containing a valid TMR
	 * pointer.  These will be released directly in tcm_loop_device_reset()
	 * with transport_generic_free_cmd().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
	 */
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "tcm_loop_proc_info()\n");
	return 0;
}
static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;
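
/*
 * How the pseudo bus above is used: tcm_loop_setup_hba_bus() registers each
 * emulated HBA as a struct device on tcm_loop_lld_bus, pseudo_lld_bus_match()
 * matches unconditionally, and tcm_loop_driver_probe() then allocates and
 * registers the corresponding struct Scsi_Host.
 */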
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}
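
/*
 * I/O path overview: tcm_loop_queuecommand() allocates a tcm_loop_cmd with
 * GFP_ATOMIC and defers to tcm_loop_workqueue; tcm_loop_submission_work()
 * hands the scsi_cmnd scatterlists to TCM via target_submit_cmd_map_sgls(),
 * and completion returns through tcm_loop_queue_data_in() or
 * tcm_loop_queue_status(), which invoke sc->scsi_done().
 */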
/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_tpg->tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, TCM_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR LUN_RESET has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}
static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
};
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}
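
/*
 * Note: sh->hostdata holds a pointer to the tcm_loop_hba rather than the
 * structure itself, which is why tcm_loop_submission_work() and the EH
 * handlers recover it with *(struct tcm_loop_hba **)shost_priv().
 */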
static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}
/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for"
				" tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}
/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for"
				" tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}
static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}
/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}
static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_loop_close_session(struct se_session *se_sess)
{
	return;
}
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}
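
/*
 * fabric_post_link is invoked by target_core when a LUN is linked into this
 * TPG via configfs; the scsi_add_device() call above is what surfaces the
 * backend device as a local SCSI device on the emulated host.
 */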
static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */
static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
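
/*
 * With the target configfs layout mounted at the usual /sys/kernel/config,
 * the attribute registered above is typically reachable at a path of the
 * form (illustrative only):
 *
 *   /sys/kernel/config/target/loopback/<wwn>/tpgt_<N>/attrib/fabric_prot_type
 */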
/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session(
				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
	 * Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		goto out;
	}
	/* Now, register the I_T Nexus as active. */
	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;

out:
	kfree(tl_nexus);
	return ret;
}
static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */
static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}
static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}
static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	NULL,
};
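
/*
 * Illustrative usage of the two TPG attributes above, assuming the standard
 * configfs mount point:
 *
 *   echo naa.<initiator_wwn> > .../target/loopback/<wwn>/tpgt_0/nexus
 *   echo offline > .../target/loopback/<wwn>/tpgt_0/transport_status
 *
 * Writing "NULL" to the nexus attribute drops the active I_T nexus, as
 * handled in tcm_loop_tpg_nexus_store() above.
 */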
/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory"
				" group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
				" %u\n", tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);

	return &tl_tpg->tl_se_tpg;
}
static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds"
			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}
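
/*
 * tcm_loop_make_scsi_hba() runs when a new WWN directory is created under
 * the loopback fabric in configfs; the name prefix ("naa.", "fc." or "iqn.")
 * selects the emulated protocol identifier, e.g. (illustrative only):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051234567b0
 */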
static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() will release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */
static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.shutdown_session		= tcm_loop_shutdown_session,
	.close_session			= tcm_loop_close_session,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}
static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);