/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

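/*
 * Workqueue handler deferred from tcm_loop_queuecommand(): map the incoming
 * struct scsi_cmnd (CDB, data SGLs, BIDI and protection SGLs) onto the
 * embedded struct se_cmd and hand it to target core via
 * target_submit_cmd_map_sgls().
 */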
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue an ABORT_TASK or LUN_RESET
 * TMR against the given struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to issue TMR without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 0);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

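/*
 * SCSI EH abort handler: issue an ABORT_TASK TMR for the tag of the failed
 * struct scsi_cmnd and map the TMR response back to SUCCESS/FAILED.
 */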
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

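/*
 * SCSI EH target reset handler: there is no physical transport to reset,
 * so simply bring the TPG back to TCM_TRANSPORT_ONLINE and report SUCCESS.
 */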
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

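/*
 * SCSI mid-layer host template for the emulated loopback LLD. Error handling
 * is wired to the TMR helpers above; queue depth changes and tracking are
 * delegated to the generic SCSI helpers.
 */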
static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
};

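/*
 * Bus probe callback, run when a tcm_loop adapter device is registered on
 * tcm_loop_lld_bus: allocate the struct Scsi_Host, advertise DIF/DIX
 * protection support, and add the host to the SCSI mid-layer.
 */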
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			     sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup a single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming the SCSI Name identifier in the
	 * EVPD=1 0x83 VPD page to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be
 * generated based upon the incoming fabric-dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs
 * for local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd with
	 * sc->sc_data_direction of DMA_TO_DEVICE, its struct scatterlist
	 * memory has already been mapped to the struct se_cmd at submission
	 * time.
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

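/*
 * Completion callbacks from target core back into the SCSI mid-layer:
 * translate the se_cmd status (and any sense data or residual count) into
 * the originating struct scsi_cmnd and finish it with sc->scsi_done().
 */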
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

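/*
 * The fabric_prot_type TPG attribute accepts 0, 1 or 3, selecting the
 * fabric-level (T10-PI) protection type exposed by this TPG; 0 disables it.
 */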
static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
				name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

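/*
 * The nexus attribute establishes (or, with "NULL", tears down) the local
 * I_T nexus. A typical configfs invocation, assuming the standard
 * /sys/kernel/config mount point and hypothetical WWNs:
 *
 *   echo naa.60014055e5f25aa1 > \
 *     /sys/kernel/config/target/loopback/naa.60014055e5f25aa0/tpgt_1/nexus
 */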
static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
					const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shut down the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
						  char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

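/*
 * Only "online" and "offline" are accepted. Taking the TPG offline also
 * posts a nexus-loss Unit Attention against the active session, if one
 * exists.
 */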
static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
						   const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
					 char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

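/*
 * The WWN directory name chosen under configfs selects the emulated fabric
 * protocol. For example (hypothetical identifiers, standard configfs mount):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014055e5f25aa0        -> SAS
 *   mkdir /sys/kernel/config/target/loopback/fc.20000000c9a1b2c3         -> FCP
 *   mkdir /sys/kernel/config/target/loopback/iqn.2003-01.org.example:hba -> iSCSI
 */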
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * the device_register() callbacks run in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() above will release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};

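/*
 * Module init: bring up the submission workqueue and the per-command slab
 * cache, register the emulated tcm_loop bus/driver, and finally register
 * the fabric template with target core. Teardown happens in reverse order
 * on any failure and at module exit.
 */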
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
					       sizeof(struct tcm_loop_cmd),
					       __alignof__(struct tcm_loop_cmd),
					       0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);