[linux/elbrus.git] / drivers / vhost / scsi.c
blob 486d710a52934b6a6caf6bb4bbe557ca34ad7021
1 /*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
4 * (C) Copyright 2010-2013 Datera, Inc.
5 * (C) Copyright 2010-2012 IBM Corp.
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 ****************************************************************************/
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
53 #include "vhost.h"
55 #define TCM_VHOST_VERSION "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58 #define TCM_VHOST_DEFAULT_TAGS 256
59 #define TCM_VHOST_PREALLOC_SGLS 2048
60 #define TCM_VHOST_PREALLOC_PAGES 2048
62 struct vhost_scsi_inflight {
63 /* Wait for the flush operation to finish */
64 struct completion comp;
65 /* Refcount for the inflight reqs */
66 struct kref kref;
69 struct tcm_vhost_cmd {
70 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
71 int tvc_vq_desc;
72 /* virtio-scsi initiator task attribute */
73 int tvc_task_attr;
74 /* virtio-scsi initiator data direction */
75 enum dma_data_direction tvc_data_direction;
76 /* Expected data transfer length from virtio-scsi header */
77 u32 tvc_exp_data_len;
78 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
79 u64 tvc_tag;
80 /* The number of scatterlists associated with this cmd */
81 u32 tvc_sgl_count;
82 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
83 u32 tvc_lun;
84 /* Pointer to the SGL formatted memory from virtio-scsi */
85 struct scatterlist *tvc_sgl;
86 struct page **tvc_upages;
87 /* Pointer to response */
88 struct virtio_scsi_cmd_resp __user *tvc_resp;
89 /* Pointer to vhost_scsi for our device */
90 struct vhost_scsi *tvc_vhost;
91 /* Pointer to vhost_virtqueue for the cmd */
92 struct vhost_virtqueue *tvc_vq;
93 /* Pointer to vhost nexus memory */
94 struct tcm_vhost_nexus *tvc_nexus;
95 /* The TCM I/O descriptor that is accessed via container_of() */
96 struct se_cmd tvc_se_cmd;
97 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
98 struct work_struct work;
99 /* Copy of the incoming SCSI command descriptor block (CDB) */
100 unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
101 /* Sense buffer that will be mapped into outgoing status */
102 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
103 /* Completed commands list, serviced from vhost worker thread */
104 struct llist_node tvc_completion_list;
105 /* Used to track inflight cmd */
106 struct vhost_scsi_inflight *inflight;
109 struct tcm_vhost_nexus {
110 /* Pointer to TCM session for I_T Nexus */
111 struct se_session *tvn_se_sess;
114 struct tcm_vhost_nacl {
115 /* Binary World Wide unique Port Name for Vhost Initiator port */
116 u64 iport_wwpn;
117 /* ASCII formatted WWPN for SAS Initiator port */
118 char iport_name[TCM_VHOST_NAMELEN];
119 /* Returned by tcm_vhost_make_nodeacl() */
120 struct se_node_acl se_node_acl;
123 struct tcm_vhost_tpg {
124 /* Vhost port target portal group tag for TCM */
125 u16 tport_tpgt;
126 /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
127 int tv_tpg_port_count;
128 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
129 int tv_tpg_vhost_count;
130 /* list for tcm_vhost_list */
131 struct list_head tv_tpg_list;
132 /* Used to protect access for tpg_nexus */
133 struct mutex tv_tpg_mutex;
134 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
135 struct tcm_vhost_nexus *tpg_nexus;
136 /* Pointer back to tcm_vhost_tport */
137 struct tcm_vhost_tport *tport;
138 /* Returned by tcm_vhost_make_tpg() */
139 struct se_portal_group se_tpg;
140 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
141 struct vhost_scsi *vhost_scsi;
144 struct tcm_vhost_tport {
145 /* SCSI protocol the tport is providing */
146 u8 tport_proto_id;
147 /* Binary World Wide unique Port Name for Vhost Target port */
148 u64 tport_wwpn;
149 /* ASCII formatted WWPN for Vhost Target port */
150 char tport_name[TCM_VHOST_NAMELEN];
151 /* Returned by tcm_vhost_make_tport() */
152 struct se_wwn tport_wwn;
155 struct tcm_vhost_evt {
156 /* event to be sent to guest */
157 struct virtio_scsi_event event;
158 /* event list, serviced from vhost worker thread */
159 struct llist_node list;
162 enum {
163 VHOST_SCSI_VQ_CTL = 0,
164 VHOST_SCSI_VQ_EVT = 1,
165 VHOST_SCSI_VQ_IO = 2,
168 enum {
169 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
172 #define VHOST_SCSI_MAX_TARGET 256
173 #define VHOST_SCSI_MAX_VQ 128
174 #define VHOST_SCSI_MAX_EVENT 128
176 struct vhost_scsi_virtqueue {
177 struct vhost_virtqueue vq;
179 * Reference counting for inflight reqs, used for flush operation. At
180 * any time, one reference tracks new commands submitted, while we
181 * wait for the other one to reach 0.
183 struct vhost_scsi_inflight inflights[2];
185 * Indicate current inflight in use, protected by vq->mutex.
186 * Writers must also take dev mutex and flush under it.
188 int inflight_idx;
191 struct vhost_scsi {
192 /* Protected by vhost_scsi->dev.mutex */
193 struct tcm_vhost_tpg **vs_tpg;
194 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
196 struct vhost_dev dev;
197 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
199 struct vhost_work vs_completion_work; /* cmd completion work item */
200 struct llist_head vs_completion_list; /* cmd completion queue */
202 struct vhost_work vs_event_work; /* evt injection work item */
203 struct llist_head vs_event_list; /* evt injection queue */
205 bool vs_events_missed; /* any missed events, protected by vq->mutex */
206 int vs_events_nr; /* num of pending events, protected by vq->mutex */
209 /* Local pointer to allocated TCM configfs fabric module */
210 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
212 static struct workqueue_struct *tcm_vhost_workqueue;
214 /* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
215 static DEFINE_MUTEX(tcm_vhost_mutex);
216 static LIST_HEAD(tcm_vhost_list);
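/* Number of pages needed to back a single iovec's user buffer */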
218 static int iov_num_pages(struct iovec *iov)
220 return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
221 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
224 static void tcm_vhost_done_inflight(struct kref *kref)
226 struct vhost_scsi_inflight *inflight;
228 inflight = container_of(kref, struct vhost_scsi_inflight, kref);
229 complete(&inflight->comp);
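/*
 * Set up a fresh inflight counter on each virtqueue and, if requested,
 * hand back the counters previously in use so a flush can wait on them.
 */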
232 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
233 struct vhost_scsi_inflight *old_inflight[])
235 struct vhost_scsi_inflight *new_inflight;
236 struct vhost_virtqueue *vq;
237 int idx, i;
239 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
240 vq = &vs->vqs[i].vq;
242 mutex_lock(&vq->mutex);
244 /* store old inflight */
245 idx = vs->vqs[i].inflight_idx;
246 if (old_inflight)
247 old_inflight[i] = &vs->vqs[i].inflights[idx];
249 /* set up new inflight */
250 vs->vqs[i].inflight_idx = idx ^ 1;
251 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
252 kref_init(&new_inflight->kref);
253 init_completion(&new_inflight->comp);
255 mutex_unlock(&vq->mutex);
259 static struct vhost_scsi_inflight *
260 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
262 struct vhost_scsi_inflight *inflight;
263 struct vhost_scsi_virtqueue *svq;
265 svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
266 inflight = &svq->inflights[svq->inflight_idx];
267 kref_get(&inflight->kref);
269 return inflight;
272 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
274 kref_put(&inflight->kref, tcm_vhost_done_inflight);
277 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
279 return 1;
282 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
284 return 0;
287 static char *tcm_vhost_get_fabric_name(void)
289 return "vhost";
292 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
294 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
295 struct tcm_vhost_tpg, se_tpg);
296 struct tcm_vhost_tport *tport = tpg->tport;
298 switch (tport->tport_proto_id) {
299 case SCSI_PROTOCOL_SAS:
300 return sas_get_fabric_proto_ident(se_tpg);
301 case SCSI_PROTOCOL_FCP:
302 return fc_get_fabric_proto_ident(se_tpg);
303 case SCSI_PROTOCOL_ISCSI:
304 return iscsi_get_fabric_proto_ident(se_tpg);
305 default:
306 pr_err("Unknown tport_proto_id: 0x%02x, using"
307 " SAS emulation\n", tport->tport_proto_id);
308 break;
311 return sas_get_fabric_proto_ident(se_tpg);
314 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
316 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
317 struct tcm_vhost_tpg, se_tpg);
318 struct tcm_vhost_tport *tport = tpg->tport;
320 return &tport->tport_name[0];
323 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
325 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
326 struct tcm_vhost_tpg, se_tpg);
327 return tpg->tport_tpgt;
330 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
332 return 1;
335 static u32
336 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
337 struct se_node_acl *se_nacl,
338 struct t10_pr_registration *pr_reg,
339 int *format_code,
340 unsigned char *buf)
342 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
343 struct tcm_vhost_tpg, se_tpg);
344 struct tcm_vhost_tport *tport = tpg->tport;
346 switch (tport->tport_proto_id) {
347 case SCSI_PROTOCOL_SAS:
348 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
349 format_code, buf);
350 case SCSI_PROTOCOL_FCP:
351 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
352 format_code, buf);
353 case SCSI_PROTOCOL_ISCSI:
354 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
355 format_code, buf);
356 default:
357 pr_err("Unknown tport_proto_id: 0x%02x, using"
358 " SAS emulation\n", tport->tport_proto_id);
359 break;
362 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
363 format_code, buf);
366 static u32
367 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
368 struct se_node_acl *se_nacl,
369 struct t10_pr_registration *pr_reg,
370 int *format_code)
372 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
373 struct tcm_vhost_tpg, se_tpg);
374 struct tcm_vhost_tport *tport = tpg->tport;
376 switch (tport->tport_proto_id) {
377 case SCSI_PROTOCOL_SAS:
378 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
379 format_code);
380 case SCSI_PROTOCOL_FCP:
381 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
382 format_code);
383 case SCSI_PROTOCOL_ISCSI:
384 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
385 format_code);
386 default:
387 pr_err("Unknown tport_proto_id: 0x%02x, using"
388 " SAS emulation\n", tport->tport_proto_id);
389 break;
392 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
393 format_code);
396 static char *
397 tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
398 const char *buf,
399 u32 *out_tid_len,
400 char **port_nexus_ptr)
402 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
403 struct tcm_vhost_tpg, se_tpg);
404 struct tcm_vhost_tport *tport = tpg->tport;
406 switch (tport->tport_proto_id) {
407 case SCSI_PROTOCOL_SAS:
408 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
409 port_nexus_ptr);
410 case SCSI_PROTOCOL_FCP:
411 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
412 port_nexus_ptr);
413 case SCSI_PROTOCOL_ISCSI:
414 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
415 port_nexus_ptr);
416 default:
417 pr_err("Unknown tport_proto_id: 0x%02x, using"
418 " SAS emulation\n", tport->tport_proto_id);
419 break;
422 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
423 port_nexus_ptr);
426 static struct se_node_acl *
427 tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
429 struct tcm_vhost_nacl *nacl;
431 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
432 if (!nacl) {
433 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
434 return NULL;
437 return &nacl->se_node_acl;
440 static void
441 tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
442 struct se_node_acl *se_nacl)
444 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
445 struct tcm_vhost_nacl, se_node_acl);
446 kfree(nacl);
449 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
451 return 1;
454 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
456 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
457 struct tcm_vhost_cmd, tvc_se_cmd);
458 struct se_session *se_sess = se_cmd->se_sess;
460 if (tv_cmd->tvc_sgl_count) {
461 u32 i;
462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
463 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
466 tcm_vhost_put_inflight(tv_cmd->inflight);
467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
470 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
472 return 0;
475 static void tcm_vhost_close_session(struct se_session *se_sess)
477 return;
480 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
482 return 0;
485 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
487 /* Go ahead and process the write immediately */
488 target_execute_cmd(se_cmd);
489 return 0;
492 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
494 return 0;
497 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
499 return;
502 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
504 return 0;
507 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
509 return 0;
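/* Queue a finished command on the completion list and kick the completion worker */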
512 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
514 struct vhost_scsi *vs = cmd->tvc_vhost;
516 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
518 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
521 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
523 struct tcm_vhost_cmd *cmd = container_of(se_cmd,
524 struct tcm_vhost_cmd, tvc_se_cmd);
525 vhost_scsi_complete_cmd(cmd);
526 return 0;
529 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
531 struct tcm_vhost_cmd *cmd = container_of(se_cmd,
532 struct tcm_vhost_cmd, tvc_se_cmd);
533 vhost_scsi_complete_cmd(cmd);
534 return 0;
537 static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
539 return;
542 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
544 vs->vs_events_nr--;
545 kfree(evt);
548 static struct tcm_vhost_evt *
549 tcm_vhost_allocate_evt(struct vhost_scsi *vs,
550 u32 event, u32 reason)
552 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
553 struct tcm_vhost_evt *evt;
555 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
556 vs->vs_events_missed = true;
557 return NULL;
560 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
561 if (!evt) {
562 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
563 vs->vs_events_missed = true;
564 return NULL;
567 evt->event.event = event;
568 evt->event.reason = reason;
569 vs->vs_events_nr++;
571 return evt;
574 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
576 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
578 /* TODO locking against target/backend threads? */
579 transport_generic_free_cmd(se_cmd, 0);
583 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
585 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
588 static void
589 tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
591 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
592 struct virtio_scsi_event *event = &evt->event;
593 struct virtio_scsi_event __user *eventp;
594 unsigned out, in;
595 int head, ret;
597 if (!vq->private_data) {
598 vs->vs_events_missed = true;
599 return;
602 again:
603 vhost_disable_notify(&vs->dev, vq);
604 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
605 ARRAY_SIZE(vq->iov), &out, &in,
606 NULL, NULL);
607 if (head < 0) {
608 vs->vs_events_missed = true;
609 return;
611 if (head == vq->num) {
612 if (vhost_enable_notify(&vs->dev, vq))
613 goto again;
614 vs->vs_events_missed = true;
615 return;
618 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
619 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
620 vq->iov[out].iov_len);
621 vs->vs_events_missed = true;
622 return;
625 if (vs->vs_events_missed) {
626 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
627 vs->vs_events_missed = false;
630 eventp = vq->iov[out].iov_base;
631 ret = __copy_to_user(eventp, event, sizeof(*event));
632 if (!ret)
633 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
634 else
635 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
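/* vhost work item: drain vs_event_list and deliver each event to the guest's event virtqueue */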
638 static void tcm_vhost_evt_work(struct vhost_work *work)
640 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
641 vs_event_work);
642 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
643 struct tcm_vhost_evt *evt;
644 struct llist_node *llnode;
646 mutex_lock(&vq->mutex);
647 llnode = llist_del_all(&vs->vs_event_list);
648 while (llnode) {
649 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
650 llnode = llist_next(llnode);
651 tcm_vhost_do_evt_work(vs, evt);
652 tcm_vhost_free_evt(vs, evt);
654 mutex_unlock(&vq->mutex);
657 /* Fill in status and signal that we are done processing this command
659 * This is scheduled in the vhost work queue so we are called with the owner
660 * process mm and can access the vring.
662 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
664 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
665 vs_completion_work);
666 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
667 struct virtio_scsi_cmd_resp v_rsp;
668 struct tcm_vhost_cmd *cmd;
669 struct llist_node *llnode;
670 struct se_cmd *se_cmd;
671 int ret, vq;
673 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
674 llnode = llist_del_all(&vs->vs_completion_list);
675 while (llnode) {
676 cmd = llist_entry(llnode, struct tcm_vhost_cmd,
677 tvc_completion_list);
678 llnode = llist_next(llnode);
679 se_cmd = &cmd->tvc_se_cmd;
681 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
682 cmd, se_cmd->residual_count, se_cmd->scsi_status);
684 memset(&v_rsp, 0, sizeof(v_rsp));
685 v_rsp.resid = se_cmd->residual_count;
686 /* TODO is status_qualifier field needed? */
687 v_rsp.status = se_cmd->scsi_status;
688 v_rsp.sense_len = se_cmd->scsi_sense_length;
689 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
690 v_rsp.sense_len);
691 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
692 if (likely(ret == 0)) {
693 struct vhost_scsi_virtqueue *q;
694 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
695 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
696 vq = q - vs->vqs;
697 __set_bit(vq, signal);
698 } else
699 pr_err("Faulted on virtio_scsi_cmd_resp\n");
701 vhost_scsi_free_cmd(cmd);
704 vq = -1;
705 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
706 < VHOST_SCSI_MAX_VQ)
707 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
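/*
 * Allocate a tcm_vhost_cmd from the nexus session tag pool and initialize
 * it from the incoming virtio_scsi_cmd_req.
 */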
710 static struct tcm_vhost_cmd *
711 vhost_scsi_get_tag(struct vhost_virtqueue *vq,
712 struct tcm_vhost_tpg *tpg,
713 struct virtio_scsi_cmd_req *v_req,
714 u32 exp_data_len,
715 int data_direction)
717 struct tcm_vhost_cmd *cmd;
718 struct tcm_vhost_nexus *tv_nexus;
719 struct se_session *se_sess;
720 struct scatterlist *sg;
721 struct page **pages;
722 int tag;
724 tv_nexus = tpg->tpg_nexus;
725 if (!tv_nexus) {
726 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
727 return ERR_PTR(-EIO);
729 se_sess = tv_nexus->tvn_se_sess;
731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
732 if (tag < 0) {
733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734 return ERR_PTR(-ENOMEM);
737 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
738 sg = cmd->tvc_sgl;
739 pages = cmd->tvc_upages;
740 memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
742 cmd->tvc_sgl = sg;
743 cmd->tvc_upages = pages;
744 cmd->tvc_se_cmd.map_tag = tag;
745 cmd->tvc_tag = v_req->tag;
746 cmd->tvc_task_attr = v_req->task_attr;
747 cmd->tvc_exp_data_len = exp_data_len;
748 cmd->tvc_data_direction = data_direction;
749 cmd->tvc_nexus = tv_nexus;
750 cmd->inflight = tcm_vhost_get_inflight(vq);
752 return cmd;
756 * Map a user memory range into a scatterlist
758 * Returns the number of scatterlist entries used or -errno on error.
760 static int
761 vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
762 struct scatterlist *sgl,
763 unsigned int sgl_count,
764 struct iovec *iov,
765 int write)
767 unsigned int npages = 0, pages_nr, offset, nbytes;
768 struct scatterlist *sg = sgl;
769 void __user *ptr = iov->iov_base;
770 size_t len = iov->iov_len;
771 struct page **pages;
772 int ret, i;
774 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
775 pr_err("vhost_scsi_map_to_sgl() sgl_count: %u greater than"
776 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
777 sgl_count, TCM_VHOST_PREALLOC_SGLS);
778 return -ENOBUFS;
781 pages_nr = iov_num_pages(iov);
782 if (pages_nr > sgl_count)
783 return -ENOBUFS;
785 if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
786 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
787 " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
788 pages_nr, TCM_VHOST_PREALLOC_PAGES);
789 return -ENOBUFS;
792 pages = tv_cmd->tvc_upages;
794 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
795 /* No pages were pinned */
796 if (ret < 0)
797 goto out;
798 /* Fewer pages pinned than requested */
799 if (ret != pages_nr) {
800 for (i = 0; i < ret; i++)
801 put_page(pages[i]);
802 ret = -EFAULT;
803 goto out;
806 while (len > 0) {
807 offset = (uintptr_t)ptr & ~PAGE_MASK;
808 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
809 sg_set_page(sg, pages[npages], nbytes, offset);
810 ptr += nbytes;
811 len -= nbytes;
812 sg++;
813 npages++;
816 out:
817 return ret;
820 static int
821 vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
822 struct iovec *iov,
823 unsigned int niov,
824 int write)
826 int ret;
827 unsigned int i;
828 u32 sgl_count;
829 struct scatterlist *sg;
832 * Find out how long the sglist needs to be
834 sgl_count = 0;
835 for (i = 0; i < niov; i++)
836 sgl_count += iov_num_pages(&iov[i]);
838 /* TODO overflow checking */
840 sg = cmd->tvc_sgl;
841 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
842 sg_init_table(sg, sgl_count);
844 cmd->tvc_sgl_count = sgl_count;
846 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
847 for (i = 0; i < niov; i++) {
848 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
849 write);
850 if (ret < 0) {
851 for (i = 0; i < cmd->tvc_sgl_count; i++)
852 put_page(sg_page(&cmd->tvc_sgl[i]));
854 cmd->tvc_sgl_count = 0;
855 return ret;
858 sg += ret;
859 sgl_count -= ret;
861 return 0;
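/* Translate a virtio-scsi task attribute into its TCM/SAM equivalent */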
864 static int vhost_scsi_to_tcm_attr(int attr)
866 switch (attr) {
867 case VIRTIO_SCSI_S_SIMPLE:
868 return MSG_SIMPLE_TAG;
869 case VIRTIO_SCSI_S_ORDERED:
870 return MSG_ORDERED_TAG;
871 case VIRTIO_SCSI_S_HEAD:
872 return MSG_HEAD_TAG;
873 case VIRTIO_SCSI_S_ACA:
874 return MSG_ACA_TAG;
875 default:
876 break;
878 return MSG_SIMPLE_TAG;
881 static void tcm_vhost_submission_work(struct work_struct *work)
883 struct tcm_vhost_cmd *cmd =
884 container_of(work, struct tcm_vhost_cmd, work);
885 struct tcm_vhost_nexus *tv_nexus;
886 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
887 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
888 int rc, sg_no_bidi = 0;
890 if (cmd->tvc_sgl_count) {
891 sg_ptr = cmd->tvc_sgl;
892 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
893 #if 0
894 if (se_cmd->se_cmd_flags & SCF_BIDI) {
895 sg_bidi_ptr = NULL;
896 sg_no_bidi = 0;
898 #endif
899 } else {
900 sg_ptr = NULL;
902 tv_nexus = cmd->tvc_nexus;
904 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
905 cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
906 cmd->tvc_lun, cmd->tvc_exp_data_len,
907 vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
908 cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
909 sg_ptr, cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi,
910 NULL, 0);
911 if (rc < 0) {
912 transport_send_check_condition_and_sense(se_cmd,
913 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
914 transport_generic_free_cmd(se_cmd, 0);
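/* Report VIRTIO_SCSI_S_BAD_TARGET back to the guest for this descriptor */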
918 static void
919 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
920 struct vhost_virtqueue *vq,
921 int head, unsigned out)
923 struct virtio_scsi_cmd_resp __user *resp;
924 struct virtio_scsi_cmd_resp rsp;
925 int ret;
927 memset(&rsp, 0, sizeof(rsp));
928 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
929 resp = vq->iov[out].iov_base;
930 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
931 if (!ret)
932 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
933 else
934 pr_err("Faulted on virtio_scsi_cmd_resp\n");
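/*
 * Handle one kick on a request virtqueue: parse each descriptor, map its
 * data buffers, and dispatch a tcm_vhost_cmd to the workqueue.
 */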
937 static void
938 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
940 struct tcm_vhost_tpg **vs_tpg;
941 struct virtio_scsi_cmd_req v_req;
942 struct tcm_vhost_tpg *tpg;
943 struct tcm_vhost_cmd *cmd;
944 u32 exp_data_len, data_first, data_num, data_direction;
945 unsigned out, in, i;
946 int head, ret;
947 u8 target;
949 mutex_lock(&vq->mutex);
951 * We can handle the vq only after the endpoint is set up by calling the
952 * VHOST_SCSI_SET_ENDPOINT ioctl.
954 vs_tpg = vq->private_data;
955 if (!vs_tpg)
956 goto out;
958 vhost_disable_notify(&vs->dev, vq);
960 for (;;) {
961 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
962 ARRAY_SIZE(vq->iov), &out, &in,
963 NULL, NULL);
964 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
965 head, out, in);
966 /* On error, stop handling until the next kick. */
967 if (unlikely(head < 0))
968 break;
969 /* Nothing new? Wait for eventfd to tell us they refilled. */
970 if (head == vq->num) {
971 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
972 vhost_disable_notify(&vs->dev, vq);
973 continue;
975 break;
978 /* FIXME: BIDI operation */
979 if (out == 1 && in == 1) {
980 data_direction = DMA_NONE;
981 data_first = 0;
982 data_num = 0;
983 } else if (out == 1 && in > 1) {
984 data_direction = DMA_FROM_DEVICE;
985 data_first = out + 1;
986 data_num = in - 1;
987 } else if (out > 1 && in == 1) {
988 data_direction = DMA_TO_DEVICE;
989 data_first = 1;
990 data_num = out - 1;
991 } else {
992 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
993 out, in);
994 break;
998 * Check for a sane resp buffer so we can report errors to
999 * the guest.
1001 if (unlikely(vq->iov[out].iov_len !=
1002 sizeof(struct virtio_scsi_cmd_resp))) {
1003 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
1004 " bytes\n", vq->iov[out].iov_len);
1005 break;
1008 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
1009 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
1010 " bytes\n", vq->iov[0].iov_len);
1011 break;
1013 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
1014 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
1015 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
1016 sizeof(v_req));
1017 if (unlikely(ret)) {
1018 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
1019 break;
1022 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1023 if (unlikely(v_req.lun[0] != 1)) {
1024 vhost_scsi_send_bad_target(vs, vq, head, out);
1025 continue;
1028 /* Extract the tpgt */
1029 target = v_req.lun[1];
1030 tpg = ACCESS_ONCE(vs_tpg[target]);
1032 /* Target does not exist, fail the request */
1033 if (unlikely(!tpg)) {
1034 vhost_scsi_send_bad_target(vs, vq, head, out);
1035 continue;
1038 exp_data_len = 0;
1039 for (i = 0; i < data_num; i++)
1040 exp_data_len += vq->iov[data_first + i].iov_len;
1042 cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
1043 exp_data_len, data_direction);
1044 if (IS_ERR(cmd)) {
1045 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1046 PTR_ERR(cmd));
1047 goto err_cmd;
1049 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1050 ": %d\n", cmd, exp_data_len, data_direction);
1052 cmd->tvc_vhost = vs;
1053 cmd->tvc_vq = vq;
1054 cmd->tvc_resp = vq->iov[out].iov_base;
1057 * Copy in the received CDB descriptor into cmd->tvc_cdb
1058 * that will be used by tcm_vhost_new_cmd_map() and down into
1059 * target_setup_cmd_from_cdb()
1061 memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
1063 * Check that the received CDB size does not exceed our
1064 * hardcoded max for tcm_vhost
1066 /* TODO what if cdb was too small for varlen cdb header? */
1067 if (unlikely(scsi_command_size(cmd->tvc_cdb) >
1068 TCM_VHOST_MAX_CDB_SIZE)) {
1069 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1070 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1071 scsi_command_size(cmd->tvc_cdb),
1072 TCM_VHOST_MAX_CDB_SIZE);
1073 goto err_free;
1075 cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1077 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1078 cmd->tvc_cdb[0], cmd->tvc_lun);
1080 if (data_direction != DMA_NONE) {
1081 ret = vhost_scsi_map_iov_to_sgl(cmd,
1082 &vq->iov[data_first], data_num,
1083 data_direction == DMA_FROM_DEVICE);
1084 if (unlikely(ret)) {
1085 vq_err(vq, "Failed to map iov to sgl\n");
1086 goto err_free;
1091 * Save the descriptor from vhost_get_vq_desc() to be used to
1092 * complete the virtio-scsi request in TCM callback context via
1093 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1095 cmd->tvc_vq_desc = head;
1097 * Dispatch tv_cmd descriptor for cmwq execution in process
1098 * context provided by tcm_vhost_workqueue. This also ensures
1099 * tv_cmd is executed on the same kworker CPU as this vhost
1100 * thread to gain positive L2 cache locality effects.
1102 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1103 queue_work(tcm_vhost_workqueue, &cmd->work);
1106 mutex_unlock(&vq->mutex);
1107 return;
1109 err_free:
1110 vhost_scsi_free_cmd(cmd);
1111 err_cmd:
1112 vhost_scsi_send_bad_target(vs, vq, head, out);
1113 out:
1114 mutex_unlock(&vq->mutex);
1117 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1119 pr_debug("%s: The handling func for control queue.\n", __func__);
1122 static void
1123 tcm_vhost_send_evt(struct vhost_scsi *vs,
1124 struct tcm_vhost_tpg *tpg,
1125 struct se_lun *lun,
1126 u32 event,
1127 u32 reason)
1129 struct tcm_vhost_evt *evt;
1131 evt = tcm_vhost_allocate_evt(vs, event, reason);
1132 if (!evt)
1133 return;
1135 if (tpg && lun) {
1136 /* TODO: share lun setup code with virtio-scsi.ko */
1138 * Note: evt->event is zeroed when we allocate it and
1139 * lun[4-7] need to be zero according to virtio-scsi spec.
1141 evt->event.lun[0] = 0x01;
1142 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1143 if (lun->unpacked_lun >= 256)
1144 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1145 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1148 llist_add(&evt->list, &vs->vs_event_list);
1149 vhost_work_queue(&vs->dev, &vs->vs_event_work);
1152 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1154 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1155 poll.work);
1156 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1158 mutex_lock(&vq->mutex);
1159 if (!vq->private_data)
1160 goto out;
1162 if (vs->vs_events_missed)
1163 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1164 out:
1165 mutex_unlock(&vq->mutex);
1168 static void vhost_scsi_handle_kick(struct vhost_work *work)
1170 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1171 poll.work);
1172 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1174 vhost_scsi_handle_vq(vs, vq);
1177 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1179 vhost_poll_flush(&vs->vqs[index].vq.poll);
1182 /* Callers must hold dev mutex */
1183 static void vhost_scsi_flush(struct vhost_scsi *vs)
1185 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1186 int i;
1188 /* Init new inflight and remember the old inflight */
1189 tcm_vhost_init_inflight(vs, old_inflight);
1192 * The inflight->kref was initialized to 1. We decrement it here to
1193 * indicate the start of the flush operation so that it will reach 0
1194 * when all the reqs are finished.
1196 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1197 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1199 /* Flush both the vhost poll and vhost work */
1200 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1201 vhost_scsi_flush_vq(vs, i);
1202 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1203 vhost_work_flush(&vs->dev, &vs->vs_event_work);
1205 /* Wait for all reqs issued before the flush to be finished */
1206 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1207 wait_for_completion(&old_inflight[i]->comp);
1211 * Called from vhost_scsi_ioctl() context to walk the list of available
1212 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1214 * The lock nesting rule is:
1215 * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1217 static int
1218 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1219 struct vhost_scsi_target *t)
1221 struct se_portal_group *se_tpg;
1222 struct tcm_vhost_tport *tv_tport;
1223 struct tcm_vhost_tpg *tpg;
1224 struct tcm_vhost_tpg **vs_tpg;
1225 struct vhost_virtqueue *vq;
1226 int index, ret, i, len;
1227 bool match = false;
1229 mutex_lock(&tcm_vhost_mutex);
1230 mutex_lock(&vs->dev.mutex);
1232 /* Verify that ring has been setup correctly. */
1233 for (index = 0; index < vs->dev.nvqs; ++index) {
1234 /* Verify that ring has been setup correctly. */
1235 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1236 ret = -EFAULT;
1237 goto out;
1241 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1242 vs_tpg = kzalloc(len, GFP_KERNEL);
1243 if (!vs_tpg) {
1244 ret = -ENOMEM;
1245 goto out;
1247 if (vs->vs_tpg)
1248 memcpy(vs_tpg, vs->vs_tpg, len);
1250 list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1251 mutex_lock(&tpg->tv_tpg_mutex);
1252 if (!tpg->tpg_nexus) {
1253 mutex_unlock(&tpg->tv_tpg_mutex);
1254 continue;
1256 if (tpg->tv_tpg_vhost_count != 0) {
1257 mutex_unlock(&tpg->tv_tpg_mutex);
1258 continue;
1260 tv_tport = tpg->tport;
1262 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1263 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1264 kfree(vs_tpg);
1265 mutex_unlock(&tpg->tv_tpg_mutex);
1266 ret = -EEXIST;
1267 goto out;
1270 * In order to ensure individual vhost-scsi configfs
1271 * groups cannot be removed while in use by vhost ioctl,
1272 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1273 * dependency now.
1275 se_tpg = &tpg->se_tpg;
1276 ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1277 &se_tpg->tpg_group.cg_item);
1278 if (ret) {
1279 pr_warn("configfs_depend_item() failed: %d\n", ret);
1280 kfree(vs_tpg);
1281 mutex_unlock(&tpg->tv_tpg_mutex);
1282 goto out;
1284 tpg->tv_tpg_vhost_count++;
1285 tpg->vhost_scsi = vs;
1286 vs_tpg[tpg->tport_tpgt] = tpg;
1287 smp_mb__after_atomic_inc();
1288 match = true;
1290 mutex_unlock(&tpg->tv_tpg_mutex);
1293 if (match) {
1294 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1295 sizeof(vs->vs_vhost_wwpn));
1296 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1297 vq = &vs->vqs[i].vq;
1298 mutex_lock(&vq->mutex);
1299 vq->private_data = vs_tpg;
1300 vhost_init_used(vq);
1301 mutex_unlock(&vq->mutex);
1303 ret = 0;
1304 } else {
1305 ret = -EEXIST;
1309 * Act as synchronize_rcu to make sure access to
1310 * old vs->vs_tpg is finished.
1312 vhost_scsi_flush(vs);
1313 kfree(vs->vs_tpg);
1314 vs->vs_tpg = vs_tpg;
1316 out:
1317 mutex_unlock(&vs->dev.mutex);
1318 mutex_unlock(&tcm_vhost_mutex);
1319 return ret;
1322 static int
1323 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1324 struct vhost_scsi_target *t)
1326 struct se_portal_group *se_tpg;
1327 struct tcm_vhost_tport *tv_tport;
1328 struct tcm_vhost_tpg *tpg;
1329 struct vhost_virtqueue *vq;
1330 bool match = false;
1331 int index, ret, i;
1332 u8 target;
1334 mutex_lock(&tcm_vhost_mutex);
1335 mutex_lock(&vs->dev.mutex);
1336 /* Verify that ring has been setup correctly. */
1337 for (index = 0; index < vs->dev.nvqs; ++index) {
1338 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1339 ret = -EFAULT;
1340 goto err_dev;
1344 if (!vs->vs_tpg) {
1345 ret = 0;
1346 goto err_dev;
1349 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1350 target = i;
1351 tpg = vs->vs_tpg[target];
1352 if (!tpg)
1353 continue;
1355 mutex_lock(&tpg->tv_tpg_mutex);
1356 tv_tport = tpg->tport;
1357 if (!tv_tport) {
1358 ret = -ENODEV;
1359 goto err_tpg;
1362 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1363 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1364 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1365 tv_tport->tport_name, tpg->tport_tpgt,
1366 t->vhost_wwpn, t->vhost_tpgt);
1367 ret = -EINVAL;
1368 goto err_tpg;
1370 tpg->tv_tpg_vhost_count--;
1371 tpg->vhost_scsi = NULL;
1372 vs->vs_tpg[target] = NULL;
1373 match = true;
1374 mutex_unlock(&tpg->tv_tpg_mutex);
1376 * Release se_tpg->tpg_group.cg_item configfs dependency now
1377 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1379 se_tpg = &tpg->se_tpg;
1380 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1381 &se_tpg->tpg_group.cg_item);
1383 if (match) {
1384 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1385 vq = &vs->vqs[i].vq;
1386 mutex_lock(&vq->mutex);
1387 vq->private_data = NULL;
1388 mutex_unlock(&vq->mutex);
1392 * Act as synchronize_rcu to make sure access to
1393 * old vs->vs_tpg is finished.
1395 vhost_scsi_flush(vs);
1396 kfree(vs->vs_tpg);
1397 vs->vs_tpg = NULL;
1398 WARN_ON(vs->vs_events_nr);
1399 mutex_unlock(&vs->dev.mutex);
1400 mutex_unlock(&tcm_vhost_mutex);
1401 return 0;
1403 err_tpg:
1404 mutex_unlock(&tpg->tv_tpg_mutex);
1405 err_dev:
1406 mutex_unlock(&vs->dev.mutex);
1407 mutex_unlock(&tcm_vhost_mutex);
1408 return ret;
1411 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1413 if (features & ~VHOST_SCSI_FEATURES)
1414 return -EOPNOTSUPP;
1416 mutex_lock(&vs->dev.mutex);
1417 if ((features & (1 << VHOST_F_LOG_ALL)) &&
1418 !vhost_log_access_ok(&vs->dev)) {
1419 mutex_unlock(&vs->dev.mutex);
1420 return -EFAULT;
1422 vs->dev.acked_features = features;
1423 smp_wmb();
1424 vhost_scsi_flush(vs);
1425 mutex_unlock(&vs->dev.mutex);
1426 return 0;
1429 static void vhost_scsi_free(struct vhost_scsi *vs)
1431 if (is_vmalloc_addr(vs))
1432 vfree(vs);
1433 else
1434 kfree(vs);
1437 static int vhost_scsi_open(struct inode *inode, struct file *f)
1439 struct vhost_scsi *vs;
1440 struct vhost_virtqueue **vqs;
1441 int r = -ENOMEM, i;
1443 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1444 if (!vs) {
1445 vs = vzalloc(sizeof(*vs));
1446 if (!vs)
1447 goto err_vs;
1450 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1451 if (!vqs)
1452 goto err_vqs;
1454 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1455 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1457 vs->vs_events_nr = 0;
1458 vs->vs_events_missed = false;
1460 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1461 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1462 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1463 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1464 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1465 vqs[i] = &vs->vqs[i].vq;
1466 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1468 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1470 tcm_vhost_init_inflight(vs, NULL);
1472 f->private_data = vs;
1473 return 0;
1475 err_vqs:
1476 vhost_scsi_free(vs);
1477 err_vs:
1478 return r;
1481 static int vhost_scsi_release(struct inode *inode, struct file *f)
1483 struct vhost_scsi *vs = f->private_data;
1484 struct vhost_scsi_target t;
1486 mutex_lock(&vs->dev.mutex);
1487 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1488 mutex_unlock(&vs->dev.mutex);
1489 vhost_scsi_clear_endpoint(vs, &t);
1490 vhost_dev_stop(&vs->dev);
1491 vhost_dev_cleanup(&vs->dev, false);
1492 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1493 vhost_scsi_flush(vs);
1494 kfree(vs->dev.vqs);
1495 vhost_scsi_free(vs);
1496 return 0;
1499 static long
1500 vhost_scsi_ioctl(struct file *f,
1501 unsigned int ioctl,
1502 unsigned long arg)
1504 struct vhost_scsi *vs = f->private_data;
1505 struct vhost_scsi_target backend;
1506 void __user *argp = (void __user *)arg;
1507 u64 __user *featurep = argp;
1508 u32 __user *eventsp = argp;
1509 u32 events_missed;
1510 u64 features;
1511 int r, abi_version = VHOST_SCSI_ABI_VERSION;
1512 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1514 switch (ioctl) {
1515 case VHOST_SCSI_SET_ENDPOINT:
1516 if (copy_from_user(&backend, argp, sizeof backend))
1517 return -EFAULT;
1518 if (backend.reserved != 0)
1519 return -EOPNOTSUPP;
1521 return vhost_scsi_set_endpoint(vs, &backend);
1522 case VHOST_SCSI_CLEAR_ENDPOINT:
1523 if (copy_from_user(&backend, argp, sizeof backend))
1524 return -EFAULT;
1525 if (backend.reserved != 0)
1526 return -EOPNOTSUPP;
1528 return vhost_scsi_clear_endpoint(vs, &backend);
1529 case VHOST_SCSI_GET_ABI_VERSION:
1530 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1531 return -EFAULT;
1532 return 0;
1533 case VHOST_SCSI_SET_EVENTS_MISSED:
1534 if (get_user(events_missed, eventsp))
1535 return -EFAULT;
1536 mutex_lock(&vq->mutex);
1537 vs->vs_events_missed = events_missed;
1538 mutex_unlock(&vq->mutex);
1539 return 0;
1540 case VHOST_SCSI_GET_EVENTS_MISSED:
1541 mutex_lock(&vq->mutex);
1542 events_missed = vs->vs_events_missed;
1543 mutex_unlock(&vq->mutex);
1544 if (put_user(events_missed, eventsp))
1545 return -EFAULT;
1546 return 0;
1547 case VHOST_GET_FEATURES:
1548 features = VHOST_SCSI_FEATURES;
1549 if (copy_to_user(featurep, &features, sizeof features))
1550 return -EFAULT;
1551 return 0;
1552 case VHOST_SET_FEATURES:
1553 if (copy_from_user(&features, featurep, sizeof features))
1554 return -EFAULT;
1555 return vhost_scsi_set_features(vs, features);
1556 default:
1557 mutex_lock(&vs->dev.mutex);
1558 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1559 /* TODO: flush backend after dev ioctl. */
1560 if (r == -ENOIOCTLCMD)
1561 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1562 mutex_unlock(&vs->dev.mutex);
1563 return r;
1567 #ifdef CONFIG_COMPAT
1568 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1569 unsigned long arg)
1571 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1573 #endif
1575 static const struct file_operations vhost_scsi_fops = {
1576 .owner = THIS_MODULE,
1577 .release = vhost_scsi_release,
1578 .unlocked_ioctl = vhost_scsi_ioctl,
1579 #ifdef CONFIG_COMPAT
1580 .compat_ioctl = vhost_scsi_compat_ioctl,
1581 #endif
1582 .open = vhost_scsi_open,
1583 .llseek = noop_llseek,
1586 static struct miscdevice vhost_scsi_misc = {
1587 MISC_DYNAMIC_MINOR,
1588 "vhost-scsi",
1589 &vhost_scsi_fops,
1592 static int __init vhost_scsi_register(void)
1594 return misc_register(&vhost_scsi_misc);
1597 static int vhost_scsi_deregister(void)
1599 return misc_deregister(&vhost_scsi_misc);
1602 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1604 switch (tport->tport_proto_id) {
1605 case SCSI_PROTOCOL_SAS:
1606 return "SAS";
1607 case SCSI_PROTOCOL_FCP:
1608 return "FCP";
1609 case SCSI_PROTOCOL_ISCSI:
1610 return "iSCSI";
1611 default:
1612 break;
1615 return "Unknown";
1618 static void
1619 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1620 struct se_lun *lun, bool plug)
1623 struct vhost_scsi *vs = tpg->vhost_scsi;
1624 struct vhost_virtqueue *vq;
1625 u32 reason;
1627 if (!vs)
1628 return;
1630 mutex_lock(&vs->dev.mutex);
1631 if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1632 mutex_unlock(&vs->dev.mutex);
1633 return;
1636 if (plug)
1637 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1638 else
1639 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1641 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1642 mutex_lock(&vq->mutex);
1643 tcm_vhost_send_evt(vs, tpg, lun,
1644 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1645 mutex_unlock(&vq->mutex);
1646 mutex_unlock(&vs->dev.mutex);
1649 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1651 tcm_vhost_do_plug(tpg, lun, true);
1654 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1656 tcm_vhost_do_plug(tpg, lun, false);
1659 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1660 struct se_lun *lun)
1662 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1663 struct tcm_vhost_tpg, se_tpg);
1665 mutex_lock(&tcm_vhost_mutex);
1667 mutex_lock(&tpg->tv_tpg_mutex);
1668 tpg->tv_tpg_port_count++;
1669 mutex_unlock(&tpg->tv_tpg_mutex);
1671 tcm_vhost_hotplug(tpg, lun);
1673 mutex_unlock(&tcm_vhost_mutex);
1675 return 0;
1678 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1679 struct se_lun *lun)
1681 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1682 struct tcm_vhost_tpg, se_tpg);
1684 mutex_lock(&tcm_vhost_mutex);
1686 mutex_lock(&tpg->tv_tpg_mutex);
1687 tpg->tv_tpg_port_count--;
1688 mutex_unlock(&tpg->tv_tpg_mutex);
1690 tcm_vhost_hotunplug(tpg, lun);
1692 mutex_unlock(&tcm_vhost_mutex);
1695 static struct se_node_acl *
1696 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1697 struct config_group *group,
1698 const char *name)
1700 struct se_node_acl *se_nacl, *se_nacl_new;
1701 struct tcm_vhost_nacl *nacl;
1702 u64 wwpn = 0;
1703 u32 nexus_depth;
1705 /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1706 return ERR_PTR(-EINVAL); */
1707 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1708 if (!se_nacl_new)
1709 return ERR_PTR(-ENOMEM);
1711 nexus_depth = 1;
1713 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1714 * when converting a NodeACL from demo mode -> explicit
1716 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1717 name, nexus_depth);
1718 if (IS_ERR(se_nacl)) {
1719 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1720 return se_nacl;
1723 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1725 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1726 nacl->iport_wwpn = wwpn;
1728 return se_nacl;
1731 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1733 struct tcm_vhost_nacl *nacl = container_of(se_acl,
1734 struct tcm_vhost_nacl, se_node_acl);
1735 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1736 kfree(nacl);
1739 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1740 struct se_session *se_sess)
1742 struct tcm_vhost_cmd *tv_cmd;
1743 unsigned int i;
1745 if (!se_sess->sess_cmd_map)
1746 return;
1748 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1749 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1751 kfree(tv_cmd->tvc_sgl);
1752 kfree(tv_cmd->tvc_upages);
1756 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1757 const char *name)
1759 struct se_portal_group *se_tpg;
1760 struct se_session *se_sess;
1761 struct tcm_vhost_nexus *tv_nexus;
1762 struct tcm_vhost_cmd *tv_cmd;
1763 unsigned int i;
1765 mutex_lock(&tpg->tv_tpg_mutex);
1766 if (tpg->tpg_nexus) {
1767 mutex_unlock(&tpg->tv_tpg_mutex);
1768 pr_debug("tpg->tpg_nexus already exists\n");
1769 return -EEXIST;
1771 se_tpg = &tpg->se_tpg;
1773 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1774 if (!tv_nexus) {
1775 mutex_unlock(&tpg->tv_tpg_mutex);
1776 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1777 return -ENOMEM;
1780 * Initialize the struct se_session pointer and setup tagpool
1781 * for struct tcm_vhost_cmd descriptors
1783 tv_nexus->tvn_se_sess = transport_init_session_tags(
1784 TCM_VHOST_DEFAULT_TAGS,
1785 sizeof(struct tcm_vhost_cmd));
1786 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1787 mutex_unlock(&tpg->tv_tpg_mutex);
1788 kfree(tv_nexus);
1789 return -ENOMEM;
1791 se_sess = tv_nexus->tvn_se_sess;
1792 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1793 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1795 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1796 TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1797 if (!tv_cmd->tvc_sgl) {
1798 mutex_unlock(&tpg->tv_tpg_mutex);
1799 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1800 goto out;
1803 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1804 TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
1805 if (!tv_cmd->tvc_upages) {
1806 mutex_unlock(&tpg->tv_tpg_mutex);
1807 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1808 goto out;
1812 * Since we are running in 'demo mode' this call will generate a
1813 * struct se_node_acl for the tcm_vhost struct se_portal_group with
1814 * the SCSI Initiator port name of the passed configfs group 'name'.
1816 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1817 se_tpg, (unsigned char *)name);
1818 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1819 mutex_unlock(&tpg->tv_tpg_mutex);
1820 pr_debug("core_tpg_check_initiator_node_acl() failed"
1821 " for %s\n", name);
1822 goto out;
1825 * Now register the TCM vhost virtual I_T Nexus as active with the
1826 * call to __transport_register_session()
1828 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1829 tv_nexus->tvn_se_sess, tv_nexus);
1830 tpg->tpg_nexus = tv_nexus;
1832 mutex_unlock(&tpg->tv_tpg_mutex);
1833 return 0;
1835 out:
1836 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1837 transport_free_session(se_sess);
1838 kfree(tv_nexus);
1839 return -ENOMEM;
1842 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1844 struct se_session *se_sess;
1845 struct tcm_vhost_nexus *tv_nexus;
1847 mutex_lock(&tpg->tv_tpg_mutex);
1848 tv_nexus = tpg->tpg_nexus;
1849 if (!tv_nexus) {
1850 mutex_unlock(&tpg->tv_tpg_mutex);
1851 return -ENODEV;
1854 se_sess = tv_nexus->tvn_se_sess;
1855 if (!se_sess) {
1856 mutex_unlock(&tpg->tv_tpg_mutex);
1857 return -ENODEV;
1860 if (tpg->tv_tpg_port_count != 0) {
1861 mutex_unlock(&tpg->tv_tpg_mutex);
1862 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1863 " active TPG port count: %d\n",
1864 tpg->tv_tpg_port_count);
1865 return -EBUSY;
1868 if (tpg->tv_tpg_vhost_count != 0) {
1869 mutex_unlock(&tpg->tv_tpg_mutex);
1870 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1871 " active TPG vhost count: %d\n",
1872 tpg->tv_tpg_vhost_count);
1873 return -EBUSY;
1876 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1877 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1878 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1880 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1882 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1884 transport_deregister_session(tv_nexus->tvn_se_sess);
1885 tpg->tpg_nexus = NULL;
1886 mutex_unlock(&tpg->tv_tpg_mutex);
1888 kfree(tv_nexus);
1889 return 0;
1892 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1893 char *page)
1895 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1896 struct tcm_vhost_tpg, se_tpg);
1897 struct tcm_vhost_nexus *tv_nexus;
1898 ssize_t ret;
1900 mutex_lock(&tpg->tv_tpg_mutex);
1901 tv_nexus = tpg->tpg_nexus;
1902 if (!tv_nexus) {
1903 mutex_unlock(&tpg->tv_tpg_mutex);
1904 return -ENODEV;
1906 ret = snprintf(page, PAGE_SIZE, "%s\n",
1907 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1908 mutex_unlock(&tpg->tv_tpg_mutex);
1910 return ret;
1913 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1914 const char *page,
1915 size_t count)
1917 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1918 struct tcm_vhost_tpg, se_tpg);
1919 struct tcm_vhost_tport *tport_wwn = tpg->tport;
1920 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1921 int ret;
1923 * Shut down the active I_T nexus if 'NULL' is passed.
1925 if (!strncmp(page, "NULL", 4)) {
1926 ret = tcm_vhost_drop_nexus(tpg);
1927 return (!ret) ? count : ret;
1930 * Otherwise make sure the passed virtual Initiator port WWN matches
1931 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1932 * tcm_vhost_make_nexus().
1934 if (strlen(page) >= TCM_VHOST_NAMELEN) {
1935 pr_err("Emulated NAA SAS Address: %s, exceeds"
1936 " max: %d\n", page, TCM_VHOST_NAMELEN);
1937 return -EINVAL;
1939 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1941 ptr = strstr(i_port, "naa.");
1942 if (ptr) {
1943 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1944 pr_err("Passed SAS Initiator Port %s does not"
1945 " match target port protoid: %s\n", i_port,
1946 tcm_vhost_dump_proto_id(tport_wwn));
1947 return -EINVAL;
1949 port_ptr = &i_port[0];
1950 goto check_newline;
1952 ptr = strstr(i_port, "fc.");
1953 if (ptr) {
1954 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1955 pr_err("Passed FCP Initiator Port %s does not"
1956 " match target port protoid: %s\n", i_port,
1957 tcm_vhost_dump_proto_id(tport_wwn));
1958 return -EINVAL;
1960 port_ptr = &i_port[3]; /* Skip over "fc." */
1961 goto check_newline;
1963 ptr = strstr(i_port, "iqn.");
1964 if (ptr) {
1965 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1966 pr_err("Passed iSCSI Initiator Port %s does not"
1967 " match target port protoid: %s\n", i_port,
1968 tcm_vhost_dump_proto_id(tport_wwn));
1969 return -EINVAL;
1971 port_ptr = &i_port[0];
1972 goto check_newline;
1974 pr_err("Unable to locate prefix for emulated Initiator Port:"
1975 " %s\n", i_port);
1976 return -EINVAL;
1978 * Clear any trailing newline for the NAA WWN
1980 check_newline:
1981 if (i_port[strlen(i_port)-1] == '\n')
1982 i_port[strlen(i_port)-1] = '\0';
1984 ret = tcm_vhost_make_nexus(tpg, port_ptr);
1985 if (ret < 0)
1986 return ret;
1988 return count;
1991 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1993 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1994 &tcm_vhost_tpg_nexus.attr,
1995 NULL,
1998 static struct se_portal_group *
1999 tcm_vhost_make_tpg(struct se_wwn *wwn,
2000 struct config_group *group,
2001 const char *name)
2003 struct tcm_vhost_tport *tport = container_of(wwn,
2004 struct tcm_vhost_tport, tport_wwn);
2006 struct tcm_vhost_tpg *tpg;
2007 unsigned long tpgt;
2008 int ret;
2010 if (strstr(name, "tpgt_") != name)
2011 return ERR_PTR(-EINVAL);
2012 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2013 return ERR_PTR(-EINVAL);
2015 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2016 if (!tpg) {
2017 pr_err("Unable to allocate struct tcm_vhost_tpg");
2018 return ERR_PTR(-ENOMEM);
2020 mutex_init(&tpg->tv_tpg_mutex);
2021 INIT_LIST_HEAD(&tpg->tv_tpg_list);
2022 tpg->tport = tport;
2023 tpg->tport_tpgt = tpgt;
2025 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
2026 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2027 if (ret < 0) {
2028 kfree(tpg);
2029 return NULL;
2031 mutex_lock(&tcm_vhost_mutex);
2032 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2033 mutex_unlock(&tcm_vhost_mutex);
2035 return &tpg->se_tpg;
2038 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2040 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2041 struct tcm_vhost_tpg, se_tpg);
2043 mutex_lock(&tcm_vhost_mutex);
2044 list_del(&tpg->tv_tpg_list);
2045 mutex_unlock(&tcm_vhost_mutex);
2047 * Release the virtual I_T Nexus for this vhost TPG
2049 tcm_vhost_drop_nexus(tpg);
2051 * Deregister the se_tpg from TCM..
2053 core_tpg_deregister(se_tpg);
2054 kfree(tpg);
2057 static struct se_wwn *
2058 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2059 struct config_group *group,
2060 const char *name)
2062 struct tcm_vhost_tport *tport;
2063 char *ptr;
2064 u64 wwpn = 0;
2065 int off = 0;
2067 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2068 return ERR_PTR(-EINVAL); */
2070 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2071 if (!tport) {
2072 pr_err("Unable to allocate struct tcm_vhost_tport");
2073 return ERR_PTR(-ENOMEM);
2075 tport->tport_wwpn = wwpn;
2077 * Determine the emulated Protocol Identifier and Target Port Name
2078 * based on the incoming configfs directory name.
2080 ptr = strstr(name, "naa.");
2081 if (ptr) {
2082 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2083 goto check_len;
2085 ptr = strstr(name, "fc.");
2086 if (ptr) {
2087 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2088 off = 3; /* Skip over "fc." */
2089 goto check_len;
2091 ptr = strstr(name, "iqn.");
2092 if (ptr) {
2093 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2094 goto check_len;
2097 pr_err("Unable to locate prefix for emulated Target Port:"
2098 " %s\n", name);
2099 kfree(tport);
2100 return ERR_PTR(-EINVAL);
2102 check_len:
2103 if (strlen(name) >= TCM_VHOST_NAMELEN) {
2104 pr_err("Emulated %s Address: %s, exceeds"
2105 " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
2106 TCM_VHOST_NAMELEN);
2107 kfree(tport);
2108 return ERR_PTR(-EINVAL);
2110 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2112 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2113 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2115 return &tport->tport_wwn;
2118 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2120 struct tcm_vhost_tport *tport = container_of(wwn,
2121 struct tcm_vhost_tport, tport_wwn);
2123 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2124 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2125 tport->tport_name);
2127 kfree(tport);
2130 static ssize_t
2131 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2132 char *page)
2134 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2135 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2136 utsname()->machine);
2139 TF_WWN_ATTR_RO(tcm_vhost, version);
2141 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2142 &tcm_vhost_wwn_version.attr,
2143 NULL,
2146 static struct target_core_fabric_ops tcm_vhost_ops = {
2147 .get_fabric_name = tcm_vhost_get_fabric_name,
2148 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
2149 .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
2150 .tpg_get_tag = tcm_vhost_get_tag,
2151 .tpg_get_default_depth = tcm_vhost_get_default_depth,
2152 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
2153 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
2154 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
2155 .tpg_check_demo_mode = tcm_vhost_check_true,
2156 .tpg_check_demo_mode_cache = tcm_vhost_check_true,
2157 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2158 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2159 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
2160 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
2161 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
2162 .release_cmd = tcm_vhost_release_cmd,
2163 .check_stop_free = vhost_scsi_check_stop_free,
2164 .shutdown_session = tcm_vhost_shutdown_session,
2165 .close_session = tcm_vhost_close_session,
2166 .sess_get_index = tcm_vhost_sess_get_index,
2167 .sess_get_initiator_sid = NULL,
2168 .write_pending = tcm_vhost_write_pending,
2169 .write_pending_status = tcm_vhost_write_pending_status,
2170 .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
2171 .get_task_tag = tcm_vhost_get_task_tag,
2172 .get_cmd_state = tcm_vhost_get_cmd_state,
2173 .queue_data_in = tcm_vhost_queue_data_in,
2174 .queue_status = tcm_vhost_queue_status,
2175 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
2177 * Setup callers for generic logic in target_core_fabric_configfs.c
2179 .fabric_make_wwn = tcm_vhost_make_tport,
2180 .fabric_drop_wwn = tcm_vhost_drop_tport,
2181 .fabric_make_tpg = tcm_vhost_make_tpg,
2182 .fabric_drop_tpg = tcm_vhost_drop_tpg,
2183 .fabric_post_link = tcm_vhost_port_link,
2184 .fabric_pre_unlink = tcm_vhost_port_unlink,
2185 .fabric_make_np = NULL,
2186 .fabric_drop_np = NULL,
2187 .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
2188 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
2191 static int tcm_vhost_register_configfs(void)
2193 struct target_fabric_configfs *fabric;
2194 int ret;
2196 pr_debug("TCM_VHOST fabric module %s on %s/%s"
2197 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2198 utsname()->machine);
2200 * Register the top level struct config_item_type with TCM core
2202 fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2203 if (IS_ERR(fabric)) {
2204 pr_err("target_fabric_configfs_init() failed\n");
2205 return PTR_ERR(fabric);
2208 * Setup fabric->tf_ops from our local tcm_vhost_ops
2210 fabric->tf_ops = tcm_vhost_ops;
2212 * Setup default attribute lists for various fabric->tf_cit_tmpl
2214 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2215 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2216 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2217 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2218 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2219 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2220 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2221 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2222 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2224 * Register the fabric for use within TCM
2226 ret = target_fabric_configfs_register(fabric);
2227 if (ret < 0) {
2228 pr_err("target_fabric_configfs_register() failed"
2229 " for TCM_VHOST\n");
2230 return ret;
2233 * Setup our local pointer to *fabric
2235 tcm_vhost_fabric_configfs = fabric;
2236 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2237 return 0;
2240 static void tcm_vhost_deregister_configfs(void)
2242 if (!tcm_vhost_fabric_configfs)
2243 return;
2245 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2246 tcm_vhost_fabric_configfs = NULL;
2247 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2250 static int __init tcm_vhost_init(void)
2252 int ret = -ENOMEM;
2254 * Use our own dedicated workqueue for submitting I/O into
2255 * target core to avoid contention within system_wq.
2257 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2258 if (!tcm_vhost_workqueue)
2259 goto out;
2261 ret = vhost_scsi_register();
2262 if (ret < 0)
2263 goto out_destroy_workqueue;
2265 ret = tcm_vhost_register_configfs();
2266 if (ret < 0)
2267 goto out_vhost_scsi_deregister;
2269 return 0;
2271 out_vhost_scsi_deregister:
2272 vhost_scsi_deregister();
2273 out_destroy_workqueue:
2274 destroy_workqueue(tcm_vhost_workqueue);
2275 out:
2276 return ret;
2279 static void tcm_vhost_exit(void)
2281 tcm_vhost_deregister_configfs();
2282 vhost_scsi_deregister();
2283 destroy_workqueue(tcm_vhost_workqueue);
2286 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2287 MODULE_ALIAS("tcm_vhost");
2288 MODULE_LICENSE("GPL");
2289 module_init(tcm_vhost_init);
2290 module_exit(tcm_vhost_exit);