drivers/vhost/scsi.c (Linux 4.14.51)
1 /*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
4 * (C) Copyright 2010-2013 Datera, Inc.
5 * (C) Copyright 2010-2012 IBM Corp.
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 ****************************************************************************/
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49 #include <linux/percpu_ida.h>
51 #include "vhost.h"
53 #define VHOST_SCSI_VERSION "v0.1"
54 #define VHOST_SCSI_NAMELEN 256
55 #define VHOST_SCSI_MAX_CDB_SIZE 32
56 #define VHOST_SCSI_DEFAULT_TAGS 256
57 #define VHOST_SCSI_PREALLOC_SGLS 2048
58 #define VHOST_SCSI_PREALLOC_UPAGES 2048
59 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
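/*
 * Each of the VHOST_SCSI_DEFAULT_TAGS per-session commands gets its
 * scatterlist, protection scatterlist and user-page arrays preallocated
 * in vhost_scsi_nexus_cb(), sized by the PREALLOC constants above.
 */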
61 struct vhost_scsi_inflight {
62 /* Wait for the flush operation to finish */
63 struct completion comp;
64 /* Refcount for the inflight reqs */
65 struct kref kref;
68 struct vhost_scsi_cmd {
69 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
70 int tvc_vq_desc;
71 /* virtio-scsi initiator task attribute */
72 int tvc_task_attr;
73 /* virtio-scsi response incoming iovecs */
74 int tvc_in_iovs;
75 /* virtio-scsi initiator data direction */
76 enum dma_data_direction tvc_data_direction;
77 /* Expected data transfer length from virtio-scsi header */
78 u32 tvc_exp_data_len;
79 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
80 u64 tvc_tag;
81 /* The number of scatterlists associated with this cmd */
82 u32 tvc_sgl_count;
83 u32 tvc_prot_sgl_count;
84 /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
85 u32 tvc_lun;
86 /* Pointer to the SGL formatted memory from virtio-scsi */
87 struct scatterlist *tvc_sgl;
88 struct scatterlist *tvc_prot_sgl;
89 struct page **tvc_upages;
90 /* Pointer to response header iovec */
91 struct iovec tvc_resp_iov;
92 /* Pointer to vhost_scsi for our device */
93 struct vhost_scsi *tvc_vhost;
94 /* Pointer to vhost_virtqueue for the cmd */
95 struct vhost_virtqueue *tvc_vq;
96 /* Pointer to vhost nexus memory */
97 struct vhost_scsi_nexus *tvc_nexus;
98 /* The TCM I/O descriptor that is accessed via container_of() */
99 struct se_cmd tvc_se_cmd;
100 /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
101 struct work_struct work;
102 /* Copy of the incoming SCSI command descriptor block (CDB) */
103 unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
104 /* Sense buffer that will be mapped into outgoing status */
105 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
106 /* Completed commands list, serviced from vhost worker thread */
107 struct llist_node tvc_completion_list;
108 /* Used to track inflight cmd */
109 struct vhost_scsi_inflight *inflight;
112 struct vhost_scsi_nexus {
113 /* Pointer to TCM session for I_T Nexus */
114 struct se_session *tvn_se_sess;
117 struct vhost_scsi_tpg {
118 /* Vhost port target portal group tag for TCM */
119 u16 tport_tpgt;
120 /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
121 int tv_tpg_port_count;
122 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
123 int tv_tpg_vhost_count;
124 /* Used for enabling T10-PI with legacy devices */
125 int tv_fabric_prot_type;
126 /* list for vhost_scsi_list */
127 struct list_head tv_tpg_list;
128 /* Used to protect access for tpg_nexus */
129 struct mutex tv_tpg_mutex;
130 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
131 struct vhost_scsi_nexus *tpg_nexus;
132 /* Pointer back to vhost_scsi_tport */
133 struct vhost_scsi_tport *tport;
134 /* Returned by vhost_scsi_make_tpg() */
135 struct se_portal_group se_tpg;
136 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
137 struct vhost_scsi *vhost_scsi;
140 struct vhost_scsi_tport {
141 /* SCSI protocol the tport is providing */
142 u8 tport_proto_id;
143 /* Binary World Wide unique Port Name for Vhost Target port */
144 u64 tport_wwpn;
145 /* ASCII formatted WWPN for Vhost Target port */
146 char tport_name[VHOST_SCSI_NAMELEN];
147 /* Returned by vhost_scsi_make_tport() */
148 struct se_wwn tport_wwn;
151 struct vhost_scsi_evt {
152 /* event to be sent to guest */
153 struct virtio_scsi_event event;
154 /* event list, serviced from vhost worker thread */
155 struct llist_node list;
158 enum {
159 VHOST_SCSI_VQ_CTL = 0,
160 VHOST_SCSI_VQ_EVT = 1,
161 VHOST_SCSI_VQ_IO = 2,
164 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
165 enum {
166 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
167 (1ULL << VIRTIO_SCSI_F_T10_PI)
170 #define VHOST_SCSI_MAX_TARGET 256
171 #define VHOST_SCSI_MAX_VQ 128
172 #define VHOST_SCSI_MAX_EVENT 128
174 struct vhost_scsi_virtqueue {
175 struct vhost_virtqueue vq;
177 * Reference counting for inflight reqs, used for the flush operation. At
178 * any given time one counter tracks newly submitted commands, while we
179 * wait for the other one to drain to 0.
181 struct vhost_scsi_inflight inflights[2];
183 * Indicate current inflight in use, protected by vq->mutex.
184 * Writers must also take dev mutex and flush under it.
186 int inflight_idx;
189 struct vhost_scsi {
190 /* Protected by vhost_scsi->dev.mutex */
191 struct vhost_scsi_tpg **vs_tpg;
192 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
194 struct vhost_dev dev;
195 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
197 struct vhost_work vs_completion_work; /* cmd completion work item */
198 struct llist_head vs_completion_list; /* cmd completion queue */
200 struct vhost_work vs_event_work; /* evt injection work item */
201 struct llist_head vs_event_list; /* evt injection queue */
203 bool vs_events_missed; /* any missed events, protected by vq->mutex */
204 int vs_events_nr; /* num of pending events, protected by vq->mutex */
207 static struct workqueue_struct *vhost_scsi_workqueue;
209 /* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
210 static DEFINE_MUTEX(vhost_scsi_mutex);
211 static LIST_HEAD(vhost_scsi_list);
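/*
 * Number of pages spanned by [iov_base, iov_base + iov_len). For example,
 * with 4 KiB pages, iov_base = 0x1ff8 and iov_len = 16 yield
 * (PAGE_ALIGN(0x2008) - 0x1000) >> PAGE_SHIFT = (0x3000 - 0x1000) >> 12 = 2.
 */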
213 static int iov_num_pages(void __user *iov_base, size_t iov_len)
215 return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
216 ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
219 static void vhost_scsi_done_inflight(struct kref *kref)
221 struct vhost_scsi_inflight *inflight;
223 inflight = container_of(kref, struct vhost_scsi_inflight, kref);
224 complete(&inflight->comp);
227 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
228 struct vhost_scsi_inflight *old_inflight[])
230 struct vhost_scsi_inflight *new_inflight;
231 struct vhost_virtqueue *vq;
232 int idx, i;
234 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
235 vq = &vs->vqs[i].vq;
237 mutex_lock(&vq->mutex);
239 /* store the old inflight */
240 idx = vs->vqs[i].inflight_idx;
241 if (old_inflight)
242 old_inflight[i] = &vs->vqs[i].inflights[idx];
244 /* set up the new inflight */
245 vs->vqs[i].inflight_idx = idx ^ 1;
246 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
247 kref_init(&new_inflight->kref);
248 init_completion(&new_inflight->comp);
250 mutex_unlock(&vq->mutex);
254 static struct vhost_scsi_inflight *
255 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
257 struct vhost_scsi_inflight *inflight;
258 struct vhost_scsi_virtqueue *svq;
260 svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
261 inflight = &svq->inflights[svq->inflight_idx];
262 kref_get(&inflight->kref);
264 return inflight;
267 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
269 kref_put(&inflight->kref, vhost_scsi_done_inflight);
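/*
 * The inflights[2] counters in each virtqueue implement a generation
 * scheme: vhost_scsi_flush() flips inflight_idx so new commands take a
 * reference on the fresh counter, drops the initial reference on the old
 * one, and waits for its completion to fire once every command from the
 * previous generation has called vhost_scsi_put_inflight().
 */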
272 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
274 return 1;
277 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
279 return 0;
282 static char *vhost_scsi_get_fabric_name(void)
284 return "vhost";
287 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
289 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
290 struct vhost_scsi_tpg, se_tpg);
291 struct vhost_scsi_tport *tport = tpg->tport;
293 return &tport->tport_name[0];
296 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
298 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
299 struct vhost_scsi_tpg, se_tpg);
300 return tpg->tport_tpgt;
303 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
305 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
306 struct vhost_scsi_tpg, se_tpg);
308 return tpg->tv_fabric_prot_type;
311 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
313 return 1;
316 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
318 struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
319 struct vhost_scsi_cmd, tvc_se_cmd);
320 struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
321 int i;
323 if (tv_cmd->tvc_sgl_count) {
324 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
325 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
327 if (tv_cmd->tvc_prot_sgl_count) {
328 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
329 put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
332 vhost_scsi_put_inflight(tv_cmd->inflight);
333 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
336 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
338 return 0;
341 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
343 /* Go ahead and process the write immediately: the guest payload is already mapped, so there is no separate data-out phase */
344 target_execute_cmd(se_cmd);
345 return 0;
348 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
350 return 0;
353 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
355 return;
358 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
360 return 0;
363 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
365 struct vhost_scsi *vs = cmd->tvc_vhost;
367 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
369 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
372 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
374 struct vhost_scsi_cmd *cmd = container_of(se_cmd,
375 struct vhost_scsi_cmd, tvc_se_cmd);
376 vhost_scsi_complete_cmd(cmd);
377 return 0;
380 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
382 struct vhost_scsi_cmd *cmd = container_of(se_cmd,
383 struct vhost_scsi_cmd, tvc_se_cmd);
384 vhost_scsi_complete_cmd(cmd);
385 return 0;
388 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
390 return;
393 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
395 return;
398 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
400 vs->vs_events_nr--;
401 kfree(evt);
404 static struct vhost_scsi_evt *
405 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
406 u32 event, u32 reason)
408 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
409 struct vhost_scsi_evt *evt;
411 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
412 vs->vs_events_missed = true;
413 return NULL;
416 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
417 if (!evt) {
418 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
419 vs->vs_events_missed = true;
420 return NULL;
423 evt->event.event = cpu_to_vhost32(vq, event);
424 evt->event.reason = cpu_to_vhost32(vq, reason);
425 vs->vs_events_nr++;
427 return evt;
430 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
432 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
434 /* TODO locking against target/backend threads? */
435 transport_generic_free_cmd(se_cmd, 0);
439 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
441 return target_put_sess_cmd(se_cmd);
444 static void
445 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
447 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
448 struct virtio_scsi_event *event = &evt->event;
449 struct virtio_scsi_event __user *eventp;
450 unsigned out, in;
451 int head, ret;
453 if (!vq->private_data) {
454 vs->vs_events_missed = true;
455 return;
458 again:
459 vhost_disable_notify(&vs->dev, vq);
460 head = vhost_get_vq_desc(vq, vq->iov,
461 ARRAY_SIZE(vq->iov), &out, &in,
462 NULL, NULL);
463 if (head < 0) {
464 vs->vs_events_missed = true;
465 return;
467 if (head == vq->num) {
468 if (vhost_enable_notify(&vs->dev, vq))
469 goto again;
470 vs->vs_events_missed = true;
471 return;
474 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
475 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
476 vq->iov[out].iov_len);
477 vs->vs_events_missed = true;
478 return;
481 if (vs->vs_events_missed) {
482 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
483 vs->vs_events_missed = false;
486 eventp = vq->iov[out].iov_base;
487 ret = __copy_to_user(eventp, event, sizeof(*event));
488 if (!ret)
489 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
490 else
491 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
494 static void vhost_scsi_evt_work(struct vhost_work *work)
496 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
497 vs_event_work);
498 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
499 struct vhost_scsi_evt *evt, *t;
500 struct llist_node *llnode;
502 mutex_lock(&vq->mutex);
503 llnode = llist_del_all(&vs->vs_event_list);
504 llist_for_each_entry_safe(evt, t, llnode, list) {
505 vhost_scsi_do_evt_work(vs, evt);
506 vhost_scsi_free_evt(vs, evt);
508 mutex_unlock(&vq->mutex);
511 /* Fill in status and signal that we are done processing this command.
513 * This is scheduled in the vhost work queue so we are called with the owner
514 * process's mm and can access the vring.
516 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
518 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
519 vs_completion_work);
520 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
521 struct virtio_scsi_cmd_resp v_rsp;
522 struct vhost_scsi_cmd *cmd;
523 struct llist_node *llnode;
524 struct se_cmd *se_cmd;
525 struct iov_iter iov_iter;
526 int ret, vq;
528 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
529 llnode = llist_del_all(&vs->vs_completion_list);
530 llist_for_each_entry(cmd, llnode, tvc_completion_list) {
531 se_cmd = &cmd->tvc_se_cmd;
533 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
534 cmd, se_cmd->residual_count, se_cmd->scsi_status);
536 memset(&v_rsp, 0, sizeof(v_rsp));
537 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
538 /* TODO is status_qualifier field needed? */
539 v_rsp.status = se_cmd->scsi_status;
540 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
541 se_cmd->scsi_sense_length);
542 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
543 se_cmd->scsi_sense_length);
545 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
546 cmd->tvc_in_iovs, sizeof(v_rsp));
547 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
548 if (likely(ret == sizeof(v_rsp))) {
549 struct vhost_scsi_virtqueue *q;
550 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
551 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
552 vq = q - vs->vqs;
553 __set_bit(vq, signal);
554 } else
555 pr_err("Faulted on virtio_scsi_cmd_resp\n");
557 vhost_scsi_free_cmd(cmd);
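/* Kick each virtqueue once after draining the whole completion list,
 * rather than signalling per completed command. */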
560 vq = -1;
561 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
562 < VHOST_SCSI_MAX_VQ)
563 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
566 static struct vhost_scsi_cmd *
567 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
568 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
569 u32 exp_data_len, int data_direction)
571 struct vhost_scsi_cmd *cmd;
572 struct vhost_scsi_nexus *tv_nexus;
573 struct se_session *se_sess;
574 struct scatterlist *sg, *prot_sg;
575 struct page **pages;
576 int tag;
578 tv_nexus = tpg->tpg_nexus;
579 if (!tv_nexus) {
580 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
581 return ERR_PTR(-EIO);
583 se_sess = tv_nexus->tvn_se_sess;
585 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
586 if (tag < 0) {
587 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
588 return ERR_PTR(-ENOMEM);
591 cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
592 sg = cmd->tvc_sgl;
593 prot_sg = cmd->tvc_prot_sgl;
594 pages = cmd->tvc_upages;
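/* The memset() below wipes the whole descriptor; the sg/prot_sg/pages
 * pointers saved above are restored afterwards so the preallocated
 * arrays survive reuse of this tag. */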
595 memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
597 cmd->tvc_sgl = sg;
598 cmd->tvc_prot_sgl = prot_sg;
599 cmd->tvc_upages = pages;
600 cmd->tvc_se_cmd.map_tag = tag;
601 cmd->tvc_tag = scsi_tag;
602 cmd->tvc_lun = lun;
603 cmd->tvc_task_attr = task_attr;
604 cmd->tvc_exp_data_len = exp_data_len;
605 cmd->tvc_data_direction = data_direction;
606 cmd->tvc_nexus = tv_nexus;
607 cmd->inflight = vhost_scsi_get_inflight(vq);
609 memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
611 return cmd;
615 * Map a user memory range into a scatterlist
617 * Returns the number of scatterlist entries used or -errno on error.
619 static int
620 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
621 void __user *ptr,
622 size_t len,
623 struct scatterlist *sgl,
624 bool write)
626 unsigned int npages = 0, offset, nbytes;
627 unsigned int pages_nr = iov_num_pages(ptr, len);
628 struct scatterlist *sg = sgl;
629 struct page **pages = cmd->tvc_upages;
630 int ret, i;
632 if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
633 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
634 " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
635 pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
636 return -ENOBUFS;
639 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
640 /* No pages were pinned */
641 if (ret < 0)
642 goto out;
643 /* Fewer pages pinned than requested */
644 if (ret != pages_nr) {
645 for (i = 0; i < ret; i++)
646 put_page(pages[i]);
647 ret = -EFAULT;
648 goto out;
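/* Split the pinned range into scatterlist entries of at most one page;
 * only the first and last entries may cover a partial page. */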
651 while (len > 0) {
652 offset = (uintptr_t)ptr & ~PAGE_MASK;
653 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
654 sg_set_page(sg, pages[npages], nbytes, offset);
655 ptr += nbytes;
656 len -= nbytes;
657 sg++;
658 npages++;
661 out:
662 return ret;
665 static int
666 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
668 int sgl_count = 0;
670 if (!iter || !iter->iov) {
671 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
672 " present\n", __func__, bytes);
673 return -EINVAL;
676 sgl_count = iov_iter_npages(iter, 0xffff);
677 if (sgl_count > max_sgls) {
678 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
679 " max_sgls: %d\n", __func__, sgl_count, max_sgls);
680 return -EINVAL;
682 return sgl_count;
685 static int
686 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
687 struct iov_iter *iter,
688 struct scatterlist *sg, int sg_count)
690 size_t off = iter->iov_offset;
691 struct scatterlist *p = sg;
692 int i, ret;
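/* Map each iovec segment in turn. On error, vhost_scsi_map_to_sgl()
 * releases the pages it pinned itself; the cleanup below drops the
 * pages of segments that were already mapped. */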
694 for (i = 0; i < iter->nr_segs; i++) {
695 void __user *base = iter->iov[i].iov_base + off;
696 size_t len = iter->iov[i].iov_len - off;
698 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
699 if (ret < 0) {
700 while (p < sg) {
701 struct page *page = sg_page(p++);
702 if (page)
703 put_page(page);
705 return ret;
707 sg += ret;
708 off = 0;
710 return 0;
713 static int
714 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
715 size_t prot_bytes, struct iov_iter *prot_iter,
716 size_t data_bytes, struct iov_iter *data_iter)
718 int sgl_count, ret;
719 bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
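/* "write" means the host writes into the guest buffer, i.e. a
 * device-to-driver transfer as seen from the guest. */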
721 if (prot_bytes) {
722 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
723 VHOST_SCSI_PREALLOC_PROT_SGLS);
724 if (sgl_count < 0)
725 return sgl_count;
727 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
728 cmd->tvc_prot_sgl_count = sgl_count;
729 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
730 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
732 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
733 cmd->tvc_prot_sgl,
734 cmd->tvc_prot_sgl_count);
735 if (ret < 0) {
736 cmd->tvc_prot_sgl_count = 0;
737 return ret;
740 sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
741 VHOST_SCSI_PREALLOC_SGLS);
742 if (sgl_count < 0)
743 return sgl_count;
745 sg_init_table(cmd->tvc_sgl, sgl_count);
746 cmd->tvc_sgl_count = sgl_count;
747 pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
748 cmd->tvc_sgl, cmd->tvc_sgl_count);
750 ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
751 cmd->tvc_sgl, cmd->tvc_sgl_count);
752 if (ret < 0) {
753 cmd->tvc_sgl_count = 0;
754 return ret;
756 return 0;
759 static int vhost_scsi_to_tcm_attr(int attr)
761 switch (attr) {
762 case VIRTIO_SCSI_S_SIMPLE:
763 return TCM_SIMPLE_TAG;
764 case VIRTIO_SCSI_S_ORDERED:
765 return TCM_ORDERED_TAG;
766 case VIRTIO_SCSI_S_HEAD:
767 return TCM_HEAD_TAG;
768 case VIRTIO_SCSI_S_ACA:
769 return TCM_ACA_TAG;
770 default:
771 break;
773 return TCM_SIMPLE_TAG;
776 static void vhost_scsi_submission_work(struct work_struct *work)
778 struct vhost_scsi_cmd *cmd =
779 container_of(work, struct vhost_scsi_cmd, work);
780 struct vhost_scsi_nexus *tv_nexus;
781 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
782 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
783 int rc;
785 /* FIXME: BIDI operation */
786 if (cmd->tvc_sgl_count) {
787 sg_ptr = cmd->tvc_sgl;
789 if (cmd->tvc_prot_sgl_count)
790 sg_prot_ptr = cmd->tvc_prot_sgl;
791 else
792 se_cmd->prot_pto = true;
793 } else {
794 sg_ptr = NULL;
796 tv_nexus = cmd->tvc_nexus;
798 se_cmd->tag = 0;
799 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
800 cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
801 cmd->tvc_lun, cmd->tvc_exp_data_len,
802 vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
803 cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
804 sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
805 cmd->tvc_prot_sgl_count);
806 if (rc < 0) {
807 transport_send_check_condition_and_sense(se_cmd,
808 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
809 transport_generic_free_cmd(se_cmd, 0);
813 static void
814 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
815 struct vhost_virtqueue *vq,
816 int head, unsigned out)
818 struct virtio_scsi_cmd_resp __user *resp;
819 struct virtio_scsi_cmd_resp rsp;
820 int ret;
822 memset(&rsp, 0, sizeof(rsp));
823 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
824 resp = vq->iov[out].iov_base;
825 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
826 if (!ret)
827 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
828 else
829 pr_err("Faulted on virtio_scsi_cmd_resp\n");
832 static void
833 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
835 struct vhost_scsi_tpg **vs_tpg, *tpg;
836 struct virtio_scsi_cmd_req v_req;
837 struct virtio_scsi_cmd_req_pi v_req_pi;
838 struct vhost_scsi_cmd *cmd;
839 struct iov_iter out_iter, in_iter, prot_iter, data_iter;
840 u64 tag;
841 u32 exp_data_len, data_direction;
842 unsigned int out = 0, in = 0;
843 int head, ret, prot_bytes;
844 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
845 size_t out_size, in_size;
846 u16 lun;
847 u8 *target, *lunp, task_attr;
848 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
849 void *req, *cdb;
851 mutex_lock(&vq->mutex);
853 * We can handle the vq only after the endpoint is setup by calling the
854 * VHOST_SCSI_SET_ENDPOINT ioctl.
856 vs_tpg = vq->private_data;
857 if (!vs_tpg)
858 goto out;
860 vhost_disable_notify(&vs->dev, vq);
862 for (;;) {
863 head = vhost_get_vq_desc(vq, vq->iov,
864 ARRAY_SIZE(vq->iov), &out, &in,
865 NULL, NULL);
866 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
867 head, out, in);
868 /* On error, stop handling until the next kick. */
869 if (unlikely(head < 0))
870 break;
871 /* Nothing new? Wait for eventfd to tell us they refilled. */
872 if (head == vq->num) {
873 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
874 vhost_disable_notify(&vs->dev, vq);
875 continue;
877 break;
880 * Check for a sane response buffer so we can report early
881 * errors back to the guest.
883 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
884 vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
885 " size, got %zu bytes\n", vq->iov[out].iov_len);
886 break;
889 * Setup pointers and values based upon different virtio-scsi
890 * request header if T10_PI is enabled in KVM guest.
892 if (t10_pi) {
893 req = &v_req_pi;
894 req_size = sizeof(v_req_pi);
895 lunp = &v_req_pi.lun[0];
896 target = &v_req_pi.lun[1];
897 } else {
898 req = &v_req;
899 req_size = sizeof(v_req);
900 lunp = &v_req.lun[0];
901 target = &v_req.lun[1];
904 * FIXME: Not correct for BIDI operation
906 out_size = iov_length(vq->iov, out);
907 in_size = iov_length(&vq->iov[out], in);
910 * Copy over the virtio-scsi request header, which for an
911 * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
912 * single iovec may contain both the header + outgoing
913 * WRITE payloads.
915 * copy_from_iter() will advance out_iter, so that it will
916 * point at the start of the outgoing WRITE payload, if
917 * DMA_TO_DEVICE is set.
919 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
921 if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
922 vq_err(vq, "Faulted on copy_from_iter\n");
923 vhost_scsi_send_bad_target(vs, vq, head, out);
924 continue;
926 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
927 if (unlikely(*lunp != 1)) {
928 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
929 vhost_scsi_send_bad_target(vs, vq, head, out);
930 continue;
933 tpg = ACCESS_ONCE(vs_tpg[*target]);
934 if (unlikely(!tpg)) {
935 /* Target does not exist, fail the request */
936 vhost_scsi_send_bad_target(vs, vq, head, out);
937 continue;
940 * Determine data_direction by comparing the total outgoing iovec
941 * size against the virtio-scsi request header, and the total
942 * incoming iovec size against the response header.
944 * For DMA_TO_DEVICE this is out_iter, which is already pointing
945 * to the right place.
947 * For DMA_FROM_DEVICE, the iovec will be just past the end
948 * of the virtio-scsi response header in either the same
949 * or immediately following iovec.
951 * Any associated T10_PI bytes for the outgoing / incoming
952 * payloads are included in calculation of exp_data_len here.
954 prot_bytes = 0;
956 if (out_size > req_size) {
957 data_direction = DMA_TO_DEVICE;
958 exp_data_len = out_size - req_size;
959 data_iter = out_iter;
960 } else if (in_size > rsp_size) {
961 data_direction = DMA_FROM_DEVICE;
962 exp_data_len = in_size - rsp_size;
964 iov_iter_init(&in_iter, READ, &vq->iov[out], in,
965 rsp_size + exp_data_len);
966 iov_iter_advance(&in_iter, rsp_size);
967 data_iter = in_iter;
968 } else {
969 data_direction = DMA_NONE;
970 exp_data_len = 0;
973 * If T10_PI header + payload is present, setup prot_iter values
974 * and recalculate data_iter for vhost_scsi_mapal() mapping to
975 * host scatterlists via get_user_pages_fast().
977 if (t10_pi) {
978 if (v_req_pi.pi_bytesout) {
979 if (data_direction != DMA_TO_DEVICE) {
980 vq_err(vq, "Received non zero pi_bytesout,"
981 " but wrong data_direction\n");
982 vhost_scsi_send_bad_target(vs, vq, head, out);
983 continue;
985 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
986 } else if (v_req_pi.pi_bytesin) {
987 if (data_direction != DMA_FROM_DEVICE) {
988 vq_err(vq, "Received non zero pi_bytesin,"
989 " but wrong data_direction\n");
990 vhost_scsi_send_bad_target(vs, vq, head, out);
991 continue;
993 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
996 * Set prot_iter to data_iter, and advance past any
997 * preceding prot_bytes that may be present.
999 * Also fix up the exp_data_len to reflect only the
1000 * actual data payload length.
1002 if (prot_bytes) {
1003 exp_data_len -= prot_bytes;
1004 prot_iter = data_iter;
1005 iov_iter_advance(&data_iter, prot_bytes);
1007 tag = vhost64_to_cpu(vq, v_req_pi.tag);
1008 task_attr = v_req_pi.task_attr;
1009 cdb = &v_req_pi.cdb[0];
1010 lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1011 } else {
1012 tag = vhost64_to_cpu(vq, v_req.tag);
1013 task_attr = v_req.task_attr;
1014 cdb = &v_req.cdb[0];
1015 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
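/* virtio-scsi encodes an 8-byte LUN: byte 0 is 1, byte 1 the target,
 * and bytes 2-3 a single-level LUN in big-endian with 0x40 set in
 * byte 2 (SAM flat-space addressing); & 0x3FFF strips that flag. */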
1018 * Check that the received CDB size does not exceed our
1019 * hardcoded max for vhost-scsi, then get a pre-allocated
1020 * cmd descriptor for the new virtio-scsi tag.
1022 * TODO what if cdb was too small for varlen cdb header?
1024 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1025 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1026 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1027 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1028 vhost_scsi_send_bad_target(vs, vq, head, out);
1029 continue;
1031 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1032 exp_data_len + prot_bytes,
1033 data_direction);
1034 if (IS_ERR(cmd)) {
1035 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1036 PTR_ERR(cmd));
1037 vhost_scsi_send_bad_target(vs, vq, head, out);
1038 continue;
1040 cmd->tvc_vhost = vs;
1041 cmd->tvc_vq = vq;
1042 cmd->tvc_resp_iov = vq->iov[out];
1043 cmd->tvc_in_iovs = in;
1045 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1046 cmd->tvc_cdb[0], cmd->tvc_lun);
1047 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1048 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1050 if (data_direction != DMA_NONE) {
1051 ret = vhost_scsi_mapal(cmd,
1052 prot_bytes, &prot_iter,
1053 exp_data_len, &data_iter);
1054 if (unlikely(ret)) {
1055 vq_err(vq, "Failed to map iov to sgl\n");
1056 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1057 vhost_scsi_send_bad_target(vs, vq, head, out);
1058 continue;
1062 * Save the descriptor from vhost_get_vq_desc() to be used to
1063 * complete the virtio-scsi request in TCM callback context via
1064 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1066 cmd->tvc_vq_desc = head;
1068 * Dispatch cmd descriptor for cmwq execution in process
1069 * context provided by vhost_scsi_workqueue. This also ensures
1070 * cmd is executed on the same kworker CPU as this vhost
1071 * thread to gain positive L2 cache locality effects.
1073 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1074 queue_work(vhost_scsi_workqueue, &cmd->work);
1076 out:
1077 mutex_unlock(&vq->mutex);
1080 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1082 pr_debug("%s: The handling func for control queue.\n", __func__);
1085 static void
1086 vhost_scsi_send_evt(struct vhost_scsi *vs,
1087 struct vhost_scsi_tpg *tpg,
1088 struct se_lun *lun,
1089 u32 event,
1090 u32 reason)
1092 struct vhost_scsi_evt *evt;
1094 evt = vhost_scsi_allocate_evt(vs, event, reason);
1095 if (!evt)
1096 return;
1098 if (tpg && lun) {
1099 /* TODO: share lun setup code with virtio-scsi.ko */
1101 * Note: evt->event is zeroed when we allocate it and
1102 * lun[4-7] need to be zero according to virtio-scsi spec.
1104 evt->event.lun[0] = 0x01;
1105 evt->event.lun[1] = tpg->tport_tpgt;
1106 if (lun->unpacked_lun >= 256)
1107 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1108 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
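/* Mirrors the request LUN format: LUNs below 256 use plain
 * addressing with byte 2 left zero, larger ones set 0x40 in byte 2
 * (SAM flat-space addressing) plus the LUN's high bits. */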
1111 llist_add(&evt->list, &vs->vs_event_list);
1112 vhost_work_queue(&vs->dev, &vs->vs_event_work);
1115 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1117 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1118 poll.work);
1119 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1121 mutex_lock(&vq->mutex);
1122 if (!vq->private_data)
1123 goto out;
1125 if (vs->vs_events_missed)
1126 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1127 out:
1128 mutex_unlock(&vq->mutex);
1131 static void vhost_scsi_handle_kick(struct vhost_work *work)
1133 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1134 poll.work);
1135 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1137 vhost_scsi_handle_vq(vs, vq);
1140 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1142 vhost_poll_flush(&vs->vqs[index].vq.poll);
1145 /* Callers must hold dev mutex */
1146 static void vhost_scsi_flush(struct vhost_scsi *vs)
1148 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1149 int i;
1151 /* Init new inflight and remember the old inflight */
1152 vhost_scsi_init_inflight(vs, old_inflight);
1155 * The inflight->kref was initialized to 1. We decrement it here to
1156 * indicate the start of the flush operation so that it will reach 0
1157 * when all the reqs are finished.
1159 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1160 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1162 /* Flush both the vhost poll and vhost work */
1163 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1164 vhost_scsi_flush_vq(vs, i);
1165 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1166 vhost_work_flush(&vs->dev, &vs->vs_event_work);
1168 /* Wait for all reqs issued before the flush to be finished */
1169 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1170 wait_for_completion(&old_inflight[i]->comp);
1174 * Called from vhost_scsi_ioctl() context to walk the list of available
1175 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1177 * The lock nesting rule is:
1178 * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1180 static int
1181 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1182 struct vhost_scsi_target *t)
1184 struct se_portal_group *se_tpg;
1185 struct vhost_scsi_tport *tv_tport;
1186 struct vhost_scsi_tpg *tpg;
1187 struct vhost_scsi_tpg **vs_tpg;
1188 struct vhost_virtqueue *vq;
1189 int index, ret, i, len;
1190 bool match = false;
1192 mutex_lock(&vhost_scsi_mutex);
1193 mutex_lock(&vs->dev.mutex);
1195 /* Verify that the ring has been set up correctly. */
1196 for (index = 0; index < vs->dev.nvqs; ++index) {
1198 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1199 ret = -EFAULT;
1200 goto out;
1204 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1205 vs_tpg = kzalloc(len, GFP_KERNEL);
1206 if (!vs_tpg) {
1207 ret = -ENOMEM;
1208 goto out;
1210 if (vs->vs_tpg)
1211 memcpy(vs_tpg, vs->vs_tpg, len);
1213 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1214 mutex_lock(&tpg->tv_tpg_mutex);
1215 if (!tpg->tpg_nexus) {
1216 mutex_unlock(&tpg->tv_tpg_mutex);
1217 continue;
1219 if (tpg->tv_tpg_vhost_count != 0) {
1220 mutex_unlock(&tpg->tv_tpg_mutex);
1221 continue;
1223 tv_tport = tpg->tport;
1225 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1226 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1227 kfree(vs_tpg);
1228 mutex_unlock(&tpg->tv_tpg_mutex);
1229 ret = -EEXIST;
1230 goto out;
1233 * In order to ensure individual vhost-scsi configfs
1234 * groups cannot be removed while in use by vhost ioctl,
1235 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1236 * dependency now.
1238 se_tpg = &tpg->se_tpg;
1239 ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1240 if (ret) {
1241 pr_warn("configfs_depend_item() failed: %d\n", ret);
1242 kfree(vs_tpg);
1243 mutex_unlock(&tpg->tv_tpg_mutex);
1244 goto out;
1246 tpg->tv_tpg_vhost_count++;
1247 tpg->vhost_scsi = vs;
1248 vs_tpg[tpg->tport_tpgt] = tpg;
1249 smp_mb__after_atomic();
1250 match = true;
1252 mutex_unlock(&tpg->tv_tpg_mutex);
1255 if (match) {
1256 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1257 sizeof(vs->vs_vhost_wwpn));
1258 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1259 vq = &vs->vqs[i].vq;
1260 mutex_lock(&vq->mutex);
1261 vq->private_data = vs_tpg;
1262 vhost_vq_init_access(vq);
1263 mutex_unlock(&vq->mutex);
1265 ret = 0;
1266 } else {
1267 ret = -EEXIST;
1271 * Act as synchronize_rcu to make sure access to
1272 * old vs->vs_tpg is finished.
1274 vhost_scsi_flush(vs);
1275 kfree(vs->vs_tpg);
1276 vs->vs_tpg = vs_tpg;
1278 out:
1279 mutex_unlock(&vs->dev.mutex);
1280 mutex_unlock(&vhost_scsi_mutex);
1281 return ret;
1284 static int
1285 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1286 struct vhost_scsi_target *t)
1288 struct se_portal_group *se_tpg;
1289 struct vhost_scsi_tport *tv_tport;
1290 struct vhost_scsi_tpg *tpg;
1291 struct vhost_virtqueue *vq;
1292 bool match = false;
1293 int index, ret, i;
1294 u8 target;
1296 mutex_lock(&vhost_scsi_mutex);
1297 mutex_lock(&vs->dev.mutex);
1298 /* Verify that the ring has been set up correctly. */
1299 for (index = 0; index < vs->dev.nvqs; ++index) {
1300 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1301 ret = -EFAULT;
1302 goto err_dev;
1306 if (!vs->vs_tpg) {
1307 ret = 0;
1308 goto err_dev;
1311 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1312 target = i;
1313 tpg = vs->vs_tpg[target];
1314 if (!tpg)
1315 continue;
1317 mutex_lock(&tpg->tv_tpg_mutex);
1318 tv_tport = tpg->tport;
1319 if (!tv_tport) {
1320 ret = -ENODEV;
1321 goto err_tpg;
1324 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1325 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1326 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1327 tv_tport->tport_name, tpg->tport_tpgt,
1328 t->vhost_wwpn, t->vhost_tpgt);
1329 ret = -EINVAL;
1330 goto err_tpg;
1332 tpg->tv_tpg_vhost_count--;
1333 tpg->vhost_scsi = NULL;
1334 vs->vs_tpg[target] = NULL;
1335 match = true;
1336 mutex_unlock(&tpg->tv_tpg_mutex);
1338 * Release se_tpg->tpg_group.cg_item configfs dependency now
1339 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1341 se_tpg = &tpg->se_tpg;
1342 target_undepend_item(&se_tpg->tpg_group.cg_item);
1344 if (match) {
1345 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1346 vq = &vs->vqs[i].vq;
1347 mutex_lock(&vq->mutex);
1348 vq->private_data = NULL;
1349 mutex_unlock(&vq->mutex);
1353 * Act as synchronize_rcu to make sure access to
1354 * old vs->vs_tpg is finished.
1356 vhost_scsi_flush(vs);
1357 kfree(vs->vs_tpg);
1358 vs->vs_tpg = NULL;
1359 WARN_ON(vs->vs_events_nr);
1360 mutex_unlock(&vs->dev.mutex);
1361 mutex_unlock(&vhost_scsi_mutex);
1362 return 0;
1364 err_tpg:
1365 mutex_unlock(&tpg->tv_tpg_mutex);
1366 err_dev:
1367 mutex_unlock(&vs->dev.mutex);
1368 mutex_unlock(&vhost_scsi_mutex);
1369 return ret;
1372 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1374 struct vhost_virtqueue *vq;
1375 int i;
1377 if (features & ~VHOST_SCSI_FEATURES)
1378 return -EOPNOTSUPP;
1380 mutex_lock(&vs->dev.mutex);
1381 if ((features & (1 << VHOST_F_LOG_ALL)) &&
1382 !vhost_log_access_ok(&vs->dev)) {
1383 mutex_unlock(&vs->dev.mutex);
1384 return -EFAULT;
1387 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1388 vq = &vs->vqs[i].vq;
1389 mutex_lock(&vq->mutex);
1390 vq->acked_features = features;
1391 mutex_unlock(&vq->mutex);
1393 mutex_unlock(&vs->dev.mutex);
1394 return 0;
1397 static int vhost_scsi_open(struct inode *inode, struct file *f)
1399 struct vhost_scsi *vs;
1400 struct vhost_virtqueue **vqs;
1401 int r = -ENOMEM, i;
1403 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1404 if (!vs) {
1405 vs = vzalloc(sizeof(*vs));
1406 if (!vs)
1407 goto err_vs;
1410 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1411 if (!vqs)
1412 goto err_vqs;
1414 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1415 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1417 vs->vs_events_nr = 0;
1418 vs->vs_events_missed = false;
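/* Virtqueue layout: 0 = control, 1 = event, 2..VHOST_SCSI_MAX_VQ-1 =
 * I/O request queues (see the VHOST_SCSI_VQ_* enum above). */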
1420 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1421 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1422 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1423 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1424 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1425 vqs[i] = &vs->vqs[i].vq;
1426 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1428 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1430 vhost_scsi_init_inflight(vs, NULL);
1432 f->private_data = vs;
1433 return 0;
1435 err_vqs:
1436 kvfree(vs);
1437 err_vs:
1438 return r;
1441 static int vhost_scsi_release(struct inode *inode, struct file *f)
1443 struct vhost_scsi *vs = f->private_data;
1444 struct vhost_scsi_target t;
1446 mutex_lock(&vs->dev.mutex);
1447 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1448 mutex_unlock(&vs->dev.mutex);
1449 vhost_scsi_clear_endpoint(vs, &t);
1450 vhost_dev_stop(&vs->dev);
1451 vhost_dev_cleanup(&vs->dev, false);
1452 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1453 vhost_scsi_flush(vs);
1454 kfree(vs->dev.vqs);
1455 kvfree(vs);
1456 return 0;
1459 static long
1460 vhost_scsi_ioctl(struct file *f,
1461 unsigned int ioctl,
1462 unsigned long arg)
1464 struct vhost_scsi *vs = f->private_data;
1465 struct vhost_scsi_target backend;
1466 void __user *argp = (void __user *)arg;
1467 u64 __user *featurep = argp;
1468 u32 __user *eventsp = argp;
1469 u32 events_missed;
1470 u64 features;
1471 int r, abi_version = VHOST_SCSI_ABI_VERSION;
1472 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1474 switch (ioctl) {
1475 case VHOST_SCSI_SET_ENDPOINT:
1476 if (copy_from_user(&backend, argp, sizeof backend))
1477 return -EFAULT;
1478 if (backend.reserved != 0)
1479 return -EOPNOTSUPP;
1481 return vhost_scsi_set_endpoint(vs, &backend);
1482 case VHOST_SCSI_CLEAR_ENDPOINT:
1483 if (copy_from_user(&backend, argp, sizeof backend))
1484 return -EFAULT;
1485 if (backend.reserved != 0)
1486 return -EOPNOTSUPP;
1488 return vhost_scsi_clear_endpoint(vs, &backend);
1489 case VHOST_SCSI_GET_ABI_VERSION:
1490 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1491 return -EFAULT;
1492 return 0;
1493 case VHOST_SCSI_SET_EVENTS_MISSED:
1494 if (get_user(events_missed, eventsp))
1495 return -EFAULT;
1496 mutex_lock(&vq->mutex);
1497 vs->vs_events_missed = events_missed;
1498 mutex_unlock(&vq->mutex);
1499 return 0;
1500 case VHOST_SCSI_GET_EVENTS_MISSED:
1501 mutex_lock(&vq->mutex);
1502 events_missed = vs->vs_events_missed;
1503 mutex_unlock(&vq->mutex);
1504 if (put_user(events_missed, eventsp))
1505 return -EFAULT;
1506 return 0;
1507 case VHOST_GET_FEATURES:
1508 features = VHOST_SCSI_FEATURES;
1509 if (copy_to_user(featurep, &features, sizeof features))
1510 return -EFAULT;
1511 return 0;
1512 case VHOST_SET_FEATURES:
1513 if (copy_from_user(&features, featurep, sizeof features))
1514 return -EFAULT;
1515 return vhost_scsi_set_features(vs, features);
1516 default:
1517 mutex_lock(&vs->dev.mutex);
1518 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1519 /* TODO: flush backend after dev ioctl. */
1520 if (r == -ENOIOCTLCMD)
1521 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1522 mutex_unlock(&vs->dev.mutex);
1523 return r;
1527 #ifdef CONFIG_COMPAT
1528 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1529 unsigned long arg)
1531 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1533 #endif
1535 static const struct file_operations vhost_scsi_fops = {
1536 .owner = THIS_MODULE,
1537 .release = vhost_scsi_release,
1538 .unlocked_ioctl = vhost_scsi_ioctl,
1539 #ifdef CONFIG_COMPAT
1540 .compat_ioctl = vhost_scsi_compat_ioctl,
1541 #endif
1542 .open = vhost_scsi_open,
1543 .llseek = noop_llseek,
1546 static struct miscdevice vhost_scsi_misc = {
1547 MISC_DYNAMIC_MINOR,
1548 "vhost-scsi",
1549 &vhost_scsi_fops,
1552 static int __init vhost_scsi_register(void)
1554 return misc_register(&vhost_scsi_misc);
1557 static void vhost_scsi_deregister(void)
1559 misc_deregister(&vhost_scsi_misc);
1562 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1564 switch (tport->tport_proto_id) {
1565 case SCSI_PROTOCOL_SAS:
1566 return "SAS";
1567 case SCSI_PROTOCOL_FCP:
1568 return "FCP";
1569 case SCSI_PROTOCOL_ISCSI:
1570 return "iSCSI";
1571 default:
1572 break;
1575 return "Unknown";
1578 static void
1579 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1580 struct se_lun *lun, bool plug)
1583 struct vhost_scsi *vs = tpg->vhost_scsi;
1584 struct vhost_virtqueue *vq;
1585 u32 reason;
1587 if (!vs)
1588 return;
1590 mutex_lock(&vs->dev.mutex);
1592 if (plug)
1593 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1594 else
1595 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1597 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1598 mutex_lock(&vq->mutex);
1599 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1600 vhost_scsi_send_evt(vs, tpg, lun,
1601 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1602 mutex_unlock(&vq->mutex);
1603 mutex_unlock(&vs->dev.mutex);
1606 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1608 vhost_scsi_do_plug(tpg, lun, true);
1611 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1613 vhost_scsi_do_plug(tpg, lun, false);
1616 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1617 struct se_lun *lun)
1619 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1620 struct vhost_scsi_tpg, se_tpg);
1622 mutex_lock(&vhost_scsi_mutex);
1624 mutex_lock(&tpg->tv_tpg_mutex);
1625 tpg->tv_tpg_port_count++;
1626 mutex_unlock(&tpg->tv_tpg_mutex);
1628 vhost_scsi_hotplug(tpg, lun);
1630 mutex_unlock(&vhost_scsi_mutex);
1632 return 0;
1635 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1636 struct se_lun *lun)
1638 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1639 struct vhost_scsi_tpg, se_tpg);
1641 mutex_lock(&vhost_scsi_mutex);
1643 mutex_lock(&tpg->tv_tpg_mutex);
1644 tpg->tv_tpg_port_count--;
1645 mutex_unlock(&tpg->tv_tpg_mutex);
1647 vhost_scsi_hotunplug(tpg, lun);
1649 mutex_unlock(&vhost_scsi_mutex);
1652 static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1654 struct vhost_scsi_cmd *tv_cmd;
1655 unsigned int i;
1657 if (!se_sess->sess_cmd_map)
1658 return;
1660 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1661 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1663 kfree(tv_cmd->tvc_sgl);
1664 kfree(tv_cmd->tvc_prot_sgl);
1665 kfree(tv_cmd->tvc_upages);
1669 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1670 struct config_item *item, const char *page, size_t count)
1672 struct se_portal_group *se_tpg = attrib_to_tpg(item);
1673 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1674 struct vhost_scsi_tpg, se_tpg);
1675 unsigned long val;
1676 int ret = kstrtoul(page, 0, &val);
1678 if (ret) {
1679 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1680 return ret;
1682 if (val != 0 && val != 1 && val != 3) {
1683 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1684 return -EINVAL;
1686 tpg->tv_fabric_prot_type = val;
1688 return count;
1691 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1692 struct config_item *item, char *page)
1694 struct se_portal_group *se_tpg = attrib_to_tpg(item);
1695 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1696 struct vhost_scsi_tpg, se_tpg);
1698 return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1701 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1703 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1704 &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1705 NULL,
1708 static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1709 struct se_session *se_sess, void *p)
1711 struct vhost_scsi_cmd *tv_cmd;
1712 unsigned int i;
1714 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1715 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1717 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1718 VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1719 if (!tv_cmd->tvc_sgl) {
1720 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1721 goto out;
1724 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1725 VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1726 if (!tv_cmd->tvc_upages) {
1727 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1728 goto out;
1731 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1732 VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1733 if (!tv_cmd->tvc_prot_sgl) {
1734 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1735 goto out;
1738 return 0;
1739 out:
1740 vhost_scsi_free_cmd_map_res(se_sess);
1741 return -ENOMEM;
1744 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1745 const char *name)
1747 struct vhost_scsi_nexus *tv_nexus;
1749 mutex_lock(&tpg->tv_tpg_mutex);
1750 if (tpg->tpg_nexus) {
1751 mutex_unlock(&tpg->tv_tpg_mutex);
1752 pr_debug("tpg->tpg_nexus already exists\n");
1753 return -EEXIST;
1756 tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1757 if (!tv_nexus) {
1758 mutex_unlock(&tpg->tv_tpg_mutex);
1759 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1760 return -ENOMEM;
1763 * Since we are running in 'demo mode' this call will generate a
1764 * struct se_node_acl for the vhost_scsi struct se_portal_group with
1765 * the SCSI Initiator port name of the passed configfs group 'name'.
1767 tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1768 VHOST_SCSI_DEFAULT_TAGS,
1769 sizeof(struct vhost_scsi_cmd),
1770 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1771 (unsigned char *)name, tv_nexus,
1772 vhost_scsi_nexus_cb);
1773 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1774 mutex_unlock(&tpg->tv_tpg_mutex);
1775 kfree(tv_nexus);
1776 return -ENOMEM;
1778 tpg->tpg_nexus = tv_nexus;
1780 mutex_unlock(&tpg->tv_tpg_mutex);
1781 return 0;
1784 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1786 struct se_session *se_sess;
1787 struct vhost_scsi_nexus *tv_nexus;
1789 mutex_lock(&tpg->tv_tpg_mutex);
1790 tv_nexus = tpg->tpg_nexus;
1791 if (!tv_nexus) {
1792 mutex_unlock(&tpg->tv_tpg_mutex);
1793 return -ENODEV;
1796 se_sess = tv_nexus->tvn_se_sess;
1797 if (!se_sess) {
1798 mutex_unlock(&tpg->tv_tpg_mutex);
1799 return -ENODEV;
1802 if (tpg->tv_tpg_port_count != 0) {
1803 mutex_unlock(&tpg->tv_tpg_mutex);
1804 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1805 " active TPG port count: %d\n",
1806 tpg->tv_tpg_port_count);
1807 return -EBUSY;
1810 if (tpg->tv_tpg_vhost_count != 0) {
1811 mutex_unlock(&tpg->tv_tpg_mutex);
1812 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1813 " active TPG vhost count: %d\n",
1814 tpg->tv_tpg_vhost_count);
1815 return -EBUSY;
1818 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1819 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1820 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1822 vhost_scsi_free_cmd_map_res(se_sess);
1824 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1826 transport_deregister_session(tv_nexus->tvn_se_sess);
1827 tpg->tpg_nexus = NULL;
1828 mutex_unlock(&tpg->tv_tpg_mutex);
1830 kfree(tv_nexus);
1831 return 0;
1834 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1836 struct se_portal_group *se_tpg = to_tpg(item);
1837 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1838 struct vhost_scsi_tpg, se_tpg);
1839 struct vhost_scsi_nexus *tv_nexus;
1840 ssize_t ret;
1842 mutex_lock(&tpg->tv_tpg_mutex);
1843 tv_nexus = tpg->tpg_nexus;
1844 if (!tv_nexus) {
1845 mutex_unlock(&tpg->tv_tpg_mutex);
1846 return -ENODEV;
1848 ret = snprintf(page, PAGE_SIZE, "%s\n",
1849 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1850 mutex_unlock(&tpg->tv_tpg_mutex);
1852 return ret;
1855 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1856 const char *page, size_t count)
1858 struct se_portal_group *se_tpg = to_tpg(item);
1859 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1860 struct vhost_scsi_tpg, se_tpg);
1861 struct vhost_scsi_tport *tport_wwn = tpg->tport;
1862 unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1863 int ret;
1865 * Shut down the active I_T nexus if 'NULL' is passed.
1867 if (!strncmp(page, "NULL", 4)) {
1868 ret = vhost_scsi_drop_nexus(tpg);
1869 return (!ret) ? count : ret;
1872 * Otherwise make sure the passed virtual Initiator port WWN matches
1873 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1874 * vhost_scsi_make_nexus().
1876 if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1877 pr_err("Emulated NAA Sas Address: %s, exceeds"
1878 " max: %d\n", page, VHOST_SCSI_NAMELEN);
1879 return -EINVAL;
1881 snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1883 ptr = strstr(i_port, "naa.");
1884 if (ptr) {
1885 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1886 pr_err("Passed SAS Initiator Port %s does not"
1887 " match target port protoid: %s\n", i_port,
1888 vhost_scsi_dump_proto_id(tport_wwn));
1889 return -EINVAL;
1891 port_ptr = &i_port[0];
1892 goto check_newline;
1894 ptr = strstr(i_port, "fc.");
1895 if (ptr) {
1896 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1897 pr_err("Passed FCP Initiator Port %s does not"
1898 " match target port protoid: %s\n", i_port,
1899 vhost_scsi_dump_proto_id(tport_wwn));
1900 return -EINVAL;
1902 port_ptr = &i_port[3]; /* Skip over "fc." */
1903 goto check_newline;
1905 ptr = strstr(i_port, "iqn.");
1906 if (ptr) {
1907 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1908 pr_err("Passed iSCSI Initiator Port %s does not"
1909 " match target port protoid: %s\n", i_port,
1910 vhost_scsi_dump_proto_id(tport_wwn));
1911 return -EINVAL;
1913 port_ptr = &i_port[0];
1914 goto check_newline;
1916 pr_err("Unable to locate prefix for emulated Initiator Port:"
1917 " %s\n", i_port);
1918 return -EINVAL;
1920 * Clear any trailing newline for the NAA WWN
1922 check_newline:
1923 if (i_port[strlen(i_port)-1] == '\n')
1924 i_port[strlen(i_port)-1] = '\0';
1926 ret = vhost_scsi_make_nexus(tpg, port_ptr);
1927 if (ret < 0)
1928 return ret;
1930 return count;
1933 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1935 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1936 &vhost_scsi_tpg_attr_nexus,
1937 NULL,
1940 static struct se_portal_group *
1941 vhost_scsi_make_tpg(struct se_wwn *wwn,
1942 struct config_group *group,
1943 const char *name)
1945 struct vhost_scsi_tport *tport = container_of(wwn,
1946 struct vhost_scsi_tport, tport_wwn);
1948 struct vhost_scsi_tpg *tpg;
1949 u16 tpgt;
1950 int ret;
1952 if (strstr(name, "tpgt_") != name)
1953 return ERR_PTR(-EINVAL);
1954 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1955 return ERR_PTR(-EINVAL);
1957 tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
1958 if (!tpg) {
1959 pr_err("Unable to allocate struct vhost_scsi_tpg");
1960 return ERR_PTR(-ENOMEM);
1962 mutex_init(&tpg->tv_tpg_mutex);
1963 INIT_LIST_HEAD(&tpg->tv_tpg_list);
1964 tpg->tport = tport;
1965 tpg->tport_tpgt = tpgt;
1967 ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
1968 if (ret < 0) {
1969 kfree(tpg);
1970 return NULL;
1972 mutex_lock(&vhost_scsi_mutex);
1973 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
1974 mutex_unlock(&vhost_scsi_mutex);
1976 return &tpg->se_tpg;
1979 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
1981 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1982 struct vhost_scsi_tpg, se_tpg);
1984 mutex_lock(&vhost_scsi_mutex);
1985 list_del(&tpg->tv_tpg_list);
1986 mutex_unlock(&vhost_scsi_mutex);
1988 * Release the virtual I_T Nexus for this vhost TPG
1990 vhost_scsi_drop_nexus(tpg);
1992 * Deregister the se_tpg from TCM.
1994 core_tpg_deregister(se_tpg);
1995 kfree(tpg);
1998 static struct se_wwn *
1999 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2000 struct config_group *group,
2001 const char *name)
2003 struct vhost_scsi_tport *tport;
2004 char *ptr;
2005 u64 wwpn = 0;
2006 int off = 0;
2008 /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2009 return ERR_PTR(-EINVAL); */
2011 tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2012 if (!tport) {
2013 pr_err("Unable to allocate struct vhost_scsi_tport");
2014 return ERR_PTR(-ENOMEM);
2016 tport->tport_wwpn = wwpn;
2018 * Determine the emulated Protocol Identifier and Target Port Name
2019 * based on the incoming configfs directory name.
2021 ptr = strstr(name, "naa.");
2022 if (ptr) {
2023 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2024 goto check_len;
2026 ptr = strstr(name, "fc.");
2027 if (ptr) {
2028 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2029 off = 3; /* Skip over "fc." */
2030 goto check_len;
2032 ptr = strstr(name, "iqn.");
2033 if (ptr) {
2034 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2035 goto check_len;
2038 pr_err("Unable to locate prefix for emulated Target Port:"
2039 " %s\n", name);
2040 kfree(tport);
2041 return ERR_PTR(-EINVAL);
2043 check_len:
2044 if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2045 pr_err("Emulated %s Address: %s, exceeds"
2046 " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2047 VHOST_SCSI_NAMELEN);
2048 kfree(tport);
2049 return ERR_PTR(-EINVAL);
2051 snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2053 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2054 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2056 return &tport->tport_wwn;
2059 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2061 struct vhost_scsi_tport *tport = container_of(wwn,
2062 struct vhost_scsi_tport, tport_wwn);
2064 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2065 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2066 tport->tport_name);
2068 kfree(tport);
2071 static ssize_t
2072 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2074 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2075 "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2076 utsname()->machine);
2079 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2081 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2082 &vhost_scsi_wwn_attr_version,
2083 NULL,
2086 static const struct target_core_fabric_ops vhost_scsi_ops = {
2087 .module = THIS_MODULE,
2088 .name = "vhost",
2089 .get_fabric_name = vhost_scsi_get_fabric_name,
2090 .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
2091 .tpg_get_tag = vhost_scsi_get_tpgt,
2092 .tpg_check_demo_mode = vhost_scsi_check_true,
2093 .tpg_check_demo_mode_cache = vhost_scsi_check_true,
2094 .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2095 .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2096 .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
2097 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
2098 .release_cmd = vhost_scsi_release_cmd,
2099 .check_stop_free = vhost_scsi_check_stop_free,
2100 .sess_get_index = vhost_scsi_sess_get_index,
2101 .sess_get_initiator_sid = NULL,
2102 .write_pending = vhost_scsi_write_pending,
2103 .write_pending_status = vhost_scsi_write_pending_status,
2104 .set_default_node_attributes = vhost_scsi_set_default_node_attrs,
2105 .get_cmd_state = vhost_scsi_get_cmd_state,
2106 .queue_data_in = vhost_scsi_queue_data_in,
2107 .queue_status = vhost_scsi_queue_status,
2108 .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
2109 .aborted_task = vhost_scsi_aborted_task,
2111 * Setup callers for generic logic in target_core_fabric_configfs.c
2113 .fabric_make_wwn = vhost_scsi_make_tport,
2114 .fabric_drop_wwn = vhost_scsi_drop_tport,
2115 .fabric_make_tpg = vhost_scsi_make_tpg,
2116 .fabric_drop_tpg = vhost_scsi_drop_tpg,
2117 .fabric_post_link = vhost_scsi_port_link,
2118 .fabric_pre_unlink = vhost_scsi_port_unlink,
2120 .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
2121 .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
2122 .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
2125 static int __init vhost_scsi_init(void)
2127 int ret = -ENOMEM;
2129 pr_debug("TCM_VHOST fabric module %s on %s/%s"
2130 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2131 utsname()->machine);
2134 * Use our own dedicated workqueue for submitting I/O into
2135 * target core to avoid contention within system_wq.
2137 vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2138 if (!vhost_scsi_workqueue)
2139 goto out;
2141 ret = vhost_scsi_register();
2142 if (ret < 0)
2143 goto out_destroy_workqueue;
2145 ret = target_register_template(&vhost_scsi_ops);
2146 if (ret < 0)
2147 goto out_vhost_scsi_deregister;
2149 return 0;
2151 out_vhost_scsi_deregister:
2152 vhost_scsi_deregister();
2153 out_destroy_workqueue:
2154 destroy_workqueue(vhost_scsi_workqueue);
2155 out:
2156 return ret;
2159 static void vhost_scsi_exit(void)
2161 target_unregister_template(&vhost_scsi_ops);
2162 vhost_scsi_deregister();
2163 destroy_workqueue(vhost_scsi_workqueue);
2166 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2167 MODULE_ALIAS("tcm_vhost");
2168 MODULE_LICENSE("GPL");
2169 module_init(vhost_scsi_init);
2170 module_exit(vhost_scsi_exit);