/*
 * SCSI target kernel/user interface functions
 *
 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/miscdevice.h>
#include <linux/file.h>
#include <linux/smp_lock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/scsi_tgt_if.h>

#include <asm/cacheflush.h>

#include "scsi_tgt_priv.h"

#if TGT_RING_SIZE < PAGE_SIZE
# define TGT_RING_SIZE PAGE_SIZE
#endif

#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
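
/*
 * A ring is a fixed set of pages carved into struct tgt_event slots.
 * ev->hdr.status is the ownership flag for a slot: non-zero means the
 * slot holds an event the consumer has not picked up yet, zero means
 * the slot is free for the producer to reuse.
 */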
struct tgt_ring {
        u32 tr_idx;
        unsigned long tr_pages[TGT_RING_PAGES];
        spinlock_t tr_lock;
};

/* tx_ring : kernel->user, rx_ring : user->kernel */
static struct tgt_ring tx_ring, rx_ring;
static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
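
/* Advance a ring index by one slot, wrapping around at the last event. */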
static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
{
        if (ring->tr_idx == TGT_MAX_EVENTS - 1)
                ring->tr_idx = 0;
        else
                ring->tr_idx++;
}
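
/* Translate a ring index into the address of its event slot. */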
static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
{
        u32 pidx, off;

        pidx = idx / TGT_EVENT_PER_PAGE;
        off = idx % TGT_EVENT_PER_PAGE;

        return (struct tgt_event *)
                (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
}
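
/*
 * Producer side of the tx ring: claim the head slot under the ring lock,
 * copy the event in, publish it by setting hdr.status after a memory
 * barrier, then wake any poll()ers.  Returns -EBUSY if the head slot is
 * still owned by userspace, i.e. the ring is full.
 */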
static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
{
        struct tgt_event *ev;
        struct tgt_ring *ring = &tx_ring;
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&ring->tr_lock, flags);

        ev = tgt_head_event(ring, ring->tr_idx);
        if (!ev->hdr.status)
                tgt_ring_idx_inc(ring);
        else
                err = -EBUSY;

        spin_unlock_irqrestore(&ring->tr_lock, flags);

        if (err)
                return err;

        memcpy(ev, p, sizeof(*ev));
        ev->hdr.type = type;
        mb();
        ev->hdr.status = 1;

        flush_dcache_page(virt_to_page(ev));

        wake_up_interruptible(&tgt_poll_wait);

        return 0;
}
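
/* Hand a freshly arrived SCSI command off to userspace via the tx ring. */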
int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id,
                             struct scsi_lun *lun, u64 tag)
{
        struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.cmd_req.host_no = shost->host_no;
        ev.p.cmd_req.itn_id = itn_id;
        ev.p.cmd_req.data_len = scsi_bufflen(cmd);
        memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
        memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
        ev.p.cmd_req.attribute = cmd->tag;
        ev.p.cmd_req.tag = tag;

        dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
                ev.p.cmd_req.data_len, cmd->tag,
                (unsigned long long) ev.p.cmd_req.tag);

        err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}
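
/* Tell userspace that a command it handed back has completed. */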
int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 itn_id, u64 tag)
{
        struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.cmd_done.host_no = shost->host_no;
        ev.p.cmd_done.itn_id = itn_id;
        ev.p.cmd_done.tag = tag;
        ev.p.cmd_done.result = cmd->result;

        dprintk("%p %d %llu %x %x\n", cmd, shost->host_no,
                (unsigned long long) ev.p.cmd_done.tag,
                ev.p.cmd_done.result, cmd->tag);

        err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}
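
/* Forward a task management request (abort, reset, ...) to userspace. */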
int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 itn_id, int function,
                                  u64 tag, struct scsi_lun *scsilun, void *data)
{
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.tsk_mgmt_req.host_no = host_no;
        ev.p.tsk_mgmt_req.itn_id = itn_id;
        ev.p.tsk_mgmt_req.function = function;
        ev.p.tsk_mgmt_req.tag = tag;
        memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
        ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;

        dprintk("%d %x %llx %llx\n", host_no, function,
                (unsigned long long) tag,
                (unsigned long long) ev.p.tsk_mgmt_req.mid);

        err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}
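
/* Ask userspace to set up or tear down an I_T nexus. */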
int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 itn_id,
                                          int function, char *initiator_id)
{
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.it_nexus_req.host_no = host_no;
        ev.p.it_nexus_req.function = function;
        ev.p.it_nexus_req.itn_id = itn_id;
        if (initiator_id)
                strncpy(ev.p.it_nexus_req.initiator_id, initiator_id,
                        sizeof(ev.p.it_nexus_req.initiator_id));

        dprintk("%d %x %llx\n", host_no, function, (unsigned long long) itn_id);

        err = tgt_uspace_send_event(TGT_KEVENT_IT_NEXUS_REQ, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}
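
/* Dispatch one userspace response event to the matching kernel handler. */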
static int event_recv_msg(struct tgt_event *ev)
{
        int err = 0;

        switch (ev->hdr.type) {
        case TGT_UEVENT_CMD_RSP:
                err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
                                           ev->p.cmd_rsp.itn_id,
                                           ev->p.cmd_rsp.result,
                                           ev->p.cmd_rsp.tag,
                                           ev->p.cmd_rsp.uaddr,
                                           ev->p.cmd_rsp.len,
                                           ev->p.cmd_rsp.sense_uaddr,
                                           ev->p.cmd_rsp.sense_len,
                                           ev->p.cmd_rsp.rw);
                break;
        case TGT_UEVENT_TSK_MGMT_RSP:
                err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
                                               ev->p.tsk_mgmt_rsp.itn_id,
                                               ev->p.tsk_mgmt_rsp.mid,
                                               ev->p.tsk_mgmt_rsp.result);
                break;
        case TGT_UEVENT_IT_NEXUS_RSP:
                err = scsi_tgt_kspace_it_nexus_rsp(ev->p.it_nexus_rsp.host_no,
                                                   ev->p.it_nexus_rsp.itn_id,
                                                   ev->p.it_nexus_rsp.result);
                break;
        default:
                eprintk("unknown type %d\n", ev->hdr.type);
                err = -EINVAL;
        }

        return err;
}
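
/*
 * write() is only a doorbell: the event payloads live in the mmap()ed rx
 * ring, so drain every filled slot, handle it, and give the slot back to
 * userspace by clearing hdr.status.
 */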
static ssize_t tgt_write(struct file *file, const char __user *buffer,
                         size_t count, loff_t *ppos)
{
        struct tgt_event *ev;
        struct tgt_ring *ring = &rx_ring;

        while (1) {
                ev = tgt_head_event(ring, ring->tr_idx);
                /* do we need this? */
                flush_dcache_page(virt_to_page(ev));

                if (!ev->hdr.status)
                        break;

                tgt_ring_idx_inc(ring);
                event_recv_msg(ev);
                ev->hdr.status = 0;
        }

        return count;
}
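
/*
 * Report POLLIN while the most recently produced tx slot is still marked
 * filled, i.e. userspace has events left to consume.
 */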
static unsigned int tgt_poll(struct file *file, struct poll_table_struct *wait)
{
        struct tgt_event *ev;
        struct tgt_ring *ring = &tx_ring;
        unsigned long flags;
        unsigned int mask = 0;
        u32 idx;

        poll_wait(file, &tgt_poll_wait, wait);

        spin_lock_irqsave(&ring->tr_lock, flags);

        idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
        ev = tgt_head_event(ring, idx);
        if (ev->hdr.status)
                mask |= POLLIN | POLLRDNORM;

        spin_unlock_irqrestore(&ring->tr_lock, flags);

        return mask;
}
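
/* Insert each ring page into the vma at consecutive user addresses. */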
static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
                           struct tgt_ring *ring)
{
        int i, err;

        for (i = 0; i < TGT_RING_PAGES; i++) {
                struct page *page = virt_to_page(ring->tr_pages[i]);
                err = vm_insert_page(vma, addr, page);
                if (err)
                        return err;
                addr += PAGE_SIZE;
        }

        return 0;
}
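
/*
 * Map both rings into one contiguous user mapping: tx ring in the first
 * half, rx ring in the second.  The requested size must be exactly two
 * rings, with no page offset.
 */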
static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long addr;
        int err;

        if (vma->vm_pgoff)
                return -EINVAL;

        if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
                eprintk("mmap size must be %lu, not %lu\n",
                        TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
                return -EINVAL;
        }

        addr = vma->vm_start;
        err = uspace_ring_map(vma, addr, &tx_ring);
        if (err)
                return err;
        err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);

        return err;
}
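
/*
 * A fresh open restarts both rings from slot zero; the interface assumes
 * a single userspace consumer (the tgt daemon).
 */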
static int tgt_open(struct inode *inode, struct file *file)
{
        tx_ring.tr_idx = rx_ring.tr_idx = 0;

        cycle_kernel_lock();
        return 0;
}

static const struct file_operations tgt_fops = {
        .owner = THIS_MODULE,
        .open = tgt_open,
        .poll = tgt_poll,
        .write = tgt_write,
        .mmap = tgt_mmap,
};

static struct miscdevice tgt_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "tgt",
        .fops = &tgt_fops,
};
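
/* Free the pages backing a ring. */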
static void tgt_ring_exit(struct tgt_ring *ring)
{
        int i;

        for (i = 0; i < TGT_RING_PAGES; i++)
                free_page(ring->tr_pages[i]);
}

static int tgt_ring_init(struct tgt_ring *ring)
{
        int i;

        spin_lock_init(&ring->tr_lock);

        for (i = 0; i < TGT_RING_PAGES; i++) {
                ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
                if (!ring->tr_pages[i]) {
                        eprintk("out of memory\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

void scsi_tgt_if_exit(void)
{
        tgt_ring_exit(&tx_ring);
        tgt_ring_exit(&rx_ring);
        misc_deregister(&tgt_miscdev);
}
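
/*
 * Allocate both rings and register the misc device; on failure, unwind
 * whatever has been set up so far.
 */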
int scsi_tgt_if_init(void)
{
        int err;

        err = tgt_ring_init(&tx_ring);
        if (err)
                return err;

        err = tgt_ring_init(&rx_ring);
        if (err)
                goto free_tx_ring;

        err = misc_register(&tgt_miscdev);
        if (err)
                goto free_rx_ring;

        return 0;
free_rx_ring:
        tgt_ring_exit(&rx_ring);
free_tx_ring:
        tgt_ring_exit(&tx_ring);

        return err;
}