/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2014 Douglas Gilbert
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 */
static int sg_version_num = 30536;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"
/*
 * D. P. Gilbert (dgilbert@interlog.com), notes:
 *  - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *    the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *    (otherwise the macros compile to empty statements).
 */
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include <linux/cred.h> /* for sg_check_file_access() */

#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>

#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
 * than 16 bytes are "variable length" whose length is a multiple of 4
 */
#define SG_MAX_CDB_SIZE 252
/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
 * Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
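/*
 * Worked example (a sketch, not from the original source): with HZ=1000 and
 * USER_HZ=100, converting x = 30,000,000 USER_HZ ticks the naive way gives
 * x * 1000 = 30,000,000,000, which overflows a 32-bit int. MULDIV instead
 * computes ((30000000 % 100) * 1000) / 100 + (30000000 / 100) * 1000
 * = 0 + 300000 * 1000 = 300,000,000, which still fits in 32 bits.
 */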
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
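/*
 * Illustrative user-space sketch (an assumption-laden example, not part of
 * the original source): instead of the global /proc tunable, a process can
 * size the reserve buffer of one file descriptor with SG_SET_RESERVED_SIZE
 * (handled in sg_ioctl() below) and read back what was actually granted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int fd = open("/dev/sg0", O_RDWR);	// hypothetical device node
 *	int sz = 128 * 1024;			// ask for a 128 KiB reserve
 *	if (fd >= 0 && ioctl(fd, SG_SET_RESERVED_SIZE, &sz) == 0)
 *		ioctl(fd, SG_GET_RESERVED_SIZE, &sz);	// granted size
 */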
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512

static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
				file descriptor list for device */

static struct class_interface sg_interface = {
    .add_dev        = sg_add_device,
    .remove_dev     = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
    unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
    unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
    unsigned bufflen;	/* Size of (aggregate) data buffer */
    struct page **pages;
    int page_order;
    char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
    unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;
typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
    struct list_head entry;	/* list entry */
    struct sg_fd *parentfp;	/* NULL -> not in use */
    Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
    sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
    unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
    char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
    char orphan;		/* 1 -> drop on sight, 0 -> normal */
    char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
    /* done protected by rq_list_lock */
    char done;		/* 0->before bh, 1->before read, 2->read */
    struct request *rq;
    struct bio *bio;
    struct execute_work ew;
} Sg_request;
typedef struct sg_fd {		/* holds the state of a file descriptor */
    struct list_head sfd_siblings;  /* protected by device's sfd_lock */
    struct sg_device *parentdp;	/* owning device */
    wait_queue_head_t read_wait;	/* queue read until command done */
    rwlock_t rq_list_lock;	/* protect access to list in req_arr */
    struct mutex f_mutex;	/* protect against changes in this fd */
    int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
    int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
    Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
    struct list_head rq_list; /* head of request list */
    struct fasync_struct *async_qp;	/* used by asynchronous notification */
    Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
    char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
    char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
    unsigned char next_cmd_len;	/* 0: automatic, >0: use on next write() */
    char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
    char mmap_called;	/* 0 -> mmap() never called on this fd */
    char res_in_use;	/* 1 -> 'reserve' array in use */
    struct kref f_ref;
    struct execute_work ew;
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
    struct scsi_device *device;
    wait_queue_head_t open_wait;	/* queue open() when O_EXCL present */
    struct mutex open_rel_lock;     /* held when in open() or release() */
    int sg_tablesize;	/* adapter's max scatter-gather table size */
    u32 index;		/* device index number */
    struct list_head sfds;
    rwlock_t sfd_lock;      /* protect access to sfd list */
    atomic_t detaching;     /* 0->device usable, 1->device detaching */
    bool exclude;		/* 1->open(O_EXCL) succeeded and is active */
    int open_cnt;		/* count of opens (perhaps < num(sfds) ) */
    char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
    struct gendisk *disk;
    struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
    struct kref d_ref;
} Sg_device;
/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, int uptodate);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
               Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
            const char __user *buf, size_t count, int blocking,
            int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
               unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

#define sg_printk(prefix, sdp, fmt, a...) \
    sdev_prefix_printk(prefix, (sdp)->device,		\
               (sdp)->disk->disk_name, fmt, ##a)
/*
 * The SCSI interfaces that use read() and write() as an asynchronous variant of
 * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
 * to trigger read() and write() calls from various contexts with elevated
 * privileges. This can lead to kernel memory corruption (e.g. if these
 * interfaces are called through splice()) and privilege escalation inside
 * userspace (e.g. if a process with access to such a device passes a file
 * descriptor to a SUID binary as stdin/stdout/stderr).
 *
 * This function provides protection for the legacy API by restricting the
 * calling context.
 */
static int sg_check_file_access(struct file *filp, const char *caller)
{
    if (filp->f_cred != current_real_cred()) {
        pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
            caller, task_tgid_vnr(current), current->comm);
        return -EPERM;
    }
    if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
        pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
            caller, task_tgid_vnr(current), current->comm);
        return -EACCES;
    }
    return 0;
}
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
    struct sg_fd *sfp = filp->private_data;

    if (sfp->parentdp->device->type == TYPE_SCANNER)
        return 0;

    return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
open_wait(Sg_device *sdp, int flags)
{
    int retval = 0;

    if (flags & O_EXCL) {
        while (sdp->open_cnt > 0) {
            mutex_unlock(&sdp->open_rel_lock);
            retval = wait_event_interruptible(sdp->open_wait,
                    (atomic_read(&sdp->detaching) ||
                     !sdp->open_cnt));
            mutex_lock(&sdp->open_rel_lock);

            if (retval) /* -ERESTARTSYS */
                return retval;
            if (atomic_read(&sdp->detaching))
                return -ENODEV;
        }
    } else {
        while (sdp->exclude) {
            mutex_unlock(&sdp->open_rel_lock);
            retval = wait_event_interruptible(sdp->open_wait,
                    (atomic_read(&sdp->detaching) ||
                     !sdp->exclude));
            mutex_lock(&sdp->open_rel_lock);

            if (retval) /* -ERESTARTSYS */
                return retval;
            if (atomic_read(&sdp->detaching))
                return -ENODEV;
        }
    }

    return retval;
}
/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
    int dev = iminor(inode);
    int flags = filp->f_flags;
    struct request_queue *q;
    Sg_device *sdp;
    Sg_fd *sfp;
    int retval;

    nonseekable_open(inode, filp);
    if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
        return -EPERM; /* Can't lock it with read only access */
    sdp = sg_get_dev(dev);
    if (IS_ERR(sdp))
        return PTR_ERR(sdp);

    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_open: flags=0x%x\n", flags));

    /* This driver's module count bumped by fops_get in <linux/fs.h> */
    /* Prevent the device driver from vanishing while we sleep */
    retval = scsi_device_get(sdp->device);
    if (retval)
        goto sg_put;

    retval = scsi_autopm_get_device(sdp->device);
    if (retval)
        goto sdp_put;

    /* scsi_block_when_processing_errors() may block so bypass
     * check if O_NONBLOCK. Permits SCSI commands to be issued
     * during error recovery. Tread carefully. */
    if (!((flags & O_NONBLOCK) ||
          scsi_block_when_processing_errors(sdp->device))) {
        retval = -ENXIO;
        /* we are in error recovery for this device */
        goto error_out;
    }

    mutex_lock(&sdp->open_rel_lock);
    if (flags & O_NONBLOCK) {
        if (flags & O_EXCL) {
            if (sdp->open_cnt > 0) {
                retval = -EBUSY;
                goto error_mutex_locked;
            }
        } else {
            if (sdp->exclude) {
                retval = -EBUSY;
                goto error_mutex_locked;
            }
        }
    } else {
        retval = open_wait(sdp, flags);
        if (retval) /* -ERESTARTSYS or -ENODEV */
            goto error_mutex_locked;
    }

    /* N.B. at this point we are holding the open_rel_lock */
    if (flags & O_EXCL)
        sdp->exclude = true;

    if (sdp->open_cnt < 1) {	/* no existing opens */
        sdp->sgdebug = 0;
        q = sdp->device->request_queue;
        sdp->sg_tablesize = queue_max_segments(q);
    }
    sfp = sg_add_sfp(sdp);
    if (IS_ERR(sfp)) {
        retval = PTR_ERR(sfp);
        goto out_undo;
    }

    filp->private_data = sfp;
    sdp->open_cnt++;
    mutex_unlock(&sdp->open_rel_lock);

    retval = 0;
sg_put:
    kref_put(&sdp->d_ref, sg_device_destroy);
    return retval;

out_undo:
    if (flags & O_EXCL) {
        sdp->exclude = false; /* undo if error */
        wake_up_interruptible(&sdp->open_wait);
    }
error_mutex_locked:
    mutex_unlock(&sdp->open_rel_lock);
error_out:
    scsi_autopm_put_device(sdp->device);
sdp_put:
    scsi_device_put(sdp->device);
    goto sg_put;
}
/* Release resources associated with a successful sg_open()
 * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
    Sg_device *sdp;
    Sg_fd *sfp;

    if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));

    mutex_lock(&sdp->open_rel_lock);
    scsi_autopm_put_device(sdp->device);
    kref_put(&sfp->f_ref, sg_remove_sfp);
    sdp->open_cnt--;

    /* possibly many open()s waiting on exclude clearing, start many;
     * only open(O_EXCL)s wait on 0==open_cnt so only start one */
    if (sdp->exclude) {
        sdp->exclude = false;
        wake_up_interruptible_all(&sdp->open_wait);
    } else if (0 == sdp->open_cnt) {
        wake_up_interruptible(&sdp->open_wait);
    }
    mutex_unlock(&sdp->open_rel_lock);
    return 0;
}
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
    Sg_device *sdp;
    Sg_fd *sfp;
    Sg_request *srp;
    int req_pack_id = -1;
    sg_io_hdr_t *hp;
    struct sg_header *old_hdr = NULL;
    int retval = 0;

    /*
     * This could cause a response to be stranded. Close the associated
     * file descriptor to free up any resources being held.
     */
    retval = sg_check_file_access(filp, __func__);
    if (retval)
        return retval;

    if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_read: count=%d\n", (int) count));

    if (!access_ok(VERIFY_WRITE, buf, count))
        return -EFAULT;
    if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
        old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
        if (!old_hdr)
            return -ENOMEM;
        if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
            retval = -EFAULT;
            goto free_old_hdr;
        }
        if (old_hdr->reply_len < 0) {
            if (count >= SZ_SG_IO_HDR) {
                sg_io_hdr_t *new_hdr;
                new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
                if (!new_hdr) {
                    retval = -ENOMEM;
                    goto free_old_hdr;
                }
                retval = __copy_from_user
                    (new_hdr, buf, SZ_SG_IO_HDR);
                req_pack_id = new_hdr->pack_id;
                kfree(new_hdr);
                if (retval) {
                    retval = -EFAULT;
                    goto free_old_hdr;
                }
            }
        } else
            req_pack_id = old_hdr->pack_id;
    }
    srp = sg_get_rq_mark(sfp, req_pack_id);
    if (!srp) {		/* now wait on packet to arrive */
        if (atomic_read(&sdp->detaching)) {
            retval = -ENODEV;
            goto free_old_hdr;
        }
        if (filp->f_flags & O_NONBLOCK) {
            retval = -EAGAIN;
            goto free_old_hdr;
        }
        retval = wait_event_interruptible(sfp->read_wait,
            (atomic_read(&sdp->detaching) ||
            (srp = sg_get_rq_mark(sfp, req_pack_id))));
        if (atomic_read(&sdp->detaching)) {
            retval = -ENODEV;
            goto free_old_hdr;
        }
        if (retval) {
            /* -ERESTARTSYS as signal hit process */
            goto free_old_hdr;
        }
    }
    if (srp->header.interface_id != '\0') {
        retval = sg_new_read(sfp, buf, count, srp);
        goto free_old_hdr;
    }

    hp = &srp->header;
    if (old_hdr == NULL) {
        old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
        if (!old_hdr) {
            retval = -ENOMEM;
            goto free_old_hdr;
        }
    }
    memset(old_hdr, 0, SZ_SG_HEADER);
    old_hdr->reply_len = (int) hp->timeout;
    old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
    old_hdr->pack_id = hp->pack_id;
    old_hdr->twelve_byte =
        ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
    old_hdr->target_status = hp->masked_status;
    old_hdr->host_status = hp->host_status;
    old_hdr->driver_status = hp->driver_status;
    if ((CHECK_CONDITION & hp->masked_status) ||
        (DRIVER_SENSE & hp->driver_status))
        memcpy(old_hdr->sense_buffer, srp->sense_b,
               sizeof (old_hdr->sense_buffer));
    switch (hp->host_status) {
    /* This setup of 'result' is for backward compatibility and is best
       ignored by the user who should use target, host + driver status */
    case DID_OK:
    case DID_PASSTHROUGH:
    case DID_SOFT_ERROR:	/* Retry anyway, but if it fails, report bad */
        old_hdr->result = 0;
        break;
    case DID_NO_CONNECT:
    case DID_BUS_BUSY:
    case DID_TIME_OUT:
        old_hdr->result = EBUSY;
        break;
    case DID_BAD_TARGET:
    case DID_ABORT:
    case DID_PARITY:
    case DID_RESET:
    case DID_BAD_INTR:
        old_hdr->result = EIO;
        break;
    case DID_ERROR:
        old_hdr->result = (srp->sense_b[0] == 0 &&
                   hp->masked_status == GOOD) ? 0 : EIO;
        break;
    default:
        old_hdr->result = EIO;
        break;
    }

    /* Now copy the result back to the user buffer. */
    if (count >= SZ_SG_HEADER) {
        if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
            retval = -EFAULT;
            goto free_old_hdr;
        }
        buf += SZ_SG_HEADER;
        if (count > old_hdr->reply_len)
            count = old_hdr->reply_len;
        if (count > SZ_SG_HEADER) {
            if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
                retval = -EFAULT;
                goto free_old_hdr;
            }
        }
    } else
        count = (old_hdr->result == 0) ? 0 : -EIO;
    sg_finish_rem_req(srp);
    sg_remove_request(sfp, srp);
    retval = count;
free_old_hdr:
    kfree(old_hdr);
    return retval;
}
static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
    sg_io_hdr_t *hp = &srp->header;
    int err = 0, err2;
    int len;

    if (count < SZ_SG_IO_HDR) {
        err = -EINVAL;
        goto err_out;
    }
    hp->sb_len_wr = 0;
    if ((hp->mx_sb_len > 0) && hp->sbp) {
        if ((CHECK_CONDITION & hp->masked_status) ||
            (DRIVER_SENSE & hp->driver_status)) {
            int sb_len = SCSI_SENSE_BUFFERSIZE;
            sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
            len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
            len = (len > sb_len) ? sb_len : len;
            if (copy_to_user(hp->sbp, srp->sense_b, len)) {
                err = -EFAULT;
                goto err_out;
            }
            hp->sb_len_wr = len;
        }
    }
    if (hp->masked_status || hp->host_status || hp->driver_status)
        hp->info |= SG_INFO_CHECK;
    if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
        err = -EFAULT;
        goto err_out;
    }
err_out:
    err2 = sg_finish_rem_req(srp);
    sg_remove_request(sfp, srp);
    return err ? : err2 ? : count;
}
static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
    int mxsize, cmd_size, k;
    int input_size, blocking;
    unsigned char opcode;
    Sg_device *sdp;
    Sg_fd *sfp;
    Sg_request *srp;
    struct sg_header old_hdr;
    sg_io_hdr_t *hp;
    unsigned char cmnd[SG_MAX_CDB_SIZE];
    int retval;

    retval = sg_check_file_access(filp, __func__);
    if (retval)
        return retval;

    if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_write: count=%d\n", (int) count));
    if (atomic_read(&sdp->detaching))
        return -ENODEV;
    if (!((filp->f_flags & O_NONBLOCK) ||
          scsi_block_when_processing_errors(sdp->device)))
        return -ENXIO;

    if (!access_ok(VERIFY_READ, buf, count))
        return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
    if (count < SZ_SG_HEADER)
        return -EIO;
    if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
        return -EFAULT;
    blocking = !(filp->f_flags & O_NONBLOCK);
    if (old_hdr.reply_len < 0)
        return sg_new_write(sfp, filp, buf, count,
                    blocking, 0, 0, NULL);
    if (count < (SZ_SG_HEADER + 6))
        return -EIO;	/* The minimum scsi command length is 6 bytes. */

    if (!(srp = sg_add_request(sfp))) {
        SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
                          "sg_write: queue full\n"));
        return -EDOM;
    }
    buf += SZ_SG_HEADER;
    __get_user(opcode, buf);
    mutex_lock(&sfp->f_mutex);
    if (sfp->next_cmd_len > 0) {
        cmd_size = sfp->next_cmd_len;
        sfp->next_cmd_len = 0;	/* reset so only this write() effected */
    } else {
        cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
        if ((opcode >= 0xc0) && old_hdr.twelve_byte)
            cmd_size = 12;
    }
    mutex_unlock(&sfp->f_mutex);
    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
        "sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
    /* Determine buffer size. */
    input_size = count - cmd_size;
    mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
    mxsize -= SZ_SG_HEADER;
    input_size -= SZ_SG_HEADER;
    if (input_size < 0) {
        sg_remove_request(sfp, srp);
        return -EIO;	/* User did not pass enough bytes for this command. */
    }
    hp = &srp->header;
    hp->interface_id = '\0';	/* indicator of old interface tunnelled */
    hp->cmd_len = (unsigned char) cmd_size;
    hp->iovec_count = 0;
    hp->mx_sb_len = 0;
    if (input_size > 0)
        hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
            SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
    else
        hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
    hp->dxfer_len = mxsize;
    if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
        (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
        hp->dxferp = (char __user *)buf + cmd_size;
    else
        hp->dxferp = NULL;
    hp->sbp = NULL;
    hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
    hp->flags = input_size;	/* structure abuse ... */
    hp->pack_id = old_hdr.pack_id;
    hp->usr_ptr = NULL;
    if (__copy_from_user(cmnd, buf, cmd_size))
        return -EFAULT;
    /*
     * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
     * but it is possible that the app intended SG_DXFER_TO_DEV, because there
     * is a non-zero input_size, so emit a warning.
     */
    if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
        printk_ratelimited(KERN_WARNING
                   "sg_write: data in/out %d/%d bytes "
                   "for SCSI command 0x%x-- guessing "
                   "data in;\n   program %s not setting "
                   "count and/or reply_len properly\n",
                   old_hdr.reply_len - (int)SZ_SG_HEADER,
                   input_size, (unsigned int) cmnd[0],
                   current->comm);
    }
    k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
    return (k < 0) ? k : count;
}
static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
         size_t count, int blocking, int read_only, int sg_io_owned,
         Sg_request **o_srp)
{
    int k;
    Sg_request *srp;
    sg_io_hdr_t *hp;
    unsigned char cmnd[SG_MAX_CDB_SIZE];
    int timeout;
    unsigned long ul_timeout;

    if (count < SZ_SG_IO_HDR)
        return -EINVAL;
    if (!access_ok(VERIFY_READ, buf, count))
        return -EFAULT; /* protects following copy_from_user()s + get_user()s */

    sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
    if (!(srp = sg_add_request(sfp))) {
        SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
                          "sg_new_write: queue full\n"));
        return -EDOM;
    }
    srp->sg_io_owned = sg_io_owned;
    hp = &srp->header;
    if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
        sg_remove_request(sfp, srp);
        return -EFAULT;
    }
    if (hp->interface_id != 'S') {
        sg_remove_request(sfp, srp);
        return -ENOSYS;
    }
    if (hp->flags & SG_FLAG_MMAP_IO) {
        if (hp->dxfer_len > sfp->reserve.bufflen) {
            sg_remove_request(sfp, srp);
            return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
        }
        if (hp->flags & SG_FLAG_DIRECT_IO) {
            sg_remove_request(sfp, srp);
            return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
        }
        if (sfp->res_in_use) {
            sg_remove_request(sfp, srp);
            return -EBUSY;	/* reserve buffer already being used */
        }
    }
    ul_timeout = msecs_to_jiffies(srp->header.timeout);
    timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
    if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
        sg_remove_request(sfp, srp);
        return -EMSGSIZE;
    }
    if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
        sg_remove_request(sfp, srp);
        return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
    }
    if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
        sg_remove_request(sfp, srp);
        return -EFAULT;
    }
    if (read_only && sg_allow_access(file, cmnd)) {
        sg_remove_request(sfp, srp);
        return -EPERM;
    }
    k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
    if (k < 0)
        return k;
    if (o_srp)
        *o_srp = srp;
    return count;
}
* sfp
, Sg_request
* srp
,
805 unsigned char *cmnd
, int timeout
, int blocking
)
808 Sg_device
*sdp
= sfp
->parentdp
;
809 sg_io_hdr_t
*hp
= &srp
->header
;
811 srp
->data
.cmd_opcode
= cmnd
[0]; /* hold opcode of command */
813 hp
->masked_status
= 0;
817 hp
->driver_status
= 0;
819 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
820 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
821 (int) cmnd
[0], (int) hp
->cmd_len
));
823 if (hp
->dxfer_len
>= SZ_256M
)
826 k
= sg_start_req(srp
, cmnd
);
828 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO
, sfp
->parentdp
,
829 "sg_common_write: start_req err=%d\n", k
));
830 sg_finish_rem_req(srp
);
831 sg_remove_request(sfp
, srp
);
832 return k
; /* probably out of space --> ENOMEM */
834 if (atomic_read(&sdp
->detaching
)) {
836 if (srp
->rq
->cmd
!= srp
->rq
->__cmd
)
839 blk_end_request_all(srp
->rq
, -EIO
);
843 sg_finish_rem_req(srp
);
844 sg_remove_request(sfp
, srp
);
848 hp
->duration
= jiffies_to_msecs(jiffies
);
849 if (hp
->interface_id
!= '\0' && /* v3 (or later) interface */
850 (SG_FLAG_Q_AT_TAIL
& hp
->flags
))
855 srp
->rq
->timeout
= timeout
;
856 kref_get(&sfp
->f_ref
); /* sg_rq_end_io() does kref_put(). */
857 blk_execute_rq_nowait(sdp
->device
->request_queue
, sdp
->disk
,
858 srp
->rq
, at_head
, sg_rq_end_io
);
862 static int srp_done(Sg_fd
*sfp
, Sg_request
*srp
)
867 read_lock_irqsave(&sfp
->rq_list_lock
, flags
);
869 read_unlock_irqrestore(&sfp
->rq_list_lock
, flags
);
static int max_sectors_bytes(struct request_queue *q)
{
    unsigned int max_sectors = queue_max_sectors(q);

    max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);

    return max_sectors << 9;
}
static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
    Sg_request *srp;
    int val;
    unsigned int ms;

    val = 0;
    list_for_each_entry(srp, &sfp->rq_list, entry) {
        if (val >= SG_MAX_QUEUE)
            break;
        rinfo[val].req_state = srp->done + 1;
        rinfo[val].problem =
            srp->header.masked_status &
            srp->header.host_status &
            srp->header.driver_status;
        if (srp->done)
            rinfo[val].duration =
                srp->header.duration;
        else {
            ms = jiffies_to_msecs(jiffies);
            rinfo[val].duration =
                (ms > srp->header.duration) ?
                (ms - srp->header.duration) : 0;
        }
        rinfo[val].orphan = srp->orphan;
        rinfo[val].sg_io_owned = srp->sg_io_owned;
        rinfo[val].pack_id = srp->header.pack_id;
        rinfo[val].usr_ptr = srp->header.usr_ptr;
        val++;
    }
}
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
    void __user *p = (void __user *)arg;
    int __user *ip = p;
    int result, val, read_only;
    Sg_device *sdp;
    Sg_fd *sfp;
    Sg_request *srp;
    unsigned long iflags;

    if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
        return -ENXIO;

    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
    read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

    switch (cmd_in) {
    case SG_IO:
        if (atomic_read(&sdp->detaching))
            return -ENODEV;
        if (!scsi_block_when_processing_errors(sdp->device))
            return -ENXIO;
        if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
            return -EFAULT;
        result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
                 1, read_only, 1, &srp);
        if (result < 0)
            return result;
        result = wait_event_interruptible(sfp->read_wait,
            (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
        if (atomic_read(&sdp->detaching))
            return -ENODEV;
        write_lock_irq(&sfp->rq_list_lock);
        if (srp->done) {
            srp->done = 2;
            write_unlock_irq(&sfp->rq_list_lock);
            result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
            return (result < 0) ? result : 0;
        }
        srp->orphan = 1;
        write_unlock_irq(&sfp->rq_list_lock);
        return result;	/* -ERESTARTSYS because signal hit process */
    case SG_SET_TIMEOUT:
        result = get_user(val, ip);
        if (result)
            return result;
        if (val < 0)
            return -EIO;
        if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
            val = MULDIV (INT_MAX, USER_HZ, HZ);
        sfp->timeout_user = val;
        sfp->timeout = MULDIV (val, HZ, USER_HZ);

        return 0;
    case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
                /* strange ..., for backward compatibility */
        return sfp->timeout_user;
    case SG_SET_FORCE_LOW_DMA:
        /*
         * N.B. This ioctl never worked properly, but failed to
         * return an error value. So returning '0' to keep compability
         * with legacy applications.
         */
        return 0;
    case SG_GET_LOW_DMA:
        return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
    case SG_GET_SCSI_ID:
        if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
            return -EFAULT;
        else {
            sg_scsi_id_t __user *sg_idp = p;

            if (atomic_read(&sdp->detaching))
                return -ENODEV;
            __put_user((int) sdp->device->host->host_no,
                   &sg_idp->host_no);
            __put_user((int) sdp->device->channel,
                   &sg_idp->channel);
            __put_user((int) sdp->device->id, &sg_idp->scsi_id);
            __put_user((int) sdp->device->lun, &sg_idp->lun);
            __put_user((int) sdp->device->type, &sg_idp->scsi_type);
            __put_user((short) sdp->device->host->cmd_per_lun,
                   &sg_idp->h_cmd_per_lun);
            __put_user((short) sdp->device->queue_depth,
                   &sg_idp->d_queue_depth);
            __put_user(0, &sg_idp->unused[0]);
            __put_user(0, &sg_idp->unused[1]);
            return 0;
        }
    case SG_SET_FORCE_PACK_ID:
        result = get_user(val, ip);
        if (result)
            return result;
        sfp->force_packid = val ? 1 : 0;
        return 0;
    case SG_GET_PACK_ID:
        if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
            return -EFAULT;
        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        list_for_each_entry(srp, &sfp->rq_list, entry) {
            if ((1 == srp->done) && (!srp->sg_io_owned)) {
                read_unlock_irqrestore(&sfp->rq_list_lock,
                               iflags);
                __put_user(srp->header.pack_id, ip);
                return 0;
            }
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
        __put_user(-1, ip);
        return 0;
    case SG_GET_NUM_WAITING:
        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        val = 0;
        list_for_each_entry(srp, &sfp->rq_list, entry) {
            if ((1 == srp->done) && (!srp->sg_io_owned))
                ++val;
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
        return put_user(val, ip);
    case SG_GET_SG_TABLESIZE:
        return put_user(sdp->sg_tablesize, ip);
    case SG_SET_RESERVED_SIZE:
        result = get_user(val, ip);
        if (result)
            return result;
        if (val < 0)
            return -EINVAL;
        val = min_t(int, val,
                max_sectors_bytes(sdp->device->request_queue));
        mutex_lock(&sfp->f_mutex);
        if (val != sfp->reserve.bufflen) {
            if (sfp->mmap_called ||
                sfp->res_in_use) {
                mutex_unlock(&sfp->f_mutex);
                return -EBUSY;
            }

            sg_remove_scat(sfp, &sfp->reserve);
            sg_build_reserve(sfp, val);
        }
        mutex_unlock(&sfp->f_mutex);
        return 0;
    case SG_GET_RESERVED_SIZE:
        val = min_t(int, sfp->reserve.bufflen,
                max_sectors_bytes(sdp->device->request_queue));
        return put_user(val, ip);
    case SG_SET_COMMAND_Q:
        result = get_user(val, ip);
        if (result)
            return result;
        sfp->cmd_q = val ? 1 : 0;
        return 0;
    case SG_GET_COMMAND_Q:
        return put_user((int) sfp->cmd_q, ip);
    case SG_SET_KEEP_ORPHAN:
        result = get_user(val, ip);
        if (result)
            return result;
        sfp->keep_orphan = val;
        return 0;
    case SG_GET_KEEP_ORPHAN:
        return put_user((int) sfp->keep_orphan, ip);
    case SG_NEXT_CMD_LEN:
        result = get_user(val, ip);
        if (result)
            return result;
        if (val > SG_MAX_CDB_SIZE)
            return -ENOMEM;
        sfp->next_cmd_len = (val > 0) ? val : 0;
        return 0;
    case SG_GET_VERSION_NUM:
        return put_user(sg_version_num, ip);
    case SG_GET_ACCESS_COUNT:
        /* faked - we don't have a real access count anymore */
        val = (sdp->device ? 1 : 0);
        return put_user(val, ip);
    case SG_GET_REQUEST_TABLE:
        if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
            return -EFAULT;
        else {
            sg_req_info_t *rinfo;

            rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
                    GFP_KERNEL);
            if (!rinfo)
                return -ENOMEM;
            read_lock_irqsave(&sfp->rq_list_lock, iflags);
            sg_fill_request_table(sfp, rinfo);
            read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
            result = __copy_to_user(p, rinfo,
                        SZ_SG_REQ_INFO * SG_MAX_QUEUE);
            result = result ? -EFAULT : 0;
            kfree(rinfo);
            return result;
        }
    case SG_EMULATED_HOST:
        if (atomic_read(&sdp->detaching))
            return -ENODEV;
        return put_user(sdp->device->host->hostt->emulated, ip);
    case SCSI_IOCTL_SEND_COMMAND:
        if (atomic_read(&sdp->detaching))
            return -ENODEV;
        if (read_only) {
            unsigned char opcode = WRITE_6;
            Scsi_Ioctl_Command __user *siocp = p;

            if (copy_from_user(&opcode, siocp->data, 1))
                return -EFAULT;
            if (sg_allow_access(filp, &opcode))
                return -EPERM;
        }
        return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
    case SG_SET_DEBUG:
        result = get_user(val, ip);
        if (result)
            return result;
        sdp->sgdebug = (char) val;
        return 0;
    case BLKSECTGET:
        return put_user(max_sectors_bytes(sdp->device->request_queue),
                ip);
    case BLKTRACESETUP:
        return blk_trace_setup(sdp->device->request_queue,
                       sdp->disk->disk_name,
                       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
                       NULL, p);
    case BLKTRACESTART:
        return blk_trace_startstop(sdp->device->request_queue, 1);
    case BLKTRACESTOP:
        return blk_trace_startstop(sdp->device->request_queue, 0);
    case BLKTRACETEARDOWN:
        return blk_trace_remove(sdp->device->request_queue);
    case SCSI_IOCTL_GET_IDLUN:
    case SCSI_IOCTL_GET_BUS_NUMBER:
    case SCSI_IOCTL_PROBE_HOST:
    case SG_GET_TRANSFORM:
    case SG_SCSI_RESET:
        if (atomic_read(&sdp->detaching))
            return -ENODEV;
        break;
    default:
        if (read_only)
            return -EPERM;	/* don't know so take safe approach */
        break;
    }

    result = scsi_ioctl_block_when_processing_errors(sdp->device,
            cmd_in, filp->f_flags & O_NDELAY);
    if (result)
        return result;
    return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
    Sg_device *sdp;
    Sg_fd *sfp;
    struct scsi_device *sdev;

    if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
        return -ENXIO;

    sdev = sdp->device;
    if (sdev->host->hostt->compat_ioctl) {
        int ret;

        ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

        return ret;
    }

    return -ENOIOCTLCMD;
}
#endif
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
    unsigned int res = 0;
    Sg_device *sdp;
    Sg_fd *sfp;
    Sg_request *srp;
    int count = 0;
    unsigned long iflags;

    sfp = filp->private_data;
    if (!sfp)
        return POLLERR;
    sdp = sfp->parentdp;
    if (!sdp)
        return POLLERR;
    poll_wait(filp, &sfp->read_wait, wait);
    read_lock_irqsave(&sfp->rq_list_lock, iflags);
    list_for_each_entry(srp, &sfp->rq_list, entry) {
        /* if any read waiting, flag it */
        if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
            res = POLLIN | POLLRDNORM;
        ++count;
    }
    read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

    if (atomic_read(&sdp->detaching))
        res |= POLLHUP;
    else if (!sfp->cmd_q) {
        if (0 == count)
            res |= POLLOUT | POLLWRNORM;
    } else if (count < SG_MAX_QUEUE)
        res |= POLLOUT | POLLWRNORM;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_poll: res=0x%x\n", (int) res));
    return res;
}
static int
sg_fasync(int fd, struct file *filp, int mode)
{
    Sg_device *sdp;
    Sg_fd *sfp;

    if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_fasync: mode=%d\n", mode));

    return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
static int
sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    Sg_fd *sfp;
    unsigned long offset, len, sa;
    Sg_scatter_hold *rsv_schp;
    int k, length;

    if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
        return VM_FAULT_SIGBUS;
    rsv_schp = &sfp->reserve;
    offset = vmf->pgoff << PAGE_SHIFT;
    if (offset >= rsv_schp->bufflen)
        return VM_FAULT_SIGBUS;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
                      "sg_vma_fault: offset=%lu, scatg=%d\n",
                      offset, rsv_schp->k_use_sg));
    sa = vma->vm_start;
    length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
    for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
        len = vma->vm_end - sa;
        len = (len < length) ? len : length;
        if (offset < len) {
            struct page *page = nth_page(rsv_schp->pages[k],
                             offset >> PAGE_SHIFT);
            get_page(page);	/* increment page count */
            vmf->page = page;
            return 0; /* success */
        }
        sa += len;
        offset -= len;
    }

    return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct sg_mmap_vm_ops = {
    .fault = sg_vma_fault,
};
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
    Sg_fd *sfp;
    unsigned long req_sz, len, sa;
    Sg_scatter_hold *rsv_schp;
    int k, length;
    int ret = 0;

    if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
        return -ENXIO;
    req_sz = vma->vm_end - vma->vm_start;
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
                      "sg_mmap starting, vm_start=%p, len=%d\n",
                      (void *) vma->vm_start, (int) req_sz));
    if (vma->vm_pgoff)
        return -EINVAL;	/* want no offset */
    rsv_schp = &sfp->reserve;
    mutex_lock(&sfp->f_mutex);
    if (req_sz > rsv_schp->bufflen) {
        ret = -ENOMEM;	/* cannot map more than reserved buffer */
        goto out;
    }

    sa = vma->vm_start;
    length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
    for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
        len = vma->vm_end - sa;
        len = (len < length) ? len : length;
        sa += len;
    }

    sfp->mmap_called = 1;
    vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_private_data = sfp;
    vma->vm_ops = &sg_mmap_vm_ops;
out:
    mutex_unlock(&sfp->f_mutex);
    return ret;
}
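/*
 * Illustrative user-space sketch of the mmap()ed I/O path set up above
 * (an assumption-laden example, not part of the original source): map the
 * per-fd reserve buffer, then ask the driver to place the transfer there
 * with SG_FLAG_MMAP_IO instead of copying through dxferp.
 *
 *	#include <sys/mman.h>
 *	#include <scsi/sg.h>
 *
 *	size_t len = 64 * 1024;		// must fit in the reserve buffer
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, sg_fd, 0);	// offset must be 0
 *	// ... fill in a struct sg_io_hdr as in the SG_IO sketch, but set:
 *	//	io.flags = SG_FLAG_MMAP_IO;
 *	//	io.dxfer_len = len;
 *	// after ioctl(sg_fd, SG_IO, &io) the data is visible in buf
 */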
static void
sg_rq_end_io_usercontext(struct work_struct *work)
{
    struct sg_request *srp = container_of(work, struct sg_request, ew.work);
    struct sg_fd *sfp = srp->parentfp;

    sg_finish_rem_req(srp);
    sg_remove_request(sfp, srp);
    kref_put(&sfp->f_ref, sg_remove_sfp);
}
/*
 * This function is a "bottom half" handler that is called by the mid
 * level when a command is completed (or has failed).
 */
static void
sg_rq_end_io(struct request *rq, int uptodate)
{
    struct sg_request *srp = rq->end_io_data;
    Sg_device *sdp;
    Sg_fd *sfp;
    unsigned long iflags;
    unsigned int ms;
    char *sense;
    int result, resid, done = 1;

    if (WARN_ON(srp->done != 0))
        return;

    sfp = srp->parentfp;
    if (WARN_ON(sfp == NULL))
        return;

    sdp = sfp->parentdp;
    if (unlikely(atomic_read(&sdp->detaching)))
        pr_info("%s: device detaching\n", __func__);

    sense = rq->sense;
    result = rq->errors;
    resid = rq->resid_len;

    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
                      "sg_cmd_done: pack_id=%d, res=0x%x\n",
                      srp->header.pack_id, result));
    srp->header.resid = resid;
    ms = jiffies_to_msecs(jiffies);
    srp->header.duration = (ms > srp->header.duration) ?
                (ms - srp->header.duration) : 0;
    if (0 != result) {
        struct scsi_sense_hdr sshdr;

        srp->header.status = 0xff & result;
        srp->header.masked_status = status_byte(result);
        srp->header.msg_status = msg_byte(result);
        srp->header.host_status = host_byte(result);
        srp->header.driver_status = driver_byte(result);
        if ((sdp->sgdebug > 0) &&
            ((CHECK_CONDITION == srp->header.masked_status) ||
             (COMMAND_TERMINATED == srp->header.masked_status)))
            __scsi_print_sense(sdp->device, __func__, sense,
                       SCSI_SENSE_BUFFERSIZE);

        /* Following if statement is a patch supplied by Eric Youngdale */
        if (driver_byte(result) != 0
            && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
            && !scsi_sense_is_deferred(&sshdr)
            && sshdr.sense_key == UNIT_ATTENTION
            && sdp->device->removable) {
            /* Detected possible disc change. Set the bit - this */
            /* may be used if there are filesystems using this device */
            sdp->device->changed = 1;
        }
    }
    /* Rely on write phase to clean out srp status values, so no "else" */

    /*
     * Free the request as soon as it is complete so that its resources
     * can be reused without waiting for userspace to read() the
     * result. But keep the associated bio (if any) around until
     * blk_rq_unmap_user() can be called from user context.
     */
    srp->rq = NULL;
    if (rq->cmd != rq->__cmd)
        kfree(rq->cmd);
    __blk_put_request(rq->q, rq);

    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    if (unlikely(srp->orphan)) {
        if (sfp->keep_orphan)
            srp->sg_io_owned = 0;
        else
            done = 0;
    }
    srp->done = done;
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

    if (likely(done)) {
        /* Now wake up any sg_read() that is waiting for this
         * packet.
         */
        wake_up_interruptible(&sfp->read_wait);
        kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
        kref_put(&sfp->f_ref, sg_remove_sfp);
    } else {
        INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
        schedule_work(&srp->ew.work);
    }
}
static const struct file_operations sg_fops = {
    .owner = THIS_MODULE,
    .read = sg_read,
    .write = sg_write,
    .poll = sg_poll,
    .unlocked_ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = sg_compat_ioctl,
#endif
    .open = sg_open,
    .mmap = sg_mmap,
    .release = sg_release,
    .fasync = sg_fasync,
    .llseek = no_llseek,
};
static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;
static Sg_device *
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
    struct request_queue *q = scsidp->request_queue;
    Sg_device *sdp;
    unsigned long iflags;
    int error;
    u32 k;

    sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
    if (!sdp) {
        sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
                "failure\n", __func__);
        return ERR_PTR(-ENOMEM);
    }

    idr_preload(GFP_KERNEL);
    write_lock_irqsave(&sg_index_lock, iflags);

    error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
    if (error < 0) {
        if (error == -ENOSPC) {
            sdev_printk(KERN_WARNING, scsidp,
                    "Unable to attach sg device type=%d, minor number exceeds %d\n",
                    scsidp->type, SG_MAX_DEVS - 1);
            error = -ENODEV;
        } else {
            sdev_printk(KERN_WARNING, scsidp, "%s: idr "
                    "allocation Sg_device failure: %d\n",
                    __func__, error);
        }
        goto out_unlock;
    }
    k = error;

    SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
                    "sg_alloc: dev=%d \n", k));
    sprintf(disk->disk_name, "sg%d", k);
    disk->first_minor = k;
    sdp->disk = disk;
    sdp->device = scsidp;
    mutex_init(&sdp->open_rel_lock);
    INIT_LIST_HEAD(&sdp->sfds);
    init_waitqueue_head(&sdp->open_wait);
    atomic_set(&sdp->detaching, 0);
    rwlock_init(&sdp->sfd_lock);
    sdp->sg_tablesize = queue_max_segments(q);
    sdp->index = k;
    kref_init(&sdp->d_ref);
    error = 0;

out_unlock:
    write_unlock_irqrestore(&sg_index_lock, iflags);
    idr_preload_end();

    if (error) {
        kfree(sdp);
        return ERR_PTR(error);
    }
    return sdp;
}
static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
    struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
    struct gendisk *disk;
    Sg_device *sdp = NULL;
    struct cdev * cdev = NULL;
    int error;
    unsigned long iflags;

    disk = alloc_disk(1);
    if (!disk) {
        pr_warn("%s: alloc_disk failed\n", __func__);
        return -ENOMEM;
    }
    disk->major = SCSI_GENERIC_MAJOR;

    error = -ENOMEM;
    cdev = cdev_alloc();
    if (!cdev) {
        pr_warn("%s: cdev_alloc failed\n", __func__);
        goto out;
    }
    cdev->owner = THIS_MODULE;
    cdev->ops = &sg_fops;

    sdp = sg_alloc(disk, scsidp);
    if (IS_ERR(sdp)) {
        pr_warn("%s: sg_alloc failed\n", __func__);
        error = PTR_ERR(sdp);
        goto out;
    }

    error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
    if (error)
        goto cdev_add_err;

    sdp->cdev = cdev;
    if (sg_sysfs_valid) {
        struct device *sg_class_member;

        sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
                        MKDEV(SCSI_GENERIC_MAJOR,
                              sdp->index),
                        sdp, "%s", disk->disk_name);
        if (IS_ERR(sg_class_member)) {
            pr_err("%s: device_create failed\n", __func__);
            error = PTR_ERR(sg_class_member);
            goto cdev_add_err;
        }
        error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
                      &sg_class_member->kobj, "generic");
        if (error)
            pr_err("%s: unable to make symlink 'generic' back "
                   "to sg%d\n", __func__, sdp->index);
    } else
        pr_warn("%s: sg_sys Invalid\n", __func__);

    sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
            "type %d\n", sdp->index, scsidp->type);

    dev_set_drvdata(cl_dev, sdp);

    return 0;

cdev_add_err:
    write_lock_irqsave(&sg_index_lock, iflags);
    idr_remove(&sg_index_idr, sdp->index);
    write_unlock_irqrestore(&sg_index_lock, iflags);
    kfree(sdp);

out:
    put_disk(disk);
    if (cdev)
        cdev_del(cdev);
    return error;
}
static void
sg_device_destroy(struct kref *kref)
{
    struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
    unsigned long flags;

    /* CAUTION! Note that the device can still be found via idr_find()
     * even though the refcount is 0. Therefore, do idr_remove() BEFORE
     * any other cleanup.
     */

    write_lock_irqsave(&sg_index_lock, flags);
    idr_remove(&sg_index_idr, sdp->index);
    write_unlock_irqrestore(&sg_index_lock, flags);

    SCSI_LOG_TIMEOUT(3,
        sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));

    put_disk(sdp->disk);
    kfree(sdp);
}
static void
sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
    struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
    Sg_device *sdp = dev_get_drvdata(cl_dev);
    unsigned long iflags;
    Sg_fd *sfp;
    int val;

    if (!sdp)
        return;
    /* want sdp->detaching non-zero as soon as possible */
    val = atomic_inc_return(&sdp->detaching);
    if (val > 1)
        return; /* only want to do following once per device */

    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "%s\n", __func__));

    read_lock_irqsave(&sdp->sfd_lock, iflags);
    list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
        wake_up_interruptible_all(&sfp->read_wait);
        kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
    }
    wake_up_interruptible_all(&sdp->open_wait);
    read_unlock_irqrestore(&sdp->sfd_lock, iflags);

    sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
    device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
    cdev_del(sdp->cdev);
    sdp->cdev = NULL;

    kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
           S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
        "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
static int __init
init_sg(void)
{
    int rc;

    if (scatter_elem_sz < PAGE_SIZE) {
        scatter_elem_sz = PAGE_SIZE;
        scatter_elem_sz_prev = scatter_elem_sz;
    }
    if (def_reserved_size >= 0)
        sg_big_buff = def_reserved_size;
    else
        def_reserved_size = sg_big_buff;

    rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
                    SG_MAX_DEVS, "sg");
    if (rc)
        return rc;
    sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
    if ( IS_ERR(sg_sysfs_class) ) {
        rc = PTR_ERR(sg_sysfs_class);
        goto err_out;
    }
    sg_sysfs_valid = 1;
    rc = scsi_register_interface(&sg_interface);
    if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
        sg_proc_init();
#endif				/* CONFIG_SCSI_PROC_FS */
        return 0;
    }
    class_destroy(sg_sysfs_class);
err_out:
    unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
    return rc;
}
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
    sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
    scsi_unregister_interface(&sg_interface);
    class_destroy(sg_sysfs_class);
    sg_sysfs_valid = 0;
    unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
                 SG_MAX_DEVS);
    idr_destroy(&sg_index_idr);
}
static int
sg_start_req(Sg_request *srp, unsigned char *cmd)
{
    int res;
    struct request *rq;
    Sg_fd *sfp = srp->parentfp;
    sg_io_hdr_t *hp = &srp->header;
    int dxfer_len = (int) hp->dxfer_len;
    int dxfer_dir = hp->dxfer_direction;
    unsigned int iov_count = hp->iovec_count;
    Sg_scatter_hold *req_schp = &srp->data;
    Sg_scatter_hold *rsv_schp = &sfp->reserve;
    struct request_queue *q = sfp->parentdp->device->request_queue;
    struct rq_map_data *md, map_data;
    int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
    unsigned char *long_cmdp = NULL;

    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
                      "sg_start_req: dxfer_len=%d\n",
                      dxfer_len));

    if (hp->cmd_len > BLK_MAX_CDB) {
        long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
        if (!long_cmdp)
            return -ENOMEM;
    }

    /*
     * With scsi-mq enabled, there are a fixed number of preallocated
     * requests equal in number to shost->can_queue. If all of the
     * preallocated requests are already in use, then using GFP_ATOMIC with
     * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
     * will cause blk_get_request() to sleep until an active command
     * completes, freeing up a request. Neither option is ideal, but
     * GFP_KERNEL is the better choice to prevent userspace from getting an
     * unexpected EWOULDBLOCK.
     *
     * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
     * does not sleep except under memory pressure.
     */
    rq = blk_get_request(q, rw, GFP_KERNEL);
    if (IS_ERR(rq)) {
        kfree(long_cmdp);
        return PTR_ERR(rq);
    }

    blk_rq_set_block_pc(rq);

    if (hp->cmd_len > BLK_MAX_CDB)
        rq->cmd = long_cmdp;
    memcpy(rq->cmd, cmd, hp->cmd_len);
    rq->cmd_len = hp->cmd_len;

    srp->rq = rq;
    rq->end_io_data = srp;
    rq->sense = srp->sense_b;
    rq->retries = SG_DEFAULT_RETRIES;

    if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
        return 0;

    if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
        dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
        !sfp->parentdp->device->host->unchecked_isa_dma &&
        blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
        md = NULL;
    else
        md = &map_data;

    if (md) {
        mutex_lock(&sfp->f_mutex);
        if (dxfer_len <= rsv_schp->bufflen &&
            !sfp->res_in_use) {
            sfp->res_in_use = 1;
            sg_link_reserve(sfp, srp, dxfer_len);
        } else if (hp->flags & SG_FLAG_MMAP_IO) {
            res = -EBUSY; /* sfp->res_in_use == 1 */
            if (dxfer_len > rsv_schp->bufflen)
                res = -ENOMEM;
            mutex_unlock(&sfp->f_mutex);
            return res;
        } else {
            res = sg_build_indirect(req_schp, sfp, dxfer_len);
            if (res) {
                mutex_unlock(&sfp->f_mutex);
                return res;
            }
        }
        mutex_unlock(&sfp->f_mutex);

        md->pages = req_schp->pages;
        md->page_order = req_schp->page_order;
        md->nr_entries = req_schp->k_use_sg;
        md->offset = 0;
        md->null_mapped = hp->dxferp ? 0 : 1;
        if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
            md->from_user = 1;
        else
            md->from_user = 0;
    }

    if (iov_count) {
        struct iovec *iov = NULL;
        struct iov_iter i;

        res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
        if (res < 0)
            return res;

        iov_iter_truncate(&i, hp->dxfer_len);
        if (!iov_iter_count(&i)) {
            kfree(iov);
            return -EINVAL;
        }

        res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
        kfree(iov);
    } else
        res = blk_rq_map_user(q, rq, md, hp->dxferp,
                      hp->dxfer_len, GFP_ATOMIC);

    if (!res) {
        srp->bio = rq->bio;

        if (!md) {
            req_schp->dio_in_use = 1;
            hp->info |= SG_INFO_DIRECT_IO;
        }
    }
    return res;
}
static int
sg_finish_rem_req(Sg_request *srp)
{
    int ret = 0;

    Sg_fd *sfp = srp->parentfp;
    Sg_scatter_hold *req_schp = &srp->data;

    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
                      "sg_finish_rem_req: res_used=%d\n",
                      (int) srp->res_used));
    if (srp->bio)
        ret = blk_rq_unmap_user(srp->bio);

    if (srp->rq) {
        if (srp->rq->cmd != srp->rq->__cmd)
            kfree(srp->rq->cmd);
        blk_put_request(srp->rq);
    }

    if (srp->res_used)
        sg_unlink_reserve(sfp, srp);
    else
        sg_remove_scat(sfp, req_schp);

    return ret;
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
    int sg_bufflen = tablesize * sizeof(struct page *);
    gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

    schp->pages = kzalloc(sg_bufflen, gfp_flags);
    if (!schp->pages)
        return -ENOMEM;
    schp->sglist_len = sg_bufflen;
    return tablesize;	/* number of scat_gath elements allocated */
}
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
    int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
    int sg_tablesize = sfp->parentdp->sg_tablesize;
    int blk_size = buff_size, order;
    gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
    struct sg_device *sdp = sfp->parentdp;

    if (blk_size < 0)
        return -EFAULT;
    if (0 == blk_size)
        ++blk_size;	/* don't know why */
    /* round request up to next highest SG_SECTOR_SZ byte boundary */
    blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
        "sg_build_indirect: buff_size=%d, blk_size=%d\n",
        buff_size, blk_size));

    /* N.B. ret_sz carried into this block ... */
    mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
    if (mx_sc_elems < 0)
        return mx_sc_elems;	/* most likely -ENOMEM */

    num = scatter_elem_sz;
    if (unlikely(num != scatter_elem_sz_prev)) {
        if (num < PAGE_SIZE) {
            scatter_elem_sz = PAGE_SIZE;
            scatter_elem_sz_prev = PAGE_SIZE;
        } else
            scatter_elem_sz_prev = num;
    }

    if (sdp->device->host->unchecked_isa_dma)
        gfp_mask |= GFP_DMA;

    if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
        gfp_mask |= __GFP_ZERO;

    order = get_order(num);
retry:
    ret_sz = 1 << (PAGE_SHIFT + order);

    for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
         k++, rem_sz -= ret_sz) {

        num = (rem_sz > scatter_elem_sz_prev) ?
            scatter_elem_sz_prev : rem_sz;

        schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
        if (!schp->pages[k])
            goto out;

        if (num == scatter_elem_sz_prev) {
            if (unlikely(ret_sz > scatter_elem_sz_prev)) {
                scatter_elem_sz = ret_sz;
                scatter_elem_sz_prev = ret_sz;
            }
        }

        SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
                 "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
                 k, num, ret_sz));
    }		/* end of for loop */

    schp->page_order = order;
    schp->k_use_sg = k;
    SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
             "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
             k, rem_sz));

    schp->bufflen = blk_size;
    if (rem_sz > 0)	/* must have failed */
        return -ENOMEM;
    return 0;
out:
    for (i = 0; i < k; i++)
        __free_pages(schp->pages[i], order);

    if (--order >= 0)
        goto retry;

    return -ENOMEM;
}
static void
sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
             "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
    if (schp->pages && schp->sglist_len > 0) {
        if (!schp->dio_in_use) {
            int k;

            for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
                SCSI_LOG_TIMEOUT(5,
                    sg_printk(KERN_INFO, sfp->parentdp,
                    "sg_remove_scat: k=%d, pg=0x%p\n",
                    k, schp->pages[k]));
                __free_pages(schp->pages[k], schp->page_order);
            }

            kfree(schp->pages);
        }
    }
    memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
    Sg_scatter_hold *schp = &srp->data;
    int k, num;

    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
             "sg_read_oxfer: num_read_xfer=%d\n",
             num_read_xfer));
    if ((!outp) || (num_read_xfer <= 0))
        return 0;

    num = 1 << (PAGE_SHIFT + schp->page_order);
    for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
        if (num > num_read_xfer) {
            if (__copy_to_user(outp, page_address(schp->pages[k]),
                       num_read_xfer))
                return -EFAULT;
            break;
        } else {
            if (__copy_to_user(outp, page_address(schp->pages[k]),
                       num))
                return -EFAULT;
            num_read_xfer -= num;
            if (num_read_xfer <= 0)
                break;
            outp += num;
        }
    }

    return 0;
}
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
    Sg_scatter_hold *schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
             "sg_build_reserve: req_size=%d\n", req_size));
    do {
        if (req_size < PAGE_SIZE)
            req_size = PAGE_SIZE;
        if (0 == sg_build_indirect(schp, sfp, req_size))
            return;
        else
            sg_remove_scat(sfp, schp);
        req_size >>= 1;	/* divide by 2 */
    } while (req_size > (PAGE_SIZE / 2));
}
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
    Sg_scatter_hold *req_schp = &srp->data;
    Sg_scatter_hold *rsv_schp = &sfp->reserve;
    int k, num, rem;

    srp->res_used = 1;
    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
             "sg_link_reserve: size=%d\n", size));
    rem = size;

    num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
    for (k = 0; k < rsv_schp->k_use_sg; k++) {
        if (rem <= num) {
            req_schp->k_use_sg = k + 1;
            req_schp->sglist_len = rsv_schp->sglist_len;
            req_schp->pages = rsv_schp->pages;

            req_schp->bufflen = size;
            req_schp->page_order = rsv_schp->page_order;
            break;
        } else
            rem -= num;
    }

    if (k >= rsv_schp->k_use_sg)
        SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
                 "sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
    Sg_scatter_hold *req_schp = &srp->data;

    SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
                      "sg_unlink_reserve: req->k_use_sg=%d\n",
                      (int) req_schp->k_use_sg));
    req_schp->k_use_sg = 0;
    req_schp->bufflen = 0;
    req_schp->pages = NULL;
    req_schp->page_order = 0;
    req_schp->sglist_len = 0;
    srp->res_used = 0;
    /* Called without mutex lock to avoid deadlock */
    sfp->res_in_use = 0;
}
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
    Sg_request *resp;
    unsigned long iflags;

    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    list_for_each_entry(resp, &sfp->rq_list, entry) {
        /* look for requests that are ready + not SG_IO owned */
        if ((1 == resp->done) && (!resp->sg_io_owned) &&
            ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
            resp->done = 2;	/* guard against other readers */
            write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
            return resp;
        }
    }
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return NULL;
}
/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
    int k;
    unsigned long iflags;
    Sg_request *rp = sfp->req_arr;

    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    if (!list_empty(&sfp->rq_list)) {
        if (!sfp->cmd_q)
            goto out_unlock;

        for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
            if (!rp->parentfp)
                break;
        }
        if (k >= SG_MAX_QUEUE)
            goto out_unlock;
    }
    memset(rp, 0, sizeof (Sg_request));
    rp->parentfp = sfp;
    rp->header.duration = jiffies_to_msecs(jiffies);
    list_add_tail(&rp->entry, &sfp->rq_list);
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return rp;
out_unlock:
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
    unsigned long iflags;
    int res = 0;

    if (!sfp || !srp || list_empty(&sfp->rq_list))
        return res;
    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    if (!list_empty(&srp->entry)) {
        list_del(&srp->entry);
        srp->parentfp = NULL;
        res = 1;
    }
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return res;
}
static Sg_fd *
sg_add_sfp(Sg_device * sdp)
{
    Sg_fd *sfp;
    unsigned long iflags;
    int bufflen;

    sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
    if (!sfp)
        return ERR_PTR(-ENOMEM);

    init_waitqueue_head(&sfp->read_wait);
    rwlock_init(&sfp->rq_list_lock);
    INIT_LIST_HEAD(&sfp->rq_list);
    kref_init(&sfp->f_ref);
    mutex_init(&sfp->f_mutex);
    sfp->timeout = SG_DEFAULT_TIMEOUT;
    sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
    sfp->force_packid = SG_DEF_FORCE_PACK_ID;
    sfp->cmd_q = SG_DEF_COMMAND_Q;
    sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
    sfp->parentdp = sdp;
    write_lock_irqsave(&sdp->sfd_lock, iflags);
    if (atomic_read(&sdp->detaching)) {
        write_unlock_irqrestore(&sdp->sfd_lock, iflags);
        kfree(sfp);
        return ERR_PTR(-ENODEV);
    }
    list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
    write_unlock_irqrestore(&sdp->sfd_lock, iflags);
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_add_sfp: sfp=0x%p\n", sfp));
    if (unlikely(sg_big_buff != def_reserved_size))
        sg_big_buff = def_reserved_size;

    bufflen = min_t(int, sg_big_buff,
            max_sectors_bytes(sdp->device->request_queue));
    sg_build_reserve(sfp, bufflen);
    SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
                      "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
                      sfp->reserve.bufflen,
                      sfp->reserve.k_use_sg));

    kref_get(&sdp->d_ref);
    __module_get(THIS_MODULE);
    return sfp;
}
static void
sg_remove_sfp_usercontext(struct work_struct *work)
{
    struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
    struct sg_device *sdp = sfp->parentdp;
    Sg_request *srp;
    unsigned long iflags;

    /* Cleanup any responses which were never read(). */
    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    while (!list_empty(&sfp->rq_list)) {
        srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
        sg_finish_rem_req(srp);
        list_del(&srp->entry);
        srp->parentfp = NULL;
    }
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

    if (sfp->reserve.bufflen > 0) {
        SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
                "sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
                (int) sfp->reserve.bufflen,
                (int) sfp->reserve.k_use_sg));
        sg_remove_scat(sfp, &sfp->reserve);
    }

    SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
            "sg_remove_sfp: sfp=0x%p\n", sfp));
    kfree(sfp);

    scsi_device_put(sdp->device);
    kref_put(&sdp->d_ref, sg_device_destroy);
    module_put(THIS_MODULE);
}
static void
sg_remove_sfp(struct kref *kref)
{
	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
	struct sg_device *sdp = sfp->parentdp;
	unsigned long iflags;

	write_lock_irqsave(&sdp->sfd_lock, iflags);
	list_del(&sfp->sfd_siblings);
	write_unlock_irqrestore(&sdp->sfd_lock, iflags);

	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
	schedule_work(&sfp->ew.work);
}
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;

	return 0;
}

static int
sg_last_dev(void)
{
	int k = -1;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif
/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}

static Sg_device *
sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
	if (!sdp)
		sdp = ERR_PTR(-ENXIO);
	else if (atomic_read(&sdp->detaching)) {
		/* If sdp->detaching, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
		 */
		sdp = ERR_PTR(-ENODEV);
	} else
		kref_get(&sdp->d_ref);
	read_unlock_irqrestore(&sg_index_lock, flags);

	return sdp;
}
#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static const struct file_operations adio_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_adio,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_dressz,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_version,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_devhdr,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_dev,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_devstrs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_debug,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
	const char * name;
	const struct file_operations * fops;
};

static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

static int
sg_proc_init(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;

	for (k = 0; k < num_leaves; ++k) {
		const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;

		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}
	return 0;
}
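
/*
 * Illustrative note (not from the original source): after sg_proc_init()
 * succeeds the tree under /proc looks like this, writable only where the
 * fops supply a .write method:
 *
 *	/proc/scsi/sg/allow_dio			read-write
 *	/proc/scsi/sg/debug			read-only
 *	/proc/scsi/sg/def_reserved_size		read-write
 *	/proc/scsi/sg/device_hdr		read-only
 *	/proc/scsi/sg/devices			read-only
 *	/proc/scsi/sg/device_strs		read-only
 *	/proc/scsi/sg/version			read-only
 */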
static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}
static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int err;
	unsigned long num;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	err = kstrtoul_from_user(buffer, count, 0, &num);
	if (err)
		return err;
	sg_allow_dio = num ? 1 : 0;
	return count;
}
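
/*
 * Illustrative note (not from the original source): any non-zero value is
 * normalized to 1, and the writer needs both CAP_SYS_ADMIN and
 * CAP_SYS_RAWIO. Shell sketch:
 *
 *	# echo 1 > /proc/scsi/sg/allow_dio
 *	# cat /proc/scsi/sg/allow_dio		-> 1
 */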
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int err;
	unsigned long k = ULONG_MAX;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	err = kstrtoul_from_user(buffer, count, 0, &k);
	if (err)
		return err;
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}
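
/*
 * Illustrative note (not from the original source): accepted values become
 * the reserve size for file descriptors opened afterwards; anything above
 * 1 MB is rejected with ERANGE. Shell sketch:
 *
 *	# echo 131072 > /proc/scsi/sg/def_reserved_size
 *	# cat /proc/scsi/sg/def_reserved_size	-> 131072
 */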
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
	loff_t	index;
	size_t	max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (! it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}
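
/*
 * Illustrative note (not from the original source): the seq_file core
 * drives the three callbacks above roughly as
 *
 *	v = start(s, &pos);
 *	while (v) { show(s, v); v = next(s, v, &pos); }
 *	stop(s, v);
 *
 * so each read of a devices/device_strs/debug file walks every possible
 * sg device index once, bounded by sg_last_dev(); the iterator allocated
 * in dev_seq_start() is freed in dev_seq_stop().
 */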
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if ((NULL == sdp) || (NULL == sdp->device) ||
	    (atomic_read(&sdp->detaching)))
		seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	else {
		scsidp = sdp->device;
		seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
			      scsidp->host->host_no, scsidp->channel,
			      scsidp->id, scsidp->lun, (int) scsidp->type,
			      1,
			      (int) scsidp->queue_depth,
			      (int) atomic_read(&scsidp->device_busy),
			      (int) scsi_device_online(scsidp));
	}
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
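
/*
 * Illustrative note (not from the original source): one line is emitted
 * per device index, matching the columns printed by devhdr (the "opens"
 * column is hardwired to 1 here). A plausible line:
 *
 *	0	0	1	0	5	1	64	0	1
 *
 * Detached slots print a row of -1 values so line positions keep
 * corresponding to device indexes.
 */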
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	scsidp = sdp ? sdp->device : NULL;
	if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_puts(s, "<no active device>\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
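
/*
 * Illustrative note (not from the original source): the INQUIRY strings
 * are printed fixed-width (8-char vendor, 16-char model, 4-char revision),
 * so a plausible line is:
 *
 *	ATA     	SAMSUNG SSD 850 	1B6Q
 */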
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char * cp;
	unsigned int ms;

	k = 0;
	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
		k++;
		read_lock(&fp->rq_list_lock); /* irqs already disabled */
		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) sdp->device->host->unchecked_isa_dma);
		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan);
		list_for_each_entry(srp, &fp->rq_list, entry) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = "     mmap>> ";
				else
					cp = "     rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = "     dio>> ";
				else
					cp = "     ";
			}
			seq_puts(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_puts(s, srp->done ?
				 ((1 == srp->done) ? "rcv:" : "fin:")
				 : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					(new_interface ? hp->timeout :
						jiffies_to_msecs(fp->timeout)),
					(ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (list_empty(&fp->rq_list))
			seq_puts(s, "     No requests active\n");
		read_unlock(&fp->rq_list_lock);
	}
}
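
/*
 * Illustrative note (not from the original source): together with
 * sg_proc_seq_show_debug() below, this produces per-device, per-fd output
 * of roughly this shape for one completed request:
 *
 *	 >>> device=sg1 1:0:0:0 em=0 sg_tablesize=128 excl=0 open_cnt=1
 *	   FD(1): timeout=60000ms bufflen=32768 (res)sgat=8 low_dma=0
 *	   cmd_q=1 f_packid=0 k_orphan=0 closed=0
 *	     rcv: id=3 blen=512 dur=12ms sgat=1 op=0x12
 *
 * where the rcv:/fin:/act: prefixes decode srp->done as above.
 */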
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	unsigned long iflags;

	if (it && (0 == it->index))
		seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
			   (int)it->max, sg_big_buff);

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (NULL == sdp)
		goto skip;
	read_lock(&sdp->sfd_lock);
	if (!list_empty(&sdp->sfds)) {
		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
		if (atomic_read(&sdp->detaching))
			seq_puts(s, "detaching pending close ");
		else if (sdp->device) {
			struct scsi_device *scsidp = sdp->device;

			seq_printf(s, "%d:%d:%d:%llu em=%d",
				   scsidp->host->host_no,
				   scsidp->channel, scsidp->id,
				   scsidp->lun,
				   scsidp->host->hostt->emulated);
		}
		seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
			   sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
		sg_proc_debug_helper(s, sdp);
	}
	read_unlock(&sdp->sfd_lock);
skip:
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);