/*
 *  i386/linux-2.3.21/drivers/scsi/sg.c
 *
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 *  Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 *  2.x extensions to driver:
 *        Copyright (C) 1998, 1999 Douglas Gilbert
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  Borrows code from st driver. Thanks to Alessandro Rubini's "dd" book.
 */
static char * sg_version_str = "Version: 2.3.35 (990708)";
static int sg_version_num = 20335; /* 2 digits for each component */

/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *  - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *    the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *    (otherwise the macros compile to empty statements).
 *    Then before running the program to be debugged enter:
 *      # echo "scsi log timeout 7" > /proc/scsi/scsi
 *    This will send copious output to the console and the log which
 *    is usually /var/log/messages. To turn off debugging enter:
 *      # echo "scsi log timeout 0" > /proc/scsi/scsi
 *    The 'timeout' token was chosen because it is relatively unused.
 *    The token 'hlcomplete' should be used but that triggers too
 *    much output from the sd device driver. To dump the current
 *    state of the SCSI mid level data structures enter:
 *      # echo "scsi dump 1" > /proc/scsi/scsi
 *    To dump the state of sg's data structures get the 'sg_debug'
 *    program from the utilities and enter:
 *      # sg_debug /dev/sga
 *    or any valid sg device name. The state of _all_ sg devices
 *    will be sent to the console and the log.
 *
 *  - The 'alt_address' field in the scatter_list structure and the
 *    related 'mem_src' indicate the source of the heap allocation.
 */
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
int sg_big_buff = SG_DEF_RESERVED_SIZE; /* sg_big_buff is ro through sysctl */
/* N.B. This global is here to keep existing software happy. It now holds
   the size of the reserve buffer of the most recent successful sg_open().
   Only available when 'sg' compiled into kernel (rather than a module).
   This is deprecated (use SG_GET_RESERVED_SIZE ioctl() instead). */
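
/* For illustration only: a minimal user-space sketch of the recommended
   SG_GET_RESERVED_SIZE ioctl() mentioned above, instead of reading the
   deprecated sg_big_buff. The device path /dev/sga is an assumption; this
   is not part of the driver and is kept out of the build. */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
    int fd, sz;

    if ((fd = open("/dev/sga", O_RDONLY)) < 0)
        return 1;
    if (ioctl(fd, SG_GET_RESERVED_SIZE, &sz) == 0)
        printf("reserve buffer: %d bytes\n", sz);    /* per-fd reserve size */
    close(fd);
    return 0;
}
#endif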
#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
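/* The mask idiom (n + SG_SECTOR_MSK) & ~SG_SECTOR_MSK rounds n up to the
   next multiple of SG_SECTOR_SZ, e.g. 600 -> (600 + 511) & ~511 = 1024;
   sg_build_scat() below relies on this. */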

#define SG_LOW_POOL_THRESHHOLD 30
#define SG_MAX_POOL_SECTORS 320  /* Max. number of pool sectors to take */

static int sg_pool_secs_avail = SG_MAX_POOL_SECTORS;

/* #define SG_DEBUG */  /* for counting varieties of allocations */

#ifdef SG_DEBUG
static int sg_num_kmal = 0;
static int sg_num_pool = 0;
static int sg_num_page = 0;
#endif

#define SG_HEAP_PAGE 1  /* heap from kernel via get_free_pages() */
#define SG_HEAP_KMAL 2  /* heap from kernel via kmalloc() */
#define SG_HEAP_POOL 3  /* heap from scsi dma pool (mid-level) */

static int sg_init(void);
static int sg_attach(Scsi_Device *);
static void sg_finish(void);
static int sg_detect(Scsi_Device *);
static void sg_detach(Scsi_Device *);

struct Scsi_Device_Template sg_template = {NULL, NULL, "sg", NULL, 0xff,
                                           SCSI_GENERIC_MAJOR, 0, 0, 0, 0,
                                           sg_detect, sg_init,
                                           sg_finish, sg_attach, sg_detach};

typedef struct sg_scatter_hold /* holding area for scsi scatter gather info */
{
    unsigned short use_sg;      /* Number of pieces of scatter-gather */
    unsigned short sglist_len;  /* size of malloc'd scatter-gather list */
    unsigned bufflen;           /* Size of (aggregate) data buffer */
    unsigned b_malloc_len;      /* actual len malloc'ed in buffer */
    void * buffer;              /* Data buffer or scatter list, 12 bytes each */
    char mem_src;               /* heap whereabouts of 'buffer' */
} Sg_scatter_hold;    /* 20 bytes long on i386 */

struct sg_device;               /* forward declarations */
struct sg_fd;

typedef struct sg_request  /* SG_MAX_QUEUE requests outstanding per file */
{
    Scsi_Cmnd * my_cmdp;        /* NULL -> ready to read, else id */
    struct sg_request * nextrp; /* NULL -> tail request (slist) */
    struct sg_fd * parentfp;    /* NULL -> not in use */
    Sg_scatter_hold data;       /* hold buffer, perhaps scatter list */
    struct sg_header header;    /* scsi command+info, see <scsi/sg.h> */
    char res_used;              /* 1 -> using reserve buffer, 0 -> not ... */
} Sg_request;    /* 72 bytes long on i386 */

typedef struct sg_fd  /* holds the state of a file descriptor */
{
    struct sg_fd * nextfp;      /* NULL when last opened fd on this device */
    struct sg_device * parentdp;      /* owning device */
    wait_queue_head_t read_wait;      /* queue read until command done */
    wait_queue_head_t write_wait;     /* write waits on pending read */
    int timeout;                /* defaults to SG_DEFAULT_TIMEOUT */
    Sg_scatter_hold reserve;    /* buffer held for this file descriptor */
    unsigned save_scat_len;     /* original length of trunc. scat. element */
    Sg_request * headrp;        /* head of request slist, NULL->empty */
    struct fasync_struct * async_qp;  /* used by asynchronous notification */
    Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
    char low_dma;               /* as in parent but possibly overridden to 1 */
    char force_packid;          /* 1 -> pack_id input to read(), 0 -> ignored */
    char closed;                /* 1 -> fd closed but request(s) outstanding */
    char my_mem_src;            /* heap whereabouts of this Sg_fd object */
    char cmd_q;                 /* 1 -> allow command queuing, 0 -> don't */
    char underrun_flag;         /* 1 -> flag underruns, 0 -> don't, 2 -> test */
    char next_cmd_len;          /* 0 -> automatic (def), >0 -> use on next write() */
} Sg_fd;    /* 1212 bytes long on i386 */

typedef struct sg_device  /* holds the state of each scsi generic device */
{
    Scsi_Device * device;
    wait_queue_head_t o_excl_wait;    /* queue open() when O_EXCL in use */
    int sg_tablesize;           /* adapter's max scatter-gather table size */
    Sg_fd * headfp;             /* first open fd belonging to this device */
    kdev_t i_rdev;              /* holds device major+minor number */
    char exclude;               /* opened for exclusive access */
    char sgdebug;               /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
    unsigned char merge_fd;     /* 0->sequencing per fd, else fd count */
} Sg_device;    /* 24 bytes long on i386 */

static int sg_fasync(int fd, struct file * filp, int mode);
static void sg_command_done(Scsi_Cmnd * SCpnt);
static int sg_start_req(Sg_request * srp, int max_buff_size,
                        const char * inp, int num_write_xfer);
static void sg_finish_rem_req(Sg_request * srp, char * outp,
                              int num_read_xfer);
static int sg_build_scat(Sg_scatter_hold * schp, int buff_size,
                         const Sg_fd * sfp);
static void sg_write_xfer(Sg_scatter_hold * schp, const char * inp,
                          int num_write_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_read_xfer(Sg_scatter_hold * schp, char * outp,
                         int num_read_xfer);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
                        int * mem_srcp);
static void sg_free(char * buff, int size, int mem_src);
static char * sg_low_malloc(int rqSz, int lowDma, int mem_src,
                            int * retSzp);
static void sg_low_free(char * buff, int size, int mem_src);
static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev, int get_reserved);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static Sg_request * sg_get_request(const Sg_fd * sfp, int pack_id);
static Sg_request * sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, const Sg_request * srp);
static int sg_res_in_use(const Sg_fd * sfp);
static void sg_clr_scpnt(Scsi_Cmnd * SCpnt);
static void sg_shorten_timeout(Scsi_Cmnd * scpnt);
static void sg_debug(const Sg_device * sdp, const Sg_fd * sfp, int part_of);
static void sg_debug_all(const Sg_fd * sfp);

static Sg_device * sg_dev_arr = NULL;
static const int size_sg_header = sizeof(struct sg_header);

static int sg_open(struct inode * inode, struct file * filp)
{
    int dev = MINOR(inode->i_rdev);
    int flags = filp->f_flags;
    Sg_device * sdp;
    Sg_fd * sfp;
    int res;

    if ((NULL == sg_dev_arr) || (dev < 0) || (dev >= sg_template.dev_max))
        return -ENXIO;
    sdp = &sg_dev_arr[dev];
    if ((! sdp->device) || (! sdp->device->host))
        return -ENXIO;
    if (sdp->i_rdev != inode->i_rdev)
        printk("sg_open: inode maj=%d, min=%d sdp maj=%d, min=%d\n",
               MAJOR(inode->i_rdev), MINOR(inode->i_rdev),
               MAJOR(sdp->i_rdev), MINOR(sdp->i_rdev));
    /* If we are in the middle of error recovery, don't let anyone
     * else try and use this device. Also, if error recovery fails, it
     * may try and take the device offline, in which case all further
     * access to the device is prohibited. */
    if(! scsi_block_when_processing_errors(sdp->device))
        return -ENXIO;

    SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));

    if (flags & O_EXCL) {
        if (O_RDONLY == (flags & O_ACCMODE))
            return -EACCES;   /* Can't lock it with read only access */
        if (sdp->headfp && (filp->f_flags & O_NONBLOCK))
            return -EBUSY;
        res = 0;  /* following is a macro that beats race condition */
        __wait_event_interruptible(sdp->o_excl_wait,
               ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)),
                                   res);
        if (res)
            return res; /* -ERESTARTSYS because signal hit process */
    }
    else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
        if (filp->f_flags & O_NONBLOCK)
            return -EBUSY;
        res = 0;  /* following is a macro that beats race condition */
        __wait_event_interruptible(sdp->o_excl_wait, (! sdp->exclude), res);
        if (res)
            return res; /* -ERESTARTSYS because signal hit process */
    }
    if (! sdp->headfp) { /* no existing opens on this device */
        sdp->sgdebug = 0;
        sdp->sg_tablesize = sdp->device->host->sg_tablesize;
        sdp->merge_fd = 0;   /* A little tricky if SG_DEF_MERGE_FD set */
    }
    if ((sfp = sg_add_sfp(sdp, dev, O_RDWR == (flags & O_ACCMODE)))) {
        filp->private_data = sfp;
#if SG_DEF_MERGE_FD
        if (0 == sdp->merge_fd)
            sdp->merge_fd = 1;
#endif
    }
    else {
        if (flags & O_EXCL) sdp->exclude = 0; /* undo if error */
        return -ENOMEM;
    }

    if (sdp->device->host->hostt->module)
        __MOD_INC_USE_COUNT(sdp->device->host->hostt->module);
    if (sg_template.module)
        __MOD_INC_USE_COUNT(sg_template.module);
    return 0;
}

/* Following function was formerly called 'sg_close' */
static int sg_release(struct inode * inode, struct file * filp)
{
    Sg_device * sdp;
    Sg_fd * sfp;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_release: dev=%d\n", MINOR(sdp->i_rdev)));
    sg_fasync(-1, filp, 0);   /* remove filp from async notification list */
    sg_remove_sfp(sdp, sfp);
    if (! sdp->headfp) {
        filp->private_data = NULL;
        sdp->merge_fd = 0;
    }

    if (sdp->device->host->hostt->module)
        __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
    if(sg_template.module)
        __MOD_DEC_USE_COUNT(sg_template.module);
    sdp->exclude = 0;
    wake_up_interruptible(&sdp->o_excl_wait);
    return 0;
}

static ssize_t sg_read(struct file * filp, char * buf,
                       size_t count, loff_t *ppos)
{
    int k, res;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    int req_pack_id = -1;
    struct sg_header * shp = (struct sg_header *)buf;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_read: dev=%d, count=%d\n",
                               MINOR(sdp->i_rdev), (int)count));

    if(! scsi_block_when_processing_errors(sdp->device))
        return -ENXIO;
    if (ppos != &filp->f_pos)
        ; /* FIXME: Hmm. Seek to the right place, or fail? */
    if ((k = verify_area(VERIFY_WRITE, buf, count)))
        return k;
    if (sfp->force_packid && (count >= size_sg_header))
        req_pack_id = shp->pack_id;
    srp = sg_get_request(sfp, req_pack_id);
    if (! srp) { /* now wait on packet to arrive */
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        res = 0;  /* following is a macro that beats race condition */
        __wait_event_interruptible(sfp->read_wait,
                                   (srp = sg_get_request(sfp, req_pack_id)),
                                   res);
        if (res)
            return res; /* -ERESTARTSYS because signal hit process */
    }
    if (2 != sfp->underrun_flag)
        srp->header.pack_len = srp->header.reply_len;   /* Why ????? */

    /* Now copy the result back to the user buffer. */
    if (count >= size_sg_header) {
        __copy_to_user(buf, &srp->header, size_sg_header);
        buf += size_sg_header;
        if (count > srp->header.reply_len)
            count = srp->header.reply_len;
        if (count > size_sg_header) /* release does copy_to_user */
            sg_finish_rem_req(srp, buf, count - size_sg_header);
        else
            sg_finish_rem_req(srp, NULL, 0);
    }
    else {
        count = (srp->header.result == 0) ? 0 : -EIO;
        sg_finish_rem_req(srp, NULL, 0);
    }
    if (! sfp->cmd_q)
        wake_up_interruptible(&sfp->write_wait);
    return count;
}
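
/* Illustrative user-space fragment (assumptions: fd is an open sg device
   and a tagged command was queued earlier): with SG_SET_FORCE_PACK_ID
   enabled, the pack_id placed in the sg_header passed to read() selects
   which queued reply is returned, as implemented above via
   sg_get_request(). Not compiled; helper name is hypothetical. */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int sg_read_reply_42(int fd)
{
    struct sg_header hdr;
    int one = 1;

    ioctl(fd, SG_SET_FORCE_PACK_ID, &one);
    memset(&hdr, 0, sizeof(hdr));
    hdr.pack_id = 42;                    /* only accept the reply tagged 42 */
    return read(fd, &hdr, sizeof(hdr));  /* blocks until that reply is ready */
}
#endif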

static ssize_t sg_write(struct file * filp, const char * buf,
                        size_t count, loff_t *ppos)
{
    unsigned long flags;
    int mxsize, cmd_size, k;
    unsigned char cmnd[MAX_COMMAND_SIZE];
    int input_size;
    unsigned char opcode;
    Scsi_Cmnd * SCpnt;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_write: dev=%d, count=%d\n",
                               MINOR(sdp->i_rdev), (int)count));

    if(! scsi_block_when_processing_errors(sdp->device) )
        return -ENXIO;
    if (ppos != &filp->f_pos)
        ; /* FIXME: Hmm. Seek to the right place, or fail? */

    if ((k = verify_area(VERIFY_READ, buf, count)))
        return k;  /* protects following copy_from_user()s + get_user()s */
    if (count < (size_sg_header + 6))
        return -EIO;   /* The minimum scsi command length is 6 bytes. */

    if (! (srp = sg_add_request(sfp))) {
        if (sfp->cmd_q) {
            SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
            return -EDOM;
        }
        else { /* old semantics: wait for pending read() to finish */
            if (filp->f_flags & O_NONBLOCK)
                return -EAGAIN;
            k = 0;  /* following is a macro that beats race condition */
            __wait_event_interruptible(sfp->write_wait,
                                       (srp = sg_add_request(sfp)),
                                       k);
            if (k)
                return k; /* -ERESTARTSYS because signal hit process */
        }
    }
    __copy_from_user(&srp->header, buf, size_sg_header);
    buf += size_sg_header;
    srp->header.pack_len = count;
    __get_user(opcode, buf);
    if (sfp->next_cmd_len > 0) {
        if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
            SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
            sfp->next_cmd_len = 0;
            return -EIO;
        }
        cmd_size = sfp->next_cmd_len;
        sfp->next_cmd_len = 0;  /* reset so only this write() affected */
    }
    else {
        cmd_size = COMMAND_SIZE(opcode);  /* based on SCSI command group */
        if ((opcode >= 0xc0) && srp->header.twelve_byte)
            cmd_size = 12;
    }
    SCSI_LOG_TIMEOUT(4, printk("sg_write:   scsi opcode=0x%02x, cmd_size=%d\n",
                               (int)opcode, cmd_size));
    /* Determine buffer size. */
    input_size = count - cmd_size;
    mxsize = (input_size > srp->header.reply_len) ? input_size :
                                                    srp->header.reply_len;
    mxsize -= size_sg_header;
    input_size -= size_sg_header;
    if (input_size < 0) {
        sg_remove_request(sfp, srp);
        return -EIO; /* User did not pass enough bytes for this command. */
    }
    if ((k = sg_start_req(srp, mxsize, buf + cmd_size, input_size))) {
        SCSI_LOG_TIMEOUT(1, printk("sg_write: build err=%d\n", k));
        sg_finish_rem_req(srp, NULL, 0);
        return k;    /* probably out of space --> ENOMEM */
    }
/*  SCSI_LOG_TIMEOUT(7, printk("sg_write:   allocating device\n")); */
    if (! (SCpnt = scsi_allocate_device(NULL, sdp->device,
                                        !(filp->f_flags & O_NONBLOCK)))) {
        sg_finish_rem_req(srp, NULL, 0);
        return -EAGAIN;  /* No available command blocks at the moment */
    }
/*  SCSI_LOG_TIMEOUT(7, printk("sg_write:   device allocated\n")); */
    srp->my_cmdp = SCpnt;
    SCpnt->request.rq_dev = sdp->i_rdev;
    SCpnt->request.rq_status = RQ_ACTIVE;
    SCpnt->sense_buffer[0] = 0;
    SCpnt->cmd_len = cmd_size;
    __copy_from_user(cmnd, buf, cmd_size);
    /* Set the LUN field in the command structure, overriding user input */
    cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5);

/*  SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */
    spin_lock_irqsave(&io_request_lock, flags);
    SCpnt->use_sg = srp->data.use_sg;
    SCpnt->sglist_len = srp->data.sglist_len;
    SCpnt->bufflen = srp->data.bufflen;
    if (1 == sfp->underrun_flag)
        SCpnt->underflow = srp->data.bufflen;
    else
        SCpnt->underflow = 0;
    SCpnt->buffer = srp->data.buffer;
    srp->data.use_sg = 0;
    srp->data.sglist_len = 0;
    srp->data.bufflen = 0;
    srp->data.buffer = NULL;
    /* Now send everything off to the mid-level. The next time we hear
       about this packet is when sg_command_done() is called (i.e. a
       callback). */
    scsi_do_cmd(SCpnt, (void *)cmnd,
                (void *)SCpnt->buffer, mxsize,
                sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES);
    /* 'mxsize' overwrites SCpnt->bufflen, hence need for b_malloc_len */
    spin_unlock_irqrestore(&io_request_lock, flags);
/*  SCSI_LOG_TIMEOUT(6, printk("sg_write: sent scsi cmd to mid-level\n")); */
    return count;
}
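
/* For illustration only: the classic write/read cycle this function
   implements. A user process writes one packet (struct sg_header, then the
   SCSI command, then any data-out) and reads the reply back as an sg_header
   plus data-in. This sketch issues a 6-byte INQUIRY; the fd, sizes and
   helper name are assumptions, and error handling is omitted. */
#if 0
#include <string.h>
#include <unistd.h>
#include <scsi/sg.h>

static int sg_inquiry(int fd)
{
    unsigned char inq[6] = {0x12, 0, 0, 0, 96, 0};  /* INQUIRY, 96 bytes in */
    unsigned char wbuf[sizeof(struct sg_header) + 6];
    unsigned char rbuf[sizeof(struct sg_header) + 96];
    struct sg_header * hp = (struct sg_header *)wbuf;

    memset(wbuf, 0, sizeof(wbuf));
    hp->reply_len = sizeof(rbuf);     /* sg_header plus expected data-in */
    memcpy(wbuf + sizeof(struct sg_header), inq, sizeof(inq));
    if (write(fd, wbuf, sizeof(wbuf)) < 0)   /* queues the command */
        return -1;
    return read(fd, rbuf, sizeof(rbuf));     /* blocks until it completes */
}
#endif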

static int sg_ioctl(struct inode * inode, struct file * filp,
                    unsigned int cmd_in, unsigned long arg)
{
    int result, val;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: dev=%d, cmd=0x%x\n",
                               MINOR(sdp->i_rdev), (int)cmd_in));
    if(! scsi_block_when_processing_errors(sdp->device) )
        return -ENXIO;

    switch(cmd_in)
    {
    case SG_SET_TIMEOUT:
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (val < 0)
            return -EIO;
        sfp->timeout = val;
        return 0;
    case SG_GET_TIMEOUT:  /* N.B. User receives timeout as return value */
        return sfp->timeout; /* strange ..., for backward compatibility */
    case SG_SET_FORCE_LOW_DMA:
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (val) {
            sfp->low_dma = 1;
            if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
                val = (int)sfp->reserve.bufflen;
                sg_remove_scat(&sfp->reserve);
                sg_build_reserve(sfp, val);
            }
        }
        else
            sfp->low_dma = sdp->device->host->unchecked_isa_dma;
        return 0;
    case SG_GET_LOW_DMA:
        return put_user((int)sfp->low_dma, (int *)arg);
    case SG_GET_SCSI_ID:
        result = verify_area(VERIFY_WRITE, (void *)arg, sizeof(Sg_scsi_id));
        if (result) return result;
        else {
            Sg_scsi_id * sg_idp = (Sg_scsi_id *)arg;
            __put_user((int)sdp->device->host->host_no, &sg_idp->host_no);
            __put_user((int)sdp->device->channel, &sg_idp->channel);
            __put_user((int)sdp->device->id, &sg_idp->scsi_id);
            __put_user((int)sdp->device->lun, &sg_idp->lun);
            __put_user((int)sdp->device->type, &sg_idp->scsi_type);
            __put_user((short)sdp->device->host->cmd_per_lun,
                       &sg_idp->h_cmd_per_lun);
            __put_user((short)sdp->device->queue_depth,
                       &sg_idp->d_queue_depth);
            __put_user(0, &sg_idp->unused1);
            __put_user(0, &sg_idp->unused2);
            return 0;
        }
    case SG_SET_FORCE_PACK_ID:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->force_packid = val ? 1 : 0;
        return 0;
    case SG_GET_PACK_ID:
        result = verify_area(VERIFY_WRITE, (void *) arg, sizeof(int));
        if (result) return result;
        srp = sfp->headrp;
        while (srp) {
            if (! srp->my_cmdp) {
                __put_user(srp->header.pack_id, (int *)arg);
                return 0;
            }
            srp = srp->nextrp;
        }
        __put_user(-1, (int *)arg);
        return 0;
    case SG_GET_NUM_WAITING:
        srp = sfp->headrp;
        val = 0;
        while (srp) {
            if (! srp->my_cmdp)
                ++val;
            srp = srp->nextrp;
        }
        return put_user(val, (int *)arg);
    case SG_GET_SG_TABLESIZE:
        return put_user(sdp->sg_tablesize, (int *)arg);
    case SG_SET_RESERVED_SIZE:
        if (O_RDWR != (filp->f_flags & O_ACCMODE))
            return -EACCES;
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (val != sfp->reserve.bufflen) {
            if (sg_res_in_use(sfp))
                return -EBUSY;
            sg_remove_scat(&sfp->reserve);
            sg_build_reserve(sfp, val);
        }
        return 0;
    case SG_GET_RESERVED_SIZE:
        val = (int)sfp->reserve.bufflen;
        return put_user(val, (int *)arg);
    case SG_GET_MERGE_FD:
        return put_user((int)sdp->merge_fd, (int *)arg);
    case SG_SET_MERGE_FD:
        if (O_RDWR != (filp->f_flags & O_ACCMODE))
            return -EACCES; /* require write access since effect wider
                               than just this fd */
        result = get_user(val, (int *)arg);
        if (result) return result;
        val = val ? 1 : 0;
        if ((val ^ (0 != sdp->merge_fd)) &&
            sdp->headfp && sdp->headfp->nextfp)
            return -EBUSY;  /* too much work if multiple fds already */
        sdp->merge_fd = val;
        return 0;
    case SG_SET_COMMAND_Q:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->cmd_q = val ? 1 : 0;
        return 0;
    case SG_GET_COMMAND_Q:
        return put_user((int)sfp->cmd_q, (int *)arg);
    case SG_SET_UNDERRUN_FLAG:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->underrun_flag = val;
        return 0;
    case SG_GET_UNDERRUN_FLAG:
        return put_user((int)sfp->underrun_flag, (int *)arg);
    case SG_NEXT_CMD_LEN:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->next_cmd_len = (val > 0) ? val : 0;
        return 0;
    case SG_GET_VERSION_NUM:
        return put_user(sg_version_num, (int *)arg);
    case SG_EMULATED_HOST:
        return put_user(sdp->device->host->hostt->emulated, (int *)arg);
    case SG_SCSI_RESET:
        if (! scsi_block_when_processing_errors(sdp->device))
            return -EBUSY;
        result = get_user(val, (int *)arg);
        if (result) return result;
        /* Don't do anything till scsi mid level visibility */
        return 0;
    case SCSI_IOCTL_SEND_COMMAND:
        /* Allow SCSI_IOCTL_SEND_COMMAND without checking suser() since the
           user already has read/write access to the generic device and so
           can execute arbitrary SCSI commands. */
        if (O_RDWR != (filp->f_flags & O_ACCMODE))
            return -EACCES; /* very dangerous things can be done here */
        return scsi_ioctl_send_command(sdp->device, (void *)arg);
    case SG_SET_DEBUG:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sdp->sgdebug = (char)val;
        if (9 == sdp->sgdebug)
            sg_debug(sdp, sfp, 0);
        else if (sdp->sgdebug > 9)
            sg_debug_all(sfp);
        return 0;
    case SCSI_IOCTL_GET_IDLUN:
    case SCSI_IOCTL_GET_BUS_NUMBER:
    case SCSI_IOCTL_PROBE_HOST:
    case SG_GET_TRANSFORM:
        return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
    default:
        if (O_RDWR != (filp->f_flags & O_ACCMODE))
            return -EACCES; /* don't know so take safe approach */
        return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
    }
}
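
/* Illustrative sketch of the per-fd queuing controls handled above
   (assumptions: fd is an open sg device, helper name hypothetical).
   SG_SET_COMMAND_Q permits more than one outstanding write();
   SG_GET_NUM_WAITING counts replies read() can fetch without blocking. */
#if 0
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int sg_enable_queuing(int fd)
{
    int on = 1, waiting;

    if (ioctl(fd, SG_SET_COMMAND_Q, &on) < 0)
        return -1;
    /* ... after several write()s ... */
    if (ioctl(fd, SG_GET_NUM_WAITING, &waiting) < 0)
        return -1;
    return waiting;    /* completed packets awaiting read() */
}
#endif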

static unsigned int sg_poll(struct file * filp, poll_table * wait)
{
    unsigned int res = 0;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    int count = 0;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return POLLERR;
    poll_wait(filp, &sfp->read_wait, wait);
    srp = sfp->headrp;
    while (srp) {   /* if any read waiting, flag it */
        if (! (res || srp->my_cmdp))
            res = POLLIN | POLLRDNORM;
        ++count;
        srp = srp->nextrp;
    }
    if (0 == sfp->cmd_q) {
        if (0 == count)
            res |= POLLOUT | POLLWRNORM;
    }
    else if (count < SG_MAX_QUEUE)
        res |= POLLOUT | POLLWRNORM;
    SCSI_LOG_TIMEOUT(3, printk("sg_poll: dev=%d, res=0x%x\n",
                               MINOR(sdp->i_rdev), (int)res));
    return res;
}
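
/* Illustrative user-space pairing for the poll support above: POLLIN means
   a reply is ready to read(), POLLOUT that another command can be written.
   The fd and helper name are assumptions; not compiled into the driver. */
#if 0
#include <poll.h>

static int sg_wait_readable(int fd)
{
    struct pollfd pfd = { fd, POLLIN | POLLOUT, 0 };

    if (poll(&pfd, 1, -1) < 0)
        return -1;
    return (pfd.revents & POLLIN) ? 1 : 0;  /* 1 -> read() will not block */
}
#endif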

static int sg_fasync(int fd, struct file * filp, int mode)
{
    int retval;
    Sg_device * sdp;
    Sg_fd * sfp;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_fasync: dev=%d, mode=%d\n",
                               MINOR(sdp->i_rdev), mode));

    retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
    return (retval < 0) ? retval : 0;
}
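
/* Illustrative sketch of arming the asynchronous notification wired up
   here: after F_SETOWN plus FASYNC the process receives SIGPOLL when
   sg_command_done() calls kill_fasync(). The fd and helper name are
   assumptions; user-space code, not compiled into the driver. */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void sg_enable_sigpoll(int fd)
{
    fcntl(fd, F_SETOWN, getpid());                    /* route signal here */
    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);  /* invokes sg_fasync() */
}
#endif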

/* This function is called by the interrupt handler when we
 * actually have a command that is complete. */
static void sg_command_done(Scsi_Cmnd * SCpnt)
{
    int dev = MINOR(SCpnt->request.rq_dev);
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp = NULL;
    int closed = 0;
    static const int min_sb_len =
                SG_MAX_SENSE > sizeof(SCpnt->sense_buffer) ?
                sizeof(SCpnt->sense_buffer) : SG_MAX_SENSE;

    if ((NULL == sg_dev_arr) || (dev < 0) || (dev >= sg_template.dev_max)) {
        SCSI_LOG_TIMEOUT(1, printk("sg__done: bad args dev=%d\n", dev));
        scsi_release_command(SCpnt);
        SCpnt = NULL;
        return;
    }
    sdp = &sg_dev_arr[dev];
    if (NULL == sdp->device)
        return; /* Get out of here quick ... */

    sfp = sdp->headfp;
    while (sfp) {
        srp = sfp->headrp;
        while (srp) {
            if (SCpnt == srp->my_cmdp)
                break;
            srp = srp->nextrp;
        }
        if (srp)
            break;
        sfp = sfp->nextfp;
    }
    if (! srp) {
        SCSI_LOG_TIMEOUT(1, printk("sg__done: req missing, dev=%d\n", dev));
        scsi_release_command(SCpnt);
        SCpnt = NULL;
        return;
    }
    /* First transfer ownership of data buffers to sg_device object. */
    srp->data.use_sg = SCpnt->use_sg;
    srp->data.sglist_len = SCpnt->sglist_len;
    srp->data.bufflen = SCpnt->bufflen;
    srp->data.buffer = SCpnt->buffer;
    if (2 == sfp->underrun_flag)
        srp->header.pack_len = SCpnt->underflow;
    sg_clr_scpnt(SCpnt);
    srp->my_cmdp = NULL;

    SCSI_LOG_TIMEOUT(4, printk("sg__done: dev=%d, scsi_stat=%d, res=0x%x\n",
                    dev, (int)status_byte(SCpnt->result), (int)SCpnt->result));
    memcpy(srp->header.sense_buffer, SCpnt->sense_buffer, min_sb_len);
    switch (host_byte(SCpnt->result))
    { /* This setup of 'result' is for backward compatibility and is best
         ignored by the user who should use target, host + driver status */
    case DID_OK:
    case DID_PASSTHROUGH:
    case DID_SOFT_ERROR:
        srp->header.result = 0;
        break;
    case DID_NO_CONNECT:
    case DID_BUS_BUSY:
    case DID_TIME_OUT:
        srp->header.result = EBUSY;
        break;
    case DID_BAD_TARGET:
    case DID_ABORT:
    case DID_PARITY:
    case DID_RESET:
    case DID_BAD_INTR:
        srp->header.result = EIO;
        break;
    case DID_ERROR:
        if (SCpnt->sense_buffer[0] == 0 &&
            status_byte(SCpnt->result) == GOOD)
            srp->header.result = 0;
        else
            srp->header.result = EIO;
        break;
    default:
        SCSI_LOG_TIMEOUT(1, printk(
                "sg: unexpected host_byte=%d, dev=%d in 'done'\n",
                host_byte(SCpnt->result), dev));
        srp->header.result = EIO;
        break;
    }

    /* Following if statement is a patch supplied by Eric Youngdale */
    if (driver_byte(SCpnt->result) != 0
        && (SCpnt->sense_buffer[0] & 0x7f) == 0x70
        && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION
        && sdp->device->removable) {
        /* Detected disc change. Set the bit - this may be used if there are */
        /* filesystems using this device. */
        sdp->device->changed = 1;
    }
    srp->header.target_status = status_byte(SCpnt->result);
    if ((sdp->sgdebug > 0) &&
        ((CHECK_CONDITION == srp->header.target_status) ||
         (COMMAND_TERMINATED == srp->header.target_status)))
        print_sense("sg_command_done", SCpnt);
    srp->header.host_status = host_byte(SCpnt->result);
    srp->header.driver_status = driver_byte(SCpnt->result);

    scsi_release_command(SCpnt);
    SCpnt = NULL;
    if (sfp->closed) { /* whoops this fd already released, cleanup */
        closed = 1;
        SCSI_LOG_TIMEOUT(1,
               printk("sg__done: already closed, freeing ...\n"));
        /* should check if module is unloaded <<<<<<< */
        sg_finish_rem_req(srp, NULL, 0);
        if (NULL == sfp->headrp) {
            SCSI_LOG_TIMEOUT(1,
                printk("sg__done: already closed, final cleanup\n"));
            sg_remove_sfp(sdp, sfp);
        }
    }
    /* Now wake up any sg_read() that is waiting for this packet. */
    wake_up_interruptible(&sfp->read_wait);
    if ((sfp->async_qp) && (! closed))
        kill_fasync(sfp->async_qp, SIGPOLL, POLL_IN);
}

static void sg_debug_all(const Sg_fd * sfp)
{
    const Sg_device * sdp = sg_dev_arr;
    int k;

    if (NULL == sg_dev_arr) {
        printk("sg_debug_all: sg_dev_arr NULL, death is imminent\n");
        return;
    }
    if (! sfp)
        printk("sg_debug_all: sfp (file descriptor pointer) NULL\n");

    printk("sg_debug_all: dev_max=%d, %s\n",
           sg_template.dev_max, sg_version_str);
    printk(" scsi_dma_free_sectors=%u, sg_pool_secs_avail=%d\n",
           scsi_dma_free_sectors, sg_pool_secs_avail);
    printk(" sg_big_buff=%d\n", sg_big_buff);
#ifdef SG_DEBUG
    printk(" malloc counts, kmallocs=%d, dma_pool=%d, pages=%d\n",
           sg_num_kmal, sg_num_pool, sg_num_page);
#endif
    for (k = 0; k < sg_template.dev_max; ++k, ++sdp) {
        if (sdp->headfp) {
            if (! sfp)
                sfp = sdp->headfp;   /* just to keep things going */
            else if (sdp == sfp->parentdp)
                printk(" ***** Invoking device follows *****\n");
            sg_debug(sdp, sfp, 1);
        }
    }
}

static void sg_debug(const Sg_device * sdp, const Sg_fd * sfp, int part_of)
{
    Sg_fd * fp;
    Sg_request * srp;
    int dev;
    int k;

    if (! sfp)
        printk("sg_debug: sfp (file descriptor pointer) NULL\n");
    if (! sdp) {
        printk("sg_debug: sdp pointer (to device) NULL\n");
        return;
    }
    else if (! sdp->device) {
        printk("sg_debug: device detached ??\n");
        return;
    }
    dev = MINOR(sdp->i_rdev);

    if (part_of)
        printk(" >>> device=%d(sg%c), ", dev, 'a' + dev);
    else
        printk("sg_debug: device=%d(sg%c), ", dev, 'a' + dev);
    printk("scsi%d chan=%d id=%d lun=%d em=%d\n", sdp->device->host->host_no,
           sdp->device->channel, sdp->device->id, sdp->device->lun,
           sdp->device->host->hostt->emulated);
    printk(" sg_tablesize=%d, excl=%d, sgdebug=%d, merge_fd=%d\n",
           sdp->sg_tablesize, sdp->exclude, sdp->sgdebug, sdp->merge_fd);
    if (! part_of) {
        printk(" scsi_dma_free_sectors=%u, sg_pool_secs_avail=%d\n",
               scsi_dma_free_sectors, sg_pool_secs_avail);
#ifdef SG_DEBUG
        printk(" mallocs: kmallocs=%d, dma_pool=%d, pages=%d\n",
               sg_num_kmal, sg_num_pool, sg_num_page);
#endif
    }

    fp = sdp->headfp;
    for (k = 1; fp; fp = fp->nextfp, ++k) {
        if (sfp == fp)
            printk("  *** Following data belongs to invoking FD ***\n");
        else if (! fp->parentdp)
            printk(">> Following FD has NULL parent pointer ???\n");
        printk("   FD(%d): timeout=%d, bufflen=%d, use_sg=%d\n",
               k, fp->timeout, fp->reserve.bufflen, (int)fp->reserve.use_sg);
        printk("   low_dma=%d, cmd_q=%d, s_sc_len=%d, f_packid=%d\n",
               (int)fp->low_dma, (int)fp->cmd_q, (int)fp->save_scat_len,
               (int)fp->force_packid);
        printk("   urun_flag=%d, next_cmd_len=%d, closed=%d\n",
               (int)fp->underrun_flag, (int)fp->next_cmd_len,
               (int)fp->closed);
        srp = fp->headrp;
        if (NULL == srp)
            printk("     No requests active\n");
        while (srp) {
            if (srp->res_used)
                printk("reserved buff >> ");
            else
                printk("     ");
            if (srp->my_cmdp)
                printk("written: pack_id=%d, bufflen=%d, use_sg=%d\n",
                       srp->header.pack_id, srp->my_cmdp->bufflen,
                       srp->my_cmdp->use_sg);
            else
                printk("to_read: pack_id=%d, bufflen=%d, use_sg=%d\n",
                       srp->header.pack_id, srp->data.bufflen,
                       srp->data.use_sg);
            if (! srp->parentfp)
                printk(">> request has NULL parent pointer ???\n");
            srp = srp->nextrp;
        }
    }
}

static struct file_operations sg_fops = {
    NULL,            /* lseek */
    sg_read,         /* read */
    sg_write,        /* write */
    NULL,            /* readdir */
    sg_poll,         /* poll */
    sg_ioctl,        /* ioctl */
    NULL,            /* mmap */
    sg_open,         /* open */
    NULL,            /* flush */
    sg_release,      /* release, was formerly sg_close */
    NULL,            /* fsync */
    sg_fasync,       /* fasync */
    NULL,            /* check_media_change */
    NULL,            /* revalidate */
    NULL,            /* lock */
};

static int sg_detect(Scsi_Device * scsidp)
{
    switch (scsidp->type) {
        case TYPE_DISK:
        case TYPE_MOD:
        case TYPE_ROM:
        case TYPE_WORM:
        case TYPE_TAPE: break;
        default:
            printk("Detected scsi generic sg%c at scsi%d,"
                   " channel %d, id %d, lun %d\n",
                   'a'+sg_template.dev_noticed,
                   scsidp->host->host_no, scsidp->channel,
                   scsidp->id, scsidp->lun);
    }
    sg_template.dev_noticed++;
    return 1;
}

/* Driver initialization */
static int sg_init()
{
    static int sg_registered = 0;

    if (sg_template.dev_noticed == 0) return 0;

    if(!sg_registered) {
        if (register_chrdev(SCSI_GENERIC_MAJOR,"sg",&sg_fops))
        {
            printk("Unable to get major %d for generic SCSI device\n",
                   SCSI_GENERIC_MAJOR);
            return 1;
        }
        sg_registered++;
    }

    /* If we have already been through here, return */
    if(sg_dev_arr) return 0;

    SCSI_LOG_TIMEOUT(3, printk("sg_init\n"));
    sg_dev_arr = (Sg_device *)
        scsi_init_malloc((sg_template.dev_noticed + SG_EXTRA_DEVS)
                         * sizeof(Sg_device), GFP_ATOMIC);
    if (NULL == sg_dev_arr) {
        printk("sg_init: no space for sg_dev_arr\n");
        return 1;
    }
    sg_template.dev_max = sg_template.dev_noticed + SG_EXTRA_DEVS;
    return 0;
}

static int sg_attach(Scsi_Device * scsidp)
{
    Sg_device * sdp = sg_dev_arr;
    int k;

    if ((sg_template.nr_dev >= sg_template.dev_max) || (! sdp))
    {
        scsidp->attached--;
        return 1;
    }

    for(k = 0; k < sg_template.dev_max; k++, sdp++)
        if(! sdp->device) break;

    if(k >= sg_template.dev_max) panic ("scsi_devices corrupt (sg)");

    SCSI_LOG_TIMEOUT(3, printk("sg_attach: dev=%d \n", k));
    sdp->device = scsidp;
    init_waitqueue_head(&sdp->o_excl_wait);
    sdp->headfp = NULL;
    sdp->exclude = 0;
    sdp->merge_fd = 0;   /* Cope with SG_DEF_MERGE_FD on open */
    sdp->sgdebug = 0;
    sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;
    sdp->i_rdev = MKDEV(SCSI_GENERIC_MAJOR, k);
    sg_template.nr_dev++;
    return 0;
}

/* Called at 'finish' of init process, after all attaches */
static void sg_finish(void)
{
    SCSI_LOG_TIMEOUT(3, printk("sg_finish: dma_free_sectors=%u\n",
                               scsi_dma_free_sectors));
}

static void sg_detach(Scsi_Device * scsidp)
{
    Sg_device * sdp = sg_dev_arr;
    unsigned long flags = 0;
    Sg_fd * sfp;
    Sg_request * srp;
    int k;

    if (NULL == sdp) return; /* all is not well ... */
    for (k = 0; k < sg_template.dev_max; k++, sdp++) {
        if(sdp->device != scsidp)
            continue;   /* dirty but lowers nesting */
        if (sdp->headfp) {
/* Need to stop sg_command_done() playing with this list during this loop */
            spin_lock_irqsave(&io_request_lock, flags);
            sfp = sdp->headfp;
            while (sfp) {
                srp = sfp->headrp;
                while (srp) {
                    if (srp->my_cmdp)
                        sg_shorten_timeout(srp->my_cmdp);
                    srp = srp->nextrp;
                }
                sfp = sfp->nextfp;
            }
            spin_unlock_irqrestore(&io_request_lock, flags);
            SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty, sleep(3)\n",
                                       k));
            scsi_sleep(3); /* sleep 3 jiffies, hoping for timeout to go off */
        }
        else {
            SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
            sdp->device = NULL;
        }
        scsidp->attached--;
        sg_template.nr_dev--;
/* avoid associated device /dev/sg? being incremented
 * each time module is inserted/removed , <dan@lectra.fr> */
        sg_template.dev_noticed--;
        return;
    }
    return;
}

#ifdef MODULE

int init_module(void)
{
    sg_template.module = &__this_module;
    return scsi_register_module(MODULE_SCSI_DEV, &sg_template);
}

void cleanup_module(void)
{
    scsi_unregister_module(MODULE_SCSI_DEV, &sg_template);
    unregister_chrdev(SCSI_GENERIC_MAJOR, "sg");

    if(sg_dev_arr != NULL) {
        /* Really worrying situation of writes still pending and get here */
        /* Strategy: shorten timeout on release + wait on detach ... */
        scsi_init_free((char *) sg_dev_arr,
                       (sg_template.dev_noticed + SG_EXTRA_DEVS)
                       * sizeof(Sg_device));
        sg_dev_arr = NULL;
    }
    sg_template.dev_max = 0;
}
#endif /* MODULE */

#if 0
extern void scsi_times_out (Scsi_Cmnd * SCpnt);
extern void scsi_old_times_out (Scsi_Cmnd * SCpnt);
#endif

/* Can't see clean way to abort a command so shorten timeout to 1 jiffy */
static void sg_shorten_timeout(Scsi_Cmnd * scpnt)
{
#if 0 /* scsi_syms.c is very miserly about exported functions */
    scsi_delete_timer(scpnt);
    if (! scpnt)
        return;
    scpnt->timeout_per_command = 1; /* try 1 jiffy (perhaps 0 jiffies) */
    if (scpnt->host->hostt->use_new_eh_code)
        scsi_add_timer(scpnt, scpnt->timeout_per_command, scsi_times_out);
    else
        scsi_add_timer(scpnt, scpnt->timeout_per_command,
                       scsi_old_times_out);
#else
    scsi_sleep(HZ); /* just sleep 1 second and hope ... */
#endif
}

static int sg_start_req(Sg_request * srp, int max_buff_size,
                        const char * inp, int num_write_xfer)
{
    int res;
    Sg_fd * sfp = srp->parentfp;
    Sg_scatter_hold * req_schp = &srp->data;
    Sg_scatter_hold * rsv_schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_start_req: max_buff_size=%d\n",
                               max_buff_size));
    if ((! sg_res_in_use(sfp)) && (max_buff_size <= rsv_schp->bufflen)) {
        sg_link_reserve(sfp, srp, max_buff_size);
        sg_write_xfer(req_schp, inp, num_write_xfer);
    }
    else {
        res = sg_build_scat(req_schp, max_buff_size, sfp);
        if (res) {
            sg_remove_scat(req_schp);
            return res;
        }
        sg_write_xfer(req_schp, inp, num_write_xfer);
    }
    return 0;
}

static void sg_finish_rem_req(Sg_request * srp, char * outp,
                              int num_read_xfer)
{
    Sg_fd * sfp = srp->parentfp;
    Sg_scatter_hold * req_schp = &srp->data;

    SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n",
                               (int)srp->res_used));
    if (num_read_xfer > 0)
        sg_read_xfer(req_schp, outp, num_read_xfer);
    if (srp->res_used)
        sg_unlink_reserve(sfp, srp);
    else
        sg_remove_scat(req_schp);
    sg_remove_request(sfp, srp);
}

static int sg_build_scat(Sg_scatter_hold * schp, int buff_size,
                         const Sg_fd * sfp)
{
    int ret_sz, mem_src;
    int blk_size = buff_size;
    char * p = NULL;

    if ((blk_size < 0) || (! sfp))
        return -EFAULT;
    if (0 == blk_size)
        ++blk_size;    /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
    blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
    SCSI_LOG_TIMEOUT(4, printk("sg_build_scat: buff_size=%d, blk_size=%d\n",
                               buff_size, blk_size));
    if (blk_size <= SG_SCATTER_SZ) {
        mem_src = SG_HEAP_PAGE;
        p = sg_malloc(sfp, blk_size, &ret_sz, &mem_src);
        if (! p)
            return -ENOMEM;
        if (blk_size == ret_sz) { /* got it on the first attempt */
            schp->use_sg = 0;
            schp->buffer = p;
            schp->bufflen = blk_size;
            schp->mem_src = mem_src;
            schp->b_malloc_len = blk_size;
            return 0;
        }
    }
    else {
        mem_src = SG_HEAP_PAGE;
        p = sg_malloc(sfp, SG_SCATTER_SZ, &ret_sz, &mem_src);
        if (! p)
            return -ENOMEM;
    }
/* Want some local declarations, so start new block ... */
    {   /* let's try and build a scatter gather list */
        struct scatterlist * sclp;
        int k, rem_sz, num, nxt;
        int sc_bufflen = PAGE_SIZE;
        int mx_sc_elems = (sc_bufflen / sizeof(struct scatterlist)) - 1;
        int sg_tablesize = sfp->parentdp->sg_tablesize;
        int first = 1;

        k = SG_HEAP_KMAL;  /* want to protect mem_src, use k as scratch */
        schp->buffer = (struct scatterlist *)sg_malloc(sfp,
                            sc_bufflen, &num, &k);
        schp->mem_src = (char)k;
/* N.B. ret_sz and mem_src carried into this block ... */
        if (! schp->buffer)
            return -ENOMEM;
        else if (num != sc_bufflen) {
            sc_bufflen = num;
            mx_sc_elems = (sc_bufflen / sizeof(struct scatterlist)) - 1;
        }
        schp->sglist_len = sc_bufflen;
        memset(schp->buffer, 0, sc_bufflen);
        for (k = 0, sclp = schp->buffer, rem_sz = blk_size, nxt = 0;
             (k < sg_tablesize) && (rem_sz > 0) && (k < mx_sc_elems);
             ++k, rem_sz -= ret_sz, ++sclp) {
            if (first)
                first = 0;
            else {
                num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
                mem_src = SG_HEAP_PAGE;
                p = sg_malloc(sfp, num, &ret_sz, &mem_src);
                if (! p)
                    break;
            }
            sclp->address = p;
            sclp->length = ret_sz;
            sclp->alt_address = (char *)(long)mem_src;

            SCSI_LOG_TIMEOUT(5,
                printk("sg_build_scat: k=%d, a=0x%p, len=%d, ms=%d\n",
                       k, sclp->address, ret_sz, mem_src));
        } /* end of for loop */
        schp->use_sg = k;
        SCSI_LOG_TIMEOUT(5,
            printk("sg_build_scat: use_sg=%d, rem_sz=%d\n", k, rem_sz));
        schp->bufflen = blk_size;
        if (rem_sz > 0) /* must have failed */
            return -ENOMEM;
    }
    return 0;
}

static void sg_write_xfer(Sg_scatter_hold * schp, const char * inp,
                          int num_write_xfer)
{
    SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_write_xfer=%d, use_sg=%d\n",
                               num_write_xfer, schp->use_sg));
    if ((! inp) || (num_write_xfer <= 0))
        return;
    if (schp->use_sg > 0) {
        int k, num;
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;

        for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) {
            num = (int)sclp->length;
            if (num > num_write_xfer) {
                __copy_from_user(sclp->address, inp, num_write_xfer);
                break;
            }
            else {
                __copy_from_user(sclp->address, inp, num);
                num_write_xfer -= num;
                if (num_write_xfer <= 0)
                    break;
                inp += num;
            }
        }
    }
    else
        __copy_from_user(schp->buffer, inp, num_write_xfer);
}

static void sg_remove_scat(Sg_scatter_hold * schp)
{
    SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: use_sg=%d\n", schp->use_sg));
    if(schp->use_sg > 0) {
        int k, mem_src;
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;

        for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) {
            mem_src = (int)(long)sclp->alt_address;
            SCSI_LOG_TIMEOUT(5,
                printk("sg_remove_scat: k=%d, a=0x%p, len=%d, ms=%d\n",
                       k, sclp->address, sclp->length, mem_src));
            sg_free(sclp->address, sclp->length, mem_src);
            sclp->address = NULL;
            sclp->length = 0;
        }
        sg_free(schp->buffer, schp->sglist_len, schp->mem_src);
    }
    else if (schp->buffer)
        sg_free(schp->buffer, schp->b_malloc_len, schp->mem_src);
    schp->buffer = NULL;
    schp->bufflen = 0;
    schp->use_sg = 0;
    schp->sglist_len = 0;
}

static void sg_read_xfer(Sg_scatter_hold * schp, char * outp,
                         int num_read_xfer)
{
    SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_read_xfer=%d\n",
                               num_read_xfer));
    if ((! outp) || (num_read_xfer <= 0))
        return;
    if(schp->use_sg > 0) {
        int k, num;
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;

        for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) {
            num = (int)sclp->length;
            if (num > num_read_xfer) {
                __copy_to_user(outp, sclp->address, num_read_xfer);
                break;
            }
            else {
                __copy_to_user(outp, sclp->address, num);
                num_read_xfer -= num;
                if (num_read_xfer <= 0)
                    break;
                outp += num;
            }
        }
    }
    else
        __copy_to_user(outp, schp->buffer, num_read_xfer);
}

static void sg_build_reserve(Sg_fd * sfp, int req_size)
{
    Sg_scatter_hold * schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
    do {
        if (req_size < PAGE_SIZE)
            req_size = PAGE_SIZE;
        if (0 == sg_build_scat(schp, req_size, sfp))
            return;
        else
            sg_remove_scat(schp);
        req_size >>= 1; /* divide by 2 */
    } while (req_size > (PAGE_SIZE / 2));
}
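
/* Example of the fallback above: a 64 KiB reserve request that cannot be
   satisfied is retried at 32 KiB, 16 KiB, 8 KiB, and finally PAGE_SIZE
   (4 KiB on i386) before giving up. */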

static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
    Sg_scatter_hold * req_schp = &srp->data;
    Sg_scatter_hold * rsv_schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
    if (rsv_schp->use_sg > 0) {
        int k, num;
        int rem = size;
        struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;

        for (k = 0; k < rsv_schp->use_sg; ++k, ++sclp) {
            num = (int)sclp->length;
            if (rem <= num) {
                sfp->save_scat_len = num;
                sclp->length = (unsigned)rem;
                break;
            }
            else
                rem -= num;
        }
        if (k < rsv_schp->use_sg) {
            req_schp->use_sg = k + 1;   /* adjust scatter list length */
            req_schp->bufflen = size;
            req_schp->sglist_len = rsv_schp->sglist_len;
            req_schp->buffer = rsv_schp->buffer;
            req_schp->mem_src = rsv_schp->mem_src;
            req_schp->b_malloc_len = rsv_schp->b_malloc_len;
        }
        else
            SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
    }
    else {
        req_schp->use_sg = 0;
        req_schp->bufflen = size;
        req_schp->buffer = rsv_schp->buffer;
        req_schp->mem_src = rsv_schp->mem_src;
        req_schp->use_sg = rsv_schp->use_sg;
        req_schp->b_malloc_len = rsv_schp->b_malloc_len;
    }
    srp->res_used = 1;
}

static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
    Sg_scatter_hold * req_schp = &srp->data;
    Sg_scatter_hold * rsv_schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->use_sg=%d\n",
                               (int)req_schp->use_sg));
    if (rsv_schp->use_sg > 0) {
        struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;

        if (sfp->save_scat_len > 0)
            (sclp + (req_schp->use_sg - 1))->length =
                                        (unsigned)sfp->save_scat_len;
        else
            SCSI_LOG_TIMEOUT(1, printk(
                             "sg_unlink_reserve: BAD save_scat_len\n"));
    }
    req_schp->use_sg = 0;
    req_schp->bufflen = 0;
    req_schp->buffer = NULL;
    req_schp->sglist_len = 0;
    sfp->save_scat_len = 0;
    srp->res_used = 0;
}

static Sg_request * sg_get_request(const Sg_fd * sfp, int pack_id)
{
    Sg_request * resp = NULL;

    resp = sfp->headrp;
    while (resp) {
        if ((! resp->my_cmdp) &&
            ((-1 == pack_id) || (resp->header.pack_id == pack_id)))
            return resp;
        resp = resp->nextrp;
    }
    return resp;
}

/* always adds to end of list */
static Sg_request * sg_add_request(Sg_fd * sfp)
{
    int k;
    Sg_request * resp = NULL;
    Sg_request * rp;

    resp = sfp->headrp;
    rp = sfp->req_arr;
    if (! resp) {
        resp = rp;
        sfp->headrp = resp;
    }
    else {
        if (0 == sfp->cmd_q)
            resp = NULL;   /* command queuing disallowed */
        else {
            for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
                if (! rp->parentfp)
                    break;
            }
            if (k < SG_MAX_QUEUE) {
                while (resp->nextrp) resp = resp->nextrp;
                resp->nextrp = rp;
                resp = rp;
            }
            else
                resp = NULL;
        }
    }
    if (resp) {
        resp->parentfp = sfp;
        resp->nextrp = NULL;
        resp->res_used = 0;
        memset(&resp->data, 0, sizeof(Sg_scatter_hold));
        memset(&resp->header, 0, sizeof(struct sg_header));
        resp->my_cmdp = NULL;
    }
    return resp;
}

/* Return of 1 for found; 0 for not found */
static int sg_remove_request(Sg_fd * sfp, const Sg_request * srp)
{
    Sg_request * prev_rp;
    Sg_request * rp;

    if ((! sfp) || (! srp) || (! sfp->headrp))
        return 0;
    prev_rp = sfp->headrp;
    if (srp == prev_rp) {
        prev_rp->parentfp = NULL;
        sfp->headrp = prev_rp->nextrp;
        return 1;
    }
    while ((rp = prev_rp->nextrp)) {
        if (srp == rp) {
            rp->parentfp = NULL;
            prev_rp->nextrp = rp->nextrp;
            return 1;
        }
        prev_rp = rp;
    }
    return 0;
}

static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev, int get_reserved)
{
    Sg_fd * sfp;

    if (sdp->merge_fd) {
        ++sdp->merge_fd;
        return sdp->headfp;
    }
    sfp = (Sg_fd *)sg_low_malloc(sizeof(Sg_fd), 0, SG_HEAP_KMAL, 0);
    if (sfp) {
        memset(sfp, 0, sizeof(Sg_fd));
        sfp->my_mem_src = SG_HEAP_KMAL;
        init_waitqueue_head(&sfp->read_wait);
        init_waitqueue_head(&sfp->write_wait);
    }
    else
        return NULL;

    sfp->timeout = SG_DEFAULT_TIMEOUT;
    sfp->force_packid = SG_DEF_FORCE_PACK_ID;
    sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
                   sdp->device->host->unchecked_isa_dma : 1;
    sfp->cmd_q = SG_DEF_COMMAND_Q;
    sfp->underrun_flag = SG_DEF_UNDERRUN_FLAG;
    sfp->parentdp = sdp;
    if (! sdp->headfp)
        sdp->headfp = sfp;
    else { /* add to tail of existing list */
        Sg_fd * pfp = sdp->headfp;
        while (pfp->nextfp)
            pfp = pfp->nextfp;
        pfp->nextfp = sfp;
    }
    SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p, m_s=%d\n",
                               sfp, (int)sfp->my_mem_src));
    if (get_reserved) {
        sg_build_reserve(sfp, SG_DEF_RESERVED_SIZE);
        sg_big_buff = sfp->reserve.bufflen; /* sysctl shows most recent size */
        SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, use_sg=%d\n",
                                   sfp->reserve.bufflen, sfp->reserve.use_sg));
    }
    return sfp;
}

static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
    Sg_request * srp;
    Sg_request * tsrp;
    int dirty = 0;
    int res = 0;

    if (sdp->merge_fd) {
        if (--sdp->merge_fd)
            return 0;   /* if merge_fd then dec merge_fd counter */
    }
    srp = sfp->headrp;
    if (srp) {
/* Need to stop sg_command_done() playing with this list during this loop */
        while (srp) {
            tsrp = srp->nextrp;
            if (! srp->my_cmdp)
                sg_finish_rem_req(srp, NULL, 0);
            else
                ++dirty;
            srp = tsrp;
        }
    }
    if (0 == dirty) {
        Sg_fd * fp;
        Sg_fd * prev_fp = sdp->headfp;

        if (sfp == prev_fp)
            sdp->headfp = prev_fp->nextfp;
        else {
            while ((fp = prev_fp->nextfp)) {
                if (sfp == fp) {
                    prev_fp->nextfp = fp->nextfp;
                    break;
                }
                prev_fp = fp;
            }
        }
        if (sfp->reserve.bufflen > 0) {
            SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: bufflen=%d, use_sg=%d\n",
                            (int)sfp->reserve.bufflen, (int)sfp->reserve.use_sg));
            sg_remove_scat(&sfp->reserve);
        }
        sfp->parentdp = NULL;
        SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp:    sfp=0x%p\n", sfp));
        sg_low_free((char *)sfp, sizeof(Sg_fd), sfp->my_mem_src);
        res = 1;
    }
    else {
        sfp->closed = 1; /* flag dirty state on this fd */
        SCSI_LOG_TIMEOUT(1, printk(
                         "sg_remove_sfp: worrisome, %d writes pending\n",
                         dirty));
    }
    return res;
}

static int sg_res_in_use(const Sg_fd * sfp)
{
    const Sg_request * srp = sfp->headrp;

    while (srp) {
        if (srp->res_used)
            return 1;
        srp = srp->nextrp;
    }
    return 0;
}

/* If retSzp==NULL want exact size or fail */
/* sg_low_malloc() should always be called from a process context allowing
   GFP_KERNEL to be used instead of GFP_ATOMIC */
static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp)
{
    char * resp = NULL;
    int page_mask = lowDma ? (GFP_KERNEL | GFP_DMA) : GFP_KERNEL;

    if (rqSz <= 0)
        return resp;
    if (SG_HEAP_KMAL == mem_src) {
        page_mask = lowDma ? (GFP_ATOMIC | GFP_DMA) : GFP_ATOMIC;
        /* Seen kmalloc(..,GFP_KERNEL) hang for 40 secs! */
        resp = kmalloc(rqSz, page_mask);
        if (resp && retSzp) *retSzp = rqSz;
#ifdef SG_DEBUG
        if (resp) ++sg_num_kmal;
#endif
        return resp;
    }
    if (SG_HEAP_POOL == mem_src) {
        int num_sect = rqSz / SG_SECTOR_SZ;

        if (0 != (rqSz & SG_SECTOR_MSK)) {
            if (! retSzp)
                return resp;
            ++num_sect;
            rqSz = num_sect * SG_SECTOR_SZ;
        }
        while (num_sect > 0) {
            if ((num_sect <= sg_pool_secs_avail) &&
                (scsi_dma_free_sectors > (SG_LOW_POOL_THRESHHOLD + num_sect))) {
                resp = scsi_malloc(rqSz);
                if (resp) {
                    if (retSzp) *retSzp = rqSz;
                    sg_pool_secs_avail -= num_sect;
#ifdef SG_DEBUG
                    ++sg_num_pool;
#endif
                    return resp;
                }
            }
            if (! retSzp)
                return resp;
            num_sect /= 2;  /* try half as many */
            rqSz = num_sect * SG_SECTOR_SZ;
        }
    }
    else if (SG_HEAP_PAGE == mem_src) {
        int order, a_size;
        int resSz = rqSz;

        for (order = 0, a_size = PAGE_SIZE;
             a_size < rqSz; order++, a_size <<= 1)
            ;
        resp = (char *)__get_free_pages(page_mask, order);
        while ((! resp) && order && retSzp) {
            --order;
            a_size >>= 1;  /* divide by 2, until PAGE_SIZE */
            resp = (char *)__get_free_pages(page_mask, order); /* try half */
            resSz = a_size;
        }
        if (retSzp) *retSzp = resSz;
#ifdef SG_DEBUG
        if (resp) ++sg_num_page;
#endif
    }
    else
        printk("sg_low_malloc: bad mem_src=%d, rqSz=%d\n", mem_src, rqSz);
    return resp;
}
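
/* Worked example of the order computation above: on i386 (PAGE_SIZE 4096)
   a 48 KiB request scans order 0, 1, ... until a_size >= rqSz, landing on
   order 4 (64 KiB); if that allocation fails and a reduced size is
   acceptable (retSzp != NULL), order 3 (32 KiB) is tried next, and so on
   down to a single page. */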

static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
                        int * mem_srcp)
{
    char * resp = NULL;

    if (retSzp) *retSzp = size;
    if (size <= 0)
        ;
    else {
        int low_dma = sfp->low_dma;
        int l_ms = -1;  /* invalid value */

        switch (*mem_srcp)
        {
        case SG_HEAP_PAGE:
            l_ms = (size < PAGE_SIZE) ? SG_HEAP_POOL : SG_HEAP_PAGE;
            resp = sg_low_malloc(size, low_dma, l_ms, 0);
            if (resp)
                break;
            resp = sg_low_malloc(size, low_dma, l_ms, &size);
            if (! resp) {
                l_ms = (SG_HEAP_POOL == l_ms) ? SG_HEAP_PAGE : SG_HEAP_POOL;
                resp = sg_low_malloc(size, low_dma, l_ms, &size);
                if (! resp) {
                    l_ms = SG_HEAP_KMAL;
                    resp = sg_low_malloc(size, low_dma, l_ms, &size);
                }
            }
            if (resp && retSzp) *retSzp = size;
            break;
        case SG_HEAP_KMAL:
            l_ms = SG_HEAP_PAGE;
            resp = sg_low_malloc(size, low_dma, l_ms, 0);
            if (resp)
                break;
            l_ms = SG_HEAP_POOL;
            resp = sg_low_malloc(size, low_dma, l_ms, &size);
            if (resp && retSzp) *retSzp = size;
            break;
        default:
            SCSI_LOG_TIMEOUT(1, printk("sg_malloc: bad ms=%d\n", *mem_srcp));
            break;
        }
        if (resp) *mem_srcp = l_ms;
    }
    SCSI_LOG_TIMEOUT(6, printk("sg_malloc: size=%d, ms=%d, ret=0x%p\n",
                               size, *mem_srcp, resp));
    return resp;
}

static void sg_low_free(char * buff, int size, int mem_src)
{
    if (! buff)
        return;
    if (SG_HEAP_POOL == mem_src) {
        int num_sect = size / SG_SECTOR_SZ;
        scsi_free(buff, size);
        sg_pool_secs_avail += num_sect;
    }
    else if (SG_HEAP_KMAL == mem_src)
        kfree(buff);    /* size not used */
    else if (SG_HEAP_PAGE == mem_src) {
        int order, a_size;

        for (order = 0, a_size = PAGE_SIZE;
             a_size < size; order++, a_size <<= 1)
            ;
        free_pages((unsigned long)buff, order);
    }
    else
        printk("sg_low_free: bad mem_src=%d, buff=0x%p, rqSz=%d\n",
               mem_src, buff, size);
}

static void sg_free(char * buff, int size, int mem_src)
{
    SCSI_LOG_TIMEOUT(6,
        printk("sg_free: buff=0x%p, size=%d\n", buff, size));
    if ((! buff) || (size <= 0))
        ;
    else
        sg_low_free(buff, size, mem_src);
}

static void sg_clr_scpnt(Scsi_Cmnd * SCpnt)
{
    SCpnt->use_sg = 0;
    SCpnt->sglist_len = 0;
    SCpnt->bufflen = 0;
    SCpnt->buffer = NULL;
    SCpnt->underflow = 0;
    SCpnt->request.rq_dev = MKDEV(0, 0);  /* "sg" _disowns_ command blk */
}