MOXA linux-2.6.9-uc0 (from sdlinux-moxaart.tgz), linux-2.6.9-moxart.git: drivers/scsi/scsi.c

/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to:
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding requests, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of a SCSI command.  This macro takes vendor
 * unique commands into account.  SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)   (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
                                COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)

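/*
 * Illustrative example (editorial addition, not in the original source):
 * for a READ_10 (opcode 0x28) the group bits are (0x28 >> 5) & 7 == 1,
 * so CDB_SIZE() trusts COMMAND_SIZE() and yields 10.  For a vendor
 * unique opcode such as 0xc1 the group is 6, so CDB_SIZE() falls back
 * to the length the originator stored in cmd->cmd_len:
 *
 *      cmd->cmnd[0] = READ_10;         CDB_SIZE(cmd) == 10
 *      cmd->cmnd[0] = 0xc1;            CDB_SIZE(cmd) == cmd->cmd_len
 */
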
/*
 * Data declarations.
 */
unsigned long scsi_pid;
static unsigned long serial_number;

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
        "Direct-Access    ",
        "Sequential-Access",
        "Printer          ",
        "Processor        ",
        "WORM             ",
        "CD-ROM           ",
        "Scanner          ",
        "Optical Device   ",
        "Medium Changer   ",
        "Communications   ",
        "Unknown          ",
        "Unknown          ",
        "RAID             ",
        "Enclosure        ",
};

/*
 * Function:    scsi_allocate_request
 *
 * Purpose:     Allocate a request descriptor.
 *
 * Arguments:   device    - device for which we want a request
 *              gfp_mask  - allocation flags passed to kmalloc
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     Pointer to request block.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
                                           int gfp_mask)
{
        const int offset = ALIGN(sizeof(struct scsi_request), 4);
        const int size = offset + sizeof(struct request);
        struct scsi_request *sreq;

        sreq = kmalloc(size, gfp_mask);
        if (likely(sreq != NULL)) {
                memset(sreq, 0, size);
                sreq->sr_request = (struct request *)(((char *)sreq) + offset);
                sreq->sr_device = sdev;
                sreq->sr_host = sdev->host;
                sreq->sr_magic = SCSI_REQ_MAGIC;
                sreq->sr_data_direction = DMA_BIDIRECTIONAL;
        }

        return sreq;
}

void __scsi_release_request(struct scsi_request *sreq)
{
        struct request *req = sreq->sr_request;

        /* unlikely because the tag was usually ended earlier by the
         * mid-layer.  However, for layering reasons ULDs don't end
         * the tag of commands they generate. */
        if (unlikely(blk_rq_tagged(req))) {
                unsigned long flags;
                struct request_queue *q = req->q;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_queue_end_tag(q, req);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        if (likely(sreq->sr_command != NULL)) {
                struct scsi_cmnd *cmd = sreq->sr_command;

                sreq->sr_command = NULL;
                scsi_next_command(cmd);
        }
}

/*
 * Function:    scsi_release_request
 *
 * Purpose:     Release a request descriptor.
 *
 * Arguments:   sreq    - request to release
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
        __scsi_release_request(sreq);
        kfree(sreq);
}

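/*
 * Illustrative sketch (editorial addition): a typical upper-level user of
 * the request descriptor API in this tree.  The device pointer, CDB and
 * data buffer are assumed to come from the caller; scsi_wait_req() is the
 * synchronous issue helper from scsi_lib.c.
 *
 *      struct scsi_request *sreq;
 *
 *      sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *      if (!sreq)
 *              return -ENOMEM;
 *      sreq->sr_data_direction = DMA_FROM_DEVICE;
 *      scsi_wait_req(sreq, cmnd, buffer, bufflen, 30 * HZ, 3);
 *      result = sreq->sr_result;
 *      scsi_release_request(sreq);
 */
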
struct scsi_host_cmd_pool {
        kmem_cache_t    *slab;
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
        unsigned int    gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
        .name           = "scsi_cmd_cache",
        .slab_flags     = SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
        .name           = "scsi_cmd_cache(DMA)",
        .slab_flags     = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
        .gfp_mask       = __GFP_DMA,
};

static DECLARE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
                                            int gfp_mask)
{
        struct scsi_cmnd *cmd;

        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               gfp_mask | shost->cmd_pool->gfp_mask);

        if (unlikely(!cmd)) {
                unsigned long flags;

                spin_lock_irqsave(&shost->free_list_lock, flags);
                if (likely(!list_empty(&shost->free_list))) {
                        cmd = list_entry(shost->free_list.next,
                                         struct scsi_cmnd, list);
                        list_del_init(&cmd->list);
                }
                spin_unlock_irqrestore(&shost->free_list_lock, flags);
        }

        return cmd;
}

/*
 * Function:    scsi_get_command()
 *
 * Purpose:     Allocate and set up a scsi command block
 *
 * Arguments:   dev      - parent scsi device
 *              gfp_mask - allocator flags
 *
 * Returns:     The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
{
        struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);

        if (likely(cmd != NULL)) {
                unsigned long flags;

                memset(cmd, 0, sizeof(*cmd));
                cmd->device = dev;
                cmd->state = SCSI_STATE_UNUSED;
                cmd->owner = SCSI_OWNER_NOBODY;
                init_timer(&cmd->eh_timeout);
                INIT_LIST_HEAD(&cmd->list);
                spin_lock_irqsave(&dev->list_lock, flags);
                list_add_tail(&cmd->list, &dev->cmd_list);
                spin_unlock_irqrestore(&dev->list_lock, flags);
        }

        return cmd;
}

/*
 * Function:    scsi_put_command()
 *
 * Purpose:     Free a scsi command block
 *
 * Arguments:   cmd     - command block to free
 *
 * Returns:     Nothing.
 *
 * Notes:       The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *shost = cmd->device->host;
        unsigned long flags;

        /* serious error if the command hasn't come from a device list */
        spin_lock_irqsave(&cmd->device->list_lock, flags);
        BUG_ON(list_empty(&cmd->list));
        list_del_init(&cmd->list);
        spin_unlock(&cmd->device->list_lock);
        /* changing locks here, don't need to restore the irq state */
        spin_lock(&shost->free_list_lock);
        if (unlikely(list_empty(&shost->free_list))) {
                list_add(&cmd->list, &shost->free_list);
                cmd = NULL;
        }
        spin_unlock_irqrestore(&shost->free_list_lock, flags);

        if (likely(cmd != NULL))
                kmem_cache_free(shost->cmd_pool->slab, cmd);
}

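/*
 * Illustrative note (editorial addition): scsi_get_command() and
 * scsi_put_command() are used in matched pairs, and the single command
 * parked on shost->free_list guarantees forward progress when the slab
 * allocator fails under memory pressure:
 *
 *      struct scsi_cmnd *cmd;
 *
 *      cmd = scsi_get_command(sdev, GFP_ATOMIC);
 *      if (!cmd)
 *              return 1;       even the reserved command is in flight
 *      ... build the command, dispatch it, and on completion ...
 *      scsi_put_command(cmd);
 */
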
/*
 * Function:    scsi_setup_command_freelist()
 *
 * Purpose:     Setup the command freelist for a scsi host.
 *
 * Arguments:   shost   - host to allocate the freelist for.
 *
 * Returns:     0 on success, -ENOMEM on failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
        struct scsi_host_cmd_pool *pool;
        struct scsi_cmnd *cmd;

        spin_lock_init(&shost->free_list_lock);
        INIT_LIST_HEAD(&shost->free_list);

        /*
         * Select a command slab for this host and create it if not
         * yet existent.
         */
        down(&host_cmd_pool_mutex);
        pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
        if (!pool->users) {
                pool->slab = kmem_cache_create(pool->name,
                                sizeof(struct scsi_cmnd), 0,
                                pool->slab_flags, NULL, NULL);
                if (!pool->slab)
                        goto fail;
        }

        pool->users++;
        shost->cmd_pool = pool;
        up(&host_cmd_pool_mutex);

        /*
         * Get one backup command for this host.
         */
        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                        GFP_KERNEL | shost->cmd_pool->gfp_mask);
        if (!cmd)
                goto fail2;
        list_add(&cmd->list, &shost->free_list);
        return 0;

 fail2:
        /* pool->users must not be modified without the mutex held */
        down(&host_cmd_pool_mutex);
        if (!--pool->users)
                kmem_cache_destroy(pool->slab);
        up(&host_cmd_pool_mutex);
        return -ENOMEM;
 fail:
        up(&host_cmd_pool_mutex);
        return -ENOMEM;
}

/*
 * Function:    scsi_destroy_command_freelist()
 *
 * Purpose:     Release the command freelist for a scsi host.
 *
 * Arguments:   shost   - host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
        while (!list_empty(&shost->free_list)) {
                struct scsi_cmnd *cmd;

                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
                list_del_init(&cmd->list);
                kmem_cache_free(shost->cmd_pool->slab, cmd);
        }

        down(&host_cmd_pool_mutex);
        if (!--shost->cmd_pool->users)
                kmem_cache_destroy(shost->cmd_pool->slab);
        up(&host_cmd_pool_mutex);
}

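/*
 * Illustrative sketch (editorial addition): the two freelist calls are
 * made from the host registration and teardown paths (scsi_host_alloc
 * and friends in hosts.c), bracketing the host's lifetime roughly like:
 *
 *      if (scsi_setup_command_freelist(shost))
 *              goto fail_host;
 *      ... host registered; commands now come from __scsi_get_command() ...
 *      scsi_destroy_command_freelist(shost);
 *
 * "fail_host" is a hypothetical label for this sketch.
 */
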
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        sdev = cmd->device;
                        printk(KERN_INFO "scsi <%d:%d:%d:%d> send ",
                               sdev->host->host_no, sdev->channel, sdev->id,
                               sdev->lun);
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * spaces to match disposition and cmd->result
                         * output in scsi_log_completion.
                         */
                        printk("                 ");
                        scsi_print_command(cmd);
                        if (level > 3) {
                                printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
                                       " done = 0x%p, queuecommand 0x%p\n",
                                        cmd->buffer, cmd->bufflen,
                                        cmd->done,
                                        sdev->host->hostt->queuecommand);
                        }
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        sdev = cmd->device;
                        printk(KERN_INFO "scsi <%d:%d:%d:%d> done ",
                               sdev->host->host_no, sdev->channel, sdev->id,
                               sdev->lun);
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * Dump truncated values, so we usually fit within
                         * 80 chars.
                         */
                        switch (disposition) {
                        case SUCCESS:
                                printk("SUCCESS");
                                break;
                        case NEEDS_RETRY:
                                printk("RETRY  ");
                                break;
                        case ADD_TO_MLQUEUE:
                                printk("MLQUEUE");
                                break;
                        case FAILED:
                                printk("FAILED ");
                                break;
                        case TIMEOUT_ERROR:
                                /*
                                 * If called via scsi_times_out.
                                 */
                                printk("TIMEOUT");
                                break;
                        default:
                                printk("UNKNOWN");
                        }
                        printk(" %8x ", cmd->result);
                        scsi_print_command(cmd);
                        if (status_byte(cmd->result) & CHECK_CONDITION) {
                                /*
                                 * XXX The print_sense formatting/prefix
                                 * doesn't match this function.
                                 */
                                scsi_print_sense("", cmd);
                        }
                        if (level > 3) {
                                printk(KERN_INFO "scsi host busy %d failed %d\n",
                                       sdev->host->host_busy,
                                       sdev->host->host_failed);
                        }
                }
        }
}
#endif

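/*
 * Illustrative note (editorial addition): scsi_logging_level packs one
 * small bit-field per event class, and SCSI_LOG_LEVEL() from
 * scsi_logging.h simply extracts such a field:
 *
 *      level = (scsi_logging_level >> shift) & ((1 << bits) - 1);
 *
 * so a mid-layer queue log level of 3 (opcodes plus command addresses)
 * can be requested at run time with, for example:
 *
 *      scsi_logging_level |= 3 << SCSI_LOG_MLQUEUE_SHIFT;
 */
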
/*
 * Function:    scsi_dispatch_command
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Arguments:   cmd     - command block we are dispatching.
 *
 * Notes:
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags = 0;
        unsigned long timeout;
        int rtn = 0;

        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
                /* in SDEV_DEL we error all commands.  DID_NO_CONNECT
                 * returns an immediate error upwards, and signals
                 * that the device is no longer present */
                cmd->result = DID_NO_CONNECT << 16;
                scsi_done(cmd);
                /* return 0 (because the command has been processed) */
                goto out;
        }

        /* Assign a unique nonzero serial_number. */
        /* XXX(hch): this is racy */
        if (++serial_number == 0)
                serial_number = 1;
        cmd->serial_number = serial_number;
        cmd->pid = scsi_pid++;

        /*
         * If SCSI-2 or lower, store the LUN value in cmnd.
         */
        if (cmd->device->scsi_level <= SCSI_2) {
                cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
                               (cmd->device->lun << 5 & 0xe0);
        }

        /*
         * We will wait MIN_RESET_DELAY clock ticks after the last reset so
         * we can avoid the drive not being ready.
         */
        timeout = host->last_reset + MIN_RESET_DELAY;

        if (host->resetting && time_before(jiffies, timeout)) {
                int ticks_remaining = timeout - jiffies;
                /*
                 * NOTE: This may be executed from within an interrupt
                 * handler!  This is bad, but for now, it'll do.  The irq
                 * level of the interrupt handler has been masked out by the
                 * platform dependent interrupt handling code already, so the
                 * sti() here will not cause another call to the SCSI host's
                 * interrupt handler (assuming there is one irq-level per
                 * host).
                 */
                while (--ticks_remaining >= 0)
                        mdelay(1 + 999 / HZ);
                host->resetting = 0;
        }

        scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

        scsi_log_send(cmd);

        /*
         * We will use a queued command if possible, otherwise we will
         * emulate the queuing and calling of completion function ourselves.
         */

        cmd->state = SCSI_STATE_QUEUED;
        cmd->owner = SCSI_OWNER_LOWLEVEL;

        /*
         * Before we queue this command, check if the command
         * length exceeds what the host adapter can handle.
         */
        if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
                SCSI_LOG_MLQUEUE(3,
                                printk("queuecommand : command too long.\n"));
                cmd->result = (DID_ABORT << 16);

                spin_lock_irqsave(host->host_lock, flags);
                scsi_done(cmd);
                spin_unlock_irqrestore(host->host_lock, flags);
                goto out;
        }

        spin_lock_irqsave(host->host_lock, flags);
        if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) {
                cmd->result = (DID_NO_CONNECT << 16);
                scsi_done(cmd);
        } else {
                rtn = host->hostt->queuecommand(cmd, scsi_done);
        }
        spin_unlock_irqrestore(host->host_lock, flags);
        if (rtn) {
                scsi_queue_insert(cmd,
                                (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
                                 rtn : SCSI_MLQUEUE_HOST_BUSY);
                SCSI_LOG_MLQUEUE(3,
                    printk("queuecommand : request rejected\n"));
        }

 out:
        SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
        return rtn;
}

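/*
 * Illustrative sketch (editorial addition): the queuecommand() hook
 * invoked above is supplied by the low-level driver, runs with the host
 * lock held, and may push back by returning a SCSI_MLQUEUE_* code, which
 * makes scsi_dispatch_cmd() requeue the command.  A hypothetical driver
 * ("mydrv_*" names are invented for this sketch):
 *
 *      static int mydrv_queuecommand(struct scsi_cmnd *cmd,
 *                                    void (*done)(struct scsi_cmnd *))
 *      {
 *              cmd->scsi_done = done;
 *              if (mydrv_hw_queue_full(cmd->device->host))
 *                      return SCSI_MLQUEUE_HOST_BUSY;
 *              mydrv_start_io(cmd);      hardware completion path later
 *              return 0;                 calls cmd->scsi_done(cmd)
 *      }
 */
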
/*
 * Function:    scsi_init_cmd_from_req
 *
 * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:   cmd     - command descriptor.
 *              sreq    - Request from the queue.
 *
 * Lock status: None needed.
 *
 * Returns:     Nothing.
 *
 * Notes:       Mainly transfer data from the request structure to the
 *              command structure.  The request structure is allocated
 *              using the normal memory allocator, and requests can pile
 *              up to more or less any depth.  The command structure
 *              represents a consumable resource, as these are allocated
 *              into a pool when the SCSI subsystem initializes.  The
 *              preallocation is required so that in low-memory situations
 *              a disk I/O request won't cause the memory manager to try
 *              and write out a page.  The request structure is generally
 *              used by ioctls and character devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
        sreq->sr_command = cmd;

        cmd->owner = SCSI_OWNER_MIDLEVEL;
        cmd->cmd_len = sreq->sr_cmd_len;
        cmd->use_sg = sreq->sr_use_sg;

        cmd->request = sreq->sr_request;
        memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
        cmd->serial_number = 0;
        cmd->serial_number_at_timeout = 0;
        cmd->bufflen = sreq->sr_bufflen;
        cmd->buffer = sreq->sr_buffer;
        cmd->retries = 0;
        cmd->allowed = sreq->sr_allowed;
        cmd->done = sreq->sr_done;
        cmd->timeout_per_command = sreq->sr_timeout_per_command;
        cmd->sc_data_direction = sreq->sr_data_direction;
        cmd->sglist_len = sreq->sr_sglist_len;
        cmd->underflow = sreq->sr_underflow;
        cmd->sc_request = sreq;
        memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

        /*
         * Zero the sense buffer.  Some host adapters automatically request
         * sense on error.  0 is not a valid sense code.
         */
        memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
        cmd->request_buffer = sreq->sr_buffer;
        cmd->request_bufflen = sreq->sr_bufflen;
        cmd->old_use_sg = cmd->use_sg;
        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;

        /*
         * Start the timer ticking.
         */
        cmd->internal_timeout = NORMAL_TIMEOUT;
        cmd->abort_reason = 0;
        cmd->result = 0;

        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
void scsi_done(struct scsi_cmnd *cmd)
{
        /*
         * We don't have to worry about this one timing out any more.
         * If we are unable to remove the timer, then the command
         * has already timed out.  In which case, we have no choice but to
         * let the timeout function run, as we have no idea where in fact
         * that function could really be.  It might be on another processor,
         * etc, etc.
         */
        if (!scsi_delete_timer(cmd))
                return;
        __scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
        unsigned long flags;

        /*
         * Set the serial numbers back to zero
         */
        cmd->serial_number = 0;
        cmd->serial_number_at_timeout = 0;
        cmd->state = SCSI_STATE_BHQUEUE;
        cmd->owner = SCSI_OWNER_BH_HANDLER;

        /*
         * Next, enqueue the command into the done queue.
         * It is a per-CPU queue, so we just disable local interrupts
         * and need no spinlock.
         */
        local_irq_save(flags);
        list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
        raise_softirq_irqoff(SCSI_SOFTIRQ);
        local_irq_restore(flags);
}

/**
 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
 *
 * This is the consumer of the done queue.
 *
 * This is called with all interrupts enabled.  This should reduce
 * interrupt latency, stack depth, and reentrancy of the low-level
 * drivers.
 */
static void scsi_softirq(struct softirq_action *h)
{
        int disposition;
        LIST_HEAD(local_q);

        local_irq_disable();
        list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
        local_irq_enable();

        while (!list_empty(&local_q)) {
                struct scsi_cmnd *cmd = list_entry(local_q.next,
                                                   struct scsi_cmnd, eh_entry);
                list_del_init(&cmd->eh_entry);

                disposition = scsi_decide_disposition(cmd);
                scsi_log_completion(cmd, disposition);
                switch (disposition) {
                case SUCCESS:
                        scsi_finish_command(cmd);
                        break;
                case NEEDS_RETRY:
                        scsi_retry_command(cmd);
                        break;
                case ADD_TO_MLQUEUE:
                        scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                        break;
                default:
                        if (!scsi_eh_scmd_add(cmd, 0))
                                scsi_finish_command(cmd);
                }
        }
}

/*
 * Function:    scsi_retry_command
 *
 * Purpose:     Send a command back to the low level to be retried.
 *
 * Notes:       This command is always executed in the context of the
 *              bottom half handler, or the error handler thread.  Low
 *              level drivers should not become re-entrant as a result of
 *              this.
 */
int scsi_retry_command(struct scsi_cmnd *cmd)
{
        /*
         * Restore the SCSI command state.
         */
        scsi_setup_cmd_retry(cmd);

        /*
         * Zero the sense information from the last time we tried
         * this command.
         */
        memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

        return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:    scsi_finish_command
 *
 * Purpose:     Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct scsi_request *sreq;

        scsi_device_unbusy(sdev);

        /*
         * Clear the flags which say that the device/host is no longer
         * capable of accepting new commands.  These are set in scsi_queue.c
         * for both the queue full condition on a device, and for a
         * host full condition on the host.
         *
         * XXX(hch): What about locking?
         */
        shost->host_blocked = 0;
        sdev->device_blocked = 0;

        /*
         * If we have valid sense information, then some kind of recovery
         * must have taken place.  Make a note of this.
         */
        if (SCSI_SENSE_VALID(cmd))
                cmd->result |= (DRIVER_SENSE << 24);

        SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion "
                                "for device %d %x\n", sdev->id, cmd->result));

        cmd->owner = SCSI_OWNER_HIGHLEVEL;
        cmd->state = SCSI_STATE_FINISHED;

        /*
         * We can get here with use_sg=0, causing a panic in the upper level
         */
        cmd->use_sg = cmd->old_use_sg;

        /*
         * If there is an associated request structure, copy the data over
         * before we call the completion function.
         */
        sreq = cmd->sc_request;
        if (sreq) {
                sreq->sr_result = sreq->sr_command->result;
                if (sreq->sr_result) {
                        memcpy(sreq->sr_sense_buffer,
                               sreq->sr_command->sense_buffer,
                               sizeof(sreq->sr_sense_buffer));
                }
        }

        cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:    scsi_adjust_queue_depth()
 *
 * Purpose:     Allow low level drivers to tell us to change the queue depth
 *              on a specific SCSI device
 *
 * Arguments:   sdev    - SCSI Device in question
 *              tagged  - Do we use tagged queueing (non-0) or do we treat
 *                        this device as an untagged device (0)
 *              tags    - Number of tags allowed if tagged queueing enabled,
 *                        or number of commands the low level driver can
 *                        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:     Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              the right thing depending on whether or not the device is
 *              currently active and whether or not it even has the
 *              command blocks built yet.
 *
 * XXX(hch):    What exactly is device_request_lock trying to protect?
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
        static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
        unsigned long flags;

        /*
         * refuse to set tagged depth to an unworkable size
         */
        if (tags <= 0)
                return;

        spin_lock_irqsave(&device_request_lock, flags);
        spin_lock(sdev->request_queue->queue_lock);

        /* Check to see if the queue is managed by the block layer
         * if it is, and we fail to adjust the depth, exit */
        if (blk_queue_tagged(sdev->request_queue) &&
            blk_queue_resize_tags(sdev->request_queue, tags) != 0)
                goto out;

        sdev->queue_depth = tags;
        switch (tagged) {
        case MSG_ORDERED_TAG:
                sdev->ordered_tags = 1;
                sdev->simple_tags = 1;
                break;
        case MSG_SIMPLE_TAG:
                sdev->ordered_tags = 0;
                sdev->simple_tags = 1;
                break;
        default:
                printk(KERN_WARNING "(scsi%d:%d:%d:%d) "
                        "scsi_adjust_queue_depth, bad queue type, "
                        "disabled\n", sdev->host->host_no,
                        sdev->channel, sdev->id, sdev->lun);
                /* fall through and disable tagged queueing */
        case 0:
                sdev->ordered_tags = sdev->simple_tags = 0;
                sdev->queue_depth = tags;
                break;
        }
 out:
        spin_unlock(sdev->request_queue->queue_lock);
        spin_unlock_irqrestore(&device_request_lock, flags);
}

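/*
 * Illustrative sketch (editorial addition): a low-level driver would
 * typically call scsi_adjust_queue_depth() from its slave_configure()
 * hook ("mydrv_" is a hypothetical driver prefix):
 *
 *      static int mydrv_slave_configure(struct scsi_device *sdev)
 *      {
 *              if (sdev->tagged_supported)
 *                      scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 64);
 *              else
 *                      scsi_adjust_queue_depth(sdev, 0,
 *                                              sdev->host->cmd_per_lun);
 *              return 0;
 *      }
 */
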
/*
 * Function:    scsi_track_queue_full()
 *
 * Purpose:     This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Arguments:   sdev    - SCSI Device in question
 *              depth   - Current number of outstanding SCSI commands on
 *                        this device, not counting the one returned as
 *                        QUEUE_FULL.
 *
 * Returns:     0 - No change needed
 *              >0 - Adjust queue depth to this new depth
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                   as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
        if ((jiffies >> 4) == sdev->last_queue_full_time)
                return 0;

        sdev->last_queue_full_time = (jiffies >> 4);
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;
        if (sdev->last_queue_full_depth < 8) {
                /* Drop back to untagged */
                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
                return -1;
        }

        if (sdev->ordered_tags)
                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
        else
                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
        return depth;
}

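/*
 * Illustrative sketch (editorial addition): an LLDD that observes a
 * QUEUE FULL status in its completion path can let the mid-layer ramp
 * the depth down; "active" stands for the driver's own count of commands
 * still outstanding on that LUN (an assumption for this sketch):
 *
 *      if (status_byte(cmd->result) == QUEUE_FULL)
 *              scsi_track_queue_full(cmd->device, active);
 */
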
/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:       device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
        if (!try_module_get(sdev->host->hostt->module)) {
                put_device(&sdev->sdev_gendev);
                return -ENXIO;
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:       device to release a reference on.
 *
 * Release a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

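/*
 * Illustrative sketch (editorial addition): the get/put pair brackets
 * any use of an sdev outside the host_lock, pinning both the device and
 * the LLDD module:
 *
 *      if (scsi_device_get(sdev))
 *              return -ENXIO;
 *      ... sdev is safe to use; the LLDD module cannot unload ...
 *      scsi_device_put(sdev);
 */
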
/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                /* reference failed: clear next so we never return an
                 * unreferenced device if this was the last list entry */
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

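/*
 * Illustrative sketch (editorial addition): drivers normally consume
 * this iterator through the shost_for_each_device() wrapper from
 * <scsi/scsi_device.h>, which hides the reference juggling:
 *
 *      struct scsi_device *sdev;
 *
 *      shost_for_each_device(sdev, shost)
 *              printk(KERN_INFO "scsi %d:%d:%d:%d\n", shost->host_no,
 *                     sdev->channel, sdev->id, sdev->lun);
 */
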
/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 */
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel == channel && sdev->id == id &&
                    sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:      SCSI host pointer
 * @channel:    SCSI channel (zero if only one channel)
 * @id:         SCSI target number (physical unit number)
 * @lun:        SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 */
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

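/*
 * Illustrative sketch (editorial addition): the lookup returns a
 * referenced device, so the caller must drop that reference when done:
 *
 *      struct scsi_device *sdev;
 *
 *      sdev = scsi_device_lookup(shost, 0, id, 0);
 *      if (sdev) {
 *              ... use sdev ...
 *              scsi_device_put(sdev);
 *      }
 */
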
/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:       pointer to struct scsi_device
 * @recovery:   if non-zero, hand cancelled commands to the error handler
 *              for recovery instead of finishing them with DID_ABORT
 */
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
        struct scsi_cmnd *scmd;
        LIST_HEAD(active_list);
        struct list_head *lh, *lh_sf;
        unsigned long flags;

        scsi_device_set_state(sdev, SDEV_CANCEL);

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_for_each_entry(scmd, &sdev->cmd_list, list) {
                if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
                        /*
                         * If we are unable to remove the timer, it means
                         * that the command has already timed out or
                         * finished.
                         */
                        if (!scsi_delete_timer(scmd))
                                continue;
                        list_add_tail(&scmd->eh_entry, &active_list);
                }
        }
        spin_unlock_irqrestore(&sdev->list_lock, flags);

        if (!list_empty(&active_list)) {
                list_for_each_safe(lh, lh_sf, &active_list) {
                        scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
                        list_del_init(lh);
                        if (recovery) {
                                scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
                        } else {
                                scmd->result = (DID_ABORT << 16);
                                scsi_finish_command(scmd);
                        }
                }
        }

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int scsi_cpu_notify(struct notifier_block *self,
                           unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;

        switch(action) {
        case CPU_DEAD:
                /* Drain scsi_done_q. */
                local_irq_disable();
                list_splice_init(&per_cpu(scsi_done_q, cpu),
                                 &__get_cpu_var(scsi_done_q));
                raise_softirq_irqoff(SCSI_SOFTIRQ);
                local_irq_enable();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata scsi_cpu_nb = {
        .notifier_call  = scsi_cpu_notify,
};

#define register_scsi_cpu()     register_cpu_notifier(&scsi_cpu_nb)
#define unregister_scsi_cpu()   unregister_cpu_notifier(&scsi_cpu_nb)
#else
#define register_scsi_cpu()
#define unregister_scsi_cpu()
#endif /* CONFIG_HOTPLUG_CPU */

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
        int error, i;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        for (i = 0; i < NR_CPUS; i++)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

        devfs_mk_dir("scsi");
        open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
        register_scsi_cpu();
        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}

static void __exit exit_scsi(void)
{
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        devfs_remove("scsi");
        scsi_exit_procfs();
        scsi_exit_queue();
        unregister_scsi_cpu();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);