/*
 *	Basic PIO and command management functionality.
 *
 *	This code was split off from ide.c. See ide.c for history and original
 *	copyrights.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2, or (at your option) any
 *	later version.
 *
 *	This program is distributed in the hope that it will be useful, but
 *	WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *	General Public License for more details.
 *
 *	For the avoidance of doubt the "preferred form" of this code is one which
 *	is in an open non patent encumbered format. Where cryptographic key signing
 *	forms part of the process of creating an executable the information
 *	including keys needed to generate an equivalently functional executable
 *	are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		ide_dma_on(drive);
	}

	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
		add_disk_randomness(rq->rq_disk);
		if (dequeue) {
			if (!list_empty(&rq->queuelist))
				blkdev_dequeue_request(rq);
			HWGROUP(drive)->rq = NULL;
		}
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}

/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: I/O status (non-zero means success)
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O,
 *	update the random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);

/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio	= ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* Resume step 1 complete */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* Resume step 2 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tf.command = WIN_FLUSH_CACHE_EXT;
		else
			args->tf.command = WIN_FLUSH_CACHE;
		goto out_do_tf;

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tf.command = WIN_STANDBYNOW1;
		goto out_do_tf;

	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip idedisk_pm_idle for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = ide_pm_restore_dma;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* Resume step 2 (idle) */
		args->tf.command = WIN_IDLEIMMEDIATE;
		goto out_do_tf;

	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_host_set == NULL)
			break;
		/*
		 * TODO: respect ->using_dma setting
		 */
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;

out_do_tf:
	args->tf_flags	 = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args->data_phase = TASKFILE_NO_DATA;
	return do_rw_taskfile(drive, args);
}

/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: request to complete
 *	@uptodate: I/O status (non-zero means success)
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ide_lock, flags);
	BUG_ON(!blk_rq_started(rq));
	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
	spin_unlock_irqrestore(&ide_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);

/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}

void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = hwif->INW(IDE_DATA_REG);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);

	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect  = hwif->INB(IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal   = hwif->INB(IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam   = hwif->INB(IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah   = hwif->INB(IDE_HCYL_REG);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = hwif->INB(IDE_SELECT_REG);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = hwif->INB(IDE_FEATURE_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect   = hwif->INB(IDE_NSECTOR_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal    = hwif->INB(IDE_SECTOR_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam    = hwif->INB(IDE_LCYL_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah    = hwif->INB(IDE_HCYL_REG);
	}
}

/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *	@drive: command
 *	@stat: status bits
 *	@err: error bits
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			struct ide_taskfile *tf = &args->tf;

			tf->error = err;
			tf->status = stat;

			ide_tf_read(drive, args);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);

/**
 *	try_to_flush_leftover_data	-	flush junk
 *	@drive: drive to flush
 *
 *	try_to_flush_leftover_data() is invoked in response to a drive
 *	unexpectedly having its DRQ_STAT bit set. As an alternative to
 *	resetting the drive, this routine tries to clear the condition
 *	by reading a sector's worth of data from the drive. Of course,
 *	this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);

/**
 *	ide_error	-	handle an error on the IDE
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *	@stat: status bits
 *
 *	ide_error() takes action based on the error returned by the drive.
 *	For normal I/O that may well include retries. We deal with
 *	both new-style (taskfile) and old style command handling here.
 *	In the case of taskfile command handling there is work left to
 *	do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 *	ide_abort	-	abort pending IDE operations
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *
 *	ide_abort kills and cleans up when we are about to do a
 *	host initiated reset on active commands. Longer term we
 *	want handlers to have sensible abort handling themselves.
 *
 *	This differs fundamentally from ide_error because in
 *	this case the command is doing just fine when we
 *	blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
	tf->command = WIN_SPECIFY;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = WIN_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = WIN_SETMULT;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/*
 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
 */
static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
{
	switch (req_pio) {
	case 202:
	case 201:
	case 200:
	case 102:
	case 101:
	case 100:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
	case 9:
	case 8:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
	case 7:
	case 6:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
	default:
		return 0;
	}
}

/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 *	commands to a drive.  It used to do much more, but has been scaled
 *	down.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_hwif_t *hwif = drive->hwif;
		u8 req_pio = drive->tune_req;

		s->b.set_tune = 0;

		if (set_pio_mode_abuse(drive->hwif, req_pio)) {

			if (hwif->set_pio_mode == NULL)
				return ide_stopped;

			/*
			 * take ide_lock for drive->[no_]unmask/[no_]io_32bit
			 */
			if (req_pio == 8 || req_pio == 9) {
				unsigned long flags;

				spin_lock_irqsave(&ide_lock, flags);
				hwif->set_pio_mode(drive, req_pio);
				spin_unlock_irqrestore(&ide_lock, flags);
			} else
				hwif->set_pio_mode(drive, req_pio);
		} else {
			int keep_dma = drive->using_dma;

			ide_set_pio(drive, req_pio);

			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
				if (keep_dma)
					ide_dma_on(drive);
			}
		}

		return ide_stopped;
	}

	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;

	return ide_stopped;
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 *	execute_drive_command	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			hwif->INB(IDE_STATUS_REG),
			hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		ide_set_irq(drive, 1);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests. It also does
 *	the final remapping for weird stuff like EZDrive. Once
 *	device mapper can work sector level the EZDrive stuff can go away
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	block    = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy))
		block += drive->sect0;

	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/**
 *	choose_drive		-	select a drive to service
 *	@hwgroup: hardware group to select on
 *
 *	choose_drive() selects the next drive which will be serviced.
 *	This is necessary because the IDE layer can't issue commands
 *	to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);

	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int		loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups a chance to play fairly with us,
				 * just in case there are big differences in relative
				 * throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
			/*
			 * set nIEN for previous hwif, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (drive->quirk_list != 1)
				ide_set_irq(drive, 0);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping for ever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up.  So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
		/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
						hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwgroup)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t	*handler;
	ide_expiry_t	*expiry;
	unsigned long	flags;
	unsigned long	wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires  = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif  = HWIF(drive);
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwgroup->hwif->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwgroup: hwgroup being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue.  The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us.  And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 *
 *	Note that we must walk the entire hwgroup here. We know which hwif
 *	is doing the current command, but we don't know which hwif burped
 *	mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif group
 *	@regs: unused weirdness from the kernel irq layer
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do be aware it is subtle in
 *	places.
 *
 *	hwgroup->hwif is the interface in the group currently performing
 *	a command. hwgroup->drive is the drive and hwgroup->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR no matter DMA or PIO;
	 * bmdma status might need to be cleared even for
	 * PIO interrupts to prevent spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is dma interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}

/**
 *	ide_init_drive_cmd	-	initialize a drive command request
 *	@rq: request object
 *
 *	Initialize a request before we fill it in and send it down to
 *	ide_do_drive_cmd. Commands must be set up by this function. Right
 *	now it doesn't do a lot, but if that changes abusers will have a
 *	nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);

/**
 *	ide_do_drive_cmd	-	issue IDE special command
 *	@drive: device to issue command
 *	@rq: request to issue
 *	@action: action for processing
 *
 *	This function issues a special IDE device request
 *	onto the request queue.
 *
 *	If action is ide_wait, then the rq is queued at the end of the
 *	request queue, and the function sleeps until it has been processed.
 *	This is for use when invoked from an ioctl handler.
 *
 *	If action is ide_preempt, then the rq is queued at the head of
 *	the request queue, displacing the currently-being-processed
 *	request and this function returns immediately without waiting
 *	for the new rq to be completed. This is VERY DANGEROUS, and is
 *	intended for careful use by the ATAPI tape/cdrom driver code.
 *
 *	If action is ide_end, then the rq is queued at the end of the
 *	request queue, and the function returns immediately without waiting
 *	for the new rq to be completed. This is again intended for careful
 *	use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);

void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam    = bcount & 0xff;
	task.tf.lbah    = (bcount >> 8) & 0xff;

	ide_tf_load(drive, &task);
}

EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
