/*
 *  linux/drivers/ide/ide-iops.c	Version 0.37	Mar 05, 2003
 *
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2003		Red Hat <alan@redhat.com>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>
/*
 *	Conventional PIO operations for ATA devices
 */
37 static u8
ide_inb (unsigned long port
)
39 return (u8
) inb(port
);
42 static u16
ide_inw (unsigned long port
)
44 return (u16
) inw(port
);
47 static void ide_insw (unsigned long port
, void *addr
, u32 count
)
49 insw(port
, addr
, count
);
52 static u32
ide_inl (unsigned long port
)
54 return (u32
) inl(port
);
57 static void ide_insl (unsigned long port
, void *addr
, u32 count
)
59 insl(port
, addr
, count
);
62 static void ide_outb (u8 val
, unsigned long port
)
67 static void ide_outbsync (ide_drive_t
*drive
, u8 addr
, unsigned long port
)
72 static void ide_outw (u16 val
, unsigned long port
)
77 static void ide_outsw (unsigned long port
, void *addr
, u32 count
)
79 outsw(port
, addr
, count
);
82 static void ide_outl (u32 val
, unsigned long port
)
87 static void ide_outsl (unsigned long port
, void *addr
, u32 count
)
89 outsl(port
, addr
, count
);
92 void default_hwif_iops (ide_hwif_t
*hwif
)
94 hwif
->OUTB
= ide_outb
;
95 hwif
->OUTBSYNC
= ide_outbsync
;
96 hwif
->OUTW
= ide_outw
;
97 hwif
->OUTL
= ide_outl
;
98 hwif
->OUTSW
= ide_outsw
;
99 hwif
->OUTSL
= ide_outsl
;
103 hwif
->INSW
= ide_insw
;
104 hwif
->INSL
= ide_insl
;
107 EXPORT_SYMBOL(default_hwif_iops
);
/*
 *	MMIO operations, typically used for SATA controllers
 */
113 static u8
ide_mm_inb (unsigned long port
)
115 return (u8
) readb((void __iomem
*) port
);
118 static u16
ide_mm_inw (unsigned long port
)
120 return (u16
) readw((void __iomem
*) port
);
123 static void ide_mm_insw (unsigned long port
, void *addr
, u32 count
)
125 __ide_mm_insw((void __iomem
*) port
, addr
, count
);
128 static u32
ide_mm_inl (unsigned long port
)
130 return (u32
) readl((void __iomem
*) port
);
133 static void ide_mm_insl (unsigned long port
, void *addr
, u32 count
)
135 __ide_mm_insl((void __iomem
*) port
, addr
, count
);
138 static void ide_mm_outb (u8 value
, unsigned long port
)
140 writeb(value
, (void __iomem
*) port
);
143 static void ide_mm_outbsync (ide_drive_t
*drive
, u8 value
, unsigned long port
)
145 writeb(value
, (void __iomem
*) port
);
148 static void ide_mm_outw (u16 value
, unsigned long port
)
150 writew(value
, (void __iomem
*) port
);
153 static void ide_mm_outsw (unsigned long port
, void *addr
, u32 count
)
155 __ide_mm_outsw((void __iomem
*) port
, addr
, count
);
158 static void ide_mm_outl (u32 value
, unsigned long port
)
160 writel(value
, (void __iomem
*) port
);
163 static void ide_mm_outsl (unsigned long port
, void *addr
, u32 count
)
165 __ide_mm_outsl((void __iomem
*) port
, addr
, count
);
168 void default_hwif_mmiops (ide_hwif_t
*hwif
)
170 hwif
->OUTB
= ide_mm_outb
;
171 /* Most systems will need to override OUTBSYNC, alas however
172 this one is controller specific! */
173 hwif
->OUTBSYNC
= ide_mm_outbsync
;
174 hwif
->OUTW
= ide_mm_outw
;
175 hwif
->OUTL
= ide_mm_outl
;
176 hwif
->OUTSW
= ide_mm_outsw
;
177 hwif
->OUTSL
= ide_mm_outsl
;
178 hwif
->INB
= ide_mm_inb
;
179 hwif
->INW
= ide_mm_inw
;
180 hwif
->INL
= ide_mm_inl
;
181 hwif
->INSW
= ide_mm_insw
;
182 hwif
->INSL
= ide_mm_insl
;
185 EXPORT_SYMBOL(default_hwif_mmiops
);
187 void default_hwif_transport (ide_hwif_t
*hwif
)
189 hwif
->ata_input_data
= ata_input_data
;
190 hwif
->ata_output_data
= ata_output_data
;
191 hwif
->atapi_input_bytes
= atapi_input_bytes
;
192 hwif
->atapi_output_bytes
= atapi_output_bytes
;
195 EXPORT_SYMBOL(default_hwif_transport
);
197 u32
ide_read_24 (ide_drive_t
*drive
)
199 u8 hcyl
= HWIF(drive
)->INB(IDE_HCYL_REG
);
200 u8 lcyl
= HWIF(drive
)->INB(IDE_LCYL_REG
);
201 u8 sect
= HWIF(drive
)->INB(IDE_SECTOR_REG
);
202 return (hcyl
<<16)|(lcyl
<<8)|sect
;
205 EXPORT_SYMBOL(ide_read_24
);
207 void SELECT_DRIVE (ide_drive_t
*drive
)
209 if (HWIF(drive
)->selectproc
)
210 HWIF(drive
)->selectproc(drive
);
211 HWIF(drive
)->OUTB(drive
->select
.all
, IDE_SELECT_REG
);
214 EXPORT_SYMBOL(SELECT_DRIVE
);
216 void SELECT_INTERRUPT (ide_drive_t
*drive
)
218 if (HWIF(drive
)->intrproc
)
219 HWIF(drive
)->intrproc(drive
);
221 HWIF(drive
)->OUTB(drive
->ctl
|2, IDE_CONTROL_REG
);
224 EXPORT_SYMBOL(SELECT_INTERRUPT
);
226 void SELECT_MASK (ide_drive_t
*drive
, int mask
)
228 if (HWIF(drive
)->maskproc
)
229 HWIF(drive
)->maskproc(drive
, mask
);
232 EXPORT_SYMBOL(SELECT_MASK
);
234 void QUIRK_LIST (ide_drive_t
*drive
)
236 if (HWIF(drive
)->quirkproc
)
237 drive
->quirk_list
= HWIF(drive
)->quirkproc(drive
);
240 EXPORT_SYMBOL(QUIRK_LIST
);
243 * Some localbus EIDE interfaces require a special access sequence
244 * when using 32-bit I/O instructions to transfer data. We call this
245 * the "vlb_sync" sequence, which consists of three successive reads
246 * of the sector count register location, with interrupts disabled
247 * to ensure that the reads all happen together.
249 void ata_vlb_sync (ide_drive_t
*drive
, unsigned long port
)
251 (void) HWIF(drive
)->INB(port
);
252 (void) HWIF(drive
)->INB(port
);
253 (void) HWIF(drive
)->INB(port
);
256 EXPORT_SYMBOL(ata_vlb_sync
);
259 * This is used for most PIO data transfers *from* the IDE interface
261 void ata_input_data (ide_drive_t
*drive
, void *buffer
, u32 wcount
)
263 ide_hwif_t
*hwif
= HWIF(drive
);
264 u8 io_32bit
= drive
->io_32bit
;
269 local_irq_save(flags
);
270 ata_vlb_sync(drive
, IDE_NSECTOR_REG
);
271 hwif
->INSL(IDE_DATA_REG
, buffer
, wcount
);
272 local_irq_restore(flags
);
274 hwif
->INSL(IDE_DATA_REG
, buffer
, wcount
);
276 hwif
->INSW(IDE_DATA_REG
, buffer
, wcount
<<1);
280 EXPORT_SYMBOL(ata_input_data
);
283 * This is used for most PIO data transfers *to* the IDE interface
285 void ata_output_data (ide_drive_t
*drive
, void *buffer
, u32 wcount
)
287 ide_hwif_t
*hwif
= HWIF(drive
);
288 u8 io_32bit
= drive
->io_32bit
;
293 local_irq_save(flags
);
294 ata_vlb_sync(drive
, IDE_NSECTOR_REG
);
295 hwif
->OUTSL(IDE_DATA_REG
, buffer
, wcount
);
296 local_irq_restore(flags
);
298 hwif
->OUTSL(IDE_DATA_REG
, buffer
, wcount
);
300 hwif
->OUTSW(IDE_DATA_REG
, buffer
, wcount
<<1);
304 EXPORT_SYMBOL(ata_output_data
);
307 * The following routines are mainly used by the ATAPI drivers.
309 * These routines will round up any request for an odd number of bytes,
310 * so if an odd bytecount is specified, be sure that there's at least one
311 * extra byte allocated for the buffer.
314 void atapi_input_bytes (ide_drive_t
*drive
, void *buffer
, u32 bytecount
)
316 ide_hwif_t
*hwif
= HWIF(drive
);
319 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
320 if (MACH_IS_ATARI
|| MACH_IS_Q40
) {
321 /* Atari has a byte-swapped IDE interface */
322 insw_swapw(IDE_DATA_REG
, buffer
, bytecount
/ 2);
325 #endif /* CONFIG_ATARI || CONFIG_Q40 */
326 hwif
->ata_input_data(drive
, buffer
, bytecount
/ 4);
327 if ((bytecount
& 0x03) >= 2)
328 hwif
->INSW(IDE_DATA_REG
, ((u8
*)buffer
)+(bytecount
& ~0x03), 1);
331 EXPORT_SYMBOL(atapi_input_bytes
);
333 void atapi_output_bytes (ide_drive_t
*drive
, void *buffer
, u32 bytecount
)
335 ide_hwif_t
*hwif
= HWIF(drive
);
338 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
339 if (MACH_IS_ATARI
|| MACH_IS_Q40
) {
340 /* Atari has a byte-swapped IDE interface */
341 outsw_swapw(IDE_DATA_REG
, buffer
, bytecount
/ 2);
344 #endif /* CONFIG_ATARI || CONFIG_Q40 */
345 hwif
->ata_output_data(drive
, buffer
, bytecount
/ 4);
346 if ((bytecount
& 0x03) >= 2)
347 hwif
->OUTSW(IDE_DATA_REG
, ((u8
*)buffer
)+(bytecount
& ~0x03), 1);
350 EXPORT_SYMBOL(atapi_output_bytes
);
/*
 * Beginning of Taskfile OPCODE Library and feature sets.
 */
355 void ide_fix_driveid (struct hd_driveid
*id
)
357 #ifndef __LITTLE_ENDIAN
362 id
->config
= __le16_to_cpu(id
->config
);
363 id
->cyls
= __le16_to_cpu(id
->cyls
);
364 id
->reserved2
= __le16_to_cpu(id
->reserved2
);
365 id
->heads
= __le16_to_cpu(id
->heads
);
366 id
->track_bytes
= __le16_to_cpu(id
->track_bytes
);
367 id
->sector_bytes
= __le16_to_cpu(id
->sector_bytes
);
368 id
->sectors
= __le16_to_cpu(id
->sectors
);
369 id
->vendor0
= __le16_to_cpu(id
->vendor0
);
370 id
->vendor1
= __le16_to_cpu(id
->vendor1
);
371 id
->vendor2
= __le16_to_cpu(id
->vendor2
);
372 stringcast
= (u16
*)&id
->serial_no
[0];
373 for (i
= 0; i
< (20/2); i
++)
374 stringcast
[i
] = __le16_to_cpu(stringcast
[i
]);
375 id
->buf_type
= __le16_to_cpu(id
->buf_type
);
376 id
->buf_size
= __le16_to_cpu(id
->buf_size
);
377 id
->ecc_bytes
= __le16_to_cpu(id
->ecc_bytes
);
378 stringcast
= (u16
*)&id
->fw_rev
[0];
379 for (i
= 0; i
< (8/2); i
++)
380 stringcast
[i
] = __le16_to_cpu(stringcast
[i
]);
381 stringcast
= (u16
*)&id
->model
[0];
382 for (i
= 0; i
< (40/2); i
++)
383 stringcast
[i
] = __le16_to_cpu(stringcast
[i
]);
384 id
->dword_io
= __le16_to_cpu(id
->dword_io
);
385 id
->reserved50
= __le16_to_cpu(id
->reserved50
);
386 id
->field_valid
= __le16_to_cpu(id
->field_valid
);
387 id
->cur_cyls
= __le16_to_cpu(id
->cur_cyls
);
388 id
->cur_heads
= __le16_to_cpu(id
->cur_heads
);
389 id
->cur_sectors
= __le16_to_cpu(id
->cur_sectors
);
390 id
->cur_capacity0
= __le16_to_cpu(id
->cur_capacity0
);
391 id
->cur_capacity1
= __le16_to_cpu(id
->cur_capacity1
);
392 id
->lba_capacity
= __le32_to_cpu(id
->lba_capacity
);
393 id
->dma_1word
= __le16_to_cpu(id
->dma_1word
);
394 id
->dma_mword
= __le16_to_cpu(id
->dma_mword
);
395 id
->eide_pio_modes
= __le16_to_cpu(id
->eide_pio_modes
);
396 id
->eide_dma_min
= __le16_to_cpu(id
->eide_dma_min
);
397 id
->eide_dma_time
= __le16_to_cpu(id
->eide_dma_time
);
398 id
->eide_pio
= __le16_to_cpu(id
->eide_pio
);
399 id
->eide_pio_iordy
= __le16_to_cpu(id
->eide_pio_iordy
);
400 for (i
= 0; i
< 2; ++i
)
401 id
->words69_70
[i
] = __le16_to_cpu(id
->words69_70
[i
]);
402 for (i
= 0; i
< 4; ++i
)
403 id
->words71_74
[i
] = __le16_to_cpu(id
->words71_74
[i
]);
404 id
->queue_depth
= __le16_to_cpu(id
->queue_depth
);
405 for (i
= 0; i
< 4; ++i
)
406 id
->words76_79
[i
] = __le16_to_cpu(id
->words76_79
[i
]);
407 id
->major_rev_num
= __le16_to_cpu(id
->major_rev_num
);
408 id
->minor_rev_num
= __le16_to_cpu(id
->minor_rev_num
);
409 id
->command_set_1
= __le16_to_cpu(id
->command_set_1
);
410 id
->command_set_2
= __le16_to_cpu(id
->command_set_2
);
411 id
->cfsse
= __le16_to_cpu(id
->cfsse
);
412 id
->cfs_enable_1
= __le16_to_cpu(id
->cfs_enable_1
);
413 id
->cfs_enable_2
= __le16_to_cpu(id
->cfs_enable_2
);
414 id
->csf_default
= __le16_to_cpu(id
->csf_default
);
415 id
->dma_ultra
= __le16_to_cpu(id
->dma_ultra
);
416 id
->trseuc
= __le16_to_cpu(id
->trseuc
);
417 id
->trsEuc
= __le16_to_cpu(id
->trsEuc
);
418 id
->CurAPMvalues
= __le16_to_cpu(id
->CurAPMvalues
);
419 id
->mprc
= __le16_to_cpu(id
->mprc
);
420 id
->hw_config
= __le16_to_cpu(id
->hw_config
);
421 id
->acoustic
= __le16_to_cpu(id
->acoustic
);
422 id
->msrqs
= __le16_to_cpu(id
->msrqs
);
423 id
->sxfert
= __le16_to_cpu(id
->sxfert
);
424 id
->sal
= __le16_to_cpu(id
->sal
);
425 id
->spg
= __le32_to_cpu(id
->spg
);
426 id
->lba_capacity_2
= __le64_to_cpu(id
->lba_capacity_2
);
427 for (i
= 0; i
< 22; i
++)
428 id
->words104_125
[i
] = __le16_to_cpu(id
->words104_125
[i
]);
429 id
->last_lun
= __le16_to_cpu(id
->last_lun
);
430 id
->word127
= __le16_to_cpu(id
->word127
);
431 id
->dlf
= __le16_to_cpu(id
->dlf
);
432 id
->csfo
= __le16_to_cpu(id
->csfo
);
433 for (i
= 0; i
< 26; i
++)
434 id
->words130_155
[i
] = __le16_to_cpu(id
->words130_155
[i
]);
435 id
->word156
= __le16_to_cpu(id
->word156
);
436 for (i
= 0; i
< 3; i
++)
437 id
->words157_159
[i
] = __le16_to_cpu(id
->words157_159
[i
]);
438 id
->cfa_power
= __le16_to_cpu(id
->cfa_power
);
439 for (i
= 0; i
< 14; i
++)
440 id
->words161_175
[i
] = __le16_to_cpu(id
->words161_175
[i
]);
441 for (i
= 0; i
< 31; i
++)
442 id
->words176_205
[i
] = __le16_to_cpu(id
->words176_205
[i
]);
443 for (i
= 0; i
< 48; i
++)
444 id
->words206_254
[i
] = __le16_to_cpu(id
->words206_254
[i
]);
445 id
->integrity_word
= __le16_to_cpu(id
->integrity_word
);
447 # error "Please fix <asm/byteorder.h>"
452 EXPORT_SYMBOL(ide_fix_driveid
);
454 void ide_fixstring (u8
*s
, const int bytecount
, const int byteswap
)
456 u8
*p
= s
, *end
= &s
[bytecount
& ~1]; /* bytecount must be even */
459 /* convert from big-endian to host byte order */
460 for (p
= end
; p
!= s
;) {
461 unsigned short *pp
= (unsigned short *) (p
-= 2);
465 /* strip leading blanks */
466 while (s
!= end
&& *s
== ' ')
468 /* compress internal blanks and strip trailing blanks */
469 while (s
!= end
&& *s
) {
470 if (*s
++ != ' ' || (s
!= end
&& *s
&& *s
!= ' '))
473 /* wipe out trailing garbage */
478 EXPORT_SYMBOL(ide_fixstring
);
481 * Needed for PCI irq sharing
483 int drive_is_ready (ide_drive_t
*drive
)
485 ide_hwif_t
*hwif
= HWIF(drive
);
488 if (drive
->waiting_for_dma
)
489 return hwif
->ide_dma_test_irq(drive
);
492 /* need to guarantee 400ns since last command was issued */
496 #ifdef CONFIG_IDEPCI_SHARE_IRQ
498 * We do a passive status test under shared PCI interrupts on
499 * cards that truly share the ATA side interrupt, but may also share
500 * an interrupt with another pci card/device. We make no assumptions
501 * about possible isa-pnp and pci-pnp issues yet.
504 stat
= hwif
->INB(IDE_ALTSTATUS_REG
);
506 #endif /* CONFIG_IDEPCI_SHARE_IRQ */
507 /* Note: this may clear a pending IRQ!! */
508 stat
= hwif
->INB(IDE_STATUS_REG
);
510 if (stat
& BUSY_STAT
)
511 /* drive busy: definitely not interrupting */
514 /* drive ready: *might* be interrupting */
518 EXPORT_SYMBOL(drive_is_ready
);
521 * Global for All, and taken from ide-pmac.c. Can be called
522 * with spinlock held & IRQs disabled, so don't schedule !
524 int wait_for_ready (ide_drive_t
*drive
, int timeout
)
526 ide_hwif_t
*hwif
= HWIF(drive
);
530 stat
= hwif
->INB(IDE_STATUS_REG
);
531 if (!(stat
& BUSY_STAT
)) {
532 if (drive
->ready_stat
== 0)
534 else if ((stat
& drive
->ready_stat
)||(stat
& ERR_STAT
))
539 if ((stat
& ERR_STAT
) || timeout
<= 0) {
540 if (stat
& ERR_STAT
) {
541 printk(KERN_ERR
"%s: wait_for_ready, "
542 "error status: %x\n", drive
->name
, stat
);
549 EXPORT_SYMBOL(wait_for_ready
);
552 * This routine busy-waits for the drive status to be not "busy".
553 * It then checks the status for all of the "good" bits and none
554 * of the "bad" bits, and if all is okay it returns 0. All other
555 * cases return 1 after invoking ide_error() -- caller should just return.
557 * This routine should get fixed to not hog the cpu during extra long waits..
558 * That could be done by busy-waiting for the first jiffy or two, and then
559 * setting a timer to wake up at half second intervals thereafter,
560 * until timeout is achieved, before timing out.
562 int ide_wait_stat (ide_startstop_t
*startstop
, ide_drive_t
*drive
, u8 good
, u8 bad
, unsigned long timeout
)
564 ide_hwif_t
*hwif
= HWIF(drive
);
569 /* bail early if we've exceeded max_failures */
570 if (drive
->max_failures
&& (drive
->failures
> drive
->max_failures
)) {
571 *startstop
= ide_stopped
;
575 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
576 if ((stat
= hwif
->INB(IDE_STATUS_REG
)) & BUSY_STAT
) {
577 local_irq_set(flags
);
579 while ((stat
= hwif
->INB(IDE_STATUS_REG
)) & BUSY_STAT
) {
580 if (time_after(jiffies
, timeout
)) {
582 * One last read after the timeout in case
583 * heavy interrupt load made us not make any
584 * progress during the timeout..
586 stat
= hwif
->INB(IDE_STATUS_REG
);
587 if (!(stat
& BUSY_STAT
))
590 local_irq_restore(flags
);
591 *startstop
= DRIVER(drive
)->error(drive
, "status timeout", stat
);
595 local_irq_restore(flags
);
598 * Allow status to settle, then read it again.
599 * A few rare drives vastly violate the 400ns spec here,
600 * so we'll wait up to 10usec for a "good" status
601 * rather than expensively fail things immediately.
602 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
604 for (i
= 0; i
< 10; i
++) {
606 if (OK_STAT((stat
= hwif
->INB(IDE_STATUS_REG
)), good
, bad
))
609 *startstop
= DRIVER(drive
)->error(drive
, "status error", stat
);
613 EXPORT_SYMBOL(ide_wait_stat
);
616 * All hosts that use the 80c ribbon must use!
617 * The name is derived from upper byte of word 93 and the 80c ribbon.
619 u8
eighty_ninty_three (ide_drive_t
*drive
)
622 if (!HWIF(drive
)->udma_four
)
625 if (drive
->id
->major_rev_num
) {
629 * Determine highest Supported SPEC
631 for (i
=1; i
<=15; i
++)
632 if (drive
->id
->major_rev_num
& (1<<i
))
639 /* ATA-4 and older do not support above Ultra 33 */
646 #ifndef CONFIG_IDEDMA_IVB
647 (drive
->id
->hw_config
& 0x4000) &&
648 #endif /* CONFIG_IDEDMA_IVB */
649 (drive
->id
->hw_config
& 0x6000)) ? 1 : 0);
653 return ((u8
) ((HWIF(drive
)->udma_four
) &&
654 #ifndef CONFIG_IDEDMA_IVB
655 (drive
->id
->hw_config
& 0x4000) &&
656 #endif /* CONFIG_IDEDMA_IVB */
657 (drive
->id
->hw_config
& 0x6000)) ? 1 : 0);
661 EXPORT_SYMBOL(eighty_ninty_three
);
663 int ide_ata66_check (ide_drive_t
*drive
, ide_task_t
*args
)
665 if ((args
->tfRegister
[IDE_COMMAND_OFFSET
] == WIN_SETFEATURES
) &&
666 (args
->tfRegister
[IDE_SECTOR_OFFSET
] > XFER_UDMA_2
) &&
667 (args
->tfRegister
[IDE_FEATURE_OFFSET
] == SETFEATURES_XFER
)) {
668 #ifndef CONFIG_IDEDMA_IVB
669 if ((drive
->id
->hw_config
& 0x6000) == 0) {
670 #else /* !CONFIG_IDEDMA_IVB */
671 if (((drive
->id
->hw_config
& 0x2000) == 0) ||
672 ((drive
->id
->hw_config
& 0x4000) == 0)) {
673 #endif /* CONFIG_IDEDMA_IVB */
674 printk("%s: Speed warnings UDMA 3/4/5 is not "
675 "functional.\n", drive
->name
);
678 if (!HWIF(drive
)->udma_four
) {
679 printk("%s: Speed warnings UDMA 3/4/5 is not "
688 EXPORT_SYMBOL(ide_ata66_check
);
691 * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER.
692 * 1 : Safe to update drive->id DMA registers.
693 * 0 : OOPs not allowed.
695 int set_transfer (ide_drive_t
*drive
, ide_task_t
*args
)
697 if ((args
->tfRegister
[IDE_COMMAND_OFFSET
] == WIN_SETFEATURES
) &&
698 (args
->tfRegister
[IDE_SECTOR_OFFSET
] >= XFER_SW_DMA_0
) &&
699 (args
->tfRegister
[IDE_FEATURE_OFFSET
] == SETFEATURES_XFER
) &&
700 (drive
->id
->dma_ultra
||
701 drive
->id
->dma_mword
||
702 drive
->id
->dma_1word
))
708 EXPORT_SYMBOL(set_transfer
);
710 u8
ide_auto_reduce_xfer (ide_drive_t
*drive
)
712 if (!drive
->crc_count
)
713 return drive
->current_speed
;
714 drive
->crc_count
= 0;
716 switch(drive
->current_speed
) {
717 case XFER_UDMA_7
: return XFER_UDMA_6
;
718 case XFER_UDMA_6
: return XFER_UDMA_5
;
719 case XFER_UDMA_5
: return XFER_UDMA_4
;
720 case XFER_UDMA_4
: return XFER_UDMA_3
;
721 case XFER_UDMA_3
: return XFER_UDMA_2
;
722 case XFER_UDMA_2
: return XFER_UDMA_1
;
723 case XFER_UDMA_1
: return XFER_UDMA_0
;
725 * OOPS we do not goto non Ultra DMA modes
726 * without iCRC's available we force
727 * the system to PIO and make the user
728 * invoke the ATA-1 ATA-2 DMA modes.
731 default: return XFER_PIO_4
;
735 EXPORT_SYMBOL(ide_auto_reduce_xfer
);
740 int ide_driveid_update (ide_drive_t
*drive
)
742 ide_hwif_t
*hwif
= HWIF(drive
);
743 struct hd_driveid
*id
;
745 id
= kmalloc(SECTOR_WORDS
*4, GFP_ATOMIC
);
749 taskfile_lib_get_identify(drive
, (char *)&id
);
753 drive
->id
->dma_ultra
= id
->dma_ultra
;
754 drive
->id
->dma_mword
= id
->dma_mword
;
755 drive
->id
->dma_1word
= id
->dma_1word
;
756 /* anything more ? */
762 * Re-read drive->id for possible DMA mode
763 * change (copied from ide-probe.c)
765 unsigned long timeout
, flags
;
767 SELECT_MASK(drive
, 1);
769 hwif
->OUTB(drive
->ctl
,IDE_CONTROL_REG
);
771 hwif
->OUTB(WIN_IDENTIFY
, IDE_COMMAND_REG
);
772 timeout
= jiffies
+ WAIT_WORSTCASE
;
774 if (time_after(jiffies
, timeout
)) {
775 SELECT_MASK(drive
, 0);
776 return 0; /* drive timed-out */
778 msleep(50); /* give drive a breather */
779 } while (hwif
->INB(IDE_ALTSTATUS_REG
) & BUSY_STAT
);
780 msleep(50); /* wait for IRQ and DRQ_STAT */
781 if (!OK_STAT(hwif
->INB(IDE_STATUS_REG
),DRQ_STAT
,BAD_R_STAT
)) {
782 SELECT_MASK(drive
, 0);
783 printk("%s: CHECK for good STATUS\n", drive
->name
);
786 local_irq_save(flags
);
787 SELECT_MASK(drive
, 0);
788 id
= kmalloc(SECTOR_WORDS
*4, GFP_ATOMIC
);
790 local_irq_restore(flags
);
793 ata_input_data(drive
, id
, SECTOR_WORDS
);
794 (void) hwif
->INB(IDE_STATUS_REG
); /* clear drive IRQ */
796 local_irq_restore(flags
);
799 drive
->id
->dma_ultra
= id
->dma_ultra
;
800 drive
->id
->dma_mword
= id
->dma_mword
;
801 drive
->id
->dma_1word
= id
->dma_1word
;
802 /* anything more ? */
810 EXPORT_SYMBOL(ide_driveid_update
);
813 * Similar to ide_wait_stat(), except it never calls ide_error internally.
814 * This is a kludge to handle the new ide_config_drive_speed() function,
815 * and should not otherwise be used anywhere. Eventually, the tuneproc's
816 * should be updated to return ide_startstop_t, in which case we can get
817 * rid of this abomination again. :) -ml
819 * It is gone..........
821 * const char *msg == consider adding for verbose errors.
823 int ide_config_drive_speed (ide_drive_t
*drive
, u8 speed
)
825 ide_hwif_t
*hwif
= HWIF(drive
);
829 // while (HWGROUP(drive)->busy)
832 #ifdef CONFIG_BLK_DEV_IDEDMA
833 if (hwif
->ide_dma_check
) /* check if host supports DMA */
834 hwif
->ide_dma_host_off(drive
);
838 * Don't use ide_wait_cmd here - it will
839 * attempt to set_geometry and recalibrate,
840 * but for some reason these don't work at
841 * this point (lost interrupt).
844 * Select the drive, and issue the SETFEATURES command
846 disable_irq_nosync(hwif
->irq
);
849 * FIXME: we race against the running IRQ here if
850 * this is called from non IRQ context. If we use
851 * disable_irq() we hang on the error path. Work
857 SELECT_MASK(drive
, 0);
860 hwif
->OUTB(drive
->ctl
| 2, IDE_CONTROL_REG
);
861 hwif
->OUTB(speed
, IDE_NSECTOR_REG
);
862 hwif
->OUTB(SETFEATURES_XFER
, IDE_FEATURE_REG
);
863 hwif
->OUTB(WIN_SETFEATURES
, IDE_COMMAND_REG
);
864 if ((IDE_CONTROL_REG
) && (drive
->quirk_list
== 2))
865 hwif
->OUTB(drive
->ctl
, IDE_CONTROL_REG
);
868 * Wait for drive to become non-BUSY
870 if ((stat
= hwif
->INB(IDE_STATUS_REG
)) & BUSY_STAT
) {
871 unsigned long flags
, timeout
;
872 local_irq_set(flags
);
873 timeout
= jiffies
+ WAIT_CMD
;
874 while ((stat
= hwif
->INB(IDE_STATUS_REG
)) & BUSY_STAT
) {
875 if (time_after(jiffies
, timeout
))
878 local_irq_restore(flags
);
882 * Allow status to settle, then read it again.
883 * A few rare drives vastly violate the 400ns spec here,
884 * so we'll wait up to 10usec for a "good" status
885 * rather than expensively fail things immediately.
886 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
888 for (i
= 0; i
< 10; i
++) {
890 if (OK_STAT((stat
= hwif
->INB(IDE_STATUS_REG
)), DRIVE_READY
, BUSY_STAT
|DRQ_STAT
|ERR_STAT
)) {
896 SELECT_MASK(drive
, 0);
898 enable_irq(hwif
->irq
);
901 (void) ide_dump_status(drive
, "set_drive_speed_status", stat
);
905 drive
->id
->dma_ultra
&= ~0xFF00;
906 drive
->id
->dma_mword
&= ~0x0F00;
907 drive
->id
->dma_1word
&= ~0x0F00;
909 #ifdef CONFIG_BLK_DEV_IDEDMA
910 if (speed
>= XFER_SW_DMA_0
)
911 hwif
->ide_dma_host_on(drive
);
912 else if (hwif
->ide_dma_check
) /* check if host supports DMA */
913 hwif
->ide_dma_off_quietly(drive
);
917 case XFER_UDMA_7
: drive
->id
->dma_ultra
|= 0x8080; break;
918 case XFER_UDMA_6
: drive
->id
->dma_ultra
|= 0x4040; break;
919 case XFER_UDMA_5
: drive
->id
->dma_ultra
|= 0x2020; break;
920 case XFER_UDMA_4
: drive
->id
->dma_ultra
|= 0x1010; break;
921 case XFER_UDMA_3
: drive
->id
->dma_ultra
|= 0x0808; break;
922 case XFER_UDMA_2
: drive
->id
->dma_ultra
|= 0x0404; break;
923 case XFER_UDMA_1
: drive
->id
->dma_ultra
|= 0x0202; break;
924 case XFER_UDMA_0
: drive
->id
->dma_ultra
|= 0x0101; break;
925 case XFER_MW_DMA_2
: drive
->id
->dma_mword
|= 0x0404; break;
926 case XFER_MW_DMA_1
: drive
->id
->dma_mword
|= 0x0202; break;
927 case XFER_MW_DMA_0
: drive
->id
->dma_mword
|= 0x0101; break;
928 case XFER_SW_DMA_2
: drive
->id
->dma_1word
|= 0x0404; break;
929 case XFER_SW_DMA_1
: drive
->id
->dma_1word
|= 0x0202; break;
930 case XFER_SW_DMA_0
: drive
->id
->dma_1word
|= 0x0101; break;
933 if (!drive
->init_speed
)
934 drive
->init_speed
= speed
;
935 drive
->current_speed
= speed
;
939 EXPORT_SYMBOL(ide_config_drive_speed
);
943 * This should get invoked any time we exit the driver to
944 * wait for an interrupt response from a drive. handler() points
945 * at the appropriate code to handle the next interrupt, and a
946 * timer is started to prevent us from waiting forever in case
947 * something goes wrong (see the ide_timer_expiry() handler later on).
949 * See also ide_execute_command
951 void __ide_set_handler (ide_drive_t
*drive
, ide_handler_t
*handler
,
952 unsigned int timeout
, ide_expiry_t
*expiry
)
954 ide_hwgroup_t
*hwgroup
= HWGROUP(drive
);
956 if (hwgroup
->handler
!= NULL
) {
957 printk(KERN_CRIT
"%s: ide_set_handler: handler not null; "
959 drive
->name
, hwgroup
->handler
, handler
);
961 hwgroup
->handler
= handler
;
962 hwgroup
->expiry
= expiry
;
963 hwgroup
->timer
.expires
= jiffies
+ timeout
;
964 add_timer(&hwgroup
->timer
);
967 EXPORT_SYMBOL(__ide_set_handler
);
969 void ide_set_handler (ide_drive_t
*drive
, ide_handler_t
*handler
,
970 unsigned int timeout
, ide_expiry_t
*expiry
)
973 spin_lock_irqsave(&ide_lock
, flags
);
974 __ide_set_handler(drive
, handler
, timeout
, expiry
);
975 spin_unlock_irqrestore(&ide_lock
, flags
);
978 EXPORT_SYMBOL(ide_set_handler
);
981 * ide_execute_command - execute an IDE command
982 * @drive: IDE drive to issue the command against
983 * @command: command byte to write
984 * @handler: handler for next phase
985 * @timeout: timeout for command
986 * @expiry: handler to run on timeout
988 * Helper function to issue an IDE command. This handles the
989 * atomicity requirements, command timing and ensures that the
990 * handler and IRQ setup do not race. All IDE command kick off
991 * should go via this function or do equivalent locking.
994 void ide_execute_command(ide_drive_t
*drive
, task_ioreg_t cmd
, ide_handler_t
*handler
, unsigned timeout
, ide_expiry_t
*expiry
)
997 ide_hwgroup_t
*hwgroup
= HWGROUP(drive
);
998 ide_hwif_t
*hwif
= HWIF(drive
);
1000 spin_lock_irqsave(&ide_lock
, flags
);
1002 if(hwgroup
->handler
)
1004 hwgroup
->handler
= handler
;
1005 hwgroup
->expiry
= expiry
;
1006 hwgroup
->timer
.expires
= jiffies
+ timeout
;
1007 add_timer(&hwgroup
->timer
);
1008 hwif
->OUTBSYNC(drive
, cmd
, IDE_COMMAND_REG
);
1009 /* Drive takes 400nS to respond, we must avoid the IRQ being
1010 serviced before that.
1012 FIXME: we could skip this delay with care on non shared
1016 spin_unlock_irqrestore(&ide_lock
, flags
);
1019 EXPORT_SYMBOL(ide_execute_command
);
1023 static ide_startstop_t
do_reset1 (ide_drive_t
*, int);
1026 * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
1027 * during an atapi drive reset operation. If the drive has not yet responded,
1028 * and we have not yet hit our maximum waiting time, then the timer is restarted
1031 static ide_startstop_t
atapi_reset_pollfunc (ide_drive_t
*drive
)
1033 ide_hwgroup_t
*hwgroup
= HWGROUP(drive
);
1034 ide_hwif_t
*hwif
= HWIF(drive
);
1037 SELECT_DRIVE(drive
);
1040 if (OK_STAT(stat
= hwif
->INB(IDE_STATUS_REG
), 0, BUSY_STAT
)) {
1041 printk("%s: ATAPI reset complete\n", drive
->name
);
1043 if (time_before(jiffies
, hwgroup
->poll_timeout
)) {
1044 if (HWGROUP(drive
)->handler
!= NULL
)
1046 ide_set_handler(drive
, &atapi_reset_pollfunc
, HZ
/20, NULL
);
1047 /* continue polling */
1050 /* end of polling */
1051 hwgroup
->poll_timeout
= 0;
1052 printk("%s: ATAPI reset timed-out, status=0x%02x\n",
1054 /* do it the old fashioned way */
1055 return do_reset1(drive
, 1);
1058 hwgroup
->poll_timeout
= 0;
1063 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
1064 * during an ide reset operation. If the drives have not yet responded,
1065 * and we have not yet hit our maximum waiting time, then the timer is restarted
1068 static ide_startstop_t
reset_pollfunc (ide_drive_t
*drive
)
1070 ide_hwgroup_t
*hwgroup
= HWGROUP(drive
);
1071 ide_hwif_t
*hwif
= HWIF(drive
);
1074 if (hwif
->reset_poll
!= NULL
) {
1075 if (hwif
->reset_poll(drive
)) {
1076 printk(KERN_ERR
"%s: host reset_poll failure for %s.\n",
1077 hwif
->name
, drive
->name
);
1082 if (!OK_STAT(tmp
= hwif
->INB(IDE_STATUS_REG
), 0, BUSY_STAT
)) {
1083 if (time_before(jiffies
, hwgroup
->poll_timeout
)) {
1084 if (HWGROUP(drive
)->handler
!= NULL
)
1086 ide_set_handler(drive
, &reset_pollfunc
, HZ
/20, NULL
);
1087 /* continue polling */
1090 printk("%s: reset timed-out, status=0x%02x\n", hwif
->name
, tmp
);
1093 printk("%s: reset: ", hwif
->name
);
1094 if ((tmp
= hwif
->INB(IDE_ERROR_REG
)) == 1) {
1095 printk("success\n");
1096 drive
->failures
= 0;
1100 switch (tmp
& 0x7f) {
1101 case 1: printk("passed");
1103 case 2: printk("formatter device error");
1105 case 3: printk("sector buffer error");
1107 case 4: printk("ECC circuitry error");
1109 case 5: printk("controlling MPU error");
1111 default:printk("error (0x%02x?)", tmp
);
1114 printk("; slave: failed");
1118 hwgroup
->poll_timeout
= 0; /* done polling */
1122 static void check_dma_crc(ide_drive_t
*drive
)
1124 #ifdef CONFIG_BLK_DEV_IDEDMA
1125 if (drive
->crc_count
) {
1126 (void) HWIF(drive
)->ide_dma_off_quietly(drive
);
1127 ide_set_xfer_rate(drive
, ide_auto_reduce_xfer(drive
));
1128 if (drive
->current_speed
>= XFER_SW_DMA_0
)
1129 (void) HWIF(drive
)->ide_dma_on(drive
);
1131 (void)__ide_dma_off(drive
);
1135 void pre_reset (ide_drive_t
*drive
)
1137 DRIVER(drive
)->pre_reset(drive
);
1139 if (!drive
->keep_settings
) {
1140 if (drive
->using_dma
) {
1141 check_dma_crc(drive
);
1144 drive
->io_32bit
= 0;
1148 if (drive
->using_dma
)
1149 check_dma_crc(drive
);
1151 if (HWIF(drive
)->pre_reset
!= NULL
)
1152 HWIF(drive
)->pre_reset(drive
);
1157 * do_reset1() attempts to recover a confused drive by resetting it.
1158 * Unfortunately, resetting a disk drive actually resets all devices on
1159 * the same interface, so it can really be thought of as resetting the
1160 * interface rather than resetting the drive.
1162 * ATAPI devices have their own reset mechanism which allows them to be
1163 * individually reset without clobbering other devices on the same interface.
1165 * Unfortunately, the IDE interface does not generate an interrupt to let
1166 * us know when the reset operation has finished, so we must poll for this.
1167 * Equally poor, though, is the fact that this may a very long time to complete,
1168 * (up to 30 seconds worstcase). So, instead of busy-waiting here for it,
1169 * we set a timer to poll at 50ms intervals.
1171 static ide_startstop_t
do_reset1 (ide_drive_t
*drive
, int do_not_try_atapi
)
1174 unsigned long flags
;
1176 ide_hwgroup_t
*hwgroup
;
1178 spin_lock_irqsave(&ide_lock
, flags
);
1180 hwgroup
= HWGROUP(drive
);
1182 /* We must not reset with running handlers */
1183 if(hwgroup
->handler
!= NULL
)
1186 /* For an ATAPI device, first try an ATAPI SRST. */
1187 if (drive
->media
!= ide_disk
&& !do_not_try_atapi
) {
1189 SELECT_DRIVE(drive
);
1191 hwif
->OUTB(WIN_SRST
, IDE_COMMAND_REG
);
1192 hwgroup
->poll_timeout
= jiffies
+ WAIT_WORSTCASE
;
1193 __ide_set_handler(drive
, &atapi_reset_pollfunc
, HZ
/20, NULL
);
1194 spin_unlock_irqrestore(&ide_lock
, flags
);
1199 * First, reset any device state data we were maintaining
1200 * for any of the drives on this interface.
1202 for (unit
= 0; unit
< MAX_DRIVES
; ++unit
)
1203 pre_reset(&hwif
->drives
[unit
]);
1205 #if OK_TO_RESET_CONTROLLER
1206 if (!IDE_CONTROL_REG
) {
1207 spin_unlock_irqrestore(&ide_lock
, flags
);
1212 * Note that we also set nIEN while resetting the device,
1213 * to mask unwanted interrupts from the interface during the reset.
1214 * However, due to the design of PC hardware, this will cause an
1215 * immediate interrupt due to the edge transition it produces.
1216 * This single interrupt gives us a "fast poll" for drives that
1217 * recover from reset very quickly, saving us the first 50ms wait time.
1219 /* set SRST and nIEN */
1220 hwif
->OUTBSYNC(drive
, drive
->ctl
|6,IDE_CONTROL_REG
);
1221 /* more than enough time */
1223 if (drive
->quirk_list
== 2) {
1224 /* clear SRST and nIEN */
1225 hwif
->OUTBSYNC(drive
, drive
->ctl
, IDE_CONTROL_REG
);
1227 /* clear SRST, leave nIEN */
1228 hwif
->OUTBSYNC(drive
, drive
->ctl
|2, IDE_CONTROL_REG
);
1230 /* more than enough time */
1232 hwgroup
->poll_timeout
= jiffies
+ WAIT_WORSTCASE
;
1233 __ide_set_handler(drive
, &reset_pollfunc
, HZ
/20, NULL
);
1236 * Some weird controller like resetting themselves to a strange
1237 * state when the disks are reset this way. At least, the Winbond
1238 * 553 documentation says that
1240 if (hwif
->resetproc
!= NULL
) {
1241 hwif
->resetproc(drive
);
1244 #endif /* OK_TO_RESET_CONTROLLER */
1246 spin_unlock_irqrestore(&ide_lock
, flags
);
1251 * ide_do_reset() is the entry point to the drive/interface reset code.
1254 ide_startstop_t
ide_do_reset (ide_drive_t
*drive
)
1256 return do_reset1(drive
, 0);
1259 EXPORT_SYMBOL(ide_do_reset
);
1262 * ide_wait_not_busy() waits for the currently selected device on the hwif
1263 * to report a non-busy status, see comments in probe_hwif().
1265 int ide_wait_not_busy(ide_hwif_t
*hwif
, unsigned long timeout
)
1271 * Turn this into a schedule() sleep once I'm sure
1272 * about locking issues (2.5 work ?).
1275 stat
= hwif
->INB(hwif
->io_ports
[IDE_STATUS_OFFSET
]);
1276 if ((stat
& BUSY_STAT
) == 0)
1279 * Assume a value of 0xff means nothing is connected to
1280 * the interface and it doesn't implement the pull-down
1289 EXPORT_SYMBOL_GPL(ide_wait_not_busy
);