Cleaned up AMD Au1200 IDE driver:
[linux-2.6/verdex.git] drivers/ide/mips/au1xxx-ide.c
/*
 * linux/drivers/ide/mips/au1xxx-ide.c  version 01.30.00  Aug. 02 2005
 *
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 *       IDE Interface and Linux Device Driver" Application Note.
 */
#undef REALLY_SLOW_IO		/* most systems can safely undef this */

#include <linux/config.h>	/* for CONFIG_BLK_DEV_IDEPCI */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/sysdev.h>

#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#ifdef CONFIG_PM
#include <asm/mach-au1x00/au1xxx_pm.h>
#endif

#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME	"au1200-ide"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"AMD PCS / Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC	"Au1200 IDE"

static _auide_hwif auide_hwif;
static spinlock_t ide_tune_drive_spin_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t ide_tune_chipset_spin_lock = SPIN_LOCK_UNLOCKED;
static int dbdma_init_done = 0;

/*
 * local I/O functions
 */
u8 auide_inb(unsigned long port)
{
	return (au_readb(port));
}

u16 auide_inw(unsigned long port)
{
	return (au_readw(port));
}

u32 auide_inl(unsigned long port)
{
	return (au_readl(port));
}
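
/*
 * String I/O helpers: with CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA the 16-bit
 * multi-word transfers below are queued on the descriptor-based DMA engine
 * (DbDMA) and the CPU only waits for the descriptor to complete; otherwise
 * they fall back to plain programmed I/O on the static bus.
 */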
void auide_insw(unsigned long port, void *addr, u32 count)
{
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
			    DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}
	ctp = *((chan_tab_t **)ahwif->rx_chan);
	dp = ctp->cur_ptr;
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
#else
	while (count--) {
		*(u16 *)addr = au_readw(port);
		addr += 2;
	}
#endif
}

void auide_insl(unsigned long port, void *addr, u32 count)
{
	while (count--) {
		*(u32 *)addr = au_readl(port);
		/* NOTE: For IDE interfaces over PCMCIA,
		 * 32-bit access does not work
		 */
		addr += 4;
	}
}

void auide_outb(u8 addr, unsigned long port)
{
	return (au_writeb(addr, port));
}

void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
{
	return (au_writeb(addr, port));
}

void auide_outw(u16 addr, unsigned long port)
{
	return (au_writew(addr, port));
}

void auide_outl(u32 addr, unsigned long port)
{
	return (au_writel(addr, port));
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
	_auide_hwif *ahwif = &auide_hwif;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	if (!put_source_flags(ahwif->tx_chan, (void *)addr,
			      count << 1, DDMA_FLAGS_NOIE)) {
		printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
		return;
	}
	ctp = *((chan_tab_t **)ahwif->tx_chan);
	dp = ctp->cur_ptr;
	while (dp->dscr_cmd0 & DSCR_CMD0_V)
		;
	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
#else
	while (count--) {
		au_writew(*(u16 *)addr, port);
		addr += 2;
	}
#endif
}

void auide_outsl(unsigned long port, void *addr, u32 count)
{
	while (count--) {
		au_writel(*(u32 *)addr, port);
		/* NOTE: For IDE interfaces over PCMCIA,
		 * 32-bit access does not work
		 */
		addr += 4;
	}
}
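
/*
 * PIO tuning: program the Au1200 static bus controller timing (MEM_STTIME2)
 * and configuration (MEM_STCFG2) registers for RCS2# according to the PIO
 * mode selected for the drive.
 */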
static void auide_tune_drive(ide_drive_t *drive, byte pio)
{
	int mem_sttime;
	int mem_stcfg;
	unsigned long flags;
	u8 speed;

	/* get the best pio mode for the drive */
	pio = ide_get_best_pio_mode(drive, pio, 4, NULL);

	printk("%s: setting Au1XXX IDE to PIO mode%d\n",
	       drive->name, pio);

	spin_lock_irqsave(&ide_tune_drive_spin_lock, flags);

	mem_sttime = 0;
	mem_stcfg = au_readl(MEM_STCFG2);

	/* set pio mode! */
	switch (pio) {
	case 0:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_PIO0_TWCS
			| SBC_IDE_PIO0_TCSH
			| SBC_IDE_PIO0_TCSOFF
			| SBC_IDE_PIO0_TWP
			| SBC_IDE_PIO0_TCSW
			| SBC_IDE_PIO0_TPM
			| SBC_IDE_PIO0_TA;
		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;

		au_writel(mem_sttime, MEM_STTIME2);
		au_writel(mem_stcfg, MEM_STCFG2);
		break;

	case 1:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_PIO1_TWCS
			| SBC_IDE_PIO1_TCSH
			| SBC_IDE_PIO1_TCSOFF
			| SBC_IDE_PIO1_TWP
			| SBC_IDE_PIO1_TCSW
			| SBC_IDE_PIO1_TPM
			| SBC_IDE_PIO1_TA;
		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
		break;

	case 2:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_PIO2_TWCS
			| SBC_IDE_PIO2_TCSH
			| SBC_IDE_PIO2_TCSOFF
			| SBC_IDE_PIO2_TWP
			| SBC_IDE_PIO2_TCSW
			| SBC_IDE_PIO2_TPM
			| SBC_IDE_PIO2_TA;
		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
		break;

	case 3:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_PIO3_TWCS
			| SBC_IDE_PIO3_TCSH
			| SBC_IDE_PIO3_TCSOFF
			| SBC_IDE_PIO3_TWP
			| SBC_IDE_PIO3_TCSW
			| SBC_IDE_PIO3_TPM
			| SBC_IDE_PIO3_TA;
		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;

		break;

	case 4:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_PIO4_TWCS
			| SBC_IDE_PIO4_TCSH
			| SBC_IDE_PIO4_TCSOFF
			| SBC_IDE_PIO4_TWP
			| SBC_IDE_PIO4_TCSW
			| SBC_IDE_PIO4_TPM
			| SBC_IDE_PIO4_TA;
		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
		break;
	}

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);

	spin_unlock_irqrestore(&ide_tune_drive_spin_lock, flags);

	speed = pio + XFER_PIO_0;
	ide_config_drive_speed(drive, speed);
}
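
/*
 * speedproc hook: validate the requested transfer mode (PIO, or MWDMA0-2
 * when the drive advertises it and MDMA2 DbDMA support is enabled), program
 * the matching bus timings and tell the drive to switch to that mode.
 */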
static int auide_tune_chipset(ide_drive_t *drive, u8 speed)
{
	u8 mode = 0;
	int mem_sttime;
	int mem_stcfg;
	unsigned long flags;
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	struct hd_driveid *id = drive->id;

	/*
	 * Now see what the current drive is capable of,
	 * selecting UDMA only if the mate said it was ok.
	 */
	if (id && (id->capability & 1) && drive->autodma &&
	    !__ide_dma_bad_drive(drive)) {
		if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {
			if (id->dma_mword & 4)
				mode = XFER_MW_DMA_2;
			else if (id->dma_mword & 2)
				mode = XFER_MW_DMA_1;
			else if (id->dma_mword & 1)
				mode = XFER_MW_DMA_0;
		}
	}
#endif

	spin_lock_irqsave(&ide_tune_chipset_spin_lock, flags);

	mem_sttime = 0;
	mem_stcfg = au_readl(MEM_STCFG2);

	switch (speed) {
	case XFER_PIO_4:
	case XFER_PIO_3:
	case XFER_PIO_2:
	case XFER_PIO_1:
	case XFER_PIO_0:
		auide_tune_drive(drive, (speed - XFER_PIO_0));
		break;
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	case XFER_MW_DMA_2:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_MDMA2_TWCS
			| SBC_IDE_MDMA2_TCSH
			| SBC_IDE_MDMA2_TCSOFF
			| SBC_IDE_MDMA2_TWP
			| SBC_IDE_MDMA2_TCSW
			| SBC_IDE_MDMA2_TPM
			| SBC_IDE_MDMA2_TA;
		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

		mode = XFER_MW_DMA_2;
		break;
	case XFER_MW_DMA_1:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_MDMA1_TWCS
			| SBC_IDE_MDMA1_TCSH
			| SBC_IDE_MDMA1_TCSOFF
			| SBC_IDE_MDMA1_TWP
			| SBC_IDE_MDMA1_TCSW
			| SBC_IDE_MDMA1_TPM
			| SBC_IDE_MDMA1_TA;
		/* set configuration for RCS2# */
		mem_stcfg &= ~TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;

		mode = XFER_MW_DMA_1;
		break;
	case XFER_MW_DMA_0:
		/* set timing parameters for RCS2# */
		mem_sttime = SBC_IDE_MDMA0_TWCS
			| SBC_IDE_MDMA0_TCSH
			| SBC_IDE_MDMA0_TCSOFF
			| SBC_IDE_MDMA0_TWP
			| SBC_IDE_MDMA0_TCSW
			| SBC_IDE_MDMA0_TPM
			| SBC_IDE_MDMA0_TA;
		/* set configuration for RCS2# */
		mem_stcfg |= TS_MASK;
		mem_stcfg &= ~TCSOE_MASK;
		mem_stcfg &= ~TOECS_MASK;
		mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;

		mode = XFER_MW_DMA_0;
		break;
#endif
	default:
		/* don't leave the chipset lock held on the error path */
		spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags);
		return 1;
	}

	/*
	 * Tell the drive to switch to the new mode; abort on failure.
	 */
	if (!mode || ide_config_drive_speed(drive, mode)) {
		spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags);
		return 1;	/* failure */
	}

	au_writel(mem_sttime, MEM_STTIME2);
	au_writel(mem_stcfg, MEM_STCFG2);

	spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags);

	return 0;
}
/*
 * Multi-Word DMA + DbDMA functions
 */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static int in_drive_list(struct hd_driveid *id,
			 const struct drive_list_entry *drive_table)
{
	for ( ; drive_table->id_model; drive_table++) {
		if ((!strcmp(drive_table->id_model, id->model)) &&
		    ((strstr(drive_table->id_firmware, id->fw_rev)) ||
		     (!strcmp(drive_table->id_firmware, "ALL"))))
			return 1;
	}
	return 0;
}
static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}
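
/*
 * Build the DbDMA descriptor list for a request: walk the mapped scatterlist
 * and queue one descriptor per chunk (at most 0xfe00 bytes each), enabling
 * the completion interrupt only for the last scatterlist entry. Returns 0 if
 * the request should fall back to PIO.
 */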
static int auide_build_dmatable(ide_drive_t *drive)
{
	int i, iswrite, count = 0;
	ide_hwif_t *hwif = HWIF(drive);

	struct request *rq = HWGROUP(drive)->rq;

	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
	struct scatterlist *sg;

	iswrite = (rq_data_dir(rq) == WRITE);
	/* Save for interrupt context */
	ahwif->drive = drive;

	/* Build sglist */
	hwif->sg_nents = i = auide_build_sglist(drive, rq);

	if (!i)
		return 0;

	/* fill the descriptors */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			u32 flags = DDMA_FLAGS_NOIE;
			unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

			if (++count >= PRD_ENTRIES) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			/* Lets enable intr for the last descriptor only */
			if (1 == i)
				flags = DDMA_FLAGS_IE;
			else
				flags = DDMA_FLAGS_NOIE;

			if (iswrite) {
				if (!put_source_flags(ahwif->tx_chan,
						      (void *)(page_address(sg->page)
							       + sg->offset),
						      tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			} else {
				if (!put_dest_flags(ahwif->rx_chan,
						    (void *)(page_address(sg->page)
							     + sg->offset),
						    tc, flags)) {
					printk(KERN_ERR "%s failed %d\n",
					       __FUNCTION__, __LINE__);
				}
			}

			cur_addr += tc;
			cur_len -= tc;
		}
		sg++;
		i--;
	}

	if (count)
		return 1;

use_pio_instead:
	dma_unmap_sg(ahwif->dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0; /* revert to PIO for this request */
}
static int auide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	_auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;

	if (hwif->sg_nents) {
		dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
			     hwif->sg_dma_direction);
		hwif->sg_nents = 0;
	}

	return 0;
}
static void auide_dma_start(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);
}

ide_startstop_t auide_dma_intr(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);

	u8 stat = 0, dma_stat = 0;

	dma_stat = HWIF(drive)->ide_dma_end(drive);
	stat = HWIF(drive)->INB(IDE_STATUS_REG);	/* get drive status */
	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			ide_end_request(drive, 1, rq->nr_sectors);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
		       drive->name, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}
static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	// printk("%s\n", __FUNCTION__);

	/* issue cmd to drive */
	ide_execute_command(drive, command, &auide_dma_intr,
			    (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);

	if (drive->media != ide_disk)
		return 1;

	if (!auide_build_dmatable(drive))
		/* try PIO instead of DMA */
		return 1;

	drive->waiting_for_dma = 1;

	return 0;
}
static int auide_dma_check(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	if (!dbdma_init_done) {
		auide_hwif.white_list = in_drive_list(drive->id,
						      dma_white_list);
		auide_hwif.black_list = in_drive_list(drive->id,
						      dma_black_list);
		auide_hwif.drive = drive;
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}
#endif

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list) {
		drive->using_dma = 0;
		printk("%s found in dma_blacklist[]! Disabling DMA.\n",
		       drive->id->model);
	} else
		drive->using_dma = 1;

	return HWIF(drive)->ide_dma_host_on(drive);
}
static int auide_dma_test_irq(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);

	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: ide_dma_test_irq "
		       "called while not waiting\n", drive->name);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set
	 */
	drive->waiting_for_dma++;
	if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
		printk(KERN_WARNING "%s: timeout waiting for ddma to "
		       "complete\n", drive->name);
		return 1;
	}
	udelay(10);
	return 0;
}
static int auide_dma_host_on(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);
	return 0;
}

static int auide_dma_on(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);
	drive->using_dma = 1;
	return auide_dma_host_on(drive);
}

static int auide_dma_host_off(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);
	return 0;
}

static int auide_dma_off_quietly(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);
	drive->using_dma = 0;
	return auide_dma_host_off(drive);
}

static int auide_dma_lostirq(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);

	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
	return 0;
}
static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs)
{
	// printk("%s\n", __FUNCTION__);

	_auide_hwif *ahwif = (_auide_hwif *)param;
	ahwif->drive->waiting_for_dma = 0;
	return;
}

static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs)
{
	// printk("%s\n", __FUNCTION__);

	_auide_hwif *ahwif = (_auide_hwif *)param;
	ahwif->drive->waiting_for_dma = 0;
	return;
}
static int auide_dma_timeout(ide_drive_t *drive)
{
	// printk("%s\n", __FUNCTION__);

	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (HWIF(drive)->ide_dma_test_irq(drive))
		return 0;

	return HWIF(drive)->ide_dma_end(drive);
}
#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
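
/*
 * DbDMA setup: register the IDE source/target devices in the DbDMA device
 * table (burst sizes depend on whether the drive was white-/black-listed),
 * allocate the TX and RX channels plus their descriptor rings, and start
 * both channels.
 */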
static int auide_ddma_init(_auide_hwif *auide)
{
	// printk("%s\n", __FUNCTION__);

	dbdev_tab_t source_dev_tab;
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	dbdev_tab_t target_dev_tab;
	ide_hwif_t *hwif = auide->hwif;
	char warning_output[2][80];
	int i;
#endif

	/* Add our custom device to DDMA device table */
	/* Create our new device entries in the table */
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	source_dev_tab.dev_id = AU1XXX_ATA_DDMA_REQ;

	if (auide->white_list || auide->black_list) {
		source_dev_tab.dev_tsize = 8;
		source_dev_tab.dev_devwidth = 32;
		source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
		source_dev_tab.dev_intlevel = 0;
		source_dev_tab.dev_intpolarity = 0;

		/* init device table for target - static bus controller - */
		target_dev_tab.dev_id = DSCR_CMD0_ALWAYS;
		target_dev_tab.dev_tsize = 8;
		target_dev_tab.dev_devwidth = 32;
		target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
		target_dev_tab.dev_intlevel = 0;
		target_dev_tab.dev_intpolarity = 0;
		target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE;
	}
	else {
		source_dev_tab.dev_tsize = 1;
		source_dev_tab.dev_devwidth = 16;
		source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
		source_dev_tab.dev_intlevel = 0;
		source_dev_tab.dev_intpolarity = 0;

		/* init device table for target - static bus controller - */
		target_dev_tab.dev_id = DSCR_CMD0_ALWAYS;
		target_dev_tab.dev_tsize = 1;
		target_dev_tab.dev_devwidth = 16;
		target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
		target_dev_tab.dev_intlevel = 0;
		target_dev_tab.dev_intpolarity = 0;
		target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE;

		sprintf(&warning_output[0][0],
			"%s is not on ide driver white list.",
			auide_hwif.drive->id->model);
		for (i = strlen(&warning_output[0][0]); i < 76; i++) {
			sprintf(&warning_output[0][i], " ");
		}

		sprintf(&warning_output[1][0],
			"To add %s please read 'Documentation/mips/AU1xxx_IDE.README'.",
			auide_hwif.drive->id->model);
		for (i = strlen(&warning_output[1][0]); i < 76; i++) {
			sprintf(&warning_output[1][i], " ");
		}

		printk("\n****************************************");
		printk("****************************************\n");
		printk("* %s *\n", &warning_output[0][0]);
		printk("* Switch to safe MWDMA Mode! ");
		printk(" *\n");
		printk("* %s *\n", &warning_output[1][0]);
		printk("****************************************");
		printk("****************************************\n\n");
	}
#else
	source_dev_tab.dev_id = DSCR_CMD0_ALWAYS;
	source_dev_tab.dev_tsize = 8;
	source_dev_tab.dev_devwidth = 32;
	source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
	source_dev_tab.dev_intlevel = 0;
	source_dev_tab.dev_intpolarity = 0;
#endif
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON
	/* set flags for tx channel */
	source_dev_tab.dev_flags = DEV_FLAGS_OUT
		| DEV_FLAGS_SYNC
		| DEV_FLAGS_BURSTABLE;
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);
	/* set flags for rx channel */
	source_dev_tab.dev_flags = DEV_FLAGS_IN
		| DEV_FLAGS_SYNC
		| DEV_FLAGS_BURSTABLE;
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);
#else
	/* set flags for tx channel */
	source_dev_tab.dev_flags = DEV_FLAGS_OUT | DEV_FLAGS_SYNC;
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);
	/* set flags for rx channel */
	source_dev_tab.dev_flags = DEV_FLAGS_IN | DEV_FLAGS_SYNC;
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);
#endif
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

	auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
						 auide->tx_dev_id,
						 auide_ddma_tx_callback,
						 (void *)auide);
	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 auide->target_dev_id,
						 auide_ddma_rx_callback,
						 (void *)auide);
#else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */
	/*
	 * Note: if call back is not enabled, update ctp->cur_ptr manually
	 */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL,
						 (void *)auide);
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL,
						 (void *)auide);
#endif
	auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							      NUM_DESCRIPTORS);
	auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							      NUM_DESCRIPTORS);

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
						PRD_ENTRIES * PRD_BYTES, /* 1 Page */
						&hwif->dmatable_dma, GFP_KERNEL);

	auide->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
				  GFP_KERNEL | GFP_DMA);
	if (auide->sg_table == NULL) {
		return -ENOMEM;
	}
#endif
	au1xxx_dbdma_start(auide->tx_chan);
	au1xxx_dbdma_start(auide->rx_chan);
	return 0;
}
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
	int i;
#define ide_ioreg_t unsigned long
	ide_ioreg_t *ata_regs = hw->io_ports;

	/* fixme */
	for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
		*ata_regs++ = (ide_ioreg_t)ahwif->regbase
			+ (ide_ioreg_t)(i << AU1XXX_ATA_REG_OFFSET);
	}

	/* set the Alternative Status register */
	*ata_regs = (ide_ioreg_t)ahwif->regbase
		+ (ide_ioreg_t)(14 << AU1XXX_ATA_REG_OFFSET);
}
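
/*
 * Platform driver probe: map the controller registers, fill in the hwif
 * I/O ports and method pointers, select DMA or PIO(+DbDMA) operation
 * according to the kernel configuration, and register the interface.
 */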
static int au_ide_probe(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	_auide_hwif *ahwif = &auide_hwif;
	ide_hwif_t *hwif;
	hw_regs_t *hw;
	struct resource *res;
	int ret = 0;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
	char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
	char *mode = "PIO+DDMA(offload)";
#endif

	memset(&auide_hwif, 0, sizeof(_auide_hwif));
	auide_hwif.dev = 0;

	ahwif->dev = dev;
	ahwif->irq = platform_get_irq(pdev, 0);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (res == NULL) {
		pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
		ret = -ENODEV;
		goto out;
	}
	if (!request_mem_region(res->start, res->end - res->start, pdev->name)) {
		pr_debug("%s: request_mem_region failed\n", DRV_NAME);
		ret = -EBUSY;
		goto out;
	}

	ahwif->regbase = (u32)ioremap(res->start, res->end - res->start);
	if (ahwif->regbase == 0) {
		ret = -ENOMEM;
		goto out;
	}

	hwif = &ide_hwifs[pdev->id];
	hw = &hwif->hw;
	hwif->irq = hw->irq = ahwif->irq;
	hwif->chipset = ide_au1xxx;

	auide_setup_ports(hw, ahwif);
	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
	hwif->rqsize = CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ;
	hwif->rqsize = ((hwif->rqsize > AU1XXX_ATA_RQSIZE)
			|| (hwif->rqsize < 32)) ? AU1XXX_ATA_RQSIZE : hwif->rqsize;
#else /* if kernel config is not set */
	hwif->rqsize = AU1XXX_ATA_RQSIZE;
#endif

	hwif->ultra_mask = 0x0;		/* Disable Ultra DMA */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->mwdma_mask = 0x07;	/* Multimode-2 DMA */
	hwif->swdma_mask = 0x07;
#else
	hwif->mwdma_mask = 0x0;
	hwif->swdma_mask = 0x0;
#endif
	//hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
	hwif->noprobe = 0;
	hwif->drives[0].unmask = 1;
	hwif->drives[1].unmask = 1;

	/* hold should be on in all cases */
	hwif->hold = 1;
	hwif->mmio = 2;

	/* set up local I/O function entry points */
	hwif->INB = auide_inb;
	hwif->INW = auide_inw;
	hwif->INL = auide_inl;
	hwif->INSW = auide_insw;
	hwif->INSL = auide_insl;
	hwif->OUTB = auide_outb;
	hwif->OUTBSYNC = auide_outbsync;
	hwif->OUTW = auide_outw;
	hwif->OUTL = auide_outl;
	hwif->OUTSW = auide_outsw;
	hwif->OUTSL = auide_outsl;

	hwif->tuneproc = &auide_tune_drive;
	hwif->speedproc = &auide_tune_chipset;
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	hwif->ide_dma_off_quietly = &auide_dma_off_quietly;
	hwif->ide_dma_timeout = &auide_dma_timeout;

	hwif->ide_dma_check = &auide_dma_check;
	hwif->dma_exec_cmd = &auide_dma_exec_cmd;
	hwif->dma_start = &auide_dma_start;
	hwif->ide_dma_end = &auide_dma_end;
	hwif->dma_setup = &auide_dma_setup;
	hwif->ide_dma_test_irq = &auide_dma_test_irq;
	hwif->ide_dma_host_off = &auide_dma_host_off;
	hwif->ide_dma_host_on = &auide_dma_host_on;
	hwif->ide_dma_lostirq = &auide_dma_lostirq;
	hwif->ide_dma_on = &auide_dma_on;

	hwif->autodma = 1;
	hwif->drives[0].autodma = hwif->autodma;
	hwif->drives[1].autodma = hwif->autodma;
	hwif->atapi_dma = 1;
	hwif->drives[0].using_dma = 1;
	hwif->drives[1].using_dma = 1;
#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
	hwif->autodma = 0;
	hwif->channel = 0;
	hwif->hold = 1;
	hwif->select_data = 0;		/* no chipset-specific code */
	hwif->config_data = 0;		/* no chipset-specific code */

	hwif->drives[0].autodma = 0;
	hwif->drives[0].drive_data = 0;	/* no drive data */
	hwif->drives[0].using_dma = 0;
	hwif->drives[0].waiting_for_dma = 0;
	hwif->drives[0].autotune = 1;	/* 1=autotune, 2=noautotune, 0=default */
	/* secondary hdd not supported */
	hwif->drives[1].autodma = 0;

	hwif->drives[1].drive_data = 0;
	hwif->drives[1].using_dma = 0;
	hwif->drives[1].waiting_for_dma = 0;
	hwif->drives[1].autotune = 2;	/* 1=autotune, 2=noautotune, 0=default */
#endif
	hwif->drives[0].io_32bit = 0;	/* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
	hwif->drives[1].io_32bit = 0;	/* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
	/* Register Driver with PM Framework */
#ifdef CONFIG_PM
	auide_hwif.pm.lock = SPIN_LOCK_UNLOCKED;
	auide_hwif.pm.stopped = 0;

	auide_hwif.pm.dev = new_au1xxx_power_device("ide",
						    &au1200ide_pm_callback,
						    NULL);
	if (auide_hwif.pm.dev == NULL)
		printk(KERN_INFO "Unable to create a power management "
		       "device entry for the au1200-IDE.\n");
	else
		printk(KERN_INFO "Power management device entry for the "
		       "au1200-IDE loaded.\n");
#endif
	auide_hwif.hwif = hwif;
	hwif->hwif_data = &auide_hwif;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
	auide_ddma_init(&auide_hwif);
	dbdma_init_done = 1;
#endif

	probe_hwif_init(hwif);
	dev_set_drvdata(dev, hwif);

	printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

out:
	return ret;
}
static int au_ide_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	ide_hwif_t *hwif = dev_get_drvdata(dev);
	_auide_hwif *ahwif = &auide_hwif;

	ide_unregister(hwif - ide_hwifs);

	iounmap((void *)ahwif->regbase);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start);

	return 0;
}
static struct device_driver au1200_ide_driver = {
	.name		= "au1200-ide",
	.bus		= &platform_bus_type,
	.probe		= au_ide_probe,
	.remove		= au_ide_remove,
};

static int __init au_ide_init(void)
{
	return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
	driver_unregister(&au1200_ide_driver);
}
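
/*
 * Power management glue for the Au1xxx board PM framework (au1xxx_pm.h):
 * the callback below dispatches sleep/wakeup/status requests to the
 * helpers that quiesce or restart the drive and the DbDMA channels.
 */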
#ifdef CONFIG_PM
int au1200ide_pm_callback(au1xxx_power_dev_t *dev,
			  au1xxx_request_t request, void *data)
{
	unsigned int d, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&auide_hwif.pm.lock, flags);

	switch (request) {
	case AU1XXX_PM_SLEEP:
		err = au1xxxide_pm_sleep(dev);
		break;
	case AU1XXX_PM_WAKEUP:
		d = *((unsigned int *)data);
		if (d > 0 && d <= 99) {
			err = au1xxxide_pm_standby(dev);
		}
		else {
			err = au1xxxide_pm_resume(dev);
		}
		break;
	case AU1XXX_PM_GETSTATUS:
		err = au1xxxide_pm_getstatus(dev);
		break;
	case AU1XXX_PM_ACCESS:
		err = au1xxxide_pm_access(dev);
		break;
	case AU1XXX_PM_IDLE:
		err = au1xxxide_pm_idle(dev);
		break;
	case AU1XXX_PM_CLEANUP:
		err = au1xxxide_pm_cleanup(dev);
		break;
	default:
		err = -1;
		break;
	}

	spin_unlock_irqrestore(&auide_hwif.pm.lock, flags);

	return err;
}
static int au1xxxide_pm_standby(au1xxx_power_dev_t *dev)
{
	return 0;
}
static int au1xxxide_pm_sleep(au1xxx_power_dev_t *dev)
{
	int retval;
	ide_hwif_t *hwif = auide_hwif.hwif;
	struct request rq;
	struct request_pm_state rqpm;
	ide_task_t args;

	if (auide_hwif.pm.stopped)
		return -1;

	/*
	 * wait until the hard disc is ready
	 */
	if (wait_for_ready(&hwif->drives[0], 35000)) {
		printk("Wait for drive sleep timeout!\n");
		retval = -1;
	}

	/*
	 * sequence to tell the high level ide driver that pm is suspending
	 */
	memset(&rq, 0, sizeof(rq));
	memset(&rqpm, 0, sizeof(rqpm));
	memset(&args, 0, sizeof(args));
	rq.flags = REQ_PM_SUSPEND;
	rq.special = &args;
	rq.pm = &rqpm;
	rqpm.pm_step = ide_pm_state_start_suspend;
	rqpm.pm_state = PMSG_SUSPEND;

	retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_wait);

	if (wait_for_ready(&hwif->drives[0], 35000)) {
		printk("Wait for drive sleep timeout!\n");
		retval = -1;
	}

	/*
	 * stop dbdma channels
	 */
	au1xxx_dbdma_reset(auide_hwif.tx_chan);
	au1xxx_dbdma_reset(auide_hwif.rx_chan);

	auide_hwif.pm.stopped = 1;

	return retval;
}
static int au1xxxide_pm_resume(au1xxx_power_dev_t *dev)
{
	int retval;
	ide_hwif_t *hwif = auide_hwif.hwif;
	struct request rq;
	struct request_pm_state rqpm;
	ide_task_t args;

	if (!auide_hwif.pm.stopped)
		return -1;

	/*
	 * start dbdma channels
	 */
	au1xxx_dbdma_start(auide_hwif.tx_chan);
	au1xxx_dbdma_start(auide_hwif.rx_chan);

	/*
	 * wait until the hard disc is ready
	 */
	if (wait_for_ready(&hwif->drives[0], 35000)) {
		printk("Wait for drive wake up timeout!\n");
		retval = -1;
	}

	/*
	 * sequence to tell the high level ide driver that pm is resuming
	 */
	memset(&rq, 0, sizeof(rq));
	memset(&rqpm, 0, sizeof(rqpm));
	memset(&args, 0, sizeof(args));
	rq.flags = REQ_PM_RESUME;
	rq.special = &args;
	rq.pm = &rqpm;
	rqpm.pm_step = ide_pm_state_start_resume;
	rqpm.pm_state = PMSG_ON;

	retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_head_wait);

	/*
	 * wait for the hard disc
	 */
	if (wait_for_ready(&hwif->drives[0], 35000)) {
		printk("Wait for drive wake up timeout!\n");
		retval = -1;
	}

	auide_hwif.pm.stopped = 0;

	return retval;
}
static int au1xxxide_pm_getstatus(au1xxx_power_dev_t *dev)
{
	return dev->cur_state;
}

static int au1xxxide_pm_access(au1xxx_power_dev_t *dev)
{
	if (dev->cur_state != AWAKE_STATE)
		return 0;
	else
		return -1;
}

static int au1xxxide_pm_idle(au1xxx_power_dev_t *dev)
{
	return 0;
}

static int au1xxxide_pm_cleanup(au1xxx_power_dev_t *dev)
{
	return 0;
}
#endif /* CONFIG_PM */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);