2 * linux/drivers/ide/mips/au1xxx-ide.c version 01.30.00 Aug. 02 2005
4 * BRIEF MODULE DESCRIPTION
5 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
7 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
9 * This program is free software; you can redistribute it and/or modify it under
10 * the terms of the GNU General Public License as published by the Free Software
11 * Foundation; either version 2 of the License, or (at your option) any later
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
15 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
16 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
17 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23 * POSSIBILITY OF SUCH DAMAGE.
25 * You should have received a copy of the GNU General Public License along with
26 * this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
29 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550 IDE
30 * Interface and Linux Device Driver" Application Note.
32 #undef REALLY_SLOW_IO /* most systems can safely undef this */
34 #include <linux/config.h> /* for CONFIG_BLK_DEV_IDEPCI */
35 #include <linux/types.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/delay.h>
39 #include <linux/timer.h>
41 #include <linux/ioport.h>
42 #include <linux/hdreg.h>
43 #include <linux/init.h>
44 #include <linux/ide.h>
45 #include <linux/sysdev.h>
47 #include <linux/dma-mapping.h>
50 #include <asm/mach-au1x00/au1xxx.h>
51 #include <asm/mach-au1x00/au1xxx_dbdma.h>
54 #include <asm/mach-au1x00/au1xxx_pm.h>
57 #include <asm/mach-au1x00/au1xxx_ide.h>
59 #define DRV_NAME "au1200-ide"
60 #define DRV_VERSION "1.0"
61 #define DRV_AUTHOR "AMD PCS / Pete Popov <ppopov@embeddedalley.com>"
62 #define DRV_DESC "Au1200 IDE"
64 static _auide_hwif auide_hwif
;
65 static spinlock_t ide_tune_drive_spin_lock
= SPIN_LOCK_UNLOCKED
;
66 static spinlock_t ide_tune_chipset_spin_lock
= SPIN_LOCK_UNLOCKED
;
67 static int dbdma_init_done
= 0;
72 u8
auide_inb(unsigned long port
)
74 return (au_readb(port
));
77 u16
auide_inw(unsigned long port
)
79 return (au_readw(port
));
82 u32
auide_inl(unsigned long port
)
84 return (au_readl(port
));
87 void auide_insw(unsigned long port
, void *addr
, u32 count
)
89 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
91 _auide_hwif
*ahwif
= &auide_hwif
;
95 if(!put_dest_flags(ahwif
->rx_chan
, (void*)addr
, count
<< 1,
97 printk(KERN_ERR
"%s failed %d\n", __FUNCTION__
, __LINE__
);
100 ctp
= *((chan_tab_t
**)ahwif
->rx_chan
);
102 while (dp
->dscr_cmd0
& DSCR_CMD0_V
)
104 ctp
->cur_ptr
= au1xxx_ddma_get_nextptr_virt(dp
);
108 *(u16
*)addr
= au_readw(port
);
114 void auide_insl(unsigned long port
, void *addr
, u32 count
)
118 *(u32
*)addr
= au_readl(port
);
119 /* NOTE: For IDE interfaces over PCMCIA,
120 * 32-bit access does not work
126 void auide_outb(u8 addr
, unsigned long port
)
128 return (au_writeb(addr
, port
));
131 void auide_outbsync(ide_drive_t
*drive
, u8 addr
, unsigned long port
)
133 return (au_writeb(addr
, port
));
136 void auide_outw(u16 addr
, unsigned long port
)
138 return (au_writew(addr
, port
));
141 void auide_outl(u32 addr
, unsigned long port
)
143 return (au_writel(addr
, port
));
146 void auide_outsw(unsigned long port
, void *addr
, u32 count
)
148 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
149 _auide_hwif
*ahwif
= &auide_hwif
;
151 au1x_ddma_desc_t
*dp
;
153 if(!put_source_flags(ahwif
->tx_chan
, (void*)addr
,
154 count
<< 1, DDMA_FLAGS_NOIE
)) {
155 printk(KERN_ERR
"%s failed %d\n", __FUNCTION__
, __LINE__
);
158 ctp
= *((chan_tab_t
**)ahwif
->tx_chan
);
160 while (dp
->dscr_cmd0
& DSCR_CMD0_V
)
162 ctp
->cur_ptr
= au1xxx_ddma_get_nextptr_virt(dp
);
166 au_writew(*(u16
*)addr
, port
);
172 void auide_outsl(unsigned long port
, void *addr
, u32 count
)
176 au_writel(*(u32
*)addr
, port
);
177 /* NOTE: For IDE interfaces over PCMCIA,
178 * 32-bit access does not work
184 static void auide_tune_drive(ide_drive_t
*drive
, byte pio
)
191 /* get the best pio mode for the drive */
192 pio
= ide_get_best_pio_mode(drive
, pio
, 4, NULL
);
194 printk("%s: setting Au1XXX IDE to PIO mode%d\n",
197 spin_lock_irqsave(&ide_tune_drive_spin_lock
, flags
);
200 mem_stcfg
= au_readl(MEM_STCFG2
);
205 /* set timing parameters for RCS2# */
206 mem_sttime
= SBC_IDE_PIO0_TWCS
208 | SBC_IDE_PIO0_TCSOFF
213 /* set configuration for RCS2# */
214 mem_stcfg
|= TS_MASK
;
215 mem_stcfg
&= ~TCSOE_MASK
;
216 mem_stcfg
&= ~TOECS_MASK
;
217 mem_stcfg
|= SBC_IDE_PIO0_TCSOE
| SBC_IDE_PIO0_TOECS
;
219 au_writel(mem_sttime
,MEM_STTIME2
);
220 au_writel(mem_stcfg
,MEM_STCFG2
);
224 /* set timing parameters for RCS2# */
225 mem_sttime
= SBC_IDE_PIO1_TWCS
227 | SBC_IDE_PIO1_TCSOFF
232 /* set configuration for RCS2# */
233 mem_stcfg
|= TS_MASK
;
234 mem_stcfg
&= ~TCSOE_MASK
;
235 mem_stcfg
&= ~TOECS_MASK
;
236 mem_stcfg
|= SBC_IDE_PIO1_TCSOE
| SBC_IDE_PIO1_TOECS
;
240 /* set timing parameters for RCS2# */
241 mem_sttime
= SBC_IDE_PIO2_TWCS
243 | SBC_IDE_PIO2_TCSOFF
248 /* set configuration for RCS2# */
249 mem_stcfg
&= ~TS_MASK
;
250 mem_stcfg
&= ~TCSOE_MASK
;
251 mem_stcfg
&= ~TOECS_MASK
;
252 mem_stcfg
|= SBC_IDE_PIO2_TCSOE
| SBC_IDE_PIO2_TOECS
;
256 /* set timing parameters for RCS2# */
257 mem_sttime
= SBC_IDE_PIO3_TWCS
259 | SBC_IDE_PIO3_TCSOFF
264 /* set configuration for RCS2# */
265 mem_stcfg
|= TS_MASK
;
266 mem_stcfg
&= ~TS_MASK
;
267 mem_stcfg
&= ~TCSOE_MASK
;
268 mem_stcfg
&= ~TOECS_MASK
;
269 mem_stcfg
|= SBC_IDE_PIO3_TCSOE
| SBC_IDE_PIO3_TOECS
;
274 /* set timing parameters for RCS2# */
275 mem_sttime
= SBC_IDE_PIO4_TWCS
277 | SBC_IDE_PIO4_TCSOFF
282 /* set configuration for RCS2# */
283 mem_stcfg
&= ~TS_MASK
;
284 mem_stcfg
&= ~TCSOE_MASK
;
285 mem_stcfg
&= ~TOECS_MASK
;
286 mem_stcfg
|= SBC_IDE_PIO4_TCSOE
| SBC_IDE_PIO4_TOECS
;
290 au_writel(mem_sttime
,MEM_STTIME2
);
291 au_writel(mem_stcfg
,MEM_STCFG2
);
293 spin_unlock_irqrestore(&ide_tune_drive_spin_lock
, flags
);
295 speed
= pio
+ XFER_PIO_0
;
296 ide_config_drive_speed(drive
, speed
);
299 static int auide_tune_chipset (ide_drive_t
*drive
, u8 speed
)
305 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
306 struct hd_driveid
*id
= drive
->id
;
309 * Now see what the current drive is capable of,
310 * selecting UDMA only if the mate said it was ok.
312 if (id
&& (id
->capability
& 1) && drive
->autodma
&&
313 !__ide_dma_bad_drive(drive
)) {
314 if (!mode
&& (id
->field_valid
& 2) && (id
->dma_mword
& 7)) {
315 if (id
->dma_mword
& 4)
316 mode
= XFER_MW_DMA_2
;
317 else if (id
->dma_mword
& 2)
318 mode
= XFER_MW_DMA_1
;
319 else if (id
->dma_mword
& 1)
320 mode
= XFER_MW_DMA_0
;
325 spin_lock_irqsave(&ide_tune_chipset_spin_lock
, flags
);
328 mem_stcfg
= au_readl(MEM_STCFG2
);
336 auide_tune_drive(drive
, (speed
- XFER_PIO_0
));
338 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
340 /* set timing parameters for RCS2# */
341 mem_sttime
= SBC_IDE_MDMA2_TWCS
343 | SBC_IDE_MDMA2_TCSOFF
348 /* set configuration for RCS2# */
349 mem_stcfg
&= ~TS_MASK
;
350 mem_stcfg
&= ~TCSOE_MASK
;
351 mem_stcfg
&= ~TOECS_MASK
;
352 mem_stcfg
|= SBC_IDE_MDMA2_TCSOE
| SBC_IDE_MDMA2_TOECS
;
354 mode
= XFER_MW_DMA_2
;
357 /* set timing parameters for RCS2# */
358 mem_sttime
= SBC_IDE_MDMA1_TWCS
360 | SBC_IDE_MDMA1_TCSOFF
365 /* set configuration for RCS2# */
366 mem_stcfg
&= ~TS_MASK
;
367 mem_stcfg
&= ~TCSOE_MASK
;
368 mem_stcfg
&= ~TOECS_MASK
;
369 mem_stcfg
|= SBC_IDE_MDMA1_TCSOE
| SBC_IDE_MDMA1_TOECS
;
371 mode
= XFER_MW_DMA_1
;
374 /* set timing parameters for RCS2# */
375 mem_sttime
= SBC_IDE_MDMA0_TWCS
377 | SBC_IDE_MDMA0_TCSOFF
382 /* set configuration for RCS2# */
383 mem_stcfg
|= TS_MASK
;
384 mem_stcfg
&= ~TCSOE_MASK
;
385 mem_stcfg
&= ~TOECS_MASK
;
386 mem_stcfg
|= SBC_IDE_MDMA0_TCSOE
| SBC_IDE_MDMA0_TOECS
;
388 mode
= XFER_MW_DMA_0
;
396 * Tell the drive to switch to the new mode; abort on failure.
398 if (!mode
|| ide_config_drive_speed(drive
, mode
))
400 return 1; /* failure */
404 au_writel(mem_sttime
,MEM_STTIME2
);
405 au_writel(mem_stcfg
,MEM_STCFG2
);
407 spin_unlock_irqrestore(&ide_tune_chipset_spin_lock
, flags
);
413 * Multi-Word DMA + DbDMA functions
415 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
417 static int in_drive_list(struct hd_driveid
*id
,
418 const struct drive_list_entry
*drive_table
)
420 for ( ; drive_table
->id_model
; drive_table
++){
421 if ((!strcmp(drive_table
->id_model
, id
->model
)) &&
422 ((strstr(drive_table
->id_firmware
, id
->fw_rev
)) ||
423 (!strcmp(drive_table
->id_firmware
, "ALL")))
430 static int auide_build_sglist(ide_drive_t
*drive
, struct request
*rq
)
432 ide_hwif_t
*hwif
= drive
->hwif
;
433 _auide_hwif
*ahwif
= (_auide_hwif
*)hwif
->hwif_data
;
434 struct scatterlist
*sg
= hwif
->sg_table
;
436 ide_map_sg(drive
, rq
);
438 if (rq_data_dir(rq
) == READ
)
439 hwif
->sg_dma_direction
= DMA_FROM_DEVICE
;
441 hwif
->sg_dma_direction
= DMA_TO_DEVICE
;
443 return dma_map_sg(ahwif
->dev
, sg
, hwif
->sg_nents
,
444 hwif
->sg_dma_direction
);
447 static int auide_build_dmatable(ide_drive_t
*drive
)
449 int i
, iswrite
, count
= 0;
450 ide_hwif_t
*hwif
= HWIF(drive
);
452 struct request
*rq
= HWGROUP(drive
)->rq
;
454 _auide_hwif
*ahwif
= (_auide_hwif
*)hwif
->hwif_data
;
455 struct scatterlist
*sg
;
457 iswrite
= (rq_data_dir(rq
) == WRITE
);
458 /* Save for interrupt context */
459 ahwif
->drive
= drive
;
462 hwif
->sg_nents
= i
= auide_build_sglist(drive
, rq
);
467 /* fill the descriptors */
469 while (i
&& sg_dma_len(sg
)) {
473 cur_addr
= sg_dma_address(sg
);
474 cur_len
= sg_dma_len(sg
);
477 u32 flags
= DDMA_FLAGS_NOIE
;
478 unsigned int tc
= (cur_len
< 0xfe00)? cur_len
: 0xfe00;
480 if (++count
>= PRD_ENTRIES
) {
481 printk(KERN_WARNING
"%s: DMA table too small\n",
483 goto use_pio_instead
;
486 /* Lets enable intr for the last descriptor only */
488 flags
= DDMA_FLAGS_IE
;
490 flags
= DDMA_FLAGS_NOIE
;
493 if(!put_source_flags(ahwif
->tx_chan
,
494 (void*)(page_address(sg
->page
)
497 printk(KERN_ERR
"%s failed %d\n",
498 __FUNCTION__
, __LINE__
);
502 if(!put_dest_flags(ahwif
->rx_chan
,
503 (void*)(page_address(sg
->page
)
506 printk(KERN_ERR
"%s failed %d\n",
507 __FUNCTION__
, __LINE__
);
522 dma_unmap_sg(ahwif
->dev
,
525 hwif
->sg_dma_direction
);
527 return 0; /* revert to PIO for this request */
530 static int auide_dma_end(ide_drive_t
*drive
)
532 ide_hwif_t
*hwif
= HWIF(drive
);
533 _auide_hwif
*ahwif
= (_auide_hwif
*)hwif
->hwif_data
;
535 if (hwif
->sg_nents
) {
536 dma_unmap_sg(ahwif
->dev
, hwif
->sg_table
, hwif
->sg_nents
,
537 hwif
->sg_dma_direction
);
544 static void auide_dma_start(ide_drive_t
*drive
)
546 // printk("%s\n", __FUNCTION__);
549 ide_startstop_t
auide_dma_intr(ide_drive_t
*drive
)
551 //printk("%s\n", __FUNCTION__);
553 u8 stat
= 0, dma_stat
= 0;
555 dma_stat
= HWIF(drive
)->ide_dma_end(drive
);
556 stat
= HWIF(drive
)->INB(IDE_STATUS_REG
); /* get drive status */
557 if (OK_STAT(stat
,DRIVE_READY
,drive
->bad_wstat
|DRQ_STAT
)) {
559 struct request
*rq
= HWGROUP(drive
)->rq
;
561 ide_end_request(drive
, 1, rq
->nr_sectors
);
564 printk(KERN_ERR
"%s: dma_intr: bad DMA status (dma_stat=%x)\n",
565 drive
->name
, dma_stat
);
567 return ide_error(drive
, "dma_intr", stat
);
570 static void auide_dma_exec_cmd(ide_drive_t
*drive
, u8 command
)
572 //printk("%s\n", __FUNCTION__);
574 /* issue cmd to drive */
575 ide_execute_command(drive
, command
, &auide_dma_intr
,
579 static int auide_dma_setup(ide_drive_t
*drive
)
581 // printk("%s\n", __FUNCTION__);
583 if (drive
->media
!= ide_disk
)
586 if (!auide_build_dmatable(drive
))
587 /* try PIO instead of DMA */
590 drive
->waiting_for_dma
= 1;
595 static int auide_dma_check(ide_drive_t
*drive
)
597 // printk("%s\n", __FUNCTION__);
599 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
600 if( !dbdma_init_done
){
601 auide_hwif
.white_list
= in_drive_list(drive
->id
,
603 auide_hwif
.black_list
= in_drive_list(drive
->id
,
605 auide_hwif
.drive
= drive
;
606 auide_ddma_init(&auide_hwif
);
611 /* Is the drive in our DMA black list? */
612 if ( auide_hwif
.black_list
) {
613 drive
->using_dma
= 0;
614 printk("%s found in dma_blacklist[]! Disabling DMA.\n",
618 drive
->using_dma
= 1;
620 return HWIF(drive
)->ide_dma_host_on(drive
);
623 static int auide_dma_test_irq(ide_drive_t
*drive
)
625 // printk("%s\n", __FUNCTION__);
627 if (!drive
->waiting_for_dma
)
628 printk(KERN_WARNING
"%s: ide_dma_test_irq \
629 called while not waiting\n", drive
->name
);
631 /* If dbdma didn't execute the STOP command yet, the
632 * active bit is still set
634 drive
->waiting_for_dma
++;
635 if (drive
->waiting_for_dma
>= DMA_WAIT_TIMEOUT
) {
636 printk(KERN_WARNING
"%s: timeout waiting for ddma to \
637 complete\n", drive
->name
);
644 static int auide_dma_host_on(ide_drive_t
*drive
)
646 // printk("%s\n", __FUNCTION__);
650 static int auide_dma_on(ide_drive_t
*drive
)
652 // printk("%s\n", __FUNCTION__);
653 drive
->using_dma
= 1;
654 return auide_dma_host_on(drive
);
658 static int auide_dma_host_off(ide_drive_t
*drive
)
660 // printk("%s\n", __FUNCTION__);
664 static int auide_dma_off_quietly(ide_drive_t
*drive
)
666 // printk("%s\n", __FUNCTION__);
667 drive
->using_dma
= 0;
668 return auide_dma_host_off(drive
);
671 static int auide_dma_lostirq(ide_drive_t
*drive
)
673 // printk("%s\n", __FUNCTION__);
675 printk(KERN_ERR
"%s: IRQ lost\n", drive
->name
);
679 static void auide_ddma_tx_callback(int irq
, void *param
, struct pt_regs
*regs
)
681 // printk("%s\n", __FUNCTION__);
683 _auide_hwif
*ahwif
= (_auide_hwif
*)param
;
684 ahwif
->drive
->waiting_for_dma
= 0;
688 static void auide_ddma_rx_callback(int irq
, void *param
, struct pt_regs
*regs
)
690 // printk("%s\n", __FUNCTION__);
692 _auide_hwif
*ahwif
= (_auide_hwif
*)param
;
693 ahwif
->drive
->waiting_for_dma
= 0;
697 static int auide_dma_timeout(ide_drive_t
*drive
)
699 // printk("%s\n", __FUNCTION__);
701 printk(KERN_ERR
"%s: DMA timeout occurred: ", drive
->name
);
703 if (HWIF(drive
)->ide_dma_test_irq(drive
))
706 return HWIF(drive
)->ide_dma_end(drive
);
708 #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
711 static int auide_ddma_init( _auide_hwif
*auide
)
713 // printk("%s\n", __FUNCTION__);
715 dbdev_tab_t source_dev_tab
;
716 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
717 dbdev_tab_t target_dev_tab
;
718 ide_hwif_t
*hwif
= auide
->hwif
;
719 char warning_output
[2][80];
723 /* Add our custom device to DDMA device table */
724 /* Create our new device entries in the table */
725 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
726 source_dev_tab
.dev_id
= AU1XXX_ATA_DDMA_REQ
;
728 if( auide
->white_list
|| auide
->black_list
){
729 source_dev_tab
.dev_tsize
= 8;
730 source_dev_tab
.dev_devwidth
= 32;
731 source_dev_tab
.dev_physaddr
= (u32
)AU1XXX_ATA_PHYS_ADDR
;
732 source_dev_tab
.dev_intlevel
= 0;
733 source_dev_tab
.dev_intpolarity
= 0;
735 /* init device table for target - static bus controller - */
736 target_dev_tab
.dev_id
= DSCR_CMD0_ALWAYS
;
737 target_dev_tab
.dev_tsize
= 8;
738 target_dev_tab
.dev_devwidth
= 32;
739 target_dev_tab
.dev_physaddr
= (u32
)AU1XXX_ATA_PHYS_ADDR
;
740 target_dev_tab
.dev_intlevel
= 0;
741 target_dev_tab
.dev_intpolarity
= 0;
742 target_dev_tab
.dev_flags
= DEV_FLAGS_ANYUSE
;
745 source_dev_tab
.dev_tsize
= 1;
746 source_dev_tab
.dev_devwidth
= 16;
747 source_dev_tab
.dev_physaddr
= (u32
)AU1XXX_ATA_PHYS_ADDR
;
748 source_dev_tab
.dev_intlevel
= 0;
749 source_dev_tab
.dev_intpolarity
= 0;
751 /* init device table for target - static bus controller - */
752 target_dev_tab
.dev_id
= DSCR_CMD0_ALWAYS
;
753 target_dev_tab
.dev_tsize
= 1;
754 target_dev_tab
.dev_devwidth
= 16;
755 target_dev_tab
.dev_physaddr
= (u32
)AU1XXX_ATA_PHYS_ADDR
;
756 target_dev_tab
.dev_intlevel
= 0;
757 target_dev_tab
.dev_intpolarity
= 0;
758 target_dev_tab
.dev_flags
= DEV_FLAGS_ANYUSE
;
760 sprintf(&warning_output
[0][0],
761 "%s is not on ide driver white list.",
762 auide_hwif
.drive
->id
->model
);
763 for ( i
=strlen(&warning_output
[0][0]) ; i
<76; i
++ ){
764 sprintf(&warning_output
[0][i
]," ");
767 sprintf(&warning_output
[1][0],
768 "To add %s please read 'Documentation/mips/AU1xxx_IDE.README'.",
769 auide_hwif
.drive
->id
->model
);
770 for ( i
=strlen(&warning_output
[1][0]) ; i
<76; i
++ ){
771 sprintf(&warning_output
[1][i
]," ");
774 printk("\n****************************************");
775 printk("****************************************\n");
776 printk("* %s *\n",&warning_output
[0][0]);
777 printk("* Switch to safe MWDMA Mode! ");
779 printk("* %s *\n",&warning_output
[1][0]);
780 printk("****************************************");
781 printk("****************************************\n\n");
784 source_dev_tab
.dev_id
= DSCR_CMD0_ALWAYS
;
785 source_dev_tab
.dev_tsize
= 8;
786 source_dev_tab
.dev_devwidth
= 32;
787 source_dev_tab
.dev_physaddr
= (u32
)AU1XXX_ATA_PHYS_ADDR
;
788 source_dev_tab
.dev_intlevel
= 0;
789 source_dev_tab
.dev_intpolarity
= 0;
792 #if CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON
793 /* set flags for tx channel */
794 source_dev_tab
.dev_flags
= DEV_FLAGS_OUT
796 | DEV_FLAGS_BURSTABLE
;
797 auide
->tx_dev_id
= au1xxx_ddma_add_device( &source_dev_tab
);
798 /* set flags for rx channel */
799 source_dev_tab
.dev_flags
= DEV_FLAGS_IN
801 | DEV_FLAGS_BURSTABLE
;
802 auide
->rx_dev_id
= au1xxx_ddma_add_device( &source_dev_tab
);
804 /* set flags for tx channel */
805 source_dev_tab
.dev_flags
= DEV_FLAGS_OUT
| DEV_FLAGS_SYNC
;
806 auide
->tx_dev_id
= au1xxx_ddma_add_device( &source_dev_tab
);
807 /* set flags for rx channel */
808 source_dev_tab
.dev_flags
= DEV_FLAGS_IN
| DEV_FLAGS_SYNC
;
809 auide
->rx_dev_id
= au1xxx_ddma_add_device( &source_dev_tab
);
812 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
814 auide
->target_dev_id
= au1xxx_ddma_add_device(&target_dev_tab
);
816 /* Get a channel for TX */
817 auide
->tx_chan
= au1xxx_dbdma_chan_alloc(auide
->target_dev_id
,
819 auide_ddma_tx_callback
,
821 /* Get a channel for RX */
822 auide
->rx_chan
= au1xxx_dbdma_chan_alloc(auide
->rx_dev_id
,
823 auide
->target_dev_id
,
824 auide_ddma_rx_callback
,
826 #else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */
828 * Note: if call back is not enabled, update ctp->cur_ptr manually
830 auide
->tx_chan
= au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS
,
834 auide
->rx_chan
= au1xxx_dbdma_chan_alloc(auide
->rx_dev_id
,
839 auide
->tx_desc_head
= (void*)au1xxx_dbdma_ring_alloc(auide
->tx_chan
,
841 auide
->rx_desc_head
= (void*)au1xxx_dbdma_ring_alloc(auide
->rx_chan
,
844 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
845 hwif
->dmatable_cpu
= dma_alloc_coherent(auide
->dev
,
846 PRD_ENTRIES
* PRD_BYTES
, /* 1 Page */
847 &hwif
->dmatable_dma
, GFP_KERNEL
);
849 auide
->sg_table
= kmalloc(sizeof(struct scatterlist
) * PRD_ENTRIES
,
851 if (auide
->sg_table
== NULL
) {
855 au1xxx_dbdma_start( auide
->tx_chan
);
856 au1xxx_dbdma_start( auide
->rx_chan
);
860 static void auide_setup_ports(hw_regs_t
*hw
, _auide_hwif
*ahwif
)
863 #define ide_ioreg_t unsigned long
864 ide_ioreg_t
*ata_regs
= hw
->io_ports
;
867 for (i
= 0; i
< IDE_CONTROL_OFFSET
; i
++) {
868 *ata_regs
++ = (ide_ioreg_t
) ahwif
->regbase
869 + (ide_ioreg_t
)(i
<< AU1XXX_ATA_REG_OFFSET
);
872 /* set the Alternative Status register */
873 *ata_regs
= (ide_ioreg_t
) ahwif
->regbase
874 + (ide_ioreg_t
)(14 << AU1XXX_ATA_REG_OFFSET
);
877 static int au_ide_probe(struct device
*dev
)
879 struct platform_device
*pdev
= to_platform_device(dev
);
880 _auide_hwif
*ahwif
= &auide_hwif
;
882 struct resource
*res
;
885 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
886 char *mode
= "MWDMA2";
887 #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
888 char *mode
= "PIO+DDMA(offload)";
891 memset(&auide_hwif
, 0, sizeof(_auide_hwif
));
895 ahwif
->irq
= platform_get_irq(pdev
, 0);
897 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
900 pr_debug("%s %d: no base address\n", DRV_NAME
, pdev
->id
);
905 if (!request_mem_region (res
->start
, res
->end
-res
->start
, pdev
->name
)) {
906 pr_debug("%s: request_mem_region failed\n", DRV_NAME
);
911 ahwif
->regbase
= (u32
)ioremap(res
->start
, res
->end
-res
->start
);
912 if (ahwif
->regbase
== 0) {
917 hwif
= &ide_hwifs
[pdev
->id
];
918 hw_regs_t
*hw
= &hwif
->hw
;
919 hwif
->irq
= hw
->irq
= ahwif
->irq
;
920 hwif
->chipset
= ide_au1xxx
;
922 auide_setup_ports(hw
, ahwif
);
923 memcpy(hwif
->io_ports
, hw
->io_ports
, sizeof(hwif
->io_ports
));
925 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
926 hwif
->rqsize
= CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
;
927 hwif
->rqsize
= ((hwif
->rqsize
> AU1XXX_ATA_RQSIZE
)
928 || (hwif
->rqsize
< 32)) ? AU1XXX_ATA_RQSIZE
: hwif
->rqsize
;
929 #else /* if kernel config is not set */
930 hwif
->rqsize
= AU1XXX_ATA_RQSIZE
;
933 hwif
->ultra_mask
= 0x0; /* Disable Ultra DMA */
934 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
935 hwif
->mwdma_mask
= 0x07; /* Multimode-2 DMA */
936 hwif
->swdma_mask
= 0x07;
938 hwif
->mwdma_mask
= 0x0;
939 hwif
->swdma_mask
= 0x0;
941 //hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
943 hwif
->drives
[0].unmask
= 1;
944 hwif
->drives
[1].unmask
= 1;
946 /* hold should be on in all cases */
950 /* set up local I/O function entry points */
951 hwif
->INB
= auide_inb
;
952 hwif
->INW
= auide_inw
;
953 hwif
->INL
= auide_inl
;
954 hwif
->INSW
= auide_insw
;
955 hwif
->INSL
= auide_insl
;
956 hwif
->OUTB
= auide_outb
;
957 hwif
->OUTBSYNC
= auide_outbsync
;
958 hwif
->OUTW
= auide_outw
;
959 hwif
->OUTL
= auide_outl
;
960 hwif
->OUTSW
= auide_outsw
;
961 hwif
->OUTSL
= auide_outsl
;
963 hwif
->tuneproc
= &auide_tune_drive
;
964 hwif
->speedproc
= &auide_tune_chipset
;
966 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
967 hwif
->ide_dma_off_quietly
= &auide_dma_off_quietly
;
968 hwif
->ide_dma_timeout
= &auide_dma_timeout
;
970 hwif
->ide_dma_check
= &auide_dma_check
;
971 hwif
->dma_exec_cmd
= &auide_dma_exec_cmd
;
972 hwif
->dma_start
= &auide_dma_start
;
973 hwif
->ide_dma_end
= &auide_dma_end
;
974 hwif
->dma_setup
= &auide_dma_setup
;
975 hwif
->ide_dma_test_irq
= &auide_dma_test_irq
;
976 hwif
->ide_dma_host_off
= &auide_dma_host_off
;
977 hwif
->ide_dma_host_on
= &auide_dma_host_on
;
978 hwif
->ide_dma_lostirq
= &auide_dma_lostirq
;
979 hwif
->ide_dma_on
= &auide_dma_on
;
982 hwif
->drives
[0].autodma
= hwif
->autodma
;
983 hwif
->drives
[1].autodma
= hwif
->autodma
;
985 hwif
->drives
[0].using_dma
= 1;
986 hwif
->drives
[1].using_dma
= 1;
987 #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
991 hwif
->select_data
= 0; /* no chipset-specific code */
992 hwif
->config_data
= 0; /* no chipset-specific code */
994 hwif
->drives
[0].autodma
= 0;
995 hwif
->drives
[0].drive_data
= 0; /* no drive data */
996 hwif
->drives
[0].using_dma
= 0;
997 hwif
->drives
[0].waiting_for_dma
= 0;
998 hwif
->drives
[0].autotune
= 1; /* 1=autotune, 2=noautotune, 0=default */
999 /* secondary hdd not supported */
1000 hwif
->drives
[1].autodma
= 0;
1002 hwif
->drives
[1].drive_data
= 0;
1003 hwif
->drives
[1].using_dma
= 0;
1004 hwif
->drives
[1].waiting_for_dma
= 0;
1005 hwif
->drives
[1].autotune
= 2; /* 1=autotune, 2=noautotune, 0=default */
1007 hwif
->drives
[0].io_32bit
= 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
1008 hwif
->drives
[1].io_32bit
= 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
1010 /*Register Driver with PM Framework*/
1012 auide_hwif
.pm
.lock
= SPIN_LOCK_UNLOCKED
;
1013 auide_hwif
.pm
.stopped
= 0;
1015 auide_hwif
.pm
.dev
= new_au1xxx_power_device( "ide",
1016 &au1200ide_pm_callback
,
1018 if ( auide_hwif
.pm
.dev
== NULL
)
1019 printk(KERN_INFO
"Unable to create a power management \
1020 device entry for the au1200-IDE.\n");
1022 printk(KERN_INFO
"Power management device entry for the \
1023 au1200-IDE loaded.\n");
1026 auide_hwif
.hwif
= hwif
;
1027 hwif
->hwif_data
= &auide_hwif
;
1029 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
1030 auide_ddma_init(&auide_hwif
);
1031 dbdma_init_done
= 1;
1034 probe_hwif_init(hwif
);
1035 dev_set_drvdata(dev
, hwif
);
1037 printk(KERN_INFO
"Au1xxx IDE(builtin) configured for %s\n", mode
);
1043 static int au_ide_remove(struct device
*dev
)
1045 struct platform_device
*pdev
= to_platform_device(dev
);
1046 struct resource
*res
;
1047 ide_hwif_t
*hwif
= dev_get_drvdata(dev
);
1048 _auide_hwif
*ahwif
= &auide_hwif
;
1050 ide_unregister(hwif
- ide_hwifs
);
1052 iounmap((void *)ahwif
->regbase
);
1054 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1055 release_mem_region(res
->start
, res
->end
- res
->start
);
1060 static struct device_driver au1200_ide_driver
= {
1061 .name
= "au1200-ide",
1062 .bus
= &platform_bus_type
,
1063 .probe
= au_ide_probe
,
1064 .remove
= au_ide_remove
,
1067 static int __init
au_ide_init(void)
1069 return driver_register(&au1200_ide_driver
);
1072 static void __init
au_ide_exit(void)
1074 driver_unregister(&au1200_ide_driver
);
1078 int au1200ide_pm_callback( au1xxx_power_dev_t
*dev
,\
1079 au1xxx_request_t request
, void *data
) {
1081 unsigned int d
, err
= 0;
1082 unsigned long flags
;
1084 spin_lock_irqsave(auide_hwif
.pm
.lock
, flags
);
1087 case AU1XXX_PM_SLEEP
:
1088 err
= au1xxxide_pm_sleep(dev
);
1090 case AU1XXX_PM_WAKEUP
:
1091 d
= *((unsigned int*)data
);
1092 if ( d
> 0 && d
<= 99) {
1093 err
= au1xxxide_pm_standby(dev
);
1096 err
= au1xxxide_pm_resume(dev
);
1099 case AU1XXX_PM_GETSTATUS
:
1100 err
= au1xxxide_pm_getstatus(dev
);
1102 case AU1XXX_PM_ACCESS
:
1103 err
= au1xxxide_pm_access(dev
);
1105 case AU1XXX_PM_IDLE
:
1106 err
= au1xxxide_pm_idle(dev
);
1108 case AU1XXX_PM_CLEANUP
:
1109 err
= au1xxxide_pm_cleanup(dev
);
1116 spin_unlock_irqrestore(auide_hwif
.pm
.lock
, flags
);
1121 static int au1xxxide_pm_standby( au1xxx_power_dev_t
*dev
) {
1125 static int au1xxxide_pm_sleep( au1xxx_power_dev_t
*dev
) {
1128 ide_hwif_t
*hwif
= auide_hwif
.hwif
;
1130 struct request_pm_state rqpm
;
1133 if(auide_hwif
.pm
.stopped
)
1137 * wait until hard disc is ready
1139 if ( wait_for_ready(&hwif
->drives
[0], 35000) ) {
1140 printk("Wait for drive sleep timeout!\n");
1145 * sequence to tell the high level ide driver that pm is resuming
1147 memset(&rq
, 0, sizeof(rq
));
1148 memset(&rqpm
, 0, sizeof(rqpm
));
1149 memset(&args
, 0, sizeof(args
));
1150 rq
.flags
= REQ_PM_SUSPEND
;
1153 rqpm
.pm_step
= ide_pm_state_start_suspend
;
1154 rqpm
.pm_state
= PMSG_SUSPEND
;
1156 retval
= ide_do_drive_cmd(&hwif
->drives
[0], &rq
, ide_wait
);
1158 if (wait_for_ready (&hwif
->drives
[0], 35000)) {
1159 printk("Wait for drive sleep timeout!\n");
1164 * stop dbdma channels
1166 au1xxx_dbdma_reset(auide_hwif
.tx_chan
);
1167 au1xxx_dbdma_reset(auide_hwif
.rx_chan
);
1169 auide_hwif
.pm
.stopped
= 1;
1174 static int au1xxxide_pm_resume( au1xxx_power_dev_t
*dev
) {
1177 ide_hwif_t
*hwif
= auide_hwif
.hwif
;
1179 struct request_pm_state rqpm
;
1182 if(!auide_hwif
.pm
.stopped
)
1186 * start dbdma channels
1188 au1xxx_dbdma_start(auide_hwif
.tx_chan
);
1189 au1xxx_dbdma_start(auide_hwif
.rx_chan
);
1192 * wait until hard disc is ready
1194 if (wait_for_ready ( &hwif
->drives
[0], 35000)) {
1195 printk("Wait for drive wake up timeout!\n");
1200 * sequence to tell the high level ide driver that pm is resuming
1202 memset(&rq
, 0, sizeof(rq
));
1203 memset(&rqpm
, 0, sizeof(rqpm
));
1204 memset(&args
, 0, sizeof(args
));
1205 rq
.flags
= REQ_PM_RESUME
;
1208 rqpm
.pm_step
= ide_pm_state_start_resume
;
1209 rqpm
.pm_state
= PMSG_ON
;
1211 retval
= ide_do_drive_cmd(&hwif
->drives
[0], &rq
, ide_head_wait
);
1214 * wait for hard disc
1216 if ( wait_for_ready(&hwif
->drives
[0], 35000) ) {
1217 printk("Wait for drive wake up timeout!\n");
1221 auide_hwif
.pm
.stopped
= 0;
1226 static int au1xxxide_pm_getstatus( au1xxx_power_dev_t
*dev
) {
1227 return dev
->cur_state
;
1230 static int au1xxxide_pm_access( au1xxx_power_dev_t
*dev
) {
1231 if (dev
->cur_state
!= AWAKE_STATE
)
1237 static int au1xxxide_pm_idle( au1xxx_power_dev_t
*dev
) {
1241 static int au1xxxide_pm_cleanup( au1xxx_power_dev_t
*dev
) {
1244 #endif /* CONFIG_PM */
1246 MODULE_LICENSE("GPL");
1247 MODULE_DESCRIPTION("AU1200 IDE driver");
1249 module_init(au_ide_init
);
1250 module_exit(au_ide_exit
);