/*
 * linux/drivers/ide/arm/icside.c
 *
 * Copyright (c) 1996-2003 Russell King.
 */
#include <linux/config.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>

#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
/* Offset of the podule identity bytes within the card's address space. */
#define ICS_IDENT_OFFSET		0x2280

/* Version 5 PCB register layout. */
#define ICS_ARCIN_V5_INTRSTAT		0x000
#define ICS_ARCIN_V5_INTROFFSET		0x001
#define ICS_ARCIN_V5_IDEOFFSET		0xa00
#define ICS_ARCIN_V5_IDEALTOFFSET	0xae0
#define ICS_ARCIN_V5_IDESTEPPING	4

/* Version 6 PCB register layout - two IDE channels per card. */
#define ICS_ARCIN_V6_IDEOFFSET_1	0x800
#define ICS_ARCIN_V6_INTROFFSET_1	0x880
#define ICS_ARCIN_V6_INTRSTAT_1		0x8a4
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x8e0
#define ICS_ARCIN_V6_IDEOFFSET_2	0xc00
#define ICS_ARCIN_V6_INTROFFSET_2	0xc80
#define ICS_ARCIN_V6_INTRSTAT_2		0xca4
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0xce0
#define ICS_ARCIN_V6_IDESTEPPING	4
/*
 * Describes where one IDE register bank lives on a card variant.
 */
struct cardinfo {
	unsigned int dataoffset;	/* offset of the taskfile registers */
	unsigned int ctrloffset;	/* offset of the alt-status/control register */
	unsigned int stepping;		/* log2 byte spacing between registers */
};
48 static struct cardinfo icside_cardinfo_v5
= {
49 ICS_ARCIN_V5_IDEOFFSET
,
50 ICS_ARCIN_V5_IDEALTOFFSET
,
51 ICS_ARCIN_V5_IDESTEPPING
54 static struct cardinfo icside_cardinfo_v6_1
= {
55 ICS_ARCIN_V6_IDEOFFSET_1
,
56 ICS_ARCIN_V6_IDEALTOFFSET_1
,
57 ICS_ARCIN_V6_IDESTEPPING
60 static struct cardinfo icside_cardinfo_v6_2
= {
61 ICS_ARCIN_V6_IDEOFFSET_2
,
62 ICS_ARCIN_V6_IDEALTOFFSET_2
,
63 ICS_ARCIN_V6_IDESTEPPING
69 unsigned long irq_port
;
70 unsigned long slot_port
;
72 /* parent device... until the IDE core gets one of its own */
/* Card types decoded from the identity bytes in icside_probe(). */
#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3	/* NOTE(review): value reconstructed; probe dispatches on it */
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)
83 /* ---------------- Version 5 PCB Support Functions --------------------- */
84 /* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
85 * Purpose : enable interrupts from card
87 static void icside_irqenable_arcin_v5 (struct expansion_card
*ec
, int irqnr
)
89 struct icside_state
*state
= ec
->irq_data
;
90 unsigned int base
= state
->irq_port
;
92 outb(0, base
+ ICS_ARCIN_V5_INTROFFSET
);
95 /* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
96 * Purpose : disable interrupts from card
98 static void icside_irqdisable_arcin_v5 (struct expansion_card
*ec
, int irqnr
)
100 struct icside_state
*state
= ec
->irq_data
;
101 unsigned int base
= state
->irq_port
;
103 inb(base
+ ICS_ARCIN_V5_INTROFFSET
);
106 static const expansioncard_ops_t icside_ops_arcin_v5
= {
107 .irqenable
= icside_irqenable_arcin_v5
,
108 .irqdisable
= icside_irqdisable_arcin_v5
,
112 /* ---------------- Version 6 PCB Support Functions --------------------- */
113 /* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
114 * Purpose : enable interrupts from card
116 static void icside_irqenable_arcin_v6 (struct expansion_card
*ec
, int irqnr
)
118 struct icside_state
*state
= ec
->irq_data
;
119 unsigned int base
= state
->irq_port
;
123 switch (state
->channel
) {
125 outb(0, base
+ ICS_ARCIN_V6_INTROFFSET_1
);
126 inb(base
+ ICS_ARCIN_V6_INTROFFSET_2
);
129 outb(0, base
+ ICS_ARCIN_V6_INTROFFSET_2
);
130 inb(base
+ ICS_ARCIN_V6_INTROFFSET_1
);
135 /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
136 * Purpose : disable interrupts from card
138 static void icside_irqdisable_arcin_v6 (struct expansion_card
*ec
, int irqnr
)
140 struct icside_state
*state
= ec
->irq_data
;
144 inb (state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
145 inb (state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
148 /* Prototype: icside_irqprobe(struct expansion_card *ec)
149 * Purpose : detect an active interrupt from card
151 static int icside_irqpending_arcin_v6(struct expansion_card
*ec
)
153 struct icside_state
*state
= ec
->irq_data
;
155 return inb(state
->irq_port
+ ICS_ARCIN_V6_INTRSTAT_1
) & 1 ||
156 inb(state
->irq_port
+ ICS_ARCIN_V6_INTRSTAT_2
) & 1;
159 static const expansioncard_ops_t icside_ops_arcin_v6
= {
160 .irqenable
= icside_irqenable_arcin_v6
,
161 .irqdisable
= icside_irqdisable_arcin_v6
,
162 .irqpending
= icside_irqpending_arcin_v6
,
166 * Handle routing of interrupts. This is called before
167 * we write the command to the drive.
169 static void icside_maskproc(ide_drive_t
*drive
, int mask
)
171 ide_hwif_t
*hwif
= HWIF(drive
);
172 struct icside_state
*state
= hwif
->hwif_data
;
175 local_irq_save(flags
);
177 state
->channel
= hwif
->channel
;
179 if (state
->enabled
&& !mask
) {
180 switch (hwif
->channel
) {
182 outb(0, state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
183 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
186 outb(0, state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
187 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
191 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
192 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
195 local_irq_restore(flags
);
198 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
202 * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
203 * There is only one DMA controller per card, which means that only
204 * one drive can be accessed at one time. NOTE! We do not enforce that
205 * here, but we rely on the main IDE driver spotting that both
206 * interfaces use the same IRQ, which should guarantee this.
208 #define NR_ENTRIES 256
209 #define TABLE_SIZE (NR_ENTRIES * 8)
211 static void icside_build_sglist(ide_drive_t
*drive
, struct request
*rq
)
213 ide_hwif_t
*hwif
= drive
->hwif
;
214 struct icside_state
*state
= hwif
->hwif_data
;
215 struct scatterlist
*sg
= hwif
->sg_table
;
218 if (rq
->flags
& REQ_DRIVE_TASKFILE
) {
219 ide_task_t
*args
= rq
->special
;
221 if (args
->command_type
== IDE_DRIVE_TASK_RAW_WRITE
)
222 hwif
->sg_dma_direction
= DMA_TO_DEVICE
;
224 hwif
->sg_dma_direction
= DMA_FROM_DEVICE
;
226 memset(sg
, 0, sizeof(*sg
));
227 sg
->page
= virt_to_page(rq
->buffer
);
228 sg
->offset
= offset_in_page(rq
->buffer
);
229 sg
->length
= rq
->nr_sectors
* SECTOR_SIZE
;
232 nents
= blk_rq_map_sg(drive
->queue
, rq
, sg
);
234 if (rq_data_dir(rq
) == READ
)
235 hwif
->sg_dma_direction
= DMA_FROM_DEVICE
;
237 hwif
->sg_dma_direction
= DMA_TO_DEVICE
;
240 nents
= dma_map_sg(state
->dev
, sg
, nents
, hwif
->sg_dma_direction
);
242 hwif
->sg_nents
= nents
;
247 * Configure the IOMD to give the appropriate timings for the transfer
248 * mode being requested. We take the advice of the ATA standards, and
249 * calculate the cycle time based on the transfer mode, and the EIDE
250 * MW DMA specs that the drive provides in the IDENTIFY command.
252 * We have the following IOMD DMA modes to choose from:
254 * Type Active Recovery Cycle
255 * A 250 (250) 312 (550) 562 (800)
257 * C 125 (125) 125 (375) 250 (500)
260 * (figures in brackets are actual measured timings)
262 * However, we also need to take care of the read/write active and
266 * Mode Active -- Recovery -- Cycle IOMD type
267 * MW0 215 50 215 480 A
271 static int icside_set_speed(ide_drive_t
*drive
, u8 xfer_mode
)
273 int on
= 0, cycle_time
= 0, use_dma_info
= 0;
276 * Limit the transfer speed to MW_DMA_2.
278 if (xfer_mode
> XFER_MW_DMA_2
)
279 xfer_mode
= XFER_MW_DMA_2
;
304 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
305 * take care to note the values in the ID...
307 if (use_dma_info
&& drive
->id
->eide_dma_time
> cycle_time
)
308 cycle_time
= drive
->id
->eide_dma_time
;
310 drive
->drive_data
= cycle_time
;
312 if (cycle_time
&& ide_config_drive_speed(drive
, xfer_mode
) == 0)
315 drive
->drive_data
= 480;
317 printk("%s: %s selected (peak %dMB/s)\n", drive
->name
,
318 ide_xfer_verbose(xfer_mode
), 2000 / drive
->drive_data
);
320 drive
->current_speed
= xfer_mode
;
325 static int icside_dma_host_off(ide_drive_t
*drive
)
330 static int icside_dma_off_quietly(ide_drive_t
*drive
)
332 drive
->using_dma
= 0;
333 return icside_dma_host_off(drive
);
336 static int icside_dma_host_on(ide_drive_t
*drive
)
341 static int icside_dma_on(ide_drive_t
*drive
)
343 drive
->using_dma
= 1;
344 return icside_dma_host_on(drive
);
347 static int icside_dma_check(ide_drive_t
*drive
)
349 struct hd_driveid
*id
= drive
->id
;
350 ide_hwif_t
*hwif
= HWIF(drive
);
351 int xfer_mode
= XFER_PIO_2
;
354 if (!(id
->capability
& 1) || !hwif
->autodma
)
358 * Consult the list of known "bad" drives
360 if (__ide_dma_bad_drive(drive
))
364 * Enable DMA on any drive that has multiword DMA
366 if (id
->field_valid
& 2) {
367 xfer_mode
= ide_dma_speed(drive
, 0);
372 * Consult the list of known "good" drives
374 if (__ide_dma_good_drive(drive
)) {
375 if (id
->eide_dma_time
> 150)
377 xfer_mode
= XFER_MW_DMA_1
;
381 on
= icside_set_speed(drive
, xfer_mode
);
384 return icside_dma_on(drive
);
386 return icside_dma_off_quietly(drive
);
389 static int icside_dma_end(ide_drive_t
*drive
)
391 ide_hwif_t
*hwif
= HWIF(drive
);
392 struct icside_state
*state
= hwif
->hwif_data
;
394 drive
->waiting_for_dma
= 0;
396 disable_dma(hwif
->hw
.dma
);
398 /* Teardown mappings after DMA has completed. */
399 dma_unmap_sg(state
->dev
, hwif
->sg_table
, hwif
->sg_nents
,
400 hwif
->sg_dma_direction
);
402 return get_dma_residue(hwif
->hw
.dma
) != 0;
405 static int icside_dma_begin(ide_drive_t
*drive
)
407 ide_hwif_t
*hwif
= HWIF(drive
);
409 /* We can not enable DMA on both channels simultaneously. */
410 BUG_ON(dma_channel_active(hwif
->hw
.dma
));
411 enable_dma(hwif
->hw
.dma
);
416 * dma_intr() is the handler for disk read/write DMA interrupts
418 static ide_startstop_t
icside_dmaintr(ide_drive_t
*drive
)
423 dma_stat
= icside_dma_end(drive
);
424 stat
= HWIF(drive
)->INB(IDE_STATUS_REG
);
425 if (OK_STAT(stat
, DRIVE_READY
, drive
->bad_wstat
| DRQ_STAT
)) {
427 struct request
*rq
= HWGROUP(drive
)->rq
;
430 for (i
= rq
->nr_sectors
; i
> 0; ) {
431 i
-= rq
->current_nr_sectors
;
432 DRIVER(drive
)->end_request(drive
, 1, rq
->nr_sectors
);
437 printk(KERN_ERR
"%s: bad DMA status (dma_stat=%x)\n",
438 drive
->name
, dma_stat
);
441 return DRIVER(drive
)->error(drive
, __FUNCTION__
, stat
);
445 icside_dma_common(ide_drive_t
*drive
, struct request
*rq
,
446 unsigned int dma_mode
)
448 ide_hwif_t
*hwif
= HWIF(drive
);
451 * We can not enable DMA on both channels.
453 BUG_ON(dma_channel_active(hwif
->hw
.dma
));
455 icside_build_sglist(drive
, rq
);
458 * Ensure that we have the right interrupt routed.
460 icside_maskproc(drive
, 0);
463 * Route the DMA signals to the correct interface.
465 outb(hwif
->select_data
, hwif
->config_data
);
468 * Select the correct timing for this drive.
470 set_dma_speed(hwif
->hw
.dma
, drive
->drive_data
);
473 * Tell the DMA engine about the SG table and
476 set_dma_sg(hwif
->hw
.dma
, hwif
->sg_table
, hwif
->sg_nents
);
477 set_dma_mode(hwif
->hw
.dma
, dma_mode
);
479 drive
->waiting_for_dma
= 1;
484 static int icside_dma_read(ide_drive_t
*drive
)
486 struct request
*rq
= HWGROUP(drive
)->rq
;
489 if (icside_dma_common(drive
, rq
, DMA_MODE_READ
))
492 if (drive
->media
!= ide_disk
)
495 BUG_ON(HWGROUP(drive
)->handler
!= NULL
);
498 * FIX ME to use only ACB ide_task_t args Struct
502 ide_task_t
*args
= rq
->special
;
503 cmd
= args
->tfRegister
[IDE_COMMAND_OFFSET
];
506 if (rq
->flags
& REQ_DRIVE_TASKFILE
) {
507 ide_task_t
*args
= rq
->special
;
508 cmd
= args
->tfRegister
[IDE_COMMAND_OFFSET
];
509 } else if (drive
->addressing
== 1) {
510 cmd
= WIN_READDMA_EXT
;
515 /* issue cmd to drive */
516 ide_execute_command(drive
, cmd
, icside_dmaintr
, 2*WAIT_CMD
, NULL
);
518 return icside_dma_begin(drive
);
521 static int icside_dma_write(ide_drive_t
*drive
)
523 struct request
*rq
= HWGROUP(drive
)->rq
;
526 if (icside_dma_common(drive
, rq
, DMA_MODE_WRITE
))
529 if (drive
->media
!= ide_disk
)
532 BUG_ON(HWGROUP(drive
)->handler
!= NULL
);
535 * FIX ME to use only ACB ide_task_t args Struct
539 ide_task_t
*args
= rq
->special
;
540 cmd
= args
->tfRegister
[IDE_COMMAND_OFFSET
];
543 if (rq
->flags
& REQ_DRIVE_TASKFILE
) {
544 ide_task_t
*args
= rq
->special
;
545 cmd
= args
->tfRegister
[IDE_COMMAND_OFFSET
];
546 } else if (drive
->addressing
== 1) {
547 cmd
= WIN_WRITEDMA_EXT
;
553 /* issue cmd to drive */
554 ide_execute_command(drive
, cmd
, icside_dmaintr
, 2*WAIT_CMD
, NULL
);
556 return icside_dma_begin(drive
);
559 static int icside_dma_test_irq(ide_drive_t
*drive
)
561 ide_hwif_t
*hwif
= HWIF(drive
);
562 struct icside_state
*state
= hwif
->hwif_data
;
564 return inb(state
->irq_port
+
566 ICS_ARCIN_V6_INTRSTAT_2
:
567 ICS_ARCIN_V6_INTRSTAT_1
)) & 1;
570 static int icside_dma_verbose(ide_drive_t
*drive
)
572 printk(", %s (peak %dMB/s)",
573 ide_xfer_verbose(drive
->current_speed
),
574 2000 / drive
->drive_data
);
578 static int icside_dma_timeout(ide_drive_t
*drive
)
580 printk(KERN_ERR
"%s: DMA timeout occurred: ", drive
->name
);
582 if (icside_dma_test_irq(drive
))
585 ide_dump_status(drive
, "DMA timeout",
586 HWIF(drive
)->INB(IDE_STATUS_REG
));
588 return icside_dma_end(drive
);
591 static int icside_dma_lostirq(ide_drive_t
*drive
)
593 printk(KERN_ERR
"%s: IRQ lost\n", drive
->name
);
597 static int icside_dma_init(ide_hwif_t
*hwif
)
601 #ifdef CONFIG_IDEDMA_ICS_AUTO
605 printk(" %s: SG-DMA", hwif
->name
);
607 hwif
->sg_table
= kmalloc(sizeof(struct scatterlist
) * NR_ENTRIES
,
613 hwif
->mwdma_mask
= 7; /* MW0..2 */
614 hwif
->swdma_mask
= 7; /* SW0..2 */
616 hwif
->dmatable_cpu
= NULL
;
617 hwif
->dmatable_dma
= 0;
618 hwif
->speedproc
= icside_set_speed
;
619 hwif
->autodma
= autodma
;
621 hwif
->ide_dma_check
= icside_dma_check
;
622 hwif
->ide_dma_host_off
= icside_dma_host_off
;
623 hwif
->ide_dma_off_quietly
= icside_dma_off_quietly
;
624 hwif
->ide_dma_host_on
= icside_dma_host_on
;
625 hwif
->ide_dma_on
= icside_dma_on
;
626 hwif
->ide_dma_read
= icside_dma_read
;
627 hwif
->ide_dma_write
= icside_dma_write
;
628 hwif
->ide_dma_begin
= icside_dma_begin
;
629 hwif
->ide_dma_end
= icside_dma_end
;
630 hwif
->ide_dma_test_irq
= icside_dma_test_irq
;
631 hwif
->ide_dma_verbose
= icside_dma_verbose
;
632 hwif
->ide_dma_timeout
= icside_dma_timeout
;
633 hwif
->ide_dma_lostirq
= icside_dma_lostirq
;
635 hwif
->drives
[0].autodma
= hwif
->autodma
;
636 hwif
->drives
[1].autodma
= hwif
->autodma
;
638 printk(" capable%s\n", hwif
->autodma
? ", auto-enable" : "");
643 printk(" disabled, unable to allocate DMA table\n");
647 static void icside_dma_exit(ide_hwif_t
*hwif
)
649 if (hwif
->sg_table
) {
650 kfree(hwif
->sg_table
);
651 hwif
->sg_table
= NULL
;
655 #define icside_dma_init(hwif) (0)
656 #define icside_dma_exit(hwif) do { } while (0)
659 static ide_hwif_t
*icside_find_hwif(unsigned long dataport
)
664 for (index
= 0; index
< MAX_HWIFS
; ++index
) {
665 hwif
= &ide_hwifs
[index
];
666 if (hwif
->io_ports
[IDE_DATA_OFFSET
] == dataport
)
670 for (index
= 0; index
< MAX_HWIFS
; ++index
) {
671 hwif
= &ide_hwifs
[index
];
672 if (!hwif
->io_ports
[IDE_DATA_OFFSET
])
682 icside_setup(unsigned long base
, struct cardinfo
*info
, struct expansion_card
*ec
)
684 unsigned long port
= base
+ info
->dataoffset
;
687 hwif
= icside_find_hwif(base
);
691 memset(&hwif
->hw
, 0, sizeof(hw_regs_t
));
693 for (i
= IDE_DATA_OFFSET
; i
<= IDE_STATUS_OFFSET
; i
++) {
694 hwif
->hw
.io_ports
[i
] = port
;
695 hwif
->io_ports
[i
] = port
;
696 port
+= 1 << info
->stepping
;
698 hwif
->hw
.io_ports
[IDE_CONTROL_OFFSET
] = base
+ info
->ctrloffset
;
699 hwif
->io_ports
[IDE_CONTROL_OFFSET
] = base
+ info
->ctrloffset
;
700 hwif
->hw
.irq
= ec
->irq
;
703 hwif
->chipset
= ide_acorn
;
704 hwif
->gendev
.parent
= &ec
->dev
;
711 icside_register_v5(struct icside_state
*state
, struct expansion_card
*ec
)
713 unsigned long slot_port
;
716 slot_port
= ecard_address(ec
, ECARD_MEMC
, 0);
718 state
->irq_port
= slot_port
;
720 ec
->irqaddr
= (unsigned char *)ioaddr(slot_port
+ ICS_ARCIN_V5_INTRSTAT
);
722 ec
->irq_data
= state
;
723 ec
->ops
= &icside_ops_arcin_v5
;
726 * Be on the safe side - disable interrupts
728 inb(slot_port
+ ICS_ARCIN_V5_INTROFFSET
);
730 hwif
= icside_setup(slot_port
, &icside_cardinfo_v5
, ec
);
732 state
->hwif
[0] = hwif
;
734 return hwif
? 0 : -ENODEV
;
738 icside_register_v6(struct icside_state
*state
, struct expansion_card
*ec
)
740 unsigned long slot_port
, port
;
741 ide_hwif_t
*hwif
, *mate
;
742 unsigned int sel
= 0;
744 slot_port
= ecard_address(ec
, ECARD_IOC
, ECARD_FAST
);
745 port
= ecard_address(ec
, ECARD_EASI
, ECARD_FAST
);
752 outb(sel
, slot_port
);
755 * Be on the safe side - disable interrupts
757 inb(port
+ ICS_ARCIN_V6_INTROFFSET_1
);
758 inb(port
+ ICS_ARCIN_V6_INTROFFSET_2
);
761 * Find and register the interfaces.
763 hwif
= icside_setup(port
, &icside_cardinfo_v6_1
, ec
);
764 mate
= icside_setup(port
, &icside_cardinfo_v6_2
, ec
);
769 state
->irq_port
= port
;
770 state
->slot_port
= slot_port
;
771 state
->hwif
[0] = hwif
;
772 state
->hwif
[1] = mate
;
774 ec
->irq_data
= state
;
775 ec
->ops
= &icside_ops_arcin_v6
;
777 hwif
->maskproc
= icside_maskproc
;
779 hwif
->hwif_data
= state
;
781 hwif
->serialized
= 1;
782 hwif
->config_data
= slot_port
;
783 hwif
->select_data
= sel
;
784 hwif
->hw
.dma
= ec
->dma
;
786 mate
->maskproc
= icside_maskproc
;
788 mate
->hwif_data
= state
;
790 mate
->serialized
= 1;
791 mate
->config_data
= slot_port
;
792 mate
->select_data
= sel
| 1;
793 mate
->hw
.dma
= ec
->dma
;
795 if (ec
->dma
!= NO_DMA
&& !request_dma(ec
->dma
, hwif
->name
)) {
796 icside_dma_init(hwif
);
797 icside_dma_init(mate
);
804 icside_probe(struct expansion_card
*ec
, const struct ecard_id
*id
)
806 struct icside_state
*state
;
810 state
= kmalloc(sizeof(struct icside_state
), GFP_KERNEL
);
816 memset(state
, 0, sizeof(state
));
817 state
->type
= ICS_TYPE_NOTYPE
;
818 state
->dev
= &ec
->dev
;
820 idmem
= ioremap(ecard_resource_start(ec
, ECARD_RES_IOCFAST
),
821 ecard_resource_len(ec
, ECARD_RES_IOCFAST
));
825 type
= readb(idmem
+ ICS_IDENT_OFFSET
) & 1;
826 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 4) & 1) << 1;
827 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 8) & 1) << 2;
828 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 12) & 1) << 3;
834 switch (state
->type
) {
836 printk(KERN_WARNING
"icside: A3IN unsupported\n");
840 case ICS_TYPE_A3USER
:
841 printk(KERN_WARNING
"icside: A3USER unsupported\n");
846 ret
= icside_register_v5(state
, ec
);
850 ret
= icside_register_v6(state
, ec
);
854 printk(KERN_WARNING
"icside: unknown interface type\n");
860 ecard_set_drvdata(ec
, state
);
867 static void __devexit
icside_remove(struct expansion_card
*ec
)
869 struct icside_state
*state
= ecard_get_drvdata(ec
);
871 switch (state
->type
) {
873 /* FIXME: tell IDE to stop using the interface */
875 /* Disable interrupts */
876 inb(state
->slot_port
+ ICS_ARCIN_V5_INTROFFSET
);
880 /* FIXME: tell IDE to stop using the interface */
881 icside_dma_exit(state
->hwif
[1]);
882 icside_dma_exit(state
->hwif
[0]);
884 if (ec
->dma
!= NO_DMA
)
887 /* Disable interrupts */
888 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
889 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
891 /* Reset the ROM pointer/EASI selection */
892 outb(0, state
->slot_port
);
896 ecard_set_drvdata(ec
, NULL
);
903 static void icside_shutdown(struct expansion_card
*ec
)
905 struct icside_state
*state
= ecard_get_drvdata(ec
);
907 switch (state
->type
) {
909 /* Disable interrupts */
910 inb(state
->slot_port
+ ICS_ARCIN_V5_INTROFFSET
);
914 /* Disable interrupts */
915 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
916 inb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
918 /* Reset the ROM pointer/EASI selection */
919 outb(0, state
->slot_port
);
924 static const struct ecard_id icside_ids
[] = {
925 { MANU_ICS
, PROD_ICS_IDE
},
926 { MANU_ICS2
, PROD_ICS2_IDE
},
930 static struct ecard_driver icside_driver
= {
931 .probe
= icside_probe
,
932 .remove
= __devexit_p(icside_remove
),
933 .shutdown
= icside_shutdown
,
934 .id_table
= icside_ids
,
940 static int __init
icside_init(void)
942 return ecard_register_driver(&icside_driver
);
945 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
946 MODULE_LICENSE("GPL");
947 MODULE_DESCRIPTION("ICS IDE driver");
949 module_init(icside_init
);