/*
 * linux/drivers/ide/arm/icside.c
 *
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */
9 #include <linux/config.h>
10 #include <linux/string.h>
11 #include <linux/module.h>
12 #include <linux/ioport.h>
13 #include <linux/slab.h>
14 #include <linux/blkdev.h>
15 #include <linux/errno.h>
16 #include <linux/hdreg.h>
17 #include <linux/ide.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/scatterlist.h>
24 #include <asm/ecard.h>
#define ICS_IDENT_OFFSET		0x2280

/* Version 5 PCB: single interface */
#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004
#define ICS_ARCIN_V5_IDEOFFSET		0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET	0x2b80
#define ICS_ARCIN_V5_IDESTEPPING	6

/* Version 6 PCB: two interfaces, each with its own IRQ mask/status */
#define ICS_ARCIN_V6_IDEOFFSET_1	0x2000
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2	0x3000
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0x3380
#define ICS_ARCIN_V6_IDESTEPPING	6
46 unsigned int dataoffset
;
47 unsigned int ctrloffset
;
48 unsigned int stepping
;
51 static struct cardinfo icside_cardinfo_v5
= {
52 .dataoffset
= ICS_ARCIN_V5_IDEOFFSET
,
53 .ctrloffset
= ICS_ARCIN_V5_IDEALTOFFSET
,
54 .stepping
= ICS_ARCIN_V5_IDESTEPPING
,
57 static struct cardinfo icside_cardinfo_v6_1
= {
58 .dataoffset
= ICS_ARCIN_V6_IDEOFFSET_1
,
59 .ctrloffset
= ICS_ARCIN_V6_IDEALTOFFSET_1
,
60 .stepping
= ICS_ARCIN_V6_IDESTEPPING
,
63 static struct cardinfo icside_cardinfo_v6_2
= {
64 .dataoffset
= ICS_ARCIN_V6_IDEOFFSET_2
,
65 .ctrloffset
= ICS_ARCIN_V6_IDEALTOFFSET_2
,
66 .stepping
= ICS_ARCIN_V6_IDESTEPPING
,
72 void __iomem
*irq_port
;
73 void __iomem
*ioc_base
;
75 /* parent device... until the IDE core gets one of its own */
#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
/* NOTE(review): ICS_TYPE_V6 restored — referenced by icside_probe/remove;
 * confirm the value against the original driver. */
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)
86 /* ---------------- Version 5 PCB Support Functions --------------------- */
87 /* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
88 * Purpose : enable interrupts from card
90 static void icside_irqenable_arcin_v5 (struct expansion_card
*ec
, int irqnr
)
92 struct icside_state
*state
= ec
->irq_data
;
94 writeb(0, state
->irq_port
+ ICS_ARCIN_V5_INTROFFSET
);
97 /* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
98 * Purpose : disable interrupts from card
100 static void icside_irqdisable_arcin_v5 (struct expansion_card
*ec
, int irqnr
)
102 struct icside_state
*state
= ec
->irq_data
;
104 readb(state
->irq_port
+ ICS_ARCIN_V5_INTROFFSET
);
107 static const expansioncard_ops_t icside_ops_arcin_v5
= {
108 .irqenable
= icside_irqenable_arcin_v5
,
109 .irqdisable
= icside_irqdisable_arcin_v5
,
113 /* ---------------- Version 6 PCB Support Functions --------------------- */
114 /* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
115 * Purpose : enable interrupts from card
117 static void icside_irqenable_arcin_v6 (struct expansion_card
*ec
, int irqnr
)
119 struct icside_state
*state
= ec
->irq_data
;
120 void __iomem
*base
= state
->irq_port
;
124 switch (state
->channel
) {
126 writeb(0, base
+ ICS_ARCIN_V6_INTROFFSET_1
);
127 readb(base
+ ICS_ARCIN_V6_INTROFFSET_2
);
130 writeb(0, base
+ ICS_ARCIN_V6_INTROFFSET_2
);
131 readb(base
+ ICS_ARCIN_V6_INTROFFSET_1
);
136 /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
137 * Purpose : disable interrupts from card
139 static void icside_irqdisable_arcin_v6 (struct expansion_card
*ec
, int irqnr
)
141 struct icside_state
*state
= ec
->irq_data
;
145 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
146 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
149 /* Prototype: icside_irqprobe(struct expansion_card *ec)
150 * Purpose : detect an active interrupt from card
152 static int icside_irqpending_arcin_v6(struct expansion_card
*ec
)
154 struct icside_state
*state
= ec
->irq_data
;
156 return readb(state
->irq_port
+ ICS_ARCIN_V6_INTRSTAT_1
) & 1 ||
157 readb(state
->irq_port
+ ICS_ARCIN_V6_INTRSTAT_2
) & 1;
160 static const expansioncard_ops_t icside_ops_arcin_v6
= {
161 .irqenable
= icside_irqenable_arcin_v6
,
162 .irqdisable
= icside_irqdisable_arcin_v6
,
163 .irqpending
= icside_irqpending_arcin_v6
,
167 * Handle routing of interrupts. This is called before
168 * we write the command to the drive.
170 static void icside_maskproc(ide_drive_t
*drive
, int mask
)
172 ide_hwif_t
*hwif
= HWIF(drive
);
173 struct icside_state
*state
= hwif
->hwif_data
;
176 local_irq_save(flags
);
178 state
->channel
= hwif
->channel
;
180 if (state
->enabled
&& !mask
) {
181 switch (hwif
->channel
) {
183 writeb(0, state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
184 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
187 writeb(0, state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
188 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
192 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
193 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
196 local_irq_restore(flags
);
199 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
201 #ifndef CONFIG_IDEDMA_ICS_AUTO
202 #warning CONFIG_IDEDMA_ICS_AUTO=n support is obsolete, and will be removed soon.
208 * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
209 * There is only one DMA controller per card, which means that only
210 * one drive can be accessed at one time. NOTE! We do not enforce that
211 * here, but we rely on the main IDE driver spotting that both
212 * interfaces use the same IRQ, which should guarantee this.
215 static void icside_build_sglist(ide_drive_t
*drive
, struct request
*rq
)
217 ide_hwif_t
*hwif
= drive
->hwif
;
218 struct icside_state
*state
= hwif
->hwif_data
;
219 struct scatterlist
*sg
= hwif
->sg_table
;
221 ide_map_sg(drive
, rq
);
223 if (rq_data_dir(rq
) == READ
)
224 hwif
->sg_dma_direction
= DMA_FROM_DEVICE
;
226 hwif
->sg_dma_direction
= DMA_TO_DEVICE
;
228 hwif
->sg_nents
= dma_map_sg(state
->dev
, sg
, hwif
->sg_nents
,
229 hwif
->sg_dma_direction
);
233 * Configure the IOMD to give the appropriate timings for the transfer
234 * mode being requested. We take the advice of the ATA standards, and
235 * calculate the cycle time based on the transfer mode, and the EIDE
236 * MW DMA specs that the drive provides in the IDENTIFY command.
238 * We have the following IOMD DMA modes to choose from:
240 * Type Active Recovery Cycle
241 * A 250 (250) 312 (550) 562 (800)
243 * C 125 (125) 125 (375) 250 (500)
246 * (figures in brackets are actual measured timings)
248 * However, we also need to take care of the read/write active and
252 * Mode Active -- Recovery -- Cycle IOMD type
253 * MW0 215 50 215 480 A
257 static int icside_set_speed(ide_drive_t
*drive
, u8 xfer_mode
)
259 int on
= 0, cycle_time
= 0, use_dma_info
= 0;
262 * Limit the transfer speed to MW_DMA_2.
264 if (xfer_mode
> XFER_MW_DMA_2
)
265 xfer_mode
= XFER_MW_DMA_2
;
290 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
291 * take care to note the values in the ID...
293 if (use_dma_info
&& drive
->id
->eide_dma_time
> cycle_time
)
294 cycle_time
= drive
->id
->eide_dma_time
;
296 drive
->drive_data
= cycle_time
;
298 if (cycle_time
&& ide_config_drive_speed(drive
, xfer_mode
) == 0)
301 drive
->drive_data
= 480;
303 printk("%s: %s selected (peak %dMB/s)\n", drive
->name
,
304 ide_xfer_verbose(xfer_mode
), 2000 / drive
->drive_data
);
306 drive
->current_speed
= xfer_mode
;
311 static int icside_dma_host_off(ide_drive_t
*drive
)
316 static int icside_dma_off_quietly(ide_drive_t
*drive
)
318 drive
->using_dma
= 0;
319 return icside_dma_host_off(drive
);
322 static int icside_dma_host_on(ide_drive_t
*drive
)
327 static int icside_dma_on(ide_drive_t
*drive
)
329 drive
->using_dma
= 1;
330 return icside_dma_host_on(drive
);
333 static int icside_dma_check(ide_drive_t
*drive
)
335 struct hd_driveid
*id
= drive
->id
;
336 ide_hwif_t
*hwif
= HWIF(drive
);
337 int xfer_mode
= XFER_PIO_2
;
340 if (!(id
->capability
& 1) || !hwif
->autodma
)
344 * Consult the list of known "bad" drives
346 if (__ide_dma_bad_drive(drive
))
350 * Enable DMA on any drive that has multiword DMA
352 if (id
->field_valid
& 2) {
353 xfer_mode
= ide_dma_speed(drive
, 0);
358 * Consult the list of known "good" drives
360 if (__ide_dma_good_drive(drive
)) {
361 if (id
->eide_dma_time
> 150)
363 xfer_mode
= XFER_MW_DMA_1
;
367 on
= icside_set_speed(drive
, xfer_mode
);
370 return icside_dma_on(drive
);
372 return icside_dma_off_quietly(drive
);
375 static int icside_dma_end(ide_drive_t
*drive
)
377 ide_hwif_t
*hwif
= HWIF(drive
);
378 struct icside_state
*state
= hwif
->hwif_data
;
380 drive
->waiting_for_dma
= 0;
382 disable_dma(hwif
->hw
.dma
);
384 /* Teardown mappings after DMA has completed. */
385 dma_unmap_sg(state
->dev
, hwif
->sg_table
, hwif
->sg_nents
,
386 hwif
->sg_dma_direction
);
388 return get_dma_residue(hwif
->hw
.dma
) != 0;
391 static void icside_dma_start(ide_drive_t
*drive
)
393 ide_hwif_t
*hwif
= HWIF(drive
);
395 /* We can not enable DMA on both channels simultaneously. */
396 BUG_ON(dma_channel_active(hwif
->hw
.dma
));
397 enable_dma(hwif
->hw
.dma
);
400 static int icside_dma_setup(ide_drive_t
*drive
)
402 ide_hwif_t
*hwif
= HWIF(drive
);
403 struct request
*rq
= hwif
->hwgroup
->rq
;
404 unsigned int dma_mode
;
407 dma_mode
= DMA_MODE_WRITE
;
409 dma_mode
= DMA_MODE_READ
;
412 * We can not enable DMA on both channels.
414 BUG_ON(dma_channel_active(hwif
->hw
.dma
));
416 icside_build_sglist(drive
, rq
);
419 * Ensure that we have the right interrupt routed.
421 icside_maskproc(drive
, 0);
424 * Route the DMA signals to the correct interface.
426 writeb(hwif
->select_data
, hwif
->config_data
);
429 * Select the correct timing for this drive.
431 set_dma_speed(hwif
->hw
.dma
, drive
->drive_data
);
434 * Tell the DMA engine about the SG table and
437 set_dma_sg(hwif
->hw
.dma
, hwif
->sg_table
, hwif
->sg_nents
);
438 set_dma_mode(hwif
->hw
.dma
, dma_mode
);
440 drive
->waiting_for_dma
= 1;
445 static void icside_dma_exec_cmd(ide_drive_t
*drive
, u8 cmd
)
447 /* issue cmd to drive */
448 ide_execute_command(drive
, cmd
, ide_dma_intr
, 2 * WAIT_CMD
, NULL
);
451 static int icside_dma_test_irq(ide_drive_t
*drive
)
453 ide_hwif_t
*hwif
= HWIF(drive
);
454 struct icside_state
*state
= hwif
->hwif_data
;
456 return readb(state
->irq_port
+
458 ICS_ARCIN_V6_INTRSTAT_2
:
459 ICS_ARCIN_V6_INTRSTAT_1
)) & 1;
462 static int icside_dma_timeout(ide_drive_t
*drive
)
464 printk(KERN_ERR
"%s: DMA timeout occurred: ", drive
->name
);
466 if (icside_dma_test_irq(drive
))
469 ide_dump_status(drive
, "DMA timeout",
470 HWIF(drive
)->INB(IDE_STATUS_REG
));
472 return icside_dma_end(drive
);
475 static int icside_dma_lostirq(ide_drive_t
*drive
)
477 printk(KERN_ERR
"%s: IRQ lost\n", drive
->name
);
481 static void icside_dma_init(ide_hwif_t
*hwif
)
485 #ifdef CONFIG_IDEDMA_ICS_AUTO
489 printk(" %s: SG-DMA", hwif
->name
);
492 hwif
->mwdma_mask
= 7; /* MW0..2 */
493 hwif
->swdma_mask
= 7; /* SW0..2 */
495 hwif
->dmatable_cpu
= NULL
;
496 hwif
->dmatable_dma
= 0;
497 hwif
->speedproc
= icside_set_speed
;
498 hwif
->autodma
= autodma
;
500 hwif
->ide_dma_check
= icside_dma_check
;
501 hwif
->ide_dma_host_off
= icside_dma_host_off
;
502 hwif
->ide_dma_off_quietly
= icside_dma_off_quietly
;
503 hwif
->ide_dma_host_on
= icside_dma_host_on
;
504 hwif
->ide_dma_on
= icside_dma_on
;
505 hwif
->dma_setup
= icside_dma_setup
;
506 hwif
->dma_exec_cmd
= icside_dma_exec_cmd
;
507 hwif
->dma_start
= icside_dma_start
;
508 hwif
->ide_dma_end
= icside_dma_end
;
509 hwif
->ide_dma_test_irq
= icside_dma_test_irq
;
510 hwif
->ide_dma_timeout
= icside_dma_timeout
;
511 hwif
->ide_dma_lostirq
= icside_dma_lostirq
;
513 hwif
->drives
[0].autodma
= hwif
->autodma
;
514 hwif
->drives
[1].autodma
= hwif
->autodma
;
516 printk(" capable%s\n", hwif
->autodma
? ", auto-enable" : "");
519 #define icside_dma_init(hwif) (0)
522 static ide_hwif_t
*icside_find_hwif(unsigned long dataport
)
527 for (index
= 0; index
< MAX_HWIFS
; ++index
) {
528 hwif
= &ide_hwifs
[index
];
529 if (hwif
->io_ports
[IDE_DATA_OFFSET
] == dataport
)
533 for (index
= 0; index
< MAX_HWIFS
; ++index
) {
534 hwif
= &ide_hwifs
[index
];
535 if (!hwif
->io_ports
[IDE_DATA_OFFSET
])
545 icside_setup(void __iomem
*base
, struct cardinfo
*info
, struct expansion_card
*ec
)
547 unsigned long port
= (unsigned long)base
+ info
->dataoffset
;
550 hwif
= icside_find_hwif(port
);
554 memset(&hwif
->hw
, 0, sizeof(hw_regs_t
));
557 * Ensure we're using MMIO
559 default_hwif_mmiops(hwif
);
562 for (i
= IDE_DATA_OFFSET
; i
<= IDE_STATUS_OFFSET
; i
++) {
563 hwif
->hw
.io_ports
[i
] = port
;
564 hwif
->io_ports
[i
] = port
;
565 port
+= 1 << info
->stepping
;
567 hwif
->hw
.io_ports
[IDE_CONTROL_OFFSET
] = (unsigned long)base
+ info
->ctrloffset
;
568 hwif
->io_ports
[IDE_CONTROL_OFFSET
] = (unsigned long)base
+ info
->ctrloffset
;
569 hwif
->hw
.irq
= ec
->irq
;
572 hwif
->chipset
= ide_acorn
;
573 hwif
->gendev
.parent
= &ec
->dev
;
580 icside_register_v5(struct icside_state
*state
, struct expansion_card
*ec
)
585 base
= ioremap(ecard_resource_start(ec
, ECARD_RES_MEMC
),
586 ecard_resource_len(ec
, ECARD_RES_MEMC
));
590 state
->irq_port
= base
;
592 ec
->irqaddr
= base
+ ICS_ARCIN_V5_INTRSTAT
;
594 ec
->irq_data
= state
;
595 ec
->ops
= &icside_ops_arcin_v5
;
598 * Be on the safe side - disable interrupts
600 icside_irqdisable_arcin_v5(ec
, 0);
602 hwif
= icside_setup(base
, &icside_cardinfo_v5
, ec
);
608 state
->hwif
[0] = hwif
;
610 probe_hwif_init(hwif
);
611 create_proc_ide_interfaces();
617 icside_register_v6(struct icside_state
*state
, struct expansion_card
*ec
)
619 ide_hwif_t
*hwif
, *mate
;
620 void __iomem
*ioc_base
, *easi_base
;
621 unsigned int sel
= 0;
624 ioc_base
= ioremap(ecard_resource_start(ec
, ECARD_RES_IOCFAST
),
625 ecard_resource_len(ec
, ECARD_RES_IOCFAST
));
631 easi_base
= ioc_base
;
633 if (ecard_resource_flags(ec
, ECARD_RES_EASI
)) {
634 easi_base
= ioremap(ecard_resource_start(ec
, ECARD_RES_EASI
),
635 ecard_resource_len(ec
, ECARD_RES_EASI
));
642 * Enable access to the EASI region.
647 writeb(sel
, ioc_base
);
649 ec
->irq_data
= state
;
650 ec
->ops
= &icside_ops_arcin_v6
;
652 state
->irq_port
= easi_base
;
653 state
->ioc_base
= ioc_base
;
656 * Be on the safe side - disable interrupts
658 icside_irqdisable_arcin_v6(ec
, 0);
661 * Find and register the interfaces.
663 hwif
= icside_setup(easi_base
, &icside_cardinfo_v6_1
, ec
);
664 mate
= icside_setup(easi_base
, &icside_cardinfo_v6_2
, ec
);
666 if (!hwif
|| !mate
) {
671 state
->hwif
[0] = hwif
;
672 state
->hwif
[1] = mate
;
674 hwif
->maskproc
= icside_maskproc
;
676 hwif
->hwif_data
= state
;
678 hwif
->serialized
= 1;
679 hwif
->config_data
= (unsigned long)ioc_base
;
680 hwif
->select_data
= sel
;
681 hwif
->hw
.dma
= ec
->dma
;
683 mate
->maskproc
= icside_maskproc
;
685 mate
->hwif_data
= state
;
687 mate
->serialized
= 1;
688 mate
->config_data
= (unsigned long)ioc_base
;
689 mate
->select_data
= sel
| 1;
690 mate
->hw
.dma
= ec
->dma
;
692 if (ec
->dma
!= NO_DMA
&& !request_dma(ec
->dma
, hwif
->name
)) {
693 icside_dma_init(hwif
);
694 icside_dma_init(mate
);
697 probe_hwif_init(hwif
);
698 probe_hwif_init(mate
);
699 create_proc_ide_interfaces();
704 if (easi_base
!= ioc_base
)
713 icside_probe(struct expansion_card
*ec
, const struct ecard_id
*id
)
715 struct icside_state
*state
;
719 ret
= ecard_request_resources(ec
);
723 state
= kmalloc(sizeof(struct icside_state
), GFP_KERNEL
);
729 memset(state
, 0, sizeof(state
));
730 state
->type
= ICS_TYPE_NOTYPE
;
731 state
->dev
= &ec
->dev
;
733 idmem
= ioremap(ecard_resource_start(ec
, ECARD_RES_IOCFAST
),
734 ecard_resource_len(ec
, ECARD_RES_IOCFAST
));
738 type
= readb(idmem
+ ICS_IDENT_OFFSET
) & 1;
739 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 4) & 1) << 1;
740 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 8) & 1) << 2;
741 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 12) & 1) << 3;
747 switch (state
->type
) {
749 dev_warn(&ec
->dev
, "A3IN unsupported\n");
753 case ICS_TYPE_A3USER
:
754 dev_warn(&ec
->dev
, "A3USER unsupported\n");
759 ret
= icside_register_v5(state
, ec
);
763 ret
= icside_register_v6(state
, ec
);
767 dev_warn(&ec
->dev
, "unknown interface type\n");
773 ecard_set_drvdata(ec
, state
);
779 ecard_release_resources(ec
);
784 static void __devexit
icside_remove(struct expansion_card
*ec
)
786 struct icside_state
*state
= ecard_get_drvdata(ec
);
788 switch (state
->type
) {
790 /* FIXME: tell IDE to stop using the interface */
792 /* Disable interrupts */
793 icside_irqdisable_arcin_v5(ec
, 0);
797 /* FIXME: tell IDE to stop using the interface */
798 if (ec
->dma
!= NO_DMA
)
801 /* Disable interrupts */
802 icside_irqdisable_arcin_v6(ec
, 0);
804 /* Reset the ROM pointer/EASI selection */
805 writeb(0, state
->ioc_base
);
809 ecard_set_drvdata(ec
, NULL
);
814 iounmap(state
->ioc_base
);
815 if (state
->ioc_base
!= state
->irq_port
)
816 iounmap(state
->irq_port
);
819 ecard_release_resources(ec
);
822 static void icside_shutdown(struct expansion_card
*ec
)
824 struct icside_state
*state
= ecard_get_drvdata(ec
);
828 * Disable interrupts from this card. We need to do
829 * this before disabling EASI since we may be accessing
830 * this register via that region.
832 local_irq_save(flags
);
833 ec
->ops
->irqdisable(ec
, 0);
834 local_irq_restore(flags
);
837 * Reset the ROM pointer so that we can read the ROM
838 * after a soft reboot. This also disables access to
839 * the IDE taskfile via the EASI region.
842 writeb(0, state
->ioc_base
);
845 static const struct ecard_id icside_ids
[] = {
846 { MANU_ICS
, PROD_ICS_IDE
},
847 { MANU_ICS2
, PROD_ICS2_IDE
},
851 static struct ecard_driver icside_driver
= {
852 .probe
= icside_probe
,
853 .remove
= __devexit_p(icside_remove
),
854 .shutdown
= icside_shutdown
,
855 .id_table
= icside_ids
,
861 static int __init
icside_init(void)
863 return ecard_register_driver(&icside_driver
);
866 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
867 MODULE_LICENSE("GPL");
868 MODULE_DESCRIPTION("ICS IDE driver");
870 module_init(icside_init
);