/*
 * linux/drivers/ide/arm/icside.c
 *
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <asm/dma.h>
#include <asm/ecard.h>
#define ICS_IDENT_OFFSET		0x2280

#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004
#define ICS_ARCIN_V5_IDEOFFSET		0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET	0x2b80
#define ICS_ARCIN_V5_IDESTEPPING	6

#define ICS_ARCIN_V6_IDEOFFSET_1	0x2000
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2	0x3000
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0x3380
#define ICS_ARCIN_V6_IDESTEPPING	6
struct cardinfo {
	unsigned int dataoffset;
	unsigned int ctrloffset;
	unsigned int stepping;
};

static struct cardinfo icside_cardinfo_v5 = {
	.dataoffset	= ICS_ARCIN_V5_IDEOFFSET,
	.ctrloffset	= ICS_ARCIN_V5_IDEALTOFFSET,
	.stepping	= ICS_ARCIN_V5_IDESTEPPING,
};
static struct cardinfo icside_cardinfo_v6_1 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_1,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_1,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_2 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_2,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_2,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};
struct icside_state {
	unsigned int channel;
	unsigned int enabled;
	void __iomem *irq_port;
	void __iomem *ioc_base;
	unsigned int type;
	/* parent device... until the IDE core gets one of its own */
	struct device *dev;
	ide_hwif_t *hwif[2];
};

#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)
/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5(struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5(struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
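
/*
 * As the two handlers above suggest, interrupt delivery on the v5 card is
 * controlled by the access type on ICS_ARCIN_V5_INTROFFSET: a write to that
 * offset enables the card interrupt, while a read of it disables it.
 */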
static const expansioncard_ops_t icside_ops_arcin_v5 = {
	.irqenable	= icside_irqenable_arcin_v5,
	.irqdisable	= icside_irqdisable_arcin_v5,
};
/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6(struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	state->enabled = 1;

	switch (state->channel) {
	case 0:
		/* Enable the first interface, mask the second. */
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
		readb(base + ICS_ARCIN_V6_INTROFFSET_2);
		break;
	case 1:
		/* Enable the second interface, mask the first. */
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
		readb(base + ICS_ARCIN_V6_INTROFFSET_1);
		break;
	}
}
/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6(struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	state->enabled = 0;

	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
/* Prototype: icside_irqprobe(struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
	struct icside_state *state = ec->irq_data;

	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}
static const expansioncard_ops_t icside_ops_arcin_v6 = {
	.irqenable	= icside_irqenable_arcin_v6,
	.irqdisable	= icside_irqdisable_arcin_v6,
	.irqpending	= icside_irqpending_arcin_v6,
};
/*
 * Handle routing of interrupts.  This is called before
 * we write the command to the drive.
 */
static void icside_maskproc(ide_drive_t *drive, int mask)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;
	unsigned long flags;

	local_irq_save(flags);

	state->channel = hwif->channel;

	if (state->enabled && !mask) {
		switch (hwif->channel) {
		case 0:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			break;
		case 1:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			break;
		}
	} else {
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time.  NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */
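
/*
 * The "one DMA controller per card" constraint described above is asserted
 * further down: both icside_dma_setup() and icside_dma_start() BUG_ON() if
 * the card's DMA channel is already active.
 */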
static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct icside_state *state = hwif->hwif_data;
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	hwif->sg_nents = dma_map_sg(state->dev, sg, hwif->sg_nents,
				    hwif->sg_dma_direction);
}
/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested.  We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *	Type	Active		Recovery	Cycle
 *	A	250 (250)	312 (550)	562 (800)
 *	C	125 (125)	125 (375)	250 (500)
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *	Mode	Active	-- Recovery --	Cycle	IOMD type
 *	MW0	215	50	215	480	A
 */
static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
{
	int cycle_time, use_dma_info = 0;

	/* (the mode-specific selection of cycle_time and use_dma_info is
	 * not reproduced here) */

	/*
	 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
	 * take care to note the values in the ID...
	 */
	if (use_dma_info && drive->id->eide_dma_time > cycle_time)
		cycle_time = drive->id->eide_dma_time;

	drive->drive_data = cycle_time;

	printk("%s: %s selected (peak %dMB/s)\n", drive->name,
		ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
}
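
/*
 * A minimal sketch (not taken from this driver) of how the xfer_mode to
 * cycle_time selection omitted above could look, based on the IOMD table in
 * the comment.  The helper name and the exact figures are assumptions made
 * for illustration only.
 */
#if 0
static unsigned int icside_example_cycle_time(u8 xfer_mode)
{
	switch (xfer_mode) {
	case XFER_MW_DMA_2:
	case XFER_MW_DMA_1:
		return 250;	/* assumed: IOMD type C cycle (ns) */
	case XFER_MW_DMA_0:
	default:
		return 480;	/* assumed: IOMD type A cycle (ns) */
	}
}
#endif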
static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}
static int icside_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;

	drive->waiting_for_dma = 0;

	disable_dma(ECARD_DEV(state->dev)->dma);

	/* Teardown mappings after DMA has completed. */
	dma_unmap_sg(state->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);

	return get_dma_residue(ECARD_DEV(state->dev)->dma) != 0;
}
static void icside_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;

	/* We can not enable DMA on both channels simultaneously. */
	BUG_ON(dma_channel_active(ECARD_DEV(state->dev)->dma));
	enable_dma(ECARD_DEV(state->dev)->dma);
}
static int icside_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;
	struct request *rq = hwif->hwgroup->rq;
	unsigned int dma_mode;

	if (rq_data_dir(rq))
		dma_mode = DMA_MODE_WRITE;
	else
		dma_mode = DMA_MODE_READ;

	/*
	 * We can not enable DMA on both channels.
	 */
	BUG_ON(dma_channel_active(ECARD_DEV(state->dev)->dma));

	icside_build_sglist(drive, rq);

	/*
	 * Ensure that we have the right interrupt routed.
	 */
	icside_maskproc(drive, 0);

	/*
	 * Route the DMA signals to the correct interface.
	 */
	writeb(hwif->select_data, hwif->config_data);

	/*
	 * Select the correct timing for this drive.
	 */
	set_dma_speed(ECARD_DEV(state->dev)->dma, drive->drive_data);

	/*
	 * Tell the DMA engine about the SG table and
	 * data direction.
	 */
	set_dma_sg(ECARD_DEV(state->dev)->dma, hwif->sg_table, hwif->sg_nents);
	set_dma_mode(ECARD_DEV(state->dev)->dma, dma_mode);

	drive->waiting_for_dma = 1;

	return 0;
}
static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd)
{
	/* issue cmd to drive */
	ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD, NULL);
}
static int icside_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct icside_state *state = hwif->hwif_data;

	return readb(state->irq_port +
		     (hwif->channel ?
			ICS_ARCIN_V6_INTRSTAT_2 :
			ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}
static void icside_dma_timeout(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (icside_dma_test_irq(drive))
		return;

	ide_dump_status(drive, "DMA timeout", HWIF(drive)->INB(IDE_STATUS_REG));

	icside_dma_end(drive);
}
static void icside_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}
static void icside_dma_init(ide_hwif_t *hwif)
{
	hwif->mwdma_mask	= 7; /* MW0..2 */
	hwif->swdma_mask	= 7; /* SW0..2 */

	hwif->dmatable_cpu	= NULL;
	hwif->dmatable_dma	= 0;
	hwif->set_dma_mode	= icside_set_dma_mode;

	hwif->dma_host_set	= icside_dma_host_set;
	hwif->dma_setup		= icside_dma_setup;
	hwif->dma_exec_cmd	= icside_dma_exec_cmd;
	hwif->dma_start		= icside_dma_start;
	hwif->ide_dma_end	= icside_dma_end;
	hwif->ide_dma_test_irq	= icside_dma_test_irq;
	hwif->dma_timeout	= icside_dma_timeout;
	hwif->dma_lost_irq	= icside_dma_lost_irq;
}
#else
#define icside_dma_init(hwif)	(0)
#endif
static ide_hwif_t *
icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
{
	unsigned long port = (unsigned long)base + info->dataoffset;
	ide_hwif_t *hwif;

	hwif = ide_find_port(port);
	if (hwif) {
		int i;

		/*
		 * Ensure we're using MMIO
		 */
		default_hwif_mmiops(hwif);

		for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
			hwif->io_ports[i] = port;
			port += 1 << info->stepping;
		}
		hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
		hwif->chipset = ide_acorn;
		hwif->gendev.parent = &ec->dev;
	}

	return hwif;
}
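
/*
 * For reference: with info->stepping == 6 (ICS_ARCIN_V5_IDESTEPPING and
 * ICS_ARCIN_V6_IDESTEPPING above), the loop in icside_setup() places
 * consecutive taskfile registers 1 << 6 == 64 bytes apart in the card's
 * memory window.
 */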
static int __init
icside_register_v5(struct icside_state *state, struct expansion_card *ec)
{
	ide_hwif_t *hwif;
	void __iomem *base;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };

	base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;

	ecard_setirq(ec, &icside_ops_arcin_v5, state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v5(ec, 0);

	hwif = icside_setup(base, &icside_cardinfo_v5, ec);
	if (!hwif)
		return -ENODEV;

	state->hwif[0] = hwif;

	idx[0] = hwif->index;

	ide_device_add(idx);

	return 0;
}
static int __init
icside_register_v6(struct icside_state *state, struct expansion_card *ec)
{
	ide_hwif_t *hwif, *mate;
	void __iomem *ioc_base, *easi_base;
	unsigned int sel = 0;
	int ret;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	ecard_setirq(ec, &icside_ops_arcin_v6, state);

	state->irq_port = easi_base;
	state->ioc_base = ioc_base;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v6(ec, 0);

	/*
	 * Find and register the interfaces.
	 */
	hwif = icside_setup(easi_base, &icside_cardinfo_v6_1, ec);
	mate = icside_setup(easi_base, &icside_cardinfo_v6_2, ec);

	if (!hwif || !mate) {
		ret = -ENODEV;
		goto out;
	}

	state->hwif[0]    = hwif;
	state->hwif[1]    = mate;

	hwif->maskproc    = icside_maskproc;
	hwif->channel     = 0;
	hwif->hwif_data   = state;
	hwif->serialized  = 1;
	hwif->config_data = (unsigned long)ioc_base;
	hwif->select_data = sel;

	mate->maskproc    = icside_maskproc;
	mate->channel     = 1;
	mate->hwif_data   = state;
	mate->serialized  = 1;
	mate->config_data = (unsigned long)ioc_base;
	mate->select_data = sel | 1;

	if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
		icside_dma_init(hwif);
		icside_dma_init(mate);
	}

	idx[0] = hwif->index;
	idx[1] = mate->index;

	ide_device_add(idx);

	return 0;

 out:
	return ret;
}
static int __devinit
icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct icside_state *state;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type	= ICS_TYPE_NOTYPE;
	state->dev	= &ec->dev;

	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}
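
	/*
	 * Each of the four ID reads above contributes one bit of the
	 * interface type; if, say, all four locations read back 1, the
	 * type is 0xf == 15, which is ICS_TYPE_V5.
	 */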
	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = icside_register_v5(state, ec);
		break;

	case ICS_TYPE_V6:
		ret = icside_register_v6(state, ec);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0) {
		ecard_set_drvdata(ec, state);
		goto out;
	}

	kfree(state);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}
static void __devexit icside_remove(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);

	switch (state->type) {
	case ICS_TYPE_V5:
		/* FIXME: tell IDE to stop using the interface */

		/* Disable interrupts */
		icside_irqdisable_arcin_v5(ec, 0);
		break;

	case ICS_TYPE_V6:
		/* FIXME: tell IDE to stop using the interface */
		if (ec->dma != NO_DMA)
			free_dma(ec->dma);

		/* Disable interrupts */
		icside_irqdisable_arcin_v6(ec, 0);

		/* Reset the ROM pointer/EASI selection */
		writeb(0, state->ioc_base);
		break;
	}

	ecard_set_drvdata(ec, NULL);

	kfree(state);
	ecard_release_resources(ec);
}
static void icside_shutdown(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card.  We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, 0);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot.  This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (state->ioc_base)
		writeb(0, state->ioc_base);
}
static const struct ecard_id icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE  },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};
static struct ecard_driver icside_driver = {
	.probe		= icside_probe,
	.remove		= __devexit_p(icside_remove),
	.shutdown	= icside_shutdown,
	.id_table	= icside_ids,
};
static int __init icside_init(void)
{
	return ecard_register_driver(&icside_driver);
}
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);