// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 *     migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 *     Blizzard 1230 DMA and probe function fixes
 *
 * ZORRO bus code from:
 *
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 *	Amiga MacroSystemUS WarpEngine SCSI controller.
 *	Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"
MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
MODULE_DESCRIPTION("Amiga Zorro NCR5C9x (ESP) driver");
MODULE_LICENSE("GPL");
/* per-board register layout definitions */

/* Blizzard 1230 DMA interface */

struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch        [0x8000] */
};
/* Blizzard 1230II DMA interface */

struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch        [0x0010] */
};
/* Blizzard 2060 DMA interface */

struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control   [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address       [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address       [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};
/* DMA control bits */
#define DMA_WRITE 0x80000000
/* Cyberstorm DMA interface */

struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address       [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address       [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x402] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x402] */
};
/* DMA control bits */
#define CYBER_DMA_WRITE 0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3    0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */
/* The CyberStorm II DMA interface */

struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x000] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address       [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address       [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};
/* Fastlane DMA interface */

struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status  (ro) [0x0000] */
#define ctrl_reg  cond_reg		/* DMA control (wo) [0x0000] */
	unsigned char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear   (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR 0x1000001
/* DMA status bits */
#define FASTLANE_DMA_MINT	0x80
#define FASTLANE_DMA_IACT	0x40
#define FASTLANE_DMA_CREQ	0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE	0xa0
#define FASTLANE_DMA_MASK	0xf3
#define FASTLANE_DMA_WRITE	0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE	0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI	0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI	0x01	/* Enable SCSI IRQ */
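/*
 * The DMA control register shares its address with the read-only
 * condition register (see the #define ctrl_reg aliases above), so it
 * cannot be read back; the driver keeps a shadow copy of the last value
 * written in zorro_esp_priv.ctrl_data below.
 */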
/*
 * private data used for driver
 */
struct zorro_esp_priv {
	struct esp *esp;		/* our ESP instance - for Scsi_host* */
	void __iomem *board_base;	/* virtual address (Zorro III board) */
	int zorro3;			/* board is Zorro III */
	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
};
/*
 * On all implementations except for the Oktagon, padding between ESP
 * registers is three bytes.
 * On Oktagon, it is one byte - use a different accessor there.
 *
 * Oktagon needs PDMA - currently unsupported!
 */

static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}

static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
{
	return readb(esp->regs + (reg * 4UL));
}
static int zorro_esp_irq_pending(struct esp *esp)
{
	/* check ESP status register; DMA has no status reg. */
	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
		return 1;

	return 0;
}
static int cyber_esp_irq_pending(struct esp *esp)
{
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status = readb(&dregs->cond_reg);

	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
			(dma_status & CYBER_DMA_HNDL_INTR));
}
static int fastlane_esp_irq_pending(struct esp *esp)
{
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status;

	dma_status = readb(&dregs->cond_reg);

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
	   (dma_status & FASTLANE_DMA_CREQ) &&
	   (!(dma_status & FASTLANE_DMA_MINT)) &&
	   (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
}
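/*
 * The send_dma_cmd routines below program only the 16-bit ESP transfer
 * counter (ESP_TCLOW/ESP_TCMED), so a single DMA transfer is capped at
 * 64 KiB here.
 */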
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	return dma_len > (1U << 16) ? (1U << 16) : dma_len;
}
static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	/* The old driver used 0xfffc as limit, so do that here too */
	return dma_len > 0xfffc ? 0xfffc : dma_len;
}
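/*
 * Most boards use the no-op DMA hooks below; only the Fastlane
 * implements dma_invalidate, which clears the control-register shadow
 * and the DMA address latch.
 */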
static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}
static void fastlane_esp_dma_invalidate(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char *ctrl_data = &zep->ctrl_data;

	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
	writeb(0, &dregs->clear_strobe);
	z_writel(0, zep->board_base);
}
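/*
 * Per-board send_dma_cmd implementations. Each one falls back to PIO
 * (esp_send_pio_cmd) when message bytes are destined for
 * esp->command_block_dma, and otherwise programs the board's DMA
 * address/direction registers and the ESP transfer counter before
 * issuing the command.
 */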
/* Blizzard 1230/60 SCSI-IV DMA */

static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}
/* Blizzard 1230-II DMA */

static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}
/* Blizzard 2060 DMA */

static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}
/* Cyberstorm I DMA */

static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}
/* Cyberstorm II DMA */

static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}
/* Fastlane DMA */

static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE |
				FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}
static int zorro_esp_dma_error(struct esp *esp)
{
	return esp->send_cmd_error;
}
/* per-board ESP driver ops */

static const struct esp_driver_ops blz1230_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};
static const struct esp_driver_ops blz1230II_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230II_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};
static const struct esp_driver_ops blz2060_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz2060_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};
static const struct esp_driver_ops cyber_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= cyber_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyber_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};
static const struct esp_driver_ops cyberII_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyberII_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};
static const struct esp_driver_ops fastlane_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= fastlane_esp_irq_pending,
	.dma_length_limit	= fastlane_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= fastlane_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_fastlane_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};
/* Zorro driver config data */

struct zorro_driver_data {
	const char *name;
	unsigned long offset;
	unsigned long dma_offset;
	int absolute;	/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};
/* board types - used as indices into zorro_esp_boards[] */

enum {
	ZORRO_BLZ1230,
	ZORRO_BLZ1230II,
	ZORRO_BLZ2060,
	ZORRO_CYBER,
	ZORRO_CYBERII,
	ZORRO_FASTLANE,
};

/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
		.name		= "Blizzard 1230",
		.dma_offset	= 0x10000,
		.scsi_option	= 1,
		.esp_ops	= &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
		.name		= "Blizzard 1230II",
		.dma_offset	= 0x10021,
		.scsi_option	= 1,
		.esp_ops	= &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
		.name		= "Blizzard 2060",
		.dma_offset	= 0x1ffe0,
		.esp_ops	= &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
		.name		= "CyberStormI",
		.dma_offset	= 0xf800,
		.esp_ops	= &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
		.name		= "CyberStormII",
		.dma_offset	= 0x1ff43,
		.scsi_option	= 1,
		.esp_ops	= &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
		.name		= "Fastlane",
		.dma_offset	= 0x1000041,
		.esp_ops	= &fastlane_esp_ops,
	},
};
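/*
 * Zorro device IDs; driver_data holds the board-type index used to
 * look up the matching zorro_esp_boards[] entry in the probe function.
 */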
static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
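/*
 * Probe: identify the board variant, map its ESP and DMA registers,
 * allocate the DMA command block and register with the ESP SCSI core.
 */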
static int zorro_esp_probe(struct zorro_dev *z,
			   const struct zorro_device_id *ent)
{
	const struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	const struct zorro_driver_data *zdd;
	struct zorro_esp_priv *zep;
	unsigned long board, ioaddr, dmaaddr;
	int err;

	board = zorro_resource_start(z);
	zdd = &zorro_esp_boards[ent->driver_data];

	pr_info("%s found at address 0x%lx.\n", zdd->name, board);

	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
	if (!zep) {
		pr_err("Can't allocate device private data!\n");
		return -ENOMEM;
	}
	/* let's figure out whether we have a Zorro II or Zorro III board */
	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
		if (board > 0xffffff)
			zep->zorro3 = 1;
	} else {
		/*
		 * Even though most of these boards identify as Zorro II,
		 * they are in fact CPU expansion slot boards and have full
		 * access to all of memory. Fix up DMA bitmask here.
		 */
		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/*
	 * If Zorro III and ID matches Fastlane, our device table entry
	 * contains data for the Blizzard 1230 II board which does share the
	 * same ID. Fix up device table entry here.
	 * TODO: Some Cyberstorm060 boards also share this ID but would need
	 * to use the Cyberstorm I driver data ... we catch this by checking
	 * for presence of ESP chip later, but don't try to fix up yet.
	 */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
			zdd->name, board);
		zdd = &zorro_esp_boards[ZORRO_FASTLANE];
	}
	if (zdd->absolute) {
		ioaddr  = zdd->offset;
		dmaaddr = zdd->dma_offset;
	} else {
		ioaddr  = board + zdd->offset;
		dmaaddr = board + zdd->dma_offset;
	}

	if (!zorro_request_device(z, zdd->name)) {
		pr_err("cannot reserve region 0x%lx, abort\n",
		       board);
		err = -EBUSY;
		goto fail_free_zep;
	}

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	if (!host) {
		pr_err("No host detected; board configuration problem?\n");
		err = -ENOMEM;
		goto fail_release_device;
	}

	host->base = ioaddr;
	host->this_id = 7;
	esp = shost_priv(host);
	esp->host = host;
	esp->dev = &z->dev;

	esp->scsi_id = host->this_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);

	esp->cfreq = 40000000;

	zep->esp = esp;

	dev_set_drvdata(esp->dev, zep);
	/* additional setup required for Fastlane */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		/* map full address space up to ESP base for DMA */
		zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
		if (!zep->board_base) {
			pr_err("Cannot allocate board address space\n");
			err = -ENOMEM;
			goto fail_free_host;
		}
		/* initialize DMA control shadow register */
		zep->ctrl_data = (FASTLANE_DMA_FCODE |
				  FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
	}
	esp->ops = zdd->esp_ops;

	if (ioaddr > 0xffffff)
		esp->regs = ioremap(ioaddr, 0x20);
	else
		/* ZorroII address space remapped nocache by early startup */
		esp->regs = ZTWO_VADDR(ioaddr);

	if (!esp->regs) {
		err = -ENOMEM;
		goto fail_unmap_fastlane;
	}

	esp->fifo_reg = esp->regs + ESP_FDATA * 4;
	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
	if (zdd->scsi_option) {
		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
		if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
			err = -ENODEV;
			goto fail_unmap_regs;
		}
	}
	if (zep->zorro3) {
		/*
		 * Only Fastlane Z3 for now - add switch for correct struct
		 * dma_registers size if adding any more
		 */
		esp->dma_regs = ioremap(dmaaddr,
				sizeof(struct fastlane_dma_registers));
	} else
		/* ZorroII address space remapped nocache by early startup */
		esp->dma_regs = ZTWO_VADDR(dmaaddr);

	if (!esp->dma_regs) {
		err = -ENOMEM;
		goto fail_unmap_regs;
	}
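	/*
	 * DMA-coherent command block; the PIO fall-back in the send_dma_cmd
	 * routines above needs both its virtual (esp->command_block) and bus
	 * (esp->command_block_dma) addresses.
	 */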
	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);

	if (!esp->command_block) {
		err = -ENOMEM;
		goto fail_unmap_dma_regs;
	}
	host->irq = IRQ_AMIGA_PORTS;
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "Amiga Zorro ESP", esp);
	if (err < 0) {
		err = -ENODEV;
		goto fail_free_command_block;
	}
	/* register the chip */
	err = scsi_esp_register(esp);

	if (err) {
		err = -ENOMEM;
		goto fail_free_irq;
	}

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);

fail_free_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

fail_unmap_dma_regs:
	if (zep->zorro3)
		iounmap(esp->dma_regs);

fail_unmap_regs:
	if (ioaddr > 0xffffff)
		iounmap(esp->regs);

fail_unmap_fastlane:
	if (zep->zorro3)
		iounmap(zep->board_base);

fail_free_host:
	scsi_host_put(host);

fail_release_device:
	zorro_release_device(z);

fail_free_zep:
	kfree(zep);

	return err;
}
static void zorro_esp_remove(struct zorro_dev *z)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
	struct esp *esp = zep->esp;
	struct Scsi_Host *host = esp->host;

	scsi_esp_unregister(esp);

	free_irq(host->irq, esp);
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	if (zep->zorro3) {
		iounmap(zep->board_base);
		iounmap(esp->dma_regs);
	}

	if (host->base > 0xffffff)
		iounmap(esp->regs);

	scsi_host_put(host);

	zorro_release_device(z);
	kfree(zep);
}
static struct zorro_driver zorro_esp_driver = {
	.name	  = KBUILD_MODNAME,
	.id_table = zorro_esp_zorro_tbl,
	.probe	  = zorro_esp_probe,
	.remove	  = zorro_esp_remove,
};
static int __init zorro_esp_scsi_init(void)
{
	return zorro_register_driver(&zorro_esp_driver);
}

static void __exit zorro_esp_scsi_exit(void)
{
	zorro_unregister_driver(&zorro_esp_driver);
}

module_init(zorro_esp_scsi_init);
module_exit(zorro_esp_scsi_exit);