Linux 4.19.133
[linux/fpc-iii.git] / drivers / scsi / zorro_esp.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ESP front-end for Amiga ZORRO SCSI systems.
5 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
7 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
8 * migration to ESP SCSI core
10 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
11 * Blizzard 1230 DMA and probe function fixes
13 * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
16 * ZORRO bus code from:
19 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
20 * Amiga MacroSystemUS WarpEngine SCSI controller.
21 * Amiga Technologies/DKB A4091 SCSI controller.
23 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
24 * plus modifications of the 53c7xx.c driver to support the Amiga.
26 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
27 */
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/scatterlist.h>
36 #include <linux/delay.h>
37 #include <linux/zorro.h>
38 #include <linux/slab.h>
40 #include <asm/page.h>
41 #include <asm/pgtable.h>
42 #include <asm/cacheflush.h>
43 #include <asm/amigahw.h>
44 #include <asm/amigaints.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_transport_spi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_tcq.h>
51 #include "esp_scsi.h"
53 MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
54 MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
55 MODULE_LICENSE("GPL");
57 /* per-board register layout definitions */
59 /* Blizzard 1230 DMA interface */
61 struct blz1230_dma_registers {
62 unsigned char dma_addr; /* DMA address [0x0000] */
63 unsigned char dmapad2[0x7fff];
64 unsigned char dma_latch; /* DMA latch [0x8000] */
67 /* Blizzard 1230II DMA interface */
69 struct blz1230II_dma_registers {
70 unsigned char dma_addr; /* DMA address [0x0000] */
71 unsigned char dmapad2[0xf];
72 unsigned char dma_latch; /* DMA latch [0x0010] */
75 /* Blizzard 2060 DMA interface */
77 struct blz2060_dma_registers {
78 unsigned char dma_led_ctrl; /* DMA led control [0x000] */
79 unsigned char dmapad1[0x0f];
80 unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
81 unsigned char dmapad2[0x03];
82 unsigned char dma_addr1; /* DMA address [0x014] */
83 unsigned char dmapad3[0x03];
84 unsigned char dma_addr2; /* DMA address [0x018] */
85 unsigned char dmapad4[0x03];
86 unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
89 /* DMA control bits */
90 #define DMA_WRITE 0x80000000
92 /* Cyberstorm DMA interface */
94 struct cyber_dma_registers {
95 unsigned char dma_addr0; /* DMA address (MSB) [0x000] */
96 unsigned char dmapad1[1];
97 unsigned char dma_addr1; /* DMA address [0x002] */
98 unsigned char dmapad2[1];
99 unsigned char dma_addr2; /* DMA address [0x004] */
100 unsigned char dmapad3[1];
101 unsigned char dma_addr3; /* DMA address (LSB) [0x006] */
102 unsigned char dmapad4[0x3fb];
103 unsigned char cond_reg; /* DMA cond (ro) [0x402] */
104 #define ctrl_reg cond_reg /* DMA control (wo) [0x402] */
107 /* DMA control bits */
108 #define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
109 #define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
111 /* DMA status bits */
112 #define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
114 /* The CyberStorm II DMA interface */
115 struct cyberII_dma_registers {
116 unsigned char cond_reg; /* DMA cond (ro) [0x000] */
117 #define ctrl_reg cond_reg /* DMA control (wo) [0x000] */
118 unsigned char dmapad4[0x3f];
119 unsigned char dma_addr0; /* DMA address (MSB) [0x040] */
120 unsigned char dmapad1[3];
121 unsigned char dma_addr1; /* DMA address [0x044] */
122 unsigned char dmapad2[3];
123 unsigned char dma_addr2; /* DMA address [0x048] */
124 unsigned char dmapad3[3];
125 unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */
128 /* Fastlane DMA interface */
130 struct fastlane_dma_registers {
131 unsigned char cond_reg; /* DMA status (ro) [0x0000] */
132 #define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
133 char dmapad1[0x3f];
134 unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
137 /*
138 * The controller registers can be found in the Z2 config area at these
139 * offsets:
140 */
141 #define FASTLANE_ESP_ADDR 0x1000001
143 /* DMA status bits */
144 #define FASTLANE_DMA_MINT 0x80
145 #define FASTLANE_DMA_IACT 0x40
146 #define FASTLANE_DMA_CREQ 0x20
148 /* DMA control bits */
149 #define FASTLANE_DMA_FCODE 0xa0
150 #define FASTLANE_DMA_MASK 0xf3
151 #define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
152 #define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
153 #define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
154 #define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
156 /*
157 * private data used for driver
158 */
159 struct zorro_esp_priv {
160 struct esp *esp; /* our ESP instance - for Scsi_host* */
161 void __iomem *board_base; /* virtual address (Zorro III board) */
162 int error; /* PIO error flag */
163 int zorro3; /* board is Zorro III */
164 unsigned char ctrl_data; /* shadow copy of ctrl_reg */
167 /*
168 * On all implementations except for the Oktagon, padding between ESP
169 * registers is three bytes.
170 * On Oktagon, it is one byte - use a different accessor there.
171 *
172 * Oktagon needs PDMA - currently unsupported!
173 */
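/*
 * Given the three bytes of padding described above, ESP register index
 * N sits at byte offset N * 4 from esp->regs, which is what the two
 * accessors below encode.  Illustrative use, as seen later in this driver:
 *
 *	zorro_esp_write8(esp, ESP_CONFIG1_PENABLE | 7, ESP_CFG1);
 *	sreg = zorro_esp_read8(esp, ESP_STATUS);
 */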
175 static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
177 writeb(val, esp->regs + (reg * 4UL));
180 static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
182 return readb(esp->regs + (reg * 4UL));
185 static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
186 size_t sz, int dir)
188 return dma_map_single(esp->dev, buf, sz, dir);
191 static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
192 int num_sg, int dir)
194 return dma_map_sg(esp->dev, sg, num_sg, dir);
197 static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
198 size_t sz, int dir)
200 dma_unmap_single(esp->dev, addr, sz, dir);
203 static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
204 int num_sg, int dir)
206 dma_unmap_sg(esp->dev, sg, num_sg, dir);
209 static int zorro_esp_irq_pending(struct esp *esp)
211 /* check ESP status register; DMA has no status reg. */
212 if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
213 return 1;
215 return 0;
218 static int cyber_esp_irq_pending(struct esp *esp)
220 struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
221 unsigned char dma_status = readb(&dregs->cond_reg);
223 /* It's important to check the DMA IRQ bit in the correct way! */
224 return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
225 (dma_status & CYBER_DMA_HNDL_INTR));
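/*
 * Fastlane IRQ check below: if FASTLANE_DMA_IACT is set the interrupt
 * was not raised by this board; otherwise an IRQ is reported as pending
 * only when FASTLANE_DMA_CREQ is set, FASTLANE_DMA_MINT is clear and
 * the ESP itself flags an interrupt in its status register.
 */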
228 static int fastlane_esp_irq_pending(struct esp *esp)
230 struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
231 unsigned char dma_status;
233 dma_status = readb(&dregs->cond_reg);
235 if (dma_status & FASTLANE_DMA_IACT)
236 return 0; /* not our IRQ */
238 /* Return non-zero if ESP requested IRQ */
239 return (
240 (dma_status & FASTLANE_DMA_CREQ) &&
241 (!(dma_status & FASTLANE_DMA_MINT)) &&
242 (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
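/*
 * The ESP transfer counter loaded via ESP_TCLOW/ESP_TCMED in the DMA
 * setup routines further down is 16 bits wide, hence
 * zorro_esp_dma_length_limit() below caps a single transfer at
 * 1 << 16 bytes; the Fastlane variant keeps the old driver's 0xfffc
 * limit instead.
 */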
245 static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
246 u32 dma_len)
248 return dma_len > (1U << 16) ? (1U << 16) : dma_len;
251 static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
252 u32 dma_len)
254 /* The old driver used 0xfffc as limit, so do that here too */
255 return dma_len > 0xfffc ? 0xfffc : dma_len;
258 static void zorro_esp_reset_dma(struct esp *esp)
260 /* nothing to do here */
263 static void zorro_esp_dma_drain(struct esp *esp)
265 /* nothing to do here */
268 static void zorro_esp_dma_invalidate(struct esp *esp)
270 /* nothing to do here */
273 static void fastlane_esp_dma_invalidate(struct esp *esp)
275 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
276 struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
277 unsigned char *ctrl_data = &zep->ctrl_data;
279 *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
280 writeb(0, &dregs->clear_strobe);
281 z_writel(0, zep->board_base);
284 /*
285 * Programmed IO routines follow.
286 */
288 static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
290 int i = 500000;
292 do {
293 unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
294 & ESP_FF_FBYTES;
296 if (fbytes)
297 return fbytes;
299 udelay(2);
300 } while (--i);
302 pr_err("FIFO is empty (sreg %02x)\n",
303 zorro_esp_read8(esp, ESP_STATUS));
304 return 0;
307 static inline int zorro_esp_wait_for_intr(struct esp *esp)
309 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
310 int i = 500000;
312 do {
313 esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
314 if (esp->sreg & ESP_STAT_INTR)
315 return 0;
317 udelay(2);
318 } while (--i);
320 pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
321 zep->error = 1;
322 return 1;
325 /*
326 * PIO macros as used in mac_esp.c.
327 * Note that addr and fifo arguments are local-scope variables declared
328 * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
329 * and addr and fifo are referenced in each use of the macros so there
330 * is no need to pass them as macro parameters.
331 */
332 #define ZORRO_ESP_PIO_LOOP(operands, reg1) \
333 asm volatile ( \
334 "1: moveb " operands "\n" \
335 " subqw #1,%1 \n" \
336 " jbne 1b \n" \
337 : "+a" (addr), "+r" (reg1) \
338 : "a" (fifo));
340 #define ZORRO_ESP_PIO_FILL(operands, reg1) \
341 asm volatile ( \
342 " moveb " operands "\n" \
343 " moveb " operands "\n" \
344 " moveb " operands "\n" \
345 " moveb " operands "\n" \
346 " moveb " operands "\n" \
347 " moveb " operands "\n" \
348 " moveb " operands "\n" \
349 " moveb " operands "\n" \
350 " moveb " operands "\n" \
351 " moveb " operands "\n" \
352 " moveb " operands "\n" \
353 " moveb " operands "\n" \
354 " moveb " operands "\n" \
355 " moveb " operands "\n" \
356 " moveb " operands "\n" \
357 " moveb " operands "\n" \
358 " subqw #8,%1 \n" \
359 " subqw #8,%1 \n" \
360 : "+a" (addr), "+r" (reg1) \
361 : "a" (fifo));
363 #define ZORRO_ESP_FIFO_SIZE 16
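/*
 * PIO fallback used for message-in transfers to the command block (see
 * the DMA routines below): on reads, wait for FIFO bytes and drain them
 * one at a time until esp_count is exhausted or the phase changes; on
 * writes, prime the FIFO, issue the command and keep topping the FIFO
 * up as it drains.  Unexpected interrupts or a timeout set zep->error,
 * which zorro_esp_dma_error() reports back to the ESP core.
 */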
365 static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
366 u32 dma_count, int write, u8 cmd)
368 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
369 u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
370 u8 phase = esp->sreg & ESP_STAT_PMASK;
372 cmd &= ~ESP_CMD_DMA;
374 if (write) {
375 u8 *dst = (u8 *)addr;
376 u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
378 scsi_esp_cmd(esp, cmd);
380 while (1) {
381 if (!zorro_esp_wait_for_fifo(esp))
382 break;
384 *dst++ = zorro_esp_read8(esp, ESP_FDATA);
385 --esp_count;
387 if (!esp_count)
388 break;
390 if (zorro_esp_wait_for_intr(esp))
391 break;
393 if ((esp->sreg & ESP_STAT_PMASK) != phase)
394 break;
396 esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
397 if (esp->ireg & mask) {
398 zep->error = 1;
399 break;
402 if (phase == ESP_MIP)
403 scsi_esp_cmd(esp, ESP_CMD_MOK);
405 scsi_esp_cmd(esp, ESP_CMD_TI);
407 } else { /* unused, as long as we only handle MIP here */
408 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
410 if (esp_count >= ZORRO_ESP_FIFO_SIZE)
411 ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
412 else
413 ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
415 scsi_esp_cmd(esp, cmd);
417 while (esp_count) {
418 unsigned int n;
420 if (zorro_esp_wait_for_intr(esp))
421 break;
423 if ((esp->sreg & ESP_STAT_PMASK) != phase)
424 break;
426 esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
427 if (esp->ireg & ~ESP_INTR_BSERV) {
428 zep->error = 1;
429 break;
432 n = ZORRO_ESP_FIFO_SIZE -
433 (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
434 if (n > esp_count)
435 n = esp_count;
437 if (n == ZORRO_ESP_FIFO_SIZE)
438 ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
439 else {
440 esp_count -= n;
441 ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
444 scsi_esp_cmd(esp, ESP_CMD_TI);
449 /* Blizzard 1230/60 SCSI-IV DMA */
451 static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
452 u32 esp_count, u32 dma_count, int write, u8 cmd)
454 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
455 struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
456 u8 phase = esp->sreg & ESP_STAT_PMASK;
458 zep->error = 0;
459 /*
460 * Use PIO if transferring message bytes to esp->command_block_dma.
461 * PIO requires a virtual address, so substitute esp->command_block
462 * for addr.
463 */
464 if (phase == ESP_MIP && addr == esp->command_block_dma) {
465 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
466 esp_count, dma_count, write, cmd);
467 return;
470 if (write)
471 /* DMA receive */
472 dma_sync_single_for_device(esp->dev, addr, esp_count,
473 DMA_FROM_DEVICE);
474 else
475 /* DMA send */
476 dma_sync_single_for_device(esp->dev, addr, esp_count,
477 DMA_TO_DEVICE);
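/*
 * The Blizzard DMA engine takes the physical address shifted right by
 * one bit, with the transfer direction in the top bit: DMA_WRITE is
 * cleared when data flows into memory (DMA receive) and set when data
 * flows out of memory (DMA send).  The address is then loaded a byte
 * at a time, the first byte going through the latch register.  The
 * 1230-II and 2060 routines below use the same shift-and-direction-bit
 * encoding.
 */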
479 addr >>= 1;
480 if (write)
481 addr &= ~(DMA_WRITE);
482 else
483 addr |= DMA_WRITE;
485 writeb((addr >> 24) & 0xff, &dregs->dma_latch);
486 writeb((addr >> 24) & 0xff, &dregs->dma_addr);
487 writeb((addr >> 16) & 0xff, &dregs->dma_addr);
488 writeb((addr >> 8) & 0xff, &dregs->dma_addr);
489 writeb(addr & 0xff, &dregs->dma_addr);
491 scsi_esp_cmd(esp, ESP_CMD_DMA);
492 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
493 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
495 scsi_esp_cmd(esp, cmd);
498 /* Blizzard 1230-II DMA */
500 static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
501 u32 esp_count, u32 dma_count, int write, u8 cmd)
503 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
504 struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
505 u8 phase = esp->sreg & ESP_STAT_PMASK;
507 zep->error = 0;
508 /* Use PIO if transferring message bytes to esp->command_block_dma */
509 if (phase == ESP_MIP && addr == esp->command_block_dma) {
510 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
511 esp_count, dma_count, write, cmd);
512 return;
515 if (write)
516 /* DMA receive */
517 dma_sync_single_for_device(esp->dev, addr, esp_count,
518 DMA_FROM_DEVICE);
519 else
520 /* DMA send */
521 dma_sync_single_for_device(esp->dev, addr, esp_count,
522 DMA_TO_DEVICE);
524 addr >>= 1;
525 if (write)
526 addr &= ~(DMA_WRITE);
527 else
528 addr |= DMA_WRITE;
530 writeb((addr >> 24) & 0xff, &dregs->dma_latch);
531 writeb((addr >> 16) & 0xff, &dregs->dma_addr);
532 writeb((addr >> 8) & 0xff, &dregs->dma_addr);
533 writeb(addr & 0xff, &dregs->dma_addr);
535 scsi_esp_cmd(esp, ESP_CMD_DMA);
536 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
537 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
539 scsi_esp_cmd(esp, cmd);
542 /* Blizzard 2060 DMA */
544 static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
545 u32 esp_count, u32 dma_count, int write, u8 cmd)
547 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
548 struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
549 u8 phase = esp->sreg & ESP_STAT_PMASK;
551 zep->error = 0;
552 /* Use PIO if transferring message bytes to esp->command_block_dma */
553 if (phase == ESP_MIP && addr == esp->command_block_dma) {
554 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
555 esp_count, dma_count, write, cmd);
556 return;
559 if (write)
560 /* DMA receive */
561 dma_sync_single_for_device(esp->dev, addr, esp_count,
562 DMA_FROM_DEVICE);
563 else
564 /* DMA send */
565 dma_sync_single_for_device(esp->dev, addr, esp_count,
566 DMA_TO_DEVICE);
568 addr >>= 1;
569 if (write)
570 addr &= ~(DMA_WRITE);
571 else
572 addr |= DMA_WRITE;
574 writeb(addr & 0xff, &dregs->dma_addr3);
575 writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
576 writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
577 writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
579 scsi_esp_cmd(esp, ESP_CMD_DMA);
580 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
581 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
583 scsi_esp_cmd(esp, cmd);
586 /* Cyberstorm I DMA */
588 static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
589 u32 esp_count, u32 dma_count, int write, u8 cmd)
591 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
592 struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
593 u8 phase = esp->sreg & ESP_STAT_PMASK;
594 unsigned char *ctrl_data = &zep->ctrl_data;
596 zep->error = 0;
597 /* Use PIO if transferring message bytes to esp->command_block_dma */
598 if (phase == ESP_MIP && addr == esp->command_block_dma) {
599 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
600 esp_count, dma_count, write, cmd);
601 return;
604 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
605 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
607 if (write) {
608 /* DMA receive */
609 dma_sync_single_for_device(esp->dev, addr, esp_count,
610 DMA_FROM_DEVICE);
611 addr &= ~(1);
612 } else {
613 /* DMA send */
614 dma_sync_single_for_device(esp->dev, addr, esp_count,
615 DMA_TO_DEVICE);
616 addr |= 1;
619 writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
620 writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
621 writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
622 writeb(addr & 0xff, &dregs->dma_addr3);
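/*
 * The control register shares its address with the read-only condition
 * register and is itself write-only, so the direction bit is updated in
 * the zep->ctrl_data shadow copy and the whole byte is written back.
 */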
624 if (write)
625 *ctrl_data &= ~(CYBER_DMA_WRITE);
626 else
627 *ctrl_data |= CYBER_DMA_WRITE;
629 *ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
631 writeb(*ctrl_data, &dregs->ctrl_reg);
633 scsi_esp_cmd(esp, cmd);
636 /* Cyberstorm II DMA */
638 static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
639 u32 esp_count, u32 dma_count, int write, u8 cmd)
641 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
642 struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
643 u8 phase = esp->sreg & ESP_STAT_PMASK;
645 zep->error = 0;
646 /* Use PIO if transferring message bytes to esp->command_block_dma */
647 if (phase == ESP_MIP && addr == esp->command_block_dma) {
648 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
649 esp_count, dma_count, write, cmd);
650 return;
653 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
654 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
656 if (write) {
657 /* DMA receive */
658 dma_sync_single_for_device(esp->dev, addr, esp_count,
659 DMA_FROM_DEVICE);
660 addr &= ~(1);
661 } else {
662 /* DMA send */
663 dma_sync_single_for_device(esp->dev, addr, esp_count,
664 DMA_TO_DEVICE);
665 addr |= 1;
668 writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
669 writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
670 writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
671 writeb(addr & 0xff, &dregs->dma_addr3);
673 scsi_esp_cmd(esp, cmd);
676 /* Fastlane DMA */
678 static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
679 u32 esp_count, u32 dma_count, int write, u8 cmd)
681 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
682 struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
683 u8 phase = esp->sreg & ESP_STAT_PMASK;
684 unsigned char *ctrl_data = &zep->ctrl_data;
686 zep->error = 0;
687 /* Use PIO if transferring message bytes to esp->command_block_dma */
688 if (phase == ESP_MIP && addr == esp->command_block_dma) {
689 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
690 esp_count, dma_count, write, cmd);
691 return;
694 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
695 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
697 if (write) {
698 /* DMA receive */
699 dma_sync_single_for_device(esp->dev, addr, esp_count,
700 DMA_FROM_DEVICE);
701 addr &= ~(1);
702 } else {
703 /* DMA send */
704 dma_sync_single_for_device(esp->dev, addr, esp_count,
705 DMA_TO_DEVICE);
706 addr |= 1;
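/*
 * Fastlane address setup: writing clear_strobe ("DMA clear" in the
 * register layout above) clears the DMA engine, then the transfer
 * address is written into the board's Zorro III address window at
 * board_base plus the low 24 bits of addr.  Direction and enable bits
 * follow through the shadowed control register.
 */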
709 writeb(0, &dregs->clear_strobe);
710 z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));
712 if (write) {
713 *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
714 FASTLANE_DMA_ENABLE;
715 } else {
716 *ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
717 FASTLANE_DMA_ENABLE |
718 FASTLANE_DMA_WRITE);
721 writeb(*ctrl_data, &dregs->ctrl_reg);
723 scsi_esp_cmd(esp, cmd);
726 static int zorro_esp_dma_error(struct esp *esp)
728 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
730 /* check for error in case we've been doing PIO */
731 if (zep->error == 1)
732 return 1;
734 /* do nothing - there seems to be no way to check for DMA errors */
735 return 0;
738 /* per-board ESP driver ops */
740 static const struct esp_driver_ops blz1230_esp_ops = {
741 .esp_write8 = zorro_esp_write8,
742 .esp_read8 = zorro_esp_read8,
743 .map_single = zorro_esp_map_single,
744 .map_sg = zorro_esp_map_sg,
745 .unmap_single = zorro_esp_unmap_single,
746 .unmap_sg = zorro_esp_unmap_sg,
747 .irq_pending = zorro_esp_irq_pending,
748 .dma_length_limit = zorro_esp_dma_length_limit,
749 .reset_dma = zorro_esp_reset_dma,
750 .dma_drain = zorro_esp_dma_drain,
751 .dma_invalidate = zorro_esp_dma_invalidate,
752 .send_dma_cmd = zorro_esp_send_blz1230_dma_cmd,
753 .dma_error = zorro_esp_dma_error,
756 static const struct esp_driver_ops blz1230II_esp_ops = {
757 .esp_write8 = zorro_esp_write8,
758 .esp_read8 = zorro_esp_read8,
759 .map_single = zorro_esp_map_single,
760 .map_sg = zorro_esp_map_sg,
761 .unmap_single = zorro_esp_unmap_single,
762 .unmap_sg = zorro_esp_unmap_sg,
763 .irq_pending = zorro_esp_irq_pending,
764 .dma_length_limit = zorro_esp_dma_length_limit,
765 .reset_dma = zorro_esp_reset_dma,
766 .dma_drain = zorro_esp_dma_drain,
767 .dma_invalidate = zorro_esp_dma_invalidate,
768 .send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd,
769 .dma_error = zorro_esp_dma_error,
772 static const struct esp_driver_ops blz2060_esp_ops = {
773 .esp_write8 = zorro_esp_write8,
774 .esp_read8 = zorro_esp_read8,
775 .map_single = zorro_esp_map_single,
776 .map_sg = zorro_esp_map_sg,
777 .unmap_single = zorro_esp_unmap_single,
778 .unmap_sg = zorro_esp_unmap_sg,
779 .irq_pending = zorro_esp_irq_pending,
780 .dma_length_limit = zorro_esp_dma_length_limit,
781 .reset_dma = zorro_esp_reset_dma,
782 .dma_drain = zorro_esp_dma_drain,
783 .dma_invalidate = zorro_esp_dma_invalidate,
784 .send_dma_cmd = zorro_esp_send_blz2060_dma_cmd,
785 .dma_error = zorro_esp_dma_error,
788 static const struct esp_driver_ops cyber_esp_ops = {
789 .esp_write8 = zorro_esp_write8,
790 .esp_read8 = zorro_esp_read8,
791 .map_single = zorro_esp_map_single,
792 .map_sg = zorro_esp_map_sg,
793 .unmap_single = zorro_esp_unmap_single,
794 .unmap_sg = zorro_esp_unmap_sg,
795 .irq_pending = cyber_esp_irq_pending,
796 .dma_length_limit = zorro_esp_dma_length_limit,
797 .reset_dma = zorro_esp_reset_dma,
798 .dma_drain = zorro_esp_dma_drain,
799 .dma_invalidate = zorro_esp_dma_invalidate,
800 .send_dma_cmd = zorro_esp_send_cyber_dma_cmd,
801 .dma_error = zorro_esp_dma_error,
804 static const struct esp_driver_ops cyberII_esp_ops = {
805 .esp_write8 = zorro_esp_write8,
806 .esp_read8 = zorro_esp_read8,
807 .map_single = zorro_esp_map_single,
808 .map_sg = zorro_esp_map_sg,
809 .unmap_single = zorro_esp_unmap_single,
810 .unmap_sg = zorro_esp_unmap_sg,
811 .irq_pending = zorro_esp_irq_pending,
812 .dma_length_limit = zorro_esp_dma_length_limit,
813 .reset_dma = zorro_esp_reset_dma,
814 .dma_drain = zorro_esp_dma_drain,
815 .dma_invalidate = zorro_esp_dma_invalidate,
816 .send_dma_cmd = zorro_esp_send_cyberII_dma_cmd,
817 .dma_error = zorro_esp_dma_error,
820 static const struct esp_driver_ops fastlane_esp_ops = {
821 .esp_write8 = zorro_esp_write8,
822 .esp_read8 = zorro_esp_read8,
823 .map_single = zorro_esp_map_single,
824 .map_sg = zorro_esp_map_sg,
825 .unmap_single = zorro_esp_unmap_single,
826 .unmap_sg = zorro_esp_unmap_sg,
827 .irq_pending = fastlane_esp_irq_pending,
828 .dma_length_limit = fastlane_esp_dma_length_limit,
829 .reset_dma = zorro_esp_reset_dma,
830 .dma_drain = zorro_esp_dma_drain,
831 .dma_invalidate = fastlane_esp_dma_invalidate,
832 .send_dma_cmd = zorro_esp_send_fastlane_dma_cmd,
833 .dma_error = zorro_esp_dma_error,
836 /* Zorro driver config data */
838 struct zorro_driver_data {
839 const char *name;
840 unsigned long offset;
841 unsigned long dma_offset;
842 int absolute; /* offset is absolute address */
843 int scsi_option;
844 const struct esp_driver_ops *esp_ops;
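/*
 * scsi_option flags boards where the SCSI hardware is an optional
 * add-on and may be absent; for those, zorro_esp_probe() verifies that
 * an ESP chip is actually present by writing ESP_CFG1 and reading it
 * back.
 */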
847 /* board types */
849 enum {
850 ZORRO_BLZ1230,
851 ZORRO_BLZ1230II,
852 ZORRO_BLZ2060,
853 ZORRO_CYBER,
854 ZORRO_CYBERII,
855 ZORRO_FASTLANE,
858 /* per-board config data */
860 static const struct zorro_driver_data zorro_esp_boards[] = {
861 [ZORRO_BLZ1230] = {
862 .name = "Blizzard 1230",
863 .offset = 0x8000,
864 .dma_offset = 0x10000,
865 .scsi_option = 1,
866 .esp_ops = &blz1230_esp_ops,
868 [ZORRO_BLZ1230II] = {
869 .name = "Blizzard 1230II",
870 .offset = 0x10000,
871 .dma_offset = 0x10021,
872 .scsi_option = 1,
873 .esp_ops = &blz1230II_esp_ops,
875 [ZORRO_BLZ2060] = {
876 .name = "Blizzard 2060",
877 .offset = 0x1ff00,
878 .dma_offset = 0x1ffe0,
879 .esp_ops = &blz2060_esp_ops,
881 [ZORRO_CYBER] = {
882 .name = "CyberStormI",
883 .offset = 0xf400,
884 .dma_offset = 0xf800,
885 .esp_ops = &cyber_esp_ops,
887 [ZORRO_CYBERII] = {
888 .name = "CyberStormII",
889 .offset = 0x1ff03,
890 .dma_offset = 0x1ff43,
891 .scsi_option = 1,
892 .esp_ops = &cyberII_esp_ops,
894 [ZORRO_FASTLANE] = {
895 .name = "Fastlane",
896 .offset = 0x1000001,
897 .dma_offset = 0x1000041,
898 .esp_ops = &fastlane_esp_ops,
902 static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
903 { /* Blizzard 1230 IV */
904 .id = ZORRO_ID(PHASE5, 0x11, 0),
905 .driver_data = ZORRO_BLZ1230,
907 { /* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
908 .id = ZORRO_ID(PHASE5, 0x0B, 0),
909 .driver_data = ZORRO_BLZ1230II,
911 { /* Blizzard 2060 */
912 .id = ZORRO_ID(PHASE5, 0x18, 0),
913 .driver_data = ZORRO_BLZ2060,
915 { /* Cyberstorm */
916 .id = ZORRO_ID(PHASE5, 0x0C, 0),
917 .driver_data = ZORRO_CYBER,
919 { /* Cyberstorm II */
920 .id = ZORRO_ID(PHASE5, 0x19, 0),
921 .driver_data = ZORRO_CYBERII,
923 { 0 }
925 MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
927 static int zorro_esp_probe(struct zorro_dev *z,
928 const struct zorro_device_id *ent)
930 struct scsi_host_template *tpnt = &scsi_esp_template;
931 struct Scsi_Host *host;
932 struct esp *esp;
933 const struct zorro_driver_data *zdd;
934 struct zorro_esp_priv *zep;
935 unsigned long board, ioaddr, dmaaddr;
936 int err;
938 board = zorro_resource_start(z);
939 zdd = &zorro_esp_boards[ent->driver_data];
941 pr_info("%s found at address 0x%lx.\n", zdd->name, board);
943 zep = kzalloc(sizeof(*zep), GFP_KERNEL);
944 if (!zep) {
945 pr_err("Can't allocate device private data!\n");
946 return -ENOMEM;
949 /* let's figure out whether we have a Zorro II or Zorro III board */
950 if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
951 if (board > 0xffffff)
952 zep->zorro3 = 1;
953 } else {
954 /*
955 * Even though most of these boards identify as Zorro II,
956 * they are in fact CPU expansion slot boards and have full
957 * access to all of memory. Fix up DMA bitmask here.
958 */
959 z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
962 /*
963 * If Zorro III and ID matches Fastlane, our device table entry
964 * contains data for the Blizzard 1230 II board which does share the
965 * same ID. Fix up device table entry here.
966 * TODO: Some Cyberstorm 060 boards also share this ID but would need
967 * to use the Cyberstorm I driver data ... we catch this by checking
968 * for presence of ESP chip later, but don't try to fix up yet.
969 */
970 if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
971 pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
972 zdd->name, board);
973 zdd = &zorro_esp_boards[ZORRO_FASTLANE];
976 if (zdd->absolute) {
977 ioaddr = zdd->offset;
978 dmaaddr = zdd->dma_offset;
979 } else {
980 ioaddr = board + zdd->offset;
981 dmaaddr = board + zdd->dma_offset;
984 if (!zorro_request_device(z, zdd->name)) {
985 pr_err("cannot reserve region 0x%lx, abort\n",
986 board);
987 err = -EBUSY;
988 goto fail_free_zep;
991 host = scsi_host_alloc(tpnt, sizeof(struct esp));
993 if (!host) {
994 pr_err("No host detected; board configuration problem?\n");
995 err = -ENOMEM;
996 goto fail_release_device;
999 host->base = ioaddr;
1000 host->this_id = 7;
1002 esp = shost_priv(host);
1003 esp->host = host;
1004 esp->dev = &z->dev;
1006 esp->scsi_id = host->this_id;
1007 esp->scsi_id_mask = (1 << esp->scsi_id);
1009 esp->cfreq = 40000000;
1011 zep->esp = esp;
1013 dev_set_drvdata(esp->dev, zep);
1015 /* additional setup required for Fastlane */
1016 if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
1017 /* map full address space up to ESP base for DMA */
1018 zep->board_base = ioremap_nocache(board,
1019 FASTLANE_ESP_ADDR-1);
1020 if (!zep->board_base) {
1021 pr_err("Cannot allocate board address space\n");
1022 err = -ENOMEM;
1023 goto fail_free_host;
1025 /* initialize DMA control shadow register */
1026 zep->ctrl_data = (FASTLANE_DMA_FCODE |
1027 FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
1030 esp->ops = zdd->esp_ops;
1032 if (ioaddr > 0xffffff)
1033 esp->regs = ioremap_nocache(ioaddr, 0x20);
1034 else
1035 /* ZorroII address space remapped nocache by early startup */
1036 esp->regs = ZTWO_VADDR(ioaddr);
1038 if (!esp->regs) {
1039 err = -ENOMEM;
1040 goto fail_unmap_fastlane;
1043 /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
1044 if (zdd->scsi_option) {
1045 zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
1046 if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
1047 err = -ENODEV;
1048 goto fail_unmap_regs;
1052 if (zep->zorro3) {
1053 /*
1054 * Only Fastlane Z3 for now - add switch for correct struct
1055 * dma_registers size if adding any more
1056 */
1057 esp->dma_regs = ioremap_nocache(dmaaddr,
1058 sizeof(struct fastlane_dma_registers));
1059 } else
1060 /* ZorroII address space remapped nocache by early startup */
1061 esp->dma_regs = ZTWO_VADDR(dmaaddr);
1063 if (!esp->dma_regs) {
1064 err = -ENOMEM;
1065 goto fail_unmap_regs;
1068 esp->command_block = dma_alloc_coherent(esp->dev, 16,
1069 &esp->command_block_dma,
1070 GFP_KERNEL);
1072 if (!esp->command_block) {
1073 err = -ENOMEM;
1074 goto fail_unmap_dma_regs;
1077 host->irq = IRQ_AMIGA_PORTS;
1078 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
1079 "Amiga Zorro ESP", esp);
1080 if (err < 0) {
1081 err = -ENODEV;
1082 goto fail_free_command_block;
1085 /* register the chip */
1086 err = scsi_esp_register(esp, &z->dev);
1088 if (err) {
1089 err = -ENOMEM;
1090 goto fail_free_irq;
1093 return 0;
1095 fail_free_irq:
1096 free_irq(host->irq, esp);
1098 fail_free_command_block:
1099 dma_free_coherent(esp->dev, 16,
1100 esp->command_block,
1101 esp->command_block_dma);
1103 fail_unmap_dma_regs:
1104 if (zep->zorro3)
1105 iounmap(esp->dma_regs);
1107 fail_unmap_regs:
1108 if (ioaddr > 0xffffff)
1109 iounmap(esp->regs);
1111 fail_unmap_fastlane:
1112 if (zep->zorro3)
1113 iounmap(zep->board_base);
1115 fail_free_host:
1116 scsi_host_put(host);
1118 fail_release_device:
1119 zorro_release_device(z);
1121 fail_free_zep:
1122 kfree(zep);
1124 return err;
1127 static void zorro_esp_remove(struct zorro_dev *z)
1129 struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
1130 struct esp *esp = zep->esp;
1131 struct Scsi_Host *host = esp->host;
1133 scsi_esp_unregister(esp);
1135 free_irq(host->irq, esp);
1136 dma_free_coherent(esp->dev, 16,
1137 esp->command_block,
1138 esp->command_block_dma);
1140 if (zep->zorro3) {
1141 iounmap(zep->board_base);
1142 iounmap(esp->dma_regs);
1145 if (host->base > 0xffffff)
1146 iounmap(esp->regs);
1148 scsi_host_put(host);
1150 zorro_release_device(z);
1152 kfree(zep);
1155 static struct zorro_driver zorro_esp_driver = {
1156 .name = KBUILD_MODNAME,
1157 .id_table = zorro_esp_zorro_tbl,
1158 .probe = zorro_esp_probe,
1159 .remove = zorro_esp_remove,
1162 static int __init zorro_esp_scsi_init(void)
1164 return zorro_register_driver(&zorro_esp_driver);
1167 static void __exit zorro_esp_scsi_exit(void)
1169 zorro_unregister_driver(&zorro_esp_driver);
1172 module_init(zorro_esp_scsi_init);
1173 module_exit(zorro_esp_scsi_exit);