/* fastlane.c: Driver for Phase5's Fastlane SCSI Controller.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * This driver is based on the CyberStorm driver, hence the occasional
 * reference to CyberStorm.
 *
 * Betatesting & crucial adjustments by
 * Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
 */
/* TODO:
 *
 * o According to the doc from laire, it is required to reset the DMA when
 *   the transfer is done. ATM we reset DMA just before every new
 *   dma_init_(read|write). (A possible hook for the post-transfer reset is
 *   sketched after dma_clear() below.)
 *
 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
 *    to the caches and the Sparc MMU mapping.
 * 2) Make as few routines required outside the generic driver. A lot of the
 *    routines in this file used to be inline!
 */
#include <linux/module.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"

#include <linux/zorro.h>
#include <asm/irq.h>

#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include <asm/pgtable.h>
/* Such day has just come... */
#if 0
/* Leave this defined unless you really need to enable the DMA IRQ one day */
#define NODMAIRQ
#endif
/* The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR 0x1000001
#define FASTLANE_DMA_ADDR 0x1000041
/* The Fastlane DMA interface */
struct fastlane_dma_registers {
	volatile unsigned char cond_reg;	/* DMA status  (ro) [0x0000] */
#define ctrl_reg  cond_reg			/* DMA control (wo) [0x0000] */
	unsigned char dmapad1[0x3f];
	volatile unsigned char clear_strobe;	/* DMA clear   (wo) [0x0040] */
};
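
/*
 * Illustrative only, not part of the original driver: a compile-time
 * assertion that the 0x3f pad above really places clear_strobe at the
 * documented offset 0x0040. The typedef name is hypothetical and unused;
 * it assumes offsetof() is available from the headers included above.
 */
typedef char fastlane_clear_strobe_offset_check
	[(offsetof(struct fastlane_dma_registers, clear_strobe) == 0x0040) ? 1 : -1];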
/* DMA status bits */
#define FASTLANE_DMA_MINT   0x80
#define FASTLANE_DMA_IACT   0x40
#define FASTLANE_DMA_CREQ   0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE  0xa0
#define FASTLANE_DMA_MASK   0xf3
#define FASTLANE_DMA_LED    0x10	/* HD led control 1 = on */
#define FASTLANE_DMA_WRITE  0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE 0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI    0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI    0x01	/* Enable SCSI IRQ */
static int  dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int  dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int  dma_irq_p(struct NCR_ESP *esp);
static void dma_irq_exit(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int  dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static unsigned char ctrl_data = 0;	/* Keep backup of the stuff written
					 * to ctrl_reg. Always write a copy
					 * to this register when writing to
					 * the hardware register!
					 */
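
/*
 * Illustrative only, not part of the original driver: a hypothetical helper
 * showing the pattern the comment above asks for - update the ctrl_data
 * shadow first, then write the shadow to the write-only hardware control
 * register. The routines further down open-code exactly this sequence.
 */
static inline void fastlane_write_ctrl(struct fastlane_dma_registers *dregs,
				       unsigned char set_bits,
				       unsigned char clear_bits)
{
	ctrl_data = (ctrl_data & ~clear_bits) | set_bits;
	dregs->ctrl_reg = ctrl_data;
}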
static volatile unsigned char cmd_buffer[16];
				/* This is where all commands are put
				 * before they are transferred to the ESP chip
				 * via PIO.
				 */
static inline void dma_clear(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	/* Drop the DMA enable/direction bits, keep the IRQ and LED settings. */
	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
	dregs->ctrl_reg = ctrl_data;

	/* Strobe the clear register and write a zero address through the
	 * board base; dma_init_(read|write) load the real transfer address
	 * through the same board-relative path.
	 */
	t = (unsigned long *)(esp->edev);

	dregs->clear_strobe = 0;
	*t = 0;
}
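
/*
 * Illustrative only, not part of the original driver: per the TODO at the
 * top of the file, the post-transfer DMA reset could also be wired through
 * the generic driver's optional dma_invalidate hook instead of relying on
 * the reset done before each dma_init_(read|write). This hypothetical hook
 * simply reuses dma_clear(); the detection code below leaves
 * esp->dma_invalidate set to 0. The signature is assumed to match the other
 * optional NCR53C9x callbacks that take only a struct NCR_ESP pointer.
 */
static inline void fastlane_dma_invalidate_sketch(struct NCR_ESP *esp)
{
	dma_clear(esp);
}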
/***************************************************************** Detection */
int __init fastlane_esp_detect(Scsi_Host_Template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;

	if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) {
		unsigned long board = z->resource.start;
		if (request_mem_region(board+FASTLANE_ESP_ADDR,
				       sizeof(struct ESP_regs), "NCR53C9x")) {
			/* Check if this is really a fastlane controller. The problem
			 * is that also the cyberstorm and blizzard controllers use
			 * this ID value. Fortunately only Fastlane maps in Z3 space.
			 */
			if (board < 0x1000000) {
				goto err_release;
			}
			esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR);
			/* Do command transfer with programmed I/O */
			esp->do_pio_cmds = 1;

			/* Required functions */
			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &dma_init_read;
			esp->dma_init_write = &dma_init_write;
			esp->dma_ints_off = &dma_ints_off;
			esp->dma_ints_on = &dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &dma_setup;

			/* Optional functions */
			esp->dma_barrier = 0;
			esp->dma_drain = 0;
			esp->dma_invalidate = 0;
			esp->dma_irq_entry = 0;
			esp->dma_irq_exit = &dma_irq_exit;
			esp->dma_led_on = &dma_led_on;
			esp->dma_led_off = &dma_led_off;
			esp->dma_poll = 0;
			esp->dma_reset = 0;
			/* Initialize the portBits (enable IRQs) */
			ctrl_data = (FASTLANE_DMA_FCODE |
#ifndef NODMAIRQ
				     FASTLANE_DMA_EDI |
#endif
				     FASTLANE_DMA_ESI);

			/* SCSI chip clock */
			esp->cfreq = 40000000;

			/* Map the physical address space into virtual kernel space */
			address = (unsigned long)
				z_ioremap(board, z->resource.end - board + 1);

			if (!address) {
				printk(KERN_ERR "Could not remap Fastlane controller memory!\n");
				goto err_unregister;
			}
			/* The DMA registers on the Fastlane are mapped
			 * relative to the device (i.e. in the same Zorro
			 * I/O block).
			 */
			esp->dregs = (void *)(address + FASTLANE_DMA_ADDR);

			/* ESP register base */
			esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR);

			/* Board base */
			esp->edev = (void *) address;

			/* Set the command buffer */
			esp->esp_command = cmd_buffer;
			esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

			esp->irq = IRQ_AMIGA_PORTS;
			esp->slot = board + FASTLANE_ESP_ADDR;
			if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
					"Fastlane SCSI", esp->ehost)) {
				printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
				goto err_unmap;
			}
			/* Controller ID */
			esp->scsi_id = 7;

			/* We don't have a differential SCSI-bus. */
			esp->diff = 0;

			dma_clear(esp);
			esp_initialize(esp);

			printk(KERN_INFO "ESP: Total of %d ESP hosts found, %d actually in use.\n",
			       nesps, esps_in_use);
			esps_running = esps_in_use;
			return esps_in_use;
		}
	}
	return 0;
 err_unmap:
	z_iounmap((void *)address);
 err_unregister:
	scsi_unregister(esp->ehost);
 err_release:
	release_mem_region(z->resource.start + FASTLANE_ESP_ADDR,
			   sizeof(struct ESP_regs));
	return 0;
}
/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	/* Since the Fastlane DMA is fully dedicated to the ESP chip,
	 * the number of bytes sent (to the ESP chip) equals the number
	 * of bytes in the FIFO - there is no buffering in the DMA controller.
	 * XXXX Do I read this right? It is from host to ESP, right?
	 */
	return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	unsigned long sz = sp->SCp.this_residual;
	if (sz > 0xfffc)
		sz = 0xfffc;
	return sz;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
	ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
		esp->esp_id, ((struct fastlane_dma_registers *)
			      (esp->dregs))->cond_reg));
	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
		custom.intreqr, custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	cache_clear(addr, length);

	dma_clear(esp);

	/* Write the transfer address through the board-relative window
	 * selected by its low 24 bits, then enable the DMA engine.
	 */
	t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);

	dregs->clear_strobe = 0;
	*t = addr;

	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
	dregs->ctrl_reg = ctrl_data;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	cache_push(addr, length);

	dma_clear(esp);

	t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));

	dregs->clear_strobe = 0;
	*t = addr;

	ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) |
		     FASTLANE_DMA_ENABLE |
		     FASTLANE_DMA_WRITE);
	dregs->ctrl_reg = ctrl_data;
}
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(esp->irq);
}

static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(esp->irq);
}
static void dma_irq_exit(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);

	dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI);
#ifdef __mc68000__
	nop();
#endif
	dregs->ctrl_reg = ctrl_data;
}
static int dma_irq_p(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned char dma_status;

	dma_status = dregs->cond_reg;

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
#ifndef NODMAIRQ
	   (dma_status & FASTLANE_DMA_CREQ) &&
#endif
	   (!(dma_status & FASTLANE_DMA_MINT)) &&
	   (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR));
}
static void dma_led_off(struct NCR_ESP *esp)
{
	ctrl_data &= ~FASTLANE_DMA_LED;
	((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}

static void dma_led_on(struct NCR_ESP *esp)
{
	ctrl_data |= FASTLANE_DMA_LED;
	((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}
static int dma_ports_p(struct NCR_ESP *esp)
{
	return ((custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
	 * so when (write) is true, it actually means READ!
	 */
	if (write) {
		dma_init_read(esp, addr, count);
	} else {
		dma_init_write(esp, addr, count);
	}
}
#define HOSTS_C

int fastlane_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;

	esp_deallocate((struct NCR_ESP *)instance->hostdata);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
	return 1;
}
static Scsi_Host_Template driver_template = {
	.proc_name		= "esp-fastlane",
	.proc_info		= esp_proc_info,
	.name			= "Fastlane SCSI",
	.detect			= fastlane_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= fastlane_esp_release,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING
};

#include "scsi_module.c"

MODULE_LICENSE("GPL");