Refactor aio callback allocation to use an aiocb pool (Avi Kivity)
[sniper_test.git] / hw / esp.c
blob aa1a76eabbfbfc64373a03247aeb91313bdf4297
/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "scsi-disk.h"
#include "scsi.h"

/* debug ESP card */
//#define DEBUG_ESP

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */
#ifdef DEBUG_ESP
#define DPRINTF(fmt, args...) \
do { printf("ESP: " fmt , ##args); } while (0)
#else
#define DPRINTF(fmt, args...) do {} while (0)
#endif

#define ESP_ERROR(fmt, args...) \
do { printf("ESP ERROR: %s: " fmt, __func__ , ##args); } while (0)

#define ESP_REGS 16
#define TI_BUFSZ 16
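
/*
 * Per-instance controller state.  Note the sign convention used throughout:
 * ti_size is positive while data flows from the SCSI layer to the initiator
 * (data-in) and negative while data flows towards the device (data-out);
 * the small ti_buf[] FIFO is only used for programmed I/O.
 */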
typedef struct ESPState ESPState;

struct ESPState {
    uint32_t it_shift;
    qemu_irq irq;
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    uint32_t sense;
    uint32_t dma;
    SCSIDevice *scsi_dev[ESP_MAX_DEVS];
    SCSIDevice *current_dev;
    uint8_t cmdbuf[TI_BUFSZ];
    uint32_t cmdlen;
    uint32_t do_cmd;

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;
    uint8_t *async_buf;
    uint32_t async_len;

    espdma_memory_read_write dma_memory_read;
    espdma_memory_read_write dma_memory_write;
    void *dma_opaque;
};
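
/*
 * Register indices.  Several offsets are shared between a read-only and a
 * write-only register (e.g. RSTAT/WBUSID at 0x4), which is why separate
 * read and write register files (rregs[]/wregs[]) are kept above.
 */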
#define ESP_TCLO   0x0
#define ESP_TCMID  0x1
#define ESP_FIFO   0x2
#define ESP_CMD    0x3
#define ESP_RSTAT  0x4
#define ESP_WBUSID 0x4
#define ESP_RINTR  0x5
#define ESP_WSEL   0x5
#define ESP_RSEQ   0x6
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7
#define ESP_WSYNO  0x7
#define ESP_CFG1   0x8
#define ESP_RRES1  0x9
#define ESP_WCCF   0x9
#define ESP_RRES2  0xa
#define ESP_WTEST  0xa
#define ESP_CFG2   0xb
#define ESP_CFG3   0xc
#define ESP_RES3   0xd
#define ESP_TCHI   0xe
#define ESP_RES4   0xf
#define CMD_DMA      0x80
#define CMD_CMD      0x7f

#define CMD_NOP      0x00
#define CMD_FLUSH    0x01
#define CMD_RESET    0x02
#define CMD_BUSRESET 0x03
#define CMD_TI       0x10
#define CMD_ICCS     0x11
#define CMD_MSGACC   0x12
#define CMD_SATN     0x1a
#define CMD_SELATN   0x42
#define CMD_SELATNS  0x43
#define CMD_ENSEL    0x44

#define STAT_DO       0x00
#define STAT_DI       0x01
#define STAT_CD       0x02
#define STAT_ST       0x03
#define STAT_MO       0x06
#define STAT_MI       0x07
#define STAT_PIO_MASK 0x06

#define STAT_TC  0x10
#define STAT_PE  0x20
#define STAT_GE  0x40
#define STAT_INT 0x80

#define BUSID_DID 0x07

#define INTR_FC  0x08
#define INTR_BS  0x10
#define INTR_DC  0x20
#define INTR_RST 0x80

#define SEQ_0  0x0
#define SEQ_CD 0x4

#define CFG1_RESREPT 0x40

#define TCHI_FAS100A 0x4
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
    }
}
static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
    }
}
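
/*
 * Fetch a SCSI command from the initiator, either via DMA or from the PIO
 * FIFO buffer.  Returns the number of command bytes collected, or 0 if the
 * selected target does not exist (in which case a disconnect interrupt is
 * raised).
 */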
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = 0;
    }
    DPRINTF("get_cmd: len %d target %d\n", dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_dev) {
        /* Started a new command before the old one finished. Cancel it. */
        s->current_dev->cancel_io(s->current_dev, 0);
        s->async_len = 0;
    }

    if (target >= ESP_MAX_DEVS || !s->scsi_dev[target]) {
        // No such drive
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    s->current_dev = s->scsi_dev[target];
    return dmalen;
}
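
/*
 * Hand a fully assembled command off to the SCSI layer.  A positive return
 * from send_command() means the device has data for us (data-in phase), a
 * negative one means it expects data from us (data-out phase).
 */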
static void do_cmd(ESPState *s, uint8_t *buf)
{
    int32_t datalen;
    int lun;

    DPRINTF("do_cmd: busid 0x%x\n", buf[0]);
    lun = buf[0] & 7;
    datalen = s->current_dev->send_command(s->current_dev, 0, &buf[1], lun);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
            s->current_dev->read_data(s->current_dev, 0);
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->current_dev->write_data(s->current_dev, 0);
        }
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    len = get_cmd(s, buf);
    if (len)
        do_cmd(s, buf);
}

static void handle_satn_stop(ESPState *s)
{
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
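
/*
 * Return the two-byte status/message-in response (sense code followed by a
 * zero message byte), either through DMA or by leaving it in the PIO FIFO.
 */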
static void write_response(ESPState *s)
{
    DPRINTF("Transfer status (sense=%d)\n", s->sense);
    s->ti_buf[0] = s->sense;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    esp_raise_irq(s);
}
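
/*
 * Move the next chunk of the current DMA transfer between guest memory and
 * the SCSI layer's asynchronous buffer.  A pending command (do_cmd) is read
 * in full and dispatched instead.  Each pass is bounded by async_len, so a
 * large request completes over several calls as the SCSI layer refills the
 * buffer.
 */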
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        DPRINTF("command len %d + %d\n", s->cmdlen, len);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        if (to_device) {
            // ti_size is negative
            s->current_dev->write_data(s->current_dev, 0);
        } else {
            s->current_dev->read_data(s->current_dev, 0);
            /* If there is still data to be read from the device then
               complete the DMA operation immediately.  Otherwise defer
               until the scsi layer has completed. */
            if (s->dma_left == 0 && s->ti_size > 0) {
                esp_dma_done(s);
            }
        }
    } else {
        /* Partially filled a scsi buffer. Complete immediately. */
        esp_dma_done(s);
    }
}
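
/*
 * Completion callback invoked by the SCSI layer.  SCSI_REASON_DONE reports
 * the final command status (arg carries the sense value); otherwise a new
 * chunk of data of length arg is ready in the device buffer and any pending
 * DMA is resumed.
 */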
static void esp_command_complete(void *opaque, int reason, uint32_t tag,
                                 uint32_t arg)
{
    ESPState *s = (ESPState *)opaque;

    if (reason == SCSI_REASON_DONE) {
        DPRINTF("SCSI Command complete\n");
        if (s->ti_size != 0)
            DPRINTF("SCSI command completed unexpectedly\n");
        s->ti_size = 0;
        s->dma_left = 0;
        s->async_len = 0;
        if (arg)
            DPRINTF("Command failed\n");
        s->sense = arg;
        s->rregs[ESP_RSTAT] = STAT_ST;
        esp_dma_done(s);
        s->current_dev = NULL;
    } else {
        DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
        s->async_len = arg;
        s->async_buf = s->current_dev->get_buf(s->current_dev, 0);
        if (s->dma_left) {
            esp_do_dma(s);
        } else if (s->dma_counter != 0 && s->ti_size <= 0) {
            /* If this was the last part of a DMA transfer then the
               completion interrupt is deferred to here. */
            esp_dma_done(s);
        }
    }
}
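
/*
 * Handle the Transfer Information command.  The 16-bit transfer counter in
 * TCLO/TCMID gives the requested length, with zero meaning the maximum of
 * 0x10000 bytes; the actual transfer is clamped to the data the SCSI layer
 * still has outstanding (or to the command length when a command is
 * pending).
 */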
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    DPRINTF("Transfer Information len %d\n", minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        DPRINTF("command len %d\n", s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}
static void esp_reset(void *opaque)
{
    ESPState *s = opaque;

    esp_lower_irq(s);

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;

    s->rregs[ESP_CFG1] = 7;
}

static void parent_esp_reset(void *opaque, int irq, int level)
{
    if (level)
        esp_reset(opaque);
}
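
/*
 * Memory-mapped register access.  Registers are spaced 1 << it_shift bytes
 * apart, so the register index is recovered by shifting the bus offset
 * right by it_shift.
 */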
static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = addr >> s->it_shift;
    DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                /* Data out. */
                ESP_ERROR("PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
            } else {
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            }
            esp_raise_irq(s);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        // Clear interrupt/error status bits
        s->rregs[ESP_RSTAT] &= ~(STAT_GE | STAT_PE);
        esp_lower_irq(s);
        break;
    default:
        break;
    }
    return s->rregs[saddr];
}
static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = addr >> s->it_shift;
    DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr],
            val);
    switch (saddr) {
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if (s->ti_size == TI_BUFSZ - 1) {
            ESP_ERROR("fifo overrun\n");
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            DPRINTF("NOP (%2.2x)\n", val);
            break;
        case CMD_FLUSH:
            DPRINTF("Flush FIFO (%2.2x)\n", val);
            //s->ti_size = 0;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            DPRINTF("Chip reset (%2.2x)\n", val);
            esp_reset(s);
            break;
        case CMD_BUSRESET:
            DPRINTF("Bus reset (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            DPRINTF("Message Accepted (%2.2x)\n", val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            DPRINTF("Set ATN (%2.2x)\n", val);
            break;
        case CMD_SELATN:
            DPRINTF("Set ATN (%2.2x)\n", val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            DPRINTF("Set ATN & stop (%2.2x)\n", val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            DPRINTF("Enable selection (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = 0;
            break;
        default:
            ESP_ERROR("Unhandled ESP command (%2.2x)\n", val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    case ESP_CFG2 ... ESP_RES4:
        s->rregs[saddr] = val;
        break;
    default:
        ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
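
/*
 * Only the byte-wide handlers are wired up here; the 32-bit write slot
 * reuses the byte handler, while 16-bit accesses and wider reads are left
 * unhandled.
 */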
static CPUReadMemoryFunc *esp_mem_read[3] = {
    esp_mem_readb,
    NULL,
    NULL,
};

static CPUWriteMemoryFunc *esp_mem_write[3] = {
    esp_mem_writeb,
    NULL,
    esp_mem_writeb,
};
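
/*
 * Savevm/loadvm support (version 3).  dma_counter is not part of the saved
 * state because no transfer is expected to be in progress at save time.
 */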
static void esp_save(QEMUFile *f, void *opaque)
{
    ESPState *s = opaque;

    qemu_put_buffer(f, s->rregs, ESP_REGS);
    qemu_put_buffer(f, s->wregs, ESP_REGS);
    qemu_put_sbe32s(f, &s->ti_size);
    qemu_put_be32s(f, &s->ti_rptr);
    qemu_put_be32s(f, &s->ti_wptr);
    qemu_put_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_put_be32s(f, &s->sense);
    qemu_put_be32s(f, &s->dma);
    qemu_put_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_put_be32s(f, &s->cmdlen);
    qemu_put_be32s(f, &s->do_cmd);
    qemu_put_be32s(f, &s->dma_left);
    // There should be no transfers in progress, so dma_counter is not saved
}

static int esp_load(QEMUFile *f, void *opaque, int version_id)
{
    ESPState *s = opaque;

    if (version_id != 3)
        return -EINVAL; // Cannot emulate 2

    qemu_get_buffer(f, s->rregs, ESP_REGS);
    qemu_get_buffer(f, s->wregs, ESP_REGS);
    qemu_get_sbe32s(f, &s->ti_size);
    qemu_get_be32s(f, &s->ti_rptr);
    qemu_get_be32s(f, &s->ti_wptr);
    qemu_get_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_get_be32s(f, &s->sense);
    qemu_get_be32s(f, &s->dma);
    qemu_get_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_get_be32s(f, &s->cmdlen);
    qemu_get_be32s(f, &s->do_cmd);
    qemu_get_be32s(f, &s->dma_left);

    return 0;
}
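
/*
 * Attach a block device as SCSI target 'id'.  A negative id picks the first
 * free slot, skipping the controller's own bus id (CFG1 bits 0-2).  Any
 * existing device at that id is destroyed first; scsi-generic is tried
 * before falling back to the scsi-disk emulation.
 */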
void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id)
{
    ESPState *s = (ESPState *)opaque;

    if (id < 0) {
        for (id = 0; id < ESP_MAX_DEVS; id++) {
            if (id == (s->rregs[ESP_CFG1] & 0x7))
                continue;
            if (s->scsi_dev[id] == NULL)
                break;
        }
    }
    if (id >= ESP_MAX_DEVS) {
        DPRINTF("Bad Device ID %d\n", id);
        return;
    }
    if (s->scsi_dev[id]) {
        DPRINTF("Destroying device %d\n", id);
        s->scsi_dev[id]->destroy(s->scsi_dev[id]);
    }
    DPRINTF("Attaching block device %d\n", id);
    /* Command queueing is not implemented. */
    s->scsi_dev[id] = scsi_generic_init(bd, 0, esp_command_complete, s);
    if (s->scsi_dev[id] == NULL)
        s->scsi_dev[id] = scsi_disk_init(bd, 0, esp_command_complete, s);
}
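
/*
 * Create and register an ESP instance: map its registers at espaddr, hook up
 * the DMA helpers and IRQ, register savevm/reset handlers, and hand back an
 * IRQ that the parent DMA controller can pulse to reset the chip.
 */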
void *esp_init(target_phys_addr_t espaddr, int it_shift,
               espdma_memory_read_write dma_memory_read,
               espdma_memory_read_write dma_memory_write,
               void *dma_opaque, qemu_irq irq, qemu_irq *reset)
{
    ESPState *s;
    int esp_io_memory;

    s = qemu_mallocz(sizeof(ESPState));

    s->irq = irq;
    s->it_shift = it_shift;
    s->dma_memory_read = dma_memory_read;
    s->dma_memory_write = dma_memory_write;
    s->dma_opaque = dma_opaque;

    esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
    cpu_register_physical_memory(espaddr, ESP_REGS << it_shift, esp_io_memory);

    esp_reset(s);

    register_savevm("esp", espaddr, 3, esp_save, esp_load, s);
    qemu_register_reset(esp_reset, s);

    *reset = *qemu_allocate_irqs(parent_esp_reset, s, 1);

    return s;
}