/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif

struct dma_regs {
    int now[2];                 /* working address/count, updated as the
                                   transfer progresses */
    uint16_t base[2];           /* programmed base address/count */
    uint8_t mode;
    uint8_t page;               /* bits 16-23 of the physical address */
    uint8_t pageh;              /* bits 24-30 (high page register) */
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;                 /* 0: 8-bit controller, 1: 16-bit controller */
    struct dma_regs regs[4];
    qemu_irq *cpu_request_exit;
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

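/* Page-register port offsets (nport & 7) map to DMA channels as on a PC:
   offsets 1, 2, 3 and 7 select channels 2, 3, 1 and 0 respectively;
   -1 marks offsets that do not address any channel's page register. */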
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

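/* Latch the programmed base address into the working registers; the
   16-bit controller (dshift = 1) counts in words, so the base address
   is shifted left by dshift to obtain a byte address. */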
static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

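/* The 16-bit address and count registers are accessed through 8-bit
   ports; an internal flip-flop selects the low or the high byte and is
   toggled by every access (and cleared via controller register 0x0c). */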
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

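/* Writing a channel register stores the low byte first, then the high
   byte (per the flip-flop); a completed high-byte write reinitializes
   the channel's working registers via init_chan. */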
static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

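/* Controller registers, decoded from the port offset (iport): 0x08
   command, 0x09 request, 0x0a single channel mask, 0x0b mode, 0x0c
   clear flip-flop, 0x0d master reset, 0x0e clear all masks, 0x0f write
   all masks. */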
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

    linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
           nport, ichan, data);
}

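/* Reading the status register returns the terminal-count flags in bits
   0-3 and the pending requests in bits 4-7; the TC flags are cleared by
   the read itself. */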
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}

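/* Run one pending transfer by calling the device's registered handler.
   The handler receives the global channel number, the current position
   and the total length in bytes ((base[COUNT] + 1) << ncont, since the
   second controller counts 16-bit words) and returns the new position. */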
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}

static QEMUBH *dma_bh;

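/* Scan both controllers for channels that are unmasked and have a
   pending request, and run them.  A static 'running' flag guards
   against reentrancy: if DMA_run is entered recursively (e.g. from a
   transfer handler), the work is deferred to the bottom half instead. */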
static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;
    static int running = 0;

    if (running) {
        rearm = 1;
        goto out;
    } else {
        running = 1;
    }

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    running = 0;
out:
    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

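/* Copy between guest memory and a device buffer on behalf of a channel.
   The physical address is composed from the high page (bits 24-30), the
   page register (bits 16-23) and the working address; mode bit 5 selects
   address-decrement mode, in which the buffer is processed backwards. */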
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_write (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    struct dma_cont *d = &dma_controllers[nchan > 3];

    qemu_irq_pulse(*d->cpu_request_exit);
}

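/* Reset the controller by issuing the master-reset command (register
   0x0d) through the normal write path, so a system reset and an
   explicit reset command behave identically. */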
static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base,
                      qemu_irq *cpu_request_exit)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    d->cpu_request_exit = cpu_request_exit;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

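/* After an incoming migration, kick the scheduler so that any transfer
   that was pending when the state was saved resumes on the target. */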
static int dma_post_load(void *opaque, int version_id)
{
    DMA_run();

    return 0;
}

static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = dma_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1,
                             vmstate_dma_regs, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

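/* Standard PC layout: the 8-bit controller lives at ports 0x00-0x0f
   with page registers at 0x80, the 16-bit controller at 0xc0-0xdf
   (dshift = 1, registers two bytes apart) with page registers at 0x88;
   when enabled, the high page registers sit at 0x480/0x488. */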
void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1, cpu_request_exit);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1, cpu_request_exit);
    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}