/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/ide/pci.h"
#include "ide-internal.h"
#include "trace.h"

#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
         IDE_RETRY_READ | IDE_RETRY_FLUSH)

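/*
 * Bus Master IDE background: each channel has a command register, a status
 * register and a 32-bit pointer to a PRD (Physical Region Descriptor) table
 * in guest memory. A PRD entry is two little-endian dwords: a physical base
 * address, and a byte count in the low 16 bits of the second dword (0 means
 * 64 KiB), with bit 31 flagging the last entry of the table.
 */
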
static uint64_t pci_ide_status_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return ((uint64_t)1 << (size * 8)) - 1;
    }
    return ide_status_read(bus, addr + 2);
}

static void pci_ide_ctrl_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return;
    }
    ide_ctrl_write(bus, addr + 2, data);
}

const MemoryRegionOps pci_ide_cmd_le_ops = {
    .read = pci_ide_status_read,
    .write = pci_ide_ctrl_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        return ide_ioport_read(bus, addr);
    } else if (addr == 0) {
        if (size == 2) {
            return ide_data_readw(bus, addr);
        } else {
            return ide_data_readl(bus, addr);
        }
    }
    return ((uint64_t)1 << (size * 8)) - 1;
}

static void pci_ide_data_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        ide_ioport_write(bus, addr, data);
    } else if (addr == 0) {
        if (size == 2) {
            ide_data_writew(bus, addr, data);
        } else {
            ide_data_writel(bus, addr, data);
        }
    }
}

const MemoryRegionOps pci_ide_data_le_ops = {
    .read = pci_ide_data_read,
    .write = pci_ide_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void pci_ide_update_mode(PCIIDEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint8_t mode = d->config[PCI_CLASS_PROG];

    /*
     * This function only configures the BARs/ioports for now: PCI IDE
     * controllers must manage their own IRQ routing
     */

    switch (mode & 0xf) {
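    /*
     * Lower nibble of the programming interface byte: bits 0/2 select
     * native mode for the primary/secondary channel, bits 1/3 mark the
     * mode as switchable. Only the two fully switchable combinations are
     * handled here: 0xa (both channels legacy) and 0xf (both native).
     */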
    case 0xa:
        /* Both channels legacy mode */

        /*
         * TODO: according to the PCI IDE specification the BARs should
         * be completely disabled, however Linux for the pegasos2
         * machine still accesses the BAR addresses after switching to
         * legacy mode. Hence we leave them active for now.
         */

        /* Clear interrupt pin */
        pci_config_set_interrupt_pin(d->config, 0);

        /* Add legacy IDE ports */
        if (!s->bus[0].portio_list.owner) {
            portio_list_init(&s->bus[0].portio_list, OBJECT(d),
                             ide_portio_list, &s->bus[0], "ide");
            portio_list_add(&s->bus[0].portio_list,
                            pci_address_space_io(d), 0x1f0);
        }

        if (!s->bus[0].portio2_list.owner) {
            portio_list_init(&s->bus[0].portio2_list, OBJECT(d),
                             ide_portio2_list, &s->bus[0], "ide");
            portio_list_add(&s->bus[0].portio2_list,
                            pci_address_space_io(d), 0x3f6);
        }

        if (!s->bus[1].portio_list.owner) {
            portio_list_init(&s->bus[1].portio_list, OBJECT(d),
                             ide_portio_list, &s->bus[1], "ide");
            portio_list_add(&s->bus[1].portio_list,
                            pci_address_space_io(d), 0x170);
        }

        if (!s->bus[1].portio2_list.owner) {
            portio_list_init(&s->bus[1].portio2_list, OBJECT(d),
                             ide_portio2_list, &s->bus[1], "ide");
            portio_list_add(&s->bus[1].portio2_list,
                            pci_address_space_io(d), 0x376);
        }
        break;

    case 0xf:
        /* Both channels native mode */

        /* Set interrupt pin */
        pci_config_set_interrupt_pin(d->config, 1);

        /* Remove legacy IDE ports */
        if (s->bus[0].portio_list.owner) {
            portio_list_del(&s->bus[0].portio_list);
            portio_list_destroy(&s->bus[0].portio_list);
        }

        if (s->bus[0].portio2_list.owner) {
            portio_list_del(&s->bus[0].portio2_list);
            portio_list_destroy(&s->bus[0].portio2_list);
        }

        if (s->bus[1].portio_list.owner) {
            portio_list_del(&s->bus[1].portio_list);
            portio_list_destroy(&s->bus[1].portio_list);
        }

        if (s->bus[1].portio2_list.owner) {
            portio_list_del(&s->bus[1].portio2_list);
            portio_list_destroy(&s->bus[1].portio2_list);
        }
        break;
    }
}

static IDEState *bmdma_active_if(BMDMAState *bmdma)
{
    assert(bmdma->bus->retry_unit != (uint8_t)-1);
    return bmdma->bus->ifs + bmdma->bus->retry_unit;
}

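/*
 * Latch the completion callback and rewind the PRD state. If the guest has
 * already set the start bit (BM_STATUS_DMAING is up), the transfer begins
 * immediately; otherwise it is kicked off later from bmdma_cmd_writeb()
 * when the start bit is written.
 */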
static void bmdma_start_dma(const IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / BDRV_SECTOR_SIZE) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
}

/* return 0 if buffer completed */
static int bmdma_rw_buf(const IDEDMA *dma, bool is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

static void bmdma_set_inactive(const IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

static void bmdma_restart_dma(const IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(const IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    trace_bmdma_reset();
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
    trace_bmdma_cmd_writeb(val);

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            ide_cancel_dma_sync(ide_bus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}

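/*
 * Status register write semantics: bits 5 and 6 (drive DMA capable flags)
 * are plain read/write, BM_STATUS_DMAING is read-only, and BM_STATUS_ERROR
 * and BM_STATUS_INT are write-one-to-clear, hence the ~val term below.
 */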
void bmdma_status_writeb(BMDMAState *bm, uint32_t val)
{
    bm->status = (val & 0x60) | (bm->status & BM_STATUS_DMAING)
                 | (bm->status & ~val & (BM_STATUS_ERROR | BM_STATUS_INT));
}

static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
    trace_bmdma_addr_read(data);
    return data;
}

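/*
 * The PRD table pointer can be written one, two or four bytes at a time;
 * each access updates only the addressed byte lanes. The final mask with
 * ~3 keeps the pointer dword-aligned, as the low two bits of the register
 * are reserved per the Bus Master IDE specification.
 */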
static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    trace_bmdma_addr_write(data);
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static int ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);

    return 0;
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

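/*
 * The fixed bus/unit tables below map hd_table slots 0..3 to primary
 * master, primary slave, secondary master and secondary slave, in that
 * order.
 */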
/* hd_table must contain 4 block drivers */
void pci_ide_create_devs(PCIDevice *dev)
{
    PCIIDEState *d = PCI_IDE(dev);
    DriveInfo *hd_table[2 * MAX_IDE_DEVS];
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    ide_drive_get(hd_table, ARRAY_SIZE(hd_table));
    for (i = 0; i < 4; i++) {
        if (hd_table[i]) {
            ide_bus_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
        }
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

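/*
 * bmdma_init() interposes on the channel's IRQ line: the original bus IRQ
 * is saved in bm->irq and the bus is rewired through bmdma_irq(), so that
 * BM_STATUS_INT is latched whenever the drive raises an interrupt.
 */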
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->bus = bus;
    bm->pci_dev = d;
}

static void pci_ide_init(Object *obj)
{
    PCIIDEState *d = PCI_IDE(obj);

    qdev_init_gpio_out_named(DEVICE(d), d->isa_irq, "isa-irq",
                             ARRAY_SIZE(d->isa_irq));
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .instance_init = pci_ide_init,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)