2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include <hw/i386/pc.h>
27 #include <hw/pci/pci.h>
28 #include <hw/isa/isa.h>
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/dma.h"
33 #include "hw/block/block.h"
34 #include "sysemu/block-backend.h"
36 #include <hw/ide/internal.h>
38 /* These values were based on a Seagate ST3500418AS but have been modified
39 to make more sense in QEMU */
40 static const int smart_attributes
[][12] = {
41 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
42 /* raw read error rate*/
43 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
45 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
46 /* start stop count */
47 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
48 /* remapped sectors */
49 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
51 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
52 /* power cycle count */
53 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54 /* airflow-temperature-celsius */
55 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
58 static int ide_handle_rw_error(IDEState
*s
, int error
, int op
);
59 static void ide_dummy_transfer_stop(IDEState
*s
);
61 static void padstr(char *str
, const char *src
, int len
)
64 for(i
= 0; i
< len
; i
++) {
73 static void put_le16(uint16_t *p
, unsigned int v
)
78 static void ide_identify_size(IDEState
*s
)
80 uint16_t *p
= (uint16_t *)s
->identify_data
;
81 put_le16(p
+ 60, s
->nb_sectors
);
82 put_le16(p
+ 61, s
->nb_sectors
>> 16);
83 put_le16(p
+ 100, s
->nb_sectors
);
84 put_le16(p
+ 101, s
->nb_sectors
>> 16);
85 put_le16(p
+ 102, s
->nb_sectors
>> 32);
86 put_le16(p
+ 103, s
->nb_sectors
>> 48);
89 static void ide_identify(IDEState
*s
)
93 IDEDevice
*dev
= s
->unit
? s
->bus
->slave
: s
->bus
->master
;
95 p
= (uint16_t *)s
->identify_data
;
96 if (s
->identify_set
) {
99 memset(p
, 0, sizeof(s
->identify_data
));
101 put_le16(p
+ 0, 0x0040);
102 put_le16(p
+ 1, s
->cylinders
);
103 put_le16(p
+ 3, s
->heads
);
104 put_le16(p
+ 4, 512 * s
->sectors
); /* XXX: retired, remove ? */
105 put_le16(p
+ 5, 512); /* XXX: retired, remove ? */
106 put_le16(p
+ 6, s
->sectors
);
107 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
108 put_le16(p
+ 20, 3); /* XXX: retired, remove ? */
109 put_le16(p
+ 21, 512); /* cache size in sectors */
110 put_le16(p
+ 22, 4); /* ecc bytes */
111 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
112 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
113 #if MAX_MULT_SECTORS > 1
114 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
116 put_le16(p
+ 48, 1); /* dword I/O */
117 put_le16(p
+ 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
118 put_le16(p
+ 51, 0x200); /* PIO transfer cycle */
119 put_le16(p
+ 52, 0x200); /* DMA transfer cycle */
120 put_le16(p
+ 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
121 put_le16(p
+ 54, s
->cylinders
);
122 put_le16(p
+ 55, s
->heads
);
123 put_le16(p
+ 56, s
->sectors
);
124 oldsize
= s
->cylinders
* s
->heads
* s
->sectors
;
125 put_le16(p
+ 57, oldsize
);
126 put_le16(p
+ 58, oldsize
>> 16);
128 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
129 /* *(p + 60) := nb_sectors -- see ide_identify_size */
130 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
131 put_le16(p
+ 62, 0x07); /* single word dma0-2 supported */
132 put_le16(p
+ 63, 0x07); /* mdma0-2 supported */
133 put_le16(p
+ 64, 0x03); /* pio3-4 supported */
134 put_le16(p
+ 65, 120);
135 put_le16(p
+ 66, 120);
136 put_le16(p
+ 67, 120);
137 put_le16(p
+ 68, 120);
138 if (dev
&& dev
->conf
.discard_granularity
) {
139 put_le16(p
+ 69, (1 << 14)); /* determinate TRIM behavior */
143 put_le16(p
+ 75, s
->ncq_queues
- 1);
145 put_le16(p
+ 76, (1 << 8));
148 put_le16(p
+ 80, 0xf0); /* ata3 -> ata6 supported */
149 put_le16(p
+ 81, 0x16); /* conforms to ata5 */
150 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
151 put_le16(p
+ 82, (1 << 14) | (1 << 5) | 1);
152 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
153 put_le16(p
+ 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
154 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
156 put_le16(p
+ 84, (1 << 14) | (1 << 8) | 0);
158 put_le16(p
+ 84, (1 << 14) | 0);
160 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
161 if (blk_enable_write_cache(s
->blk
)) {
162 put_le16(p
+ 85, (1 << 14) | (1 << 5) | 1);
164 put_le16(p
+ 85, (1 << 14) | 1);
166 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
167 put_le16(p
+ 86, (1 << 13) | (1 <<12) | (1 << 10));
168 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
170 put_le16(p
+ 87, (1 << 14) | (1 << 8) | 0);
172 put_le16(p
+ 87, (1 << 14) | 0);
174 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
175 put_le16(p
+ 93, 1 | (1 << 14) | 0x2000);
176 /* *(p + 100) := nb_sectors -- see ide_identify_size */
177 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
178 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
179 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
181 if (dev
&& dev
->conf
.physical_block_size
)
182 put_le16(p
+ 106, 0x6000 | get_physical_block_exp(&dev
->conf
));
184 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
185 put_le16(p
+ 108, s
->wwn
>> 48);
186 put_le16(p
+ 109, s
->wwn
>> 32);
187 put_le16(p
+ 110, s
->wwn
>> 16);
188 put_le16(p
+ 111, s
->wwn
);
190 if (dev
&& dev
->conf
.discard_granularity
) {
191 put_le16(p
+ 169, 1); /* TRIM support */
194 ide_identify_size(s
);
198 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
201 static void ide_atapi_identify(IDEState
*s
)
205 p
= (uint16_t *)s
->identify_data
;
206 if (s
->identify_set
) {
209 memset(p
, 0, sizeof(s
->identify_data
));
211 /* Removable CDROM, 50us response, 12 byte packets */
212 put_le16(p
+ 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
213 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
214 put_le16(p
+ 20, 3); /* buffer type */
215 put_le16(p
+ 21, 512); /* cache size in sectors */
216 put_le16(p
+ 22, 4); /* ecc bytes */
217 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
218 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
219 put_le16(p
+ 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
221 put_le16(p
+ 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
222 put_le16(p
+ 53, 7); /* words 64-70, 54-58, 88 valid */
223 put_le16(p
+ 62, 7); /* single word dma0-2 supported */
224 put_le16(p
+ 63, 7); /* mdma0-2 supported */
226 put_le16(p
+ 49, 1 << 9); /* LBA supported, no DMA */
227 put_le16(p
+ 53, 3); /* words 64-70, 54-58 valid */
228 put_le16(p
+ 63, 0x103); /* DMA modes XXX: may be incorrect */
230 put_le16(p
+ 64, 3); /* pio3-4 supported */
231 put_le16(p
+ 65, 0xb4); /* minimum DMA multiword tx cycle time */
232 put_le16(p
+ 66, 0xb4); /* recommended DMA multiword tx cycle time */
233 put_le16(p
+ 67, 0x12c); /* minimum PIO cycle time without flow control */
234 put_le16(p
+ 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 put_le16(p
+ 71, 30); /* in ns */
237 put_le16(p
+ 72, 30); /* in ns */
240 put_le16(p
+ 75, s
->ncq_queues
- 1);
242 put_le16(p
+ 76, (1 << 8));
245 put_le16(p
+ 80, 0x1e); /* support up to ATA/ATAPI-4 */
247 put_le16(p
+ 84, (1 << 8)); /* supports WWN for words 108-111 */
248 put_le16(p
+ 87, (1 << 8)); /* WWN enabled */
252 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
256 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
257 put_le16(p
+ 108, s
->wwn
>> 48);
258 put_le16(p
+ 109, s
->wwn
>> 32);
259 put_le16(p
+ 110, s
->wwn
>> 16);
260 put_le16(p
+ 111, s
->wwn
);
266 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
269 static void ide_cfata_identify_size(IDEState
*s
)
271 uint16_t *p
= (uint16_t *)s
->identify_data
;
272 put_le16(p
+ 7, s
->nb_sectors
>> 16); /* Sectors per card */
273 put_le16(p
+ 8, s
->nb_sectors
); /* Sectors per card */
274 put_le16(p
+ 60, s
->nb_sectors
); /* Total LBA sectors */
275 put_le16(p
+ 61, s
->nb_sectors
>> 16); /* Total LBA sectors */
278 static void ide_cfata_identify(IDEState
*s
)
283 p
= (uint16_t *)s
->identify_data
;
284 if (s
->identify_set
) {
287 memset(p
, 0, sizeof(s
->identify_data
));
289 cur_sec
= s
->cylinders
* s
->heads
* s
->sectors
;
291 put_le16(p
+ 0, 0x848a); /* CF Storage Card signature */
292 put_le16(p
+ 1, s
->cylinders
); /* Default cylinders */
293 put_le16(p
+ 3, s
->heads
); /* Default heads */
294 put_le16(p
+ 6, s
->sectors
); /* Default sectors per track */
295 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
296 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
297 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
298 put_le16(p
+ 22, 0x0004); /* ECC bytes */
299 padstr((char *) (p
+ 23), s
->version
, 8); /* Firmware Revision */
300 padstr((char *) (p
+ 27), s
->drive_model_str
, 40);/* Model number */
301 #if MAX_MULT_SECTORS > 1
302 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
304 put_le16(p
+ 47, 0x0000);
306 put_le16(p
+ 49, 0x0f00); /* Capabilities */
307 put_le16(p
+ 51, 0x0002); /* PIO cycle timing mode */
308 put_le16(p
+ 52, 0x0001); /* DMA cycle timing mode */
309 put_le16(p
+ 53, 0x0003); /* Translation params valid */
310 put_le16(p
+ 54, s
->cylinders
); /* Current cylinders */
311 put_le16(p
+ 55, s
->heads
); /* Current heads */
312 put_le16(p
+ 56, s
->sectors
); /* Current sectors */
313 put_le16(p
+ 57, cur_sec
); /* Current capacity */
314 put_le16(p
+ 58, cur_sec
>> 16); /* Current capacity */
315 if (s
->mult_sectors
) /* Multiple sector setting */
316 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
317 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
318 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
319 put_le16(p
+ 63, 0x0203); /* Multiword DMA capability */
320 put_le16(p
+ 64, 0x0001); /* Flow Control PIO support */
321 put_le16(p
+ 65, 0x0096); /* Min. Multiword DMA cycle */
322 put_le16(p
+ 66, 0x0096); /* Rec. Multiword DMA cycle */
323 put_le16(p
+ 68, 0x00b4); /* Min. PIO cycle time */
324 put_le16(p
+ 82, 0x400c); /* Command Set supported */
325 put_le16(p
+ 83, 0x7068); /* Command Set supported */
326 put_le16(p
+ 84, 0x4000); /* Features supported */
327 put_le16(p
+ 85, 0x000c); /* Command Set enabled */
328 put_le16(p
+ 86, 0x7044); /* Command Set enabled */
329 put_le16(p
+ 87, 0x4000); /* Features enabled */
330 put_le16(p
+ 91, 0x4060); /* Current APM level */
331 put_le16(p
+ 129, 0x0002); /* Current features option */
332 put_le16(p
+ 130, 0x0005); /* Reassigned sectors */
333 put_le16(p
+ 131, 0x0001); /* Initial power mode */
334 put_le16(p
+ 132, 0x0000); /* User signature */
335 put_le16(p
+ 160, 0x8100); /* Power requirement */
336 put_le16(p
+ 161, 0x8001); /* CF command set */
338 ide_cfata_identify_size(s
);
342 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
345 static void ide_set_signature(IDEState
*s
)
347 s
->select
&= 0xf0; /* clear head */
351 if (s
->drive_kind
== IDE_CD
) {
363 typedef struct TrimAIOCB
{
373 static void trim_aio_cancel(BlockAIOCB
*acb
)
375 TrimAIOCB
*iocb
= container_of(acb
, TrimAIOCB
, common
);
377 /* Exit the loop so ide_issue_trim_cb will not continue */
378 iocb
->j
= iocb
->qiov
->niov
- 1;
379 iocb
->i
= (iocb
->qiov
->iov
[iocb
->j
].iov_len
/ 8) - 1;
381 iocb
->ret
= -ECANCELED
;
384 blk_aio_cancel_async(iocb
->aiocb
);
389 static const AIOCBInfo trim_aiocb_info
= {
390 .aiocb_size
= sizeof(TrimAIOCB
),
391 .cancel_async
= trim_aio_cancel
,
394 static void ide_trim_bh_cb(void *opaque
)
396 TrimAIOCB
*iocb
= opaque
;
398 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
400 qemu_bh_delete(iocb
->bh
);
402 qemu_aio_unref(iocb
);
405 static void ide_issue_trim_cb(void *opaque
, int ret
)
407 TrimAIOCB
*iocb
= opaque
;
409 while (iocb
->j
< iocb
->qiov
->niov
) {
411 while (++iocb
->i
< iocb
->qiov
->iov
[j
].iov_len
/ 8) {
413 uint64_t *buffer
= iocb
->qiov
->iov
[j
].iov_base
;
415 /* 6-byte LBA + 2-byte range per entry */
416 uint64_t entry
= le64_to_cpu(buffer
[i
]);
417 uint64_t sector
= entry
& 0x0000ffffffffffffULL
;
418 uint16_t count
= entry
>> 48;
424 /* Got an entry! Submit and exit. */
425 iocb
->aiocb
= blk_aio_discard(iocb
->blk
, sector
, count
,
426 ide_issue_trim_cb
, opaque
);
439 qemu_bh_schedule(iocb
->bh
);
443 BlockAIOCB
*ide_issue_trim(BlockBackend
*blk
,
444 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
445 BlockCompletionFunc
*cb
, void *opaque
)
449 iocb
= blk_aio_get(&trim_aiocb_info
, blk
, cb
, opaque
);
451 iocb
->bh
= qemu_bh_new(ide_trim_bh_cb
, iocb
);
456 ide_issue_trim_cb(iocb
, 0);
457 return &iocb
->common
;
460 static inline void ide_abort_command(IDEState
*s
)
462 ide_transfer_stop(s
);
463 s
->status
= READY_STAT
| ERR_STAT
;
467 /* prepare data transfer and tell what to do after */
468 void ide_transfer_start(IDEState
*s
, uint8_t *buf
, int size
,
469 EndTransferFunc
*end_transfer_func
)
471 s
->end_transfer_func
= end_transfer_func
;
473 s
->data_end
= buf
+ size
;
474 if (!(s
->status
& ERR_STAT
)) {
475 s
->status
|= DRQ_STAT
;
477 if (s
->bus
->dma
->ops
->start_transfer
) {
478 s
->bus
->dma
->ops
->start_transfer(s
->bus
->dma
);
482 static void ide_cmd_done(IDEState
*s
)
484 if (s
->bus
->dma
->ops
->cmd_done
) {
485 s
->bus
->dma
->ops
->cmd_done(s
->bus
->dma
);
489 void ide_transfer_stop(IDEState
*s
)
491 s
->end_transfer_func
= ide_transfer_stop
;
492 s
->data_ptr
= s
->io_buffer
;
493 s
->data_end
= s
->io_buffer
;
494 s
->status
&= ~DRQ_STAT
;
498 int64_t ide_get_sector(IDEState
*s
)
501 if (s
->select
& 0x40) {
504 sector_num
= ((s
->select
& 0x0f) << 24) | (s
->hcyl
<< 16) |
505 (s
->lcyl
<< 8) | s
->sector
;
507 sector_num
= ((int64_t)s
->hob_hcyl
<< 40) |
508 ((int64_t) s
->hob_lcyl
<< 32) |
509 ((int64_t) s
->hob_sector
<< 24) |
510 ((int64_t) s
->hcyl
<< 16) |
511 ((int64_t) s
->lcyl
<< 8) | s
->sector
;
514 sector_num
= ((s
->hcyl
<< 8) | s
->lcyl
) * s
->heads
* s
->sectors
+
515 (s
->select
& 0x0f) * s
->sectors
+ (s
->sector
- 1);
520 void ide_set_sector(IDEState
*s
, int64_t sector_num
)
523 if (s
->select
& 0x40) {
525 s
->select
= (s
->select
& 0xf0) | (sector_num
>> 24);
526 s
->hcyl
= (sector_num
>> 16);
527 s
->lcyl
= (sector_num
>> 8);
528 s
->sector
= (sector_num
);
530 s
->sector
= sector_num
;
531 s
->lcyl
= sector_num
>> 8;
532 s
->hcyl
= sector_num
>> 16;
533 s
->hob_sector
= sector_num
>> 24;
534 s
->hob_lcyl
= sector_num
>> 32;
535 s
->hob_hcyl
= sector_num
>> 40;
538 cyl
= sector_num
/ (s
->heads
* s
->sectors
);
539 r
= sector_num
% (s
->heads
* s
->sectors
);
542 s
->select
= (s
->select
& 0xf0) | ((r
/ s
->sectors
) & 0x0f);
543 s
->sector
= (r
% s
->sectors
) + 1;
547 static void ide_rw_error(IDEState
*s
) {
548 ide_abort_command(s
);
552 static bool ide_sect_range_ok(IDEState
*s
,
553 uint64_t sector
, uint64_t nb_sectors
)
555 uint64_t total_sectors
;
557 blk_get_geometry(s
->blk
, &total_sectors
);
558 if (sector
> total_sectors
|| nb_sectors
> total_sectors
- sector
) {
564 static void ide_sector_read(IDEState
*s
);
566 static void ide_sector_read_cb(void *opaque
, int ret
)
568 IDEState
*s
= opaque
;
572 s
->status
&= ~BUSY_STAT
;
574 if (ret
== -ECANCELED
) {
577 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
579 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
|
586 if (n
> s
->req_nb_sectors
) {
587 n
= s
->req_nb_sectors
;
590 ide_set_sector(s
, ide_get_sector(s
) + n
);
592 /* Allow the guest to read the io_buffer */
593 ide_transfer_start(s
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
, ide_sector_read
);
594 s
->io_buffer_offset
+= 512 * n
;
598 static void ide_sector_read(IDEState
*s
)
603 s
->status
= READY_STAT
| SEEK_STAT
;
604 s
->error
= 0; /* not needed by IDE spec, but needed by Windows */
605 sector_num
= ide_get_sector(s
);
609 ide_transfer_stop(s
);
613 s
->status
|= BUSY_STAT
;
615 if (n
> s
->req_nb_sectors
) {
616 n
= s
->req_nb_sectors
;
619 #if defined(DEBUG_IDE)
620 printf("sector=%" PRId64
"\n", sector_num
);
623 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
628 s
->iov
.iov_base
= s
->io_buffer
;
629 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
630 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
632 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
633 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
634 s
->pio_aiocb
= blk_aio_readv(s
->blk
, sector_num
, &s
->qiov
, n
,
635 ide_sector_read_cb
, s
);
638 static void dma_buf_commit(IDEState
*s
, uint32_t tx_bytes
)
640 if (s
->bus
->dma
->ops
->commit_buf
) {
641 s
->bus
->dma
->ops
->commit_buf(s
->bus
->dma
, tx_bytes
);
643 qemu_sglist_destroy(&s
->sg
);
646 void ide_set_inactive(IDEState
*s
, bool more
)
648 s
->bus
->dma
->aiocb
= NULL
;
649 s
->bus
->retry_unit
= -1;
650 s
->bus
->retry_sector_num
= 0;
651 s
->bus
->retry_nsector
= 0;
652 if (s
->bus
->dma
->ops
->set_inactive
) {
653 s
->bus
->dma
->ops
->set_inactive(s
->bus
->dma
, more
);
658 void ide_dma_error(IDEState
*s
)
660 dma_buf_commit(s
, 0);
661 ide_abort_command(s
);
662 ide_set_inactive(s
, false);
666 static int ide_handle_rw_error(IDEState
*s
, int error
, int op
)
668 bool is_read
= (op
& IDE_RETRY_READ
) != 0;
669 BlockErrorAction action
= blk_get_error_action(s
->blk
, is_read
, error
);
671 if (action
== BLOCK_ERROR_ACTION_STOP
) {
672 assert(s
->bus
->retry_unit
== s
->unit
);
673 s
->bus
->error_status
= op
;
674 } else if (action
== BLOCK_ERROR_ACTION_REPORT
) {
675 if (op
& IDE_RETRY_DMA
) {
681 blk_error_action(s
->blk
, action
, is_read
, error
);
682 return action
!= BLOCK_ERROR_ACTION_IGNORE
;
685 static void ide_dma_cb(void *opaque
, int ret
)
687 IDEState
*s
= opaque
;
690 bool stay_active
= false;
692 if (ret
== -ECANCELED
) {
696 int op
= IDE_RETRY_DMA
;
698 if (s
->dma_cmd
== IDE_DMA_READ
)
699 op
|= IDE_RETRY_READ
;
700 else if (s
->dma_cmd
== IDE_DMA_TRIM
)
701 op
|= IDE_RETRY_TRIM
;
703 if (ide_handle_rw_error(s
, -ret
, op
)) {
708 n
= s
->io_buffer_size
>> 9;
709 if (n
> s
->nsector
) {
710 /* The PRDs were longer than needed for this request. Shorten them so
711 * we don't get a negative remainder. The Active bit must remain set
712 * after the request completes. */
717 sector_num
= ide_get_sector(s
);
719 assert(s
->io_buffer_size
== s
->sg
.size
);
720 dma_buf_commit(s
, s
->io_buffer_size
);
722 ide_set_sector(s
, sector_num
);
726 /* end of transfer ? */
727 if (s
->nsector
== 0) {
728 s
->status
= READY_STAT
| SEEK_STAT
;
733 /* launch next transfer */
735 s
->io_buffer_index
= 0;
736 s
->io_buffer_size
= n
* 512;
737 if (s
->bus
->dma
->ops
->prepare_buf(s
->bus
->dma
, ide_cmd_is_read(s
)) < 512) {
738 /* The PRDs were too short. Reset the Active bit, but don't raise an
740 s
->status
= READY_STAT
| SEEK_STAT
;
741 dma_buf_commit(s
, 0);
746 printf("ide_dma_cb: sector_num=%" PRId64
" n=%d, cmd_cmd=%d\n",
747 sector_num
, n
, s
->dma_cmd
);
750 if ((s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) &&
751 !ide_sect_range_ok(s
, sector_num
, n
)) {
756 switch (s
->dma_cmd
) {
758 s
->bus
->dma
->aiocb
= dma_blk_read(s
->blk
, &s
->sg
, sector_num
,
762 s
->bus
->dma
->aiocb
= dma_blk_write(s
->blk
, &s
->sg
, sector_num
,
766 s
->bus
->dma
->aiocb
= dma_blk_io(s
->blk
, &s
->sg
, sector_num
,
767 ide_issue_trim
, ide_dma_cb
, s
,
768 DMA_DIRECTION_TO_DEVICE
);
774 if (s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) {
775 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
777 ide_set_inactive(s
, stay_active
);
780 static void ide_sector_start_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
782 s
->status
= READY_STAT
| SEEK_STAT
| DRQ_STAT
| BUSY_STAT
;
783 s
->io_buffer_size
= 0;
784 s
->dma_cmd
= dma_cmd
;
788 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
789 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
792 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
793 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
799 ide_start_dma(s
, ide_dma_cb
);
802 void ide_start_dma(IDEState
*s
, BlockCompletionFunc
*cb
)
804 s
->io_buffer_index
= 0;
805 s
->bus
->retry_unit
= s
->unit
;
806 s
->bus
->retry_sector_num
= ide_get_sector(s
);
807 s
->bus
->retry_nsector
= s
->nsector
;
808 if (s
->bus
->dma
->ops
->start_dma
) {
809 s
->bus
->dma
->ops
->start_dma(s
->bus
->dma
, s
, cb
);
813 static void ide_sector_write(IDEState
*s
);
815 static void ide_sector_write_timer_cb(void *opaque
)
817 IDEState
*s
= opaque
;
821 static void ide_sector_write_cb(void *opaque
, int ret
)
823 IDEState
*s
= opaque
;
826 if (ret
== -ECANCELED
) {
829 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
832 s
->status
&= ~BUSY_STAT
;
835 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
)) {
841 if (n
> s
->req_nb_sectors
) {
842 n
= s
->req_nb_sectors
;
845 s
->io_buffer_offset
+= 512 * n
;
847 ide_set_sector(s
, ide_get_sector(s
) + n
);
848 if (s
->nsector
== 0) {
849 /* no more sectors to write */
850 ide_transfer_stop(s
);
853 if (n1
> s
->req_nb_sectors
) {
854 n1
= s
->req_nb_sectors
;
856 ide_transfer_start(s
, s
->io_buffer
, n1
* BDRV_SECTOR_SIZE
,
860 if (win2k_install_hack
&& ((++s
->irq_count
% 16) == 0)) {
861 /* It seems there is a bug in the Windows 2000 installer HDD
862 IDE driver which fills the disk with empty logs when the
863 IDE write IRQ comes too early. This hack tries to correct
864 that at the expense of slower write performances. Use this
865 option _only_ to install Windows 2000. You must disable it
867 timer_mod(s
->sector_write_timer
,
868 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + (get_ticks_per_sec() / 1000));
874 static void ide_sector_write(IDEState
*s
)
879 s
->status
= READY_STAT
| SEEK_STAT
| BUSY_STAT
;
880 sector_num
= ide_get_sector(s
);
881 #if defined(DEBUG_IDE)
882 printf("sector=%" PRId64
"\n", sector_num
);
885 if (n
> s
->req_nb_sectors
) {
886 n
= s
->req_nb_sectors
;
889 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
894 s
->iov
.iov_base
= s
->io_buffer
;
895 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
896 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
898 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
899 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
900 s
->pio_aiocb
= blk_aio_writev(s
->blk
, sector_num
, &s
->qiov
, n
,
901 ide_sector_write_cb
, s
);
904 static void ide_flush_cb(void *opaque
, int ret
)
906 IDEState
*s
= opaque
;
910 if (ret
== -ECANCELED
) {
914 /* XXX: What sector number to set here? */
915 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_FLUSH
)) {
921 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
923 s
->status
= READY_STAT
| SEEK_STAT
;
928 static void ide_flush_cache(IDEState
*s
)
930 if (s
->blk
== NULL
) {
935 s
->status
|= BUSY_STAT
;
936 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
, 0, BLOCK_ACCT_FLUSH
);
937 s
->pio_aiocb
= blk_aio_flush(s
->blk
, ide_flush_cb
, s
);
940 static void ide_cfata_metadata_inquiry(IDEState
*s
)
945 p
= (uint16_t *) s
->io_buffer
;
947 spd
= ((s
->mdata_size
- 1) >> 9) + 1;
949 put_le16(p
+ 0, 0x0001); /* Data format revision */
950 put_le16(p
+ 1, 0x0000); /* Media property: silicon */
951 put_le16(p
+ 2, s
->media_changed
); /* Media status */
952 put_le16(p
+ 3, s
->mdata_size
& 0xffff); /* Capacity in bytes (low) */
953 put_le16(p
+ 4, s
->mdata_size
>> 16); /* Capacity in bytes (high) */
954 put_le16(p
+ 5, spd
& 0xffff); /* Sectors per device (low) */
955 put_le16(p
+ 6, spd
>> 16); /* Sectors per device (high) */
958 static void ide_cfata_metadata_read(IDEState
*s
)
962 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
963 s
->status
= ERR_STAT
;
968 p
= (uint16_t *) s
->io_buffer
;
971 put_le16(p
+ 0, s
->media_changed
); /* Media status */
972 memcpy(p
+ 1, s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
973 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
974 s
->nsector
<< 9), 0x200 - 2));
977 static void ide_cfata_metadata_write(IDEState
*s
)
979 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
980 s
->status
= ERR_STAT
;
985 s
->media_changed
= 0;
987 memcpy(s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
989 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
990 s
->nsector
<< 9), 0x200 - 2));
993 /* called when the inserted state of the media has changed */
994 static void ide_cd_change_cb(void *opaque
, bool load
)
996 IDEState
*s
= opaque
;
999 s
->tray_open
= !load
;
1000 blk_get_geometry(s
->blk
, &nb_sectors
);
1001 s
->nb_sectors
= nb_sectors
;
1004 * First indicate to the guest that a CD has been removed. That's
1005 * done on the next command the guest sends us.
1007 * Then we set UNIT_ATTENTION, by which the guest will
1008 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1010 s
->cdrom_changed
= 1;
1011 s
->events
.new_media
= true;
1012 s
->events
.eject_request
= false;
1013 ide_set_irq(s
->bus
);
1016 static void ide_cd_eject_request_cb(void *opaque
, bool force
)
1018 IDEState
*s
= opaque
;
1020 s
->events
.eject_request
= true;
1022 s
->tray_locked
= false;
1024 ide_set_irq(s
->bus
);
1027 static void ide_cmd_lba48_transform(IDEState
*s
, int lba48
)
1031 /* handle the 'magic' 0 nsector count conversion here. to avoid
1032 * fiddling with the rest of the read logic, we just store the
1033 * full sector count in ->nsector and ignore ->hob_nsector from now
1039 if (!s
->nsector
&& !s
->hob_nsector
)
1042 int lo
= s
->nsector
;
1043 int hi
= s
->hob_nsector
;
1045 s
->nsector
= (hi
<< 8) | lo
;
1050 static void ide_clear_hob(IDEBus
*bus
)
1052 /* any write clears HOB high bit of device control register */
1053 bus
->ifs
[0].select
&= ~(1 << 7);
1054 bus
->ifs
[1].select
&= ~(1 << 7);
1057 void ide_ioport_write(void *opaque
, uint32_t addr
, uint32_t val
)
1059 IDEBus
*bus
= opaque
;
1062 printf("IDE: write addr=0x%x val=0x%02x\n", addr
, val
);
1067 /* ignore writes to command block while busy with previous command */
1068 if (addr
!= 7 && (idebus_active_if(bus
)->status
& (BUSY_STAT
|DRQ_STAT
)))
1076 /* NOTE: data is written to the two drives */
1077 bus
->ifs
[0].hob_feature
= bus
->ifs
[0].feature
;
1078 bus
->ifs
[1].hob_feature
= bus
->ifs
[1].feature
;
1079 bus
->ifs
[0].feature
= val
;
1080 bus
->ifs
[1].feature
= val
;
1084 bus
->ifs
[0].hob_nsector
= bus
->ifs
[0].nsector
;
1085 bus
->ifs
[1].hob_nsector
= bus
->ifs
[1].nsector
;
1086 bus
->ifs
[0].nsector
= val
;
1087 bus
->ifs
[1].nsector
= val
;
1091 bus
->ifs
[0].hob_sector
= bus
->ifs
[0].sector
;
1092 bus
->ifs
[1].hob_sector
= bus
->ifs
[1].sector
;
1093 bus
->ifs
[0].sector
= val
;
1094 bus
->ifs
[1].sector
= val
;
1098 bus
->ifs
[0].hob_lcyl
= bus
->ifs
[0].lcyl
;
1099 bus
->ifs
[1].hob_lcyl
= bus
->ifs
[1].lcyl
;
1100 bus
->ifs
[0].lcyl
= val
;
1101 bus
->ifs
[1].lcyl
= val
;
1105 bus
->ifs
[0].hob_hcyl
= bus
->ifs
[0].hcyl
;
1106 bus
->ifs
[1].hob_hcyl
= bus
->ifs
[1].hcyl
;
1107 bus
->ifs
[0].hcyl
= val
;
1108 bus
->ifs
[1].hcyl
= val
;
1111 /* FIXME: HOB readback uses bit 7 */
1112 bus
->ifs
[0].select
= (val
& ~0x10) | 0xa0;
1113 bus
->ifs
[1].select
= (val
| 0x10) | 0xa0;
1115 bus
->unit
= (val
>> 4) & 1;
1120 ide_exec_cmd(bus
, val
);
1125 static bool cmd_nop(IDEState
*s
, uint8_t cmd
)
1130 static bool cmd_data_set_management(IDEState
*s
, uint8_t cmd
)
1132 switch (s
->feature
) {
1135 ide_sector_start_dma(s
, IDE_DMA_TRIM
);
1141 ide_abort_command(s
);
1145 static bool cmd_identify(IDEState
*s
, uint8_t cmd
)
1147 if (s
->blk
&& s
->drive_kind
!= IDE_CD
) {
1148 if (s
->drive_kind
!= IDE_CFATA
) {
1151 ide_cfata_identify(s
);
1153 s
->status
= READY_STAT
| SEEK_STAT
;
1154 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1155 ide_set_irq(s
->bus
);
1158 if (s
->drive_kind
== IDE_CD
) {
1159 ide_set_signature(s
);
1161 ide_abort_command(s
);
1167 static bool cmd_verify(IDEState
*s
, uint8_t cmd
)
1169 bool lba48
= (cmd
== WIN_VERIFY_EXT
);
1171 /* do sector number check ? */
1172 ide_cmd_lba48_transform(s
, lba48
);
1177 static bool cmd_set_multiple_mode(IDEState
*s
, uint8_t cmd
)
1179 if (s
->drive_kind
== IDE_CFATA
&& s
->nsector
== 0) {
1180 /* Disable Read and Write Multiple */
1181 s
->mult_sectors
= 0;
1182 } else if ((s
->nsector
& 0xff) != 0 &&
1183 ((s
->nsector
& 0xff) > MAX_MULT_SECTORS
||
1184 (s
->nsector
& (s
->nsector
- 1)) != 0)) {
1185 ide_abort_command(s
);
1187 s
->mult_sectors
= s
->nsector
& 0xff;
1193 static bool cmd_read_multiple(IDEState
*s
, uint8_t cmd
)
1195 bool lba48
= (cmd
== WIN_MULTREAD_EXT
);
1197 if (!s
->blk
|| !s
->mult_sectors
) {
1198 ide_abort_command(s
);
1202 ide_cmd_lba48_transform(s
, lba48
);
1203 s
->req_nb_sectors
= s
->mult_sectors
;
1208 static bool cmd_write_multiple(IDEState
*s
, uint8_t cmd
)
1210 bool lba48
= (cmd
== WIN_MULTWRITE_EXT
);
1213 if (!s
->blk
|| !s
->mult_sectors
) {
1214 ide_abort_command(s
);
1218 ide_cmd_lba48_transform(s
, lba48
);
1220 s
->req_nb_sectors
= s
->mult_sectors
;
1221 n
= MIN(s
->nsector
, s
->req_nb_sectors
);
1223 s
->status
= SEEK_STAT
| READY_STAT
;
1224 ide_transfer_start(s
, s
->io_buffer
, 512 * n
, ide_sector_write
);
1226 s
->media_changed
= 1;
1231 static bool cmd_read_pio(IDEState
*s
, uint8_t cmd
)
1233 bool lba48
= (cmd
== WIN_READ_EXT
);
1235 if (s
->drive_kind
== IDE_CD
) {
1236 ide_set_signature(s
); /* odd, but ATA4 8.27.5.2 requires it */
1237 ide_abort_command(s
);
1242 ide_abort_command(s
);
1246 ide_cmd_lba48_transform(s
, lba48
);
1247 s
->req_nb_sectors
= 1;
1253 static bool cmd_write_pio(IDEState
*s
, uint8_t cmd
)
1255 bool lba48
= (cmd
== WIN_WRITE_EXT
);
1258 ide_abort_command(s
);
1262 ide_cmd_lba48_transform(s
, lba48
);
1264 s
->req_nb_sectors
= 1;
1265 s
->status
= SEEK_STAT
| READY_STAT
;
1266 ide_transfer_start(s
, s
->io_buffer
, 512, ide_sector_write
);
1268 s
->media_changed
= 1;
1273 static bool cmd_read_dma(IDEState
*s
, uint8_t cmd
)
1275 bool lba48
= (cmd
== WIN_READDMA_EXT
);
1278 ide_abort_command(s
);
1282 ide_cmd_lba48_transform(s
, lba48
);
1283 ide_sector_start_dma(s
, IDE_DMA_READ
);
1288 static bool cmd_write_dma(IDEState
*s
, uint8_t cmd
)
1290 bool lba48
= (cmd
== WIN_WRITEDMA_EXT
);
1293 ide_abort_command(s
);
1297 ide_cmd_lba48_transform(s
, lba48
);
1298 ide_sector_start_dma(s
, IDE_DMA_WRITE
);
1300 s
->media_changed
= 1;
1305 static bool cmd_flush_cache(IDEState
*s
, uint8_t cmd
)
1311 static bool cmd_seek(IDEState
*s
, uint8_t cmd
)
1313 /* XXX: Check that seek is within bounds */
1317 static bool cmd_read_native_max(IDEState
*s
, uint8_t cmd
)
1319 bool lba48
= (cmd
== WIN_READ_NATIVE_MAX_EXT
);
1321 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1322 if (s
->nb_sectors
== 0) {
1323 ide_abort_command(s
);
1327 ide_cmd_lba48_transform(s
, lba48
);
1328 ide_set_sector(s
, s
->nb_sectors
- 1);
1333 static bool cmd_check_power_mode(IDEState
*s
, uint8_t cmd
)
1335 s
->nsector
= 0xff; /* device active or idle */
1339 static bool cmd_set_features(IDEState
*s
, uint8_t cmd
)
1341 uint16_t *identify_data
;
1344 ide_abort_command(s
);
1348 /* XXX: valid for CDROM ? */
1349 switch (s
->feature
) {
1350 case 0x02: /* write cache enable */
1351 blk_set_enable_write_cache(s
->blk
, true);
1352 identify_data
= (uint16_t *)s
->identify_data
;
1353 put_le16(identify_data
+ 85, (1 << 14) | (1 << 5) | 1);
1355 case 0x82: /* write cache disable */
1356 blk_set_enable_write_cache(s
->blk
, false);
1357 identify_data
= (uint16_t *)s
->identify_data
;
1358 put_le16(identify_data
+ 85, (1 << 14) | 1);
1361 case 0xcc: /* reverting to power-on defaults enable */
1362 case 0x66: /* reverting to power-on defaults disable */
1363 case 0xaa: /* read look-ahead enable */
1364 case 0x55: /* read look-ahead disable */
1365 case 0x05: /* set advanced power management mode */
1366 case 0x85: /* disable advanced power management mode */
1367 case 0x69: /* NOP */
1368 case 0x67: /* NOP */
1369 case 0x96: /* NOP */
1370 case 0x9a: /* NOP */
1371 case 0x42: /* enable Automatic Acoustic Mode */
1372 case 0xc2: /* disable Automatic Acoustic Mode */
1374 case 0x03: /* set transfer mode */
1376 uint8_t val
= s
->nsector
& 0x07;
1377 identify_data
= (uint16_t *)s
->identify_data
;
1379 switch (s
->nsector
>> 3) {
1380 case 0x00: /* pio default */
1381 case 0x01: /* pio mode */
1382 put_le16(identify_data
+ 62, 0x07);
1383 put_le16(identify_data
+ 63, 0x07);
1384 put_le16(identify_data
+ 88, 0x3f);
1386 case 0x02: /* sigle word dma mode*/
1387 put_le16(identify_data
+ 62, 0x07 | (1 << (val
+ 8)));
1388 put_le16(identify_data
+ 63, 0x07);
1389 put_le16(identify_data
+ 88, 0x3f);
1391 case 0x04: /* mdma mode */
1392 put_le16(identify_data
+ 62, 0x07);
1393 put_le16(identify_data
+ 63, 0x07 | (1 << (val
+ 8)));
1394 put_le16(identify_data
+ 88, 0x3f);
1396 case 0x08: /* udma mode */
1397 put_le16(identify_data
+ 62, 0x07);
1398 put_le16(identify_data
+ 63, 0x07);
1399 put_le16(identify_data
+ 88, 0x3f | (1 << (val
+ 8)));
1409 ide_abort_command(s
);
1414 /*** ATAPI commands ***/
1416 static bool cmd_identify_packet(IDEState
*s
, uint8_t cmd
)
1418 ide_atapi_identify(s
);
1419 s
->status
= READY_STAT
| SEEK_STAT
;
1420 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1421 ide_set_irq(s
->bus
);
1425 static bool cmd_exec_dev_diagnostic(IDEState
*s
, uint8_t cmd
)
1427 ide_set_signature(s
);
1429 if (s
->drive_kind
== IDE_CD
) {
1430 s
->status
= 0; /* ATAPI spec (v6) section 9.10 defines packet
1431 * devices to return a clear status register
1432 * with READY_STAT *not* set. */
1435 s
->status
= READY_STAT
| SEEK_STAT
;
1436 /* The bits of the error register are not as usual for this command!
1437 * They are part of the regular output (this is why ERR_STAT isn't set)
1438 * Device 0 passed, Device 1 passed or not present. */
1440 ide_set_irq(s
->bus
);
1446 static bool cmd_device_reset(IDEState
*s
, uint8_t cmd
)
1448 ide_set_signature(s
);
1449 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1455 static bool cmd_packet(IDEState
*s
, uint8_t cmd
)
1457 /* overlapping commands not supported */
1458 if (s
->feature
& 0x02) {
1459 ide_abort_command(s
);
1463 s
->status
= READY_STAT
| SEEK_STAT
;
1464 s
->atapi_dma
= s
->feature
& 1;
1466 ide_transfer_start(s
, s
->io_buffer
, ATAPI_PACKET_SIZE
,
1472 /*** CF-ATA commands ***/
1474 static bool cmd_cfa_req_ext_error_code(IDEState
*s
, uint8_t cmd
)
1476 s
->error
= 0x09; /* miscellaneous error */
1477 s
->status
= READY_STAT
| SEEK_STAT
;
1478 ide_set_irq(s
->bus
);
1483 static bool cmd_cfa_erase_sectors(IDEState
*s
, uint8_t cmd
)
1485 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1486 * required for Windows 8 to work with AHCI */
1488 if (cmd
== CFA_WEAR_LEVEL
) {
1492 if (cmd
== CFA_ERASE_SECTORS
) {
1493 s
->media_changed
= 1;
1499 static bool cmd_cfa_translate_sector(IDEState
*s
, uint8_t cmd
)
1501 s
->status
= READY_STAT
| SEEK_STAT
;
1503 memset(s
->io_buffer
, 0, 0x200);
1504 s
->io_buffer
[0x00] = s
->hcyl
; /* Cyl MSB */
1505 s
->io_buffer
[0x01] = s
->lcyl
; /* Cyl LSB */
1506 s
->io_buffer
[0x02] = s
->select
; /* Head */
1507 s
->io_buffer
[0x03] = s
->sector
; /* Sector */
1508 s
->io_buffer
[0x04] = ide_get_sector(s
) >> 16; /* LBA MSB */
1509 s
->io_buffer
[0x05] = ide_get_sector(s
) >> 8; /* LBA */
1510 s
->io_buffer
[0x06] = ide_get_sector(s
) >> 0; /* LBA LSB */
1511 s
->io_buffer
[0x13] = 0x00; /* Erase flag */
1512 s
->io_buffer
[0x18] = 0x00; /* Hot count */
1513 s
->io_buffer
[0x19] = 0x00; /* Hot count */
1514 s
->io_buffer
[0x1a] = 0x01; /* Hot count */
1516 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1517 ide_set_irq(s
->bus
);
1522 static bool cmd_cfa_access_metadata_storage(IDEState
*s
, uint8_t cmd
)
1524 switch (s
->feature
) {
1525 case 0x02: /* Inquiry Metadata Storage */
1526 ide_cfata_metadata_inquiry(s
);
1528 case 0x03: /* Read Metadata Storage */
1529 ide_cfata_metadata_read(s
);
1531 case 0x04: /* Write Metadata Storage */
1532 ide_cfata_metadata_write(s
);
1535 ide_abort_command(s
);
1539 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1540 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1541 ide_set_irq(s
->bus
);
1546 static bool cmd_ibm_sense_condition(IDEState
*s
, uint8_t cmd
)
1548 switch (s
->feature
) {
1549 case 0x01: /* sense temperature in device */
1550 s
->nsector
= 0x50; /* +20 C */
1553 ide_abort_command(s
);
1561 /*** SMART commands ***/
1563 static bool cmd_smart(IDEState
*s
, uint8_t cmd
)
1567 if (s
->hcyl
!= 0xc2 || s
->lcyl
!= 0x4f) {
1571 if (!s
->smart_enabled
&& s
->feature
!= SMART_ENABLE
) {
1575 switch (s
->feature
) {
1577 s
->smart_enabled
= 0;
1581 s
->smart_enabled
= 1;
1584 case SMART_ATTR_AUTOSAVE
:
1585 switch (s
->sector
) {
1587 s
->smart_autosave
= 0;
1590 s
->smart_autosave
= 1;
1598 if (!s
->smart_errors
) {
1607 case SMART_READ_THRESH
:
1608 memset(s
->io_buffer
, 0, 0x200);
1609 s
->io_buffer
[0] = 0x01; /* smart struct version */
1611 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1612 s
->io_buffer
[2 + 0 + (n
* 12)] = smart_attributes
[n
][0];
1613 s
->io_buffer
[2 + 1 + (n
* 12)] = smart_attributes
[n
][11];
1617 for (n
= 0; n
< 511; n
++) {
1618 s
->io_buffer
[511] += s
->io_buffer
[n
];
1620 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1622 s
->status
= READY_STAT
| SEEK_STAT
;
1623 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1624 ide_set_irq(s
->bus
);
1627 case SMART_READ_DATA
:
1628 memset(s
->io_buffer
, 0, 0x200);
1629 s
->io_buffer
[0] = 0x01; /* smart struct version */
1631 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1633 for (i
= 0; i
< 11; i
++) {
1634 s
->io_buffer
[2 + i
+ (n
* 12)] = smart_attributes
[n
][i
];
1638 s
->io_buffer
[362] = 0x02 | (s
->smart_autosave
? 0x80 : 0x00);
1639 if (s
->smart_selftest_count
== 0) {
1640 s
->io_buffer
[363] = 0;
1643 s
->smart_selftest_data
[3 +
1644 (s
->smart_selftest_count
- 1) *
1647 s
->io_buffer
[364] = 0x20;
1648 s
->io_buffer
[365] = 0x01;
1649 /* offline data collection capacity: execute + self-test*/
1650 s
->io_buffer
[367] = (1 << 4 | 1 << 3 | 1);
1651 s
->io_buffer
[368] = 0x03; /* smart capability (1) */
1652 s
->io_buffer
[369] = 0x00; /* smart capability (2) */
1653 s
->io_buffer
[370] = 0x01; /* error logging supported */
1654 s
->io_buffer
[372] = 0x02; /* minutes for poll short test */
1655 s
->io_buffer
[373] = 0x36; /* minutes for poll ext test */
1656 s
->io_buffer
[374] = 0x01; /* minutes for poll conveyance */
1658 for (n
= 0; n
< 511; n
++) {
1659 s
->io_buffer
[511] += s
->io_buffer
[n
];
1661 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1663 s
->status
= READY_STAT
| SEEK_STAT
;
1664 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1665 ide_set_irq(s
->bus
);
1668 case SMART_READ_LOG
:
1669 switch (s
->sector
) {
1670 case 0x01: /* summary smart error log */
1671 memset(s
->io_buffer
, 0, 0x200);
1672 s
->io_buffer
[0] = 0x01;
1673 s
->io_buffer
[1] = 0x00; /* no error entries */
1674 s
->io_buffer
[452] = s
->smart_errors
& 0xff;
1675 s
->io_buffer
[453] = (s
->smart_errors
& 0xff00) >> 8;
1677 for (n
= 0; n
< 511; n
++) {
1678 s
->io_buffer
[511] += s
->io_buffer
[n
];
1680 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1682 case 0x06: /* smart self test log */
1683 memset(s
->io_buffer
, 0, 0x200);
1684 s
->io_buffer
[0] = 0x01;
1685 if (s
->smart_selftest_count
== 0) {
1686 s
->io_buffer
[508] = 0;
1688 s
->io_buffer
[508] = s
->smart_selftest_count
;
1689 for (n
= 2; n
< 506; n
++) {
1690 s
->io_buffer
[n
] = s
->smart_selftest_data
[n
];
1694 for (n
= 0; n
< 511; n
++) {
1695 s
->io_buffer
[511] += s
->io_buffer
[n
];
1697 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1702 s
->status
= READY_STAT
| SEEK_STAT
;
1703 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1704 ide_set_irq(s
->bus
);
1707 case SMART_EXECUTE_OFFLINE
:
1708 switch (s
->sector
) {
1709 case 0: /* off-line routine */
1710 case 1: /* short self test */
1711 case 2: /* extended self test */
1712 s
->smart_selftest_count
++;
1713 if (s
->smart_selftest_count
> 21) {
1714 s
->smart_selftest_count
= 1;
1716 n
= 2 + (s
->smart_selftest_count
- 1) * 24;
1717 s
->smart_selftest_data
[n
] = s
->sector
;
1718 s
->smart_selftest_data
[n
+ 1] = 0x00; /* OK and finished */
1719 s
->smart_selftest_data
[n
+ 2] = 0x34; /* hour count lsb */
1720 s
->smart_selftest_data
[n
+ 3] = 0x12; /* hour count msb */
1729 ide_abort_command(s
);
1733 #define HD_OK (1u << IDE_HD)
1734 #define CD_OK (1u << IDE_CD)
1735 #define CFA_OK (1u << IDE_CFATA)
1736 #define HD_CFA_OK (HD_OK | CFA_OK)
1737 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1739 /* Set the Disk Seek Completed status bit during completion */
1740 #define SET_DSC (1u << 8)
1742 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1743 static const struct {
1744 /* Returns true if the completion code should be run */
1745 bool (*handler
)(IDEState
*s
, uint8_t cmd
);
1747 } ide_cmd_table
[0x100] = {
1748 /* NOP not implemented, mandatory for CD */
1749 [CFA_REQ_EXT_ERROR_CODE
] = { cmd_cfa_req_ext_error_code
, CFA_OK
},
1750 [WIN_DSM
] = { cmd_data_set_management
, ALL_OK
},
1751 [WIN_DEVICE_RESET
] = { cmd_device_reset
, CD_OK
},
1752 [WIN_RECAL
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
1753 [WIN_READ
] = { cmd_read_pio
, ALL_OK
},
1754 [WIN_READ_ONCE
] = { cmd_read_pio
, ALL_OK
},
1755 [WIN_READ_EXT
] = { cmd_read_pio
, HD_CFA_OK
},
1756 [WIN_READDMA_EXT
] = { cmd_read_dma
, HD_CFA_OK
},
1757 [WIN_READ_NATIVE_MAX_EXT
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
1758 [WIN_MULTREAD_EXT
] = { cmd_read_multiple
, HD_CFA_OK
},
1759 [WIN_WRITE
] = { cmd_write_pio
, HD_CFA_OK
},
1760 [WIN_WRITE_ONCE
] = { cmd_write_pio
, HD_CFA_OK
},
1761 [WIN_WRITE_EXT
] = { cmd_write_pio
, HD_CFA_OK
},
1762 [WIN_WRITEDMA_EXT
] = { cmd_write_dma
, HD_CFA_OK
},
1763 [CFA_WRITE_SECT_WO_ERASE
] = { cmd_write_pio
, CFA_OK
},
1764 [WIN_MULTWRITE_EXT
] = { cmd_write_multiple
, HD_CFA_OK
},
1765 [WIN_WRITE_VERIFY
] = { cmd_write_pio
, HD_CFA_OK
},
1766 [WIN_VERIFY
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
1767 [WIN_VERIFY_ONCE
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
1768 [WIN_VERIFY_EXT
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
1769 [WIN_SEEK
] = { cmd_seek
, HD_CFA_OK
| SET_DSC
},
1770 [CFA_TRANSLATE_SECTOR
] = { cmd_cfa_translate_sector
, CFA_OK
},
1771 [WIN_DIAGNOSE
] = { cmd_exec_dev_diagnostic
, ALL_OK
},
1772 [WIN_SPECIFY
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
1773 [WIN_STANDBYNOW2
] = { cmd_nop
, ALL_OK
},
1774 [WIN_IDLEIMMEDIATE2
] = { cmd_nop
, ALL_OK
},
1775 [WIN_STANDBY2
] = { cmd_nop
, ALL_OK
},
1776 [WIN_SETIDLE2
] = { cmd_nop
, ALL_OK
},
1777 [WIN_CHECKPOWERMODE2
] = { cmd_check_power_mode
, ALL_OK
| SET_DSC
},
1778 [WIN_SLEEPNOW2
] = { cmd_nop
, ALL_OK
},
1779 [WIN_PACKETCMD
] = { cmd_packet
, CD_OK
},
1780 [WIN_PIDENTIFY
] = { cmd_identify_packet
, CD_OK
},
1781 [WIN_SMART
] = { cmd_smart
, HD_CFA_OK
| SET_DSC
},
1782 [CFA_ACCESS_METADATA_STORAGE
] = { cmd_cfa_access_metadata_storage
, CFA_OK
},
1783 [CFA_ERASE_SECTORS
] = { cmd_cfa_erase_sectors
, CFA_OK
| SET_DSC
},
1784 [WIN_MULTREAD
] = { cmd_read_multiple
, HD_CFA_OK
},
1785 [WIN_MULTWRITE
] = { cmd_write_multiple
, HD_CFA_OK
},
1786 [WIN_SETMULT
] = { cmd_set_multiple_mode
, HD_CFA_OK
| SET_DSC
},
1787 [WIN_READDMA
] = { cmd_read_dma
, HD_CFA_OK
},
1788 [WIN_READDMA_ONCE
] = { cmd_read_dma
, HD_CFA_OK
},
1789 [WIN_WRITEDMA
] = { cmd_write_dma
, HD_CFA_OK
},
1790 [WIN_WRITEDMA_ONCE
] = { cmd_write_dma
, HD_CFA_OK
},
1791 [CFA_WRITE_MULTI_WO_ERASE
] = { cmd_write_multiple
, CFA_OK
},
1792 [WIN_STANDBYNOW1
] = { cmd_nop
, ALL_OK
},
1793 [WIN_IDLEIMMEDIATE
] = { cmd_nop
, ALL_OK
},
1794 [WIN_STANDBY
] = { cmd_nop
, ALL_OK
},
1795 [WIN_SETIDLE1
] = { cmd_nop
, ALL_OK
},
1796 [WIN_CHECKPOWERMODE1
] = { cmd_check_power_mode
, ALL_OK
| SET_DSC
},
1797 [WIN_SLEEPNOW1
] = { cmd_nop
, ALL_OK
},
1798 [WIN_FLUSH_CACHE
] = { cmd_flush_cache
, ALL_OK
},
1799 [WIN_FLUSH_CACHE_EXT
] = { cmd_flush_cache
, HD_CFA_OK
},
1800 [WIN_IDENTIFY
] = { cmd_identify
, ALL_OK
},
1801 [WIN_SETFEATURES
] = { cmd_set_features
, ALL_OK
| SET_DSC
},
1802 [IBM_SENSE_CONDITION
] = { cmd_ibm_sense_condition
, CFA_OK
| SET_DSC
},
1803 [CFA_WEAR_LEVEL
] = { cmd_cfa_erase_sectors
, HD_CFA_OK
| SET_DSC
},
1804 [WIN_READ_NATIVE_MAX
] = { cmd_read_native_max
, ALL_OK
| SET_DSC
},
1807 static bool ide_cmd_permitted(IDEState
*s
, uint32_t cmd
)
1809 return cmd
< ARRAY_SIZE(ide_cmd_table
)
1810 && (ide_cmd_table
[cmd
].flags
& (1u << s
->drive_kind
));
1813 void ide_exec_cmd(IDEBus
*bus
, uint32_t val
)
1818 #if defined(DEBUG_IDE)
1819 printf("ide: CMD=%02x\n", val
);
1821 s
= idebus_active_if(bus
);
1822 /* ignore commands to non existent slave */
1823 if (s
!= bus
->ifs
&& !s
->blk
) {
1827 /* Only DEVICE RESET is allowed while BSY or/and DRQ are set */
1828 if ((s
->status
& (BUSY_STAT
|DRQ_STAT
)) && val
!= WIN_DEVICE_RESET
)
1831 if (!ide_cmd_permitted(s
, val
)) {
1832 ide_abort_command(s
);
1833 ide_set_irq(s
->bus
);
1837 s
->status
= READY_STAT
| BUSY_STAT
;
1839 s
->io_buffer_offset
= 0;
1841 complete
= ide_cmd_table
[val
].handler(s
, val
);
1843 s
->status
&= ~BUSY_STAT
;
1844 assert(!!s
->error
== !!(s
->status
& ERR_STAT
));
1846 if ((ide_cmd_table
[val
].flags
& SET_DSC
) && !s
->error
) {
1847 s
->status
|= SEEK_STAT
;
1851 ide_set_irq(s
->bus
);
1855 uint32_t ide_ioport_read(void *opaque
, uint32_t addr1
)
1857 IDEBus
*bus
= opaque
;
1858 IDEState
*s
= idebus_active_if(bus
);
1863 /* FIXME: HOB readback uses bit 7, but it's always set right now */
1864 //hob = s->select & (1 << 7);
1871 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
1872 (s
!= bus
->ifs
&& !s
->blk
)) {
1877 ret
= s
->hob_feature
;
1881 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
1884 ret
= s
->nsector
& 0xff;
1886 ret
= s
->hob_nsector
;
1890 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
1895 ret
= s
->hob_sector
;
1899 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
1908 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
1917 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
1925 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
1926 (s
!= bus
->ifs
&& !s
->blk
)) {
1931 qemu_irq_lower(bus
->irq
);
1935 printf("ide: read addr=0x%x val=%02x\n", addr1
, ret
);
1940 uint32_t ide_status_read(void *opaque
, uint32_t addr
)
1942 IDEBus
*bus
= opaque
;
1943 IDEState
*s
= idebus_active_if(bus
);
1946 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
1947 (s
!= bus
->ifs
&& !s
->blk
)) {
1953 printf("ide: read status addr=0x%x val=%02x\n", addr
, ret
);
1958 void ide_cmd_write(void *opaque
, uint32_t addr
, uint32_t val
)
1960 IDEBus
*bus
= opaque
;
1965 printf("ide: write control addr=0x%x val=%02x\n", addr
, val
);
1967 /* common for both drives */
1968 if (!(bus
->cmd
& IDE_CMD_RESET
) &&
1969 (val
& IDE_CMD_RESET
)) {
1970 /* reset low to high */
1971 for(i
= 0;i
< 2; i
++) {
1973 s
->status
= BUSY_STAT
| SEEK_STAT
;
1976 } else if ((bus
->cmd
& IDE_CMD_RESET
) &&
1977 !(val
& IDE_CMD_RESET
)) {
1979 for(i
= 0;i
< 2; i
++) {
1981 if (s
->drive_kind
== IDE_CD
)
1982 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1984 s
->status
= READY_STAT
| SEEK_STAT
;
1985 ide_set_signature(s
);
1993 * Returns true if the running PIO transfer is a PIO out (i.e. data is
1994 * transferred from the device to the guest), false if it's a PIO in
1996 static bool ide_is_pio_out(IDEState
*s
)
1998 if (s
->end_transfer_func
== ide_sector_write
||
1999 s
->end_transfer_func
== ide_atapi_cmd
) {
2001 } else if (s
->end_transfer_func
== ide_sector_read
||
2002 s
->end_transfer_func
== ide_transfer_stop
||
2003 s
->end_transfer_func
== ide_atapi_cmd_reply_end
||
2004 s
->end_transfer_func
== ide_dummy_transfer_stop
) {
2011 void ide_data_writew(void *opaque
, uint32_t addr
, uint32_t val
)
2013 IDEBus
*bus
= opaque
;
2014 IDEState
*s
= idebus_active_if(bus
);
2017 /* PIO data access allowed only when DRQ bit is set. The result of a write
2018 * during PIO out is indeterminate, just ignore it. */
2019 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2024 *(uint16_t *)p
= le16_to_cpu(val
);
2027 if (p
>= s
->data_end
)
2028 s
->end_transfer_func(s
);
2031 uint32_t ide_data_readw(void *opaque
, uint32_t addr
)
2033 IDEBus
*bus
= opaque
;
2034 IDEState
*s
= idebus_active_if(bus
);
2038 /* PIO data access allowed only when DRQ bit is set. The result of a read
2039 * during PIO in is indeterminate, return 0 and don't move forward. */
2040 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2045 ret
= cpu_to_le16(*(uint16_t *)p
);
2048 if (p
>= s
->data_end
)
2049 s
->end_transfer_func(s
);
2053 void ide_data_writel(void *opaque
, uint32_t addr
, uint32_t val
)
2055 IDEBus
*bus
= opaque
;
2056 IDEState
*s
= idebus_active_if(bus
);
2059 /* PIO data access allowed only when DRQ bit is set. The result of a write
2060 * during PIO out is indeterminate, just ignore it. */
2061 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2066 *(uint32_t *)p
= le32_to_cpu(val
);
2069 if (p
>= s
->data_end
)
2070 s
->end_transfer_func(s
);
2073 uint32_t ide_data_readl(void *opaque
, uint32_t addr
)
2075 IDEBus
*bus
= opaque
;
2076 IDEState
*s
= idebus_active_if(bus
);
2080 /* PIO data access allowed only when DRQ bit is set. The result of a read
2081 * during PIO in is indeterminate, return 0 and don't move forward. */
2082 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2087 ret
= cpu_to_le32(*(uint32_t *)p
);
2090 if (p
>= s
->data_end
)
2091 s
->end_transfer_func(s
);
2095 static void ide_dummy_transfer_stop(IDEState
*s
)
2097 s
->data_ptr
= s
->io_buffer
;
2098 s
->data_end
= s
->io_buffer
;
2099 s
->io_buffer
[0] = 0xff;
2100 s
->io_buffer
[1] = 0xff;
2101 s
->io_buffer
[2] = 0xff;
2102 s
->io_buffer
[3] = 0xff;
2105 static void ide_reset(IDEState
*s
)
2108 printf("ide: reset\n");
2112 blk_aio_cancel(s
->pio_aiocb
);
2113 s
->pio_aiocb
= NULL
;
2116 if (s
->drive_kind
== IDE_CFATA
)
2117 s
->mult_sectors
= 0;
2119 s
->mult_sectors
= MAX_MULT_SECTORS
;
2136 s
->status
= READY_STAT
| SEEK_STAT
;
2140 /* ATAPI specific */
2143 s
->cdrom_changed
= 0;
2144 s
->packet_transfer_size
= 0;
2145 s
->elementary_transfer_size
= 0;
2146 s
->io_buffer_index
= 0;
2147 s
->cd_sector_size
= 0;
2152 s
->io_buffer_size
= 0;
2153 s
->req_nb_sectors
= 0;
2155 ide_set_signature(s
);
2156 /* init the transfer handler so that 0xffff is returned on data
2158 s
->end_transfer_func
= ide_dummy_transfer_stop
;
2159 ide_dummy_transfer_stop(s
);
2160 s
->media_changed
= 0;
2163 void ide_bus_reset(IDEBus
*bus
)
2167 ide_reset(&bus
->ifs
[0]);
2168 ide_reset(&bus
->ifs
[1]);
2171 /* pending async DMA */
2172 if (bus
->dma
->aiocb
) {
2174 printf("aio_cancel\n");
2176 blk_aio_cancel(bus
->dma
->aiocb
);
2177 bus
->dma
->aiocb
= NULL
;
2180 /* reset dma provider too */
2181 if (bus
->dma
->ops
->reset
) {
2182 bus
->dma
->ops
->reset(bus
->dma
);
2186 static bool ide_cd_is_tray_open(void *opaque
)
2188 return ((IDEState
*)opaque
)->tray_open
;
2191 static bool ide_cd_is_medium_locked(void *opaque
)
2193 return ((IDEState
*)opaque
)->tray_locked
;
2196 static void ide_resize_cb(void *opaque
)
2198 IDEState
*s
= opaque
;
2199 uint64_t nb_sectors
;
2201 if (!s
->identify_set
) {
2205 blk_get_geometry(s
->blk
, &nb_sectors
);
2206 s
->nb_sectors
= nb_sectors
;
2208 /* Update the identify data buffer. */
2209 if (s
->drive_kind
== IDE_CFATA
) {
2210 ide_cfata_identify_size(s
);
2212 /* IDE_CD uses a different set of callbacks entirely. */
2213 assert(s
->drive_kind
!= IDE_CD
);
2214 ide_identify_size(s
);
2218 static const BlockDevOps ide_cd_block_ops
= {
2219 .change_media_cb
= ide_cd_change_cb
,
2220 .eject_request_cb
= ide_cd_eject_request_cb
,
2221 .is_tray_open
= ide_cd_is_tray_open
,
2222 .is_medium_locked
= ide_cd_is_medium_locked
,
2225 static const BlockDevOps ide_hd_block_ops
= {
2226 .resize_cb
= ide_resize_cb
,
2229 int ide_init_drive(IDEState
*s
, BlockBackend
*blk
, IDEDriveKind kind
,
2230 const char *version
, const char *serial
, const char *model
,
2232 uint32_t cylinders
, uint32_t heads
, uint32_t secs
,
2235 uint64_t nb_sectors
;
2238 s
->drive_kind
= kind
;
2240 blk_get_geometry(blk
, &nb_sectors
);
2241 s
->cylinders
= cylinders
;
2244 s
->chs_trans
= chs_trans
;
2245 s
->nb_sectors
= nb_sectors
;
2247 /* The SMART values should be preserved across power cycles
2249 s
->smart_enabled
= 1;
2250 s
->smart_autosave
= 1;
2251 s
->smart_errors
= 0;
2252 s
->smart_selftest_count
= 0;
2253 if (kind
== IDE_CD
) {
2254 blk_set_dev_ops(blk
, &ide_cd_block_ops
, s
);
2255 blk_set_guest_block_size(blk
, 2048);
2257 if (!blk_is_inserted(s
->blk
)) {
2258 error_report("Device needs media, but drive is empty");
2261 if (blk_is_read_only(blk
)) {
2262 error_report("Can't use a read-only drive");
2265 blk_set_dev_ops(blk
, &ide_hd_block_ops
, s
);
2268 pstrcpy(s
->drive_serial_str
, sizeof(s
->drive_serial_str
), serial
);
2270 snprintf(s
->drive_serial_str
, sizeof(s
->drive_serial_str
),
2271 "QM%05d", s
->drive_serial
);
2274 pstrcpy(s
->drive_model_str
, sizeof(s
->drive_model_str
), model
);
2278 strcpy(s
->drive_model_str
, "QEMU DVD-ROM");
2281 strcpy(s
->drive_model_str
, "QEMU MICRODRIVE");
2284 strcpy(s
->drive_model_str
, "QEMU HARDDISK");
2290 pstrcpy(s
->version
, sizeof(s
->version
), version
);
2292 pstrcpy(s
->version
, sizeof(s
->version
), qemu_get_version());
2296 blk_iostatus_enable(blk
);
2300 static void ide_init1(IDEBus
*bus
, int unit
)
2302 static int drive_serial
= 1;
2303 IDEState
*s
= &bus
->ifs
[unit
];
2307 s
->drive_serial
= drive_serial
++;
2308 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2309 s
->io_buffer_total_len
= IDE_DMA_BUF_SECTORS
*512 + 4;
2310 s
->io_buffer
= qemu_memalign(2048, s
->io_buffer_total_len
);
2311 memset(s
->io_buffer
, 0, s
->io_buffer_total_len
);
2313 s
->smart_selftest_data
= blk_blockalign(s
->blk
, 512);
2314 memset(s
->smart_selftest_data
, 0, 512);
2316 s
->sector_write_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
2317 ide_sector_write_timer_cb
, s
);
2320 static int ide_nop_int(IDEDMA
*dma
, int x
)
2325 static void ide_nop(IDEDMA
*dma
)
2329 static int32_t ide_nop_int32(IDEDMA
*dma
, int x
)
2334 static const IDEDMAOps ide_dma_nop_ops
= {
2335 .prepare_buf
= ide_nop_int32
,
2336 .restart_dma
= ide_nop
,
2337 .rw_buf
= ide_nop_int
,
2340 static void ide_restart_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
2342 s
->unit
= s
->bus
->retry_unit
;
2343 ide_set_sector(s
, s
->bus
->retry_sector_num
);
2344 s
->nsector
= s
->bus
->retry_nsector
;
2345 s
->bus
->dma
->ops
->restart_dma(s
->bus
->dma
);
2346 s
->io_buffer_size
= 0;
2347 s
->dma_cmd
= dma_cmd
;
2348 ide_start_dma(s
, ide_dma_cb
);
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    if (error_status & IDE_RETRY_DMA) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (error_status & IDE_RETRY_PIO) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else {
        /*
         * We've not got any bits to tell us about ATAPI - but
         * we do have the end_transfer_func that tells us what
         * we're trying to do.
         */
        if (s->end_transfer_func == ide_atapi_cmd) {
            ide_atapi_dma_restart(s);
        }
    }
}

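/*
 * VM run-state change handler: once the guest is running again, schedule a
 * bottom half to replay any pending request recorded in bus->error_status,
 * typically one stopped by a werror/rerror=stop policy or cut short when the
 * machine was paused for migration.
 */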
static void ide_restart_cb(void *opaque, int running, RunState state)
{
    IDEBus *bus = opaque;

    if (!running) {
        return;
    }
    if (!bus->bh) {
        bus->bh = qemu_bh_new(ide_restart_bh, bus);
        qemu_bh_schedule(bus->bh);
    }
}

void ide_register_restart_cb(IDEBus *bus)
{
    if (bus->dma->ops->restart_dma) {
        qemu_add_vm_change_state_handler(ide_restart_cb, bus);
    }
}

static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};

void ide_init2(IDEBus *bus, qemu_irq irq)
{
    int i;

    for(i = 0; i < 2; i++) {
        ide_init1(bus, i);
        ide_reset(&bus->ifs[i]);
    }
    bus->irq = irq;
    bus->dma = &ide_dma_nop;
}

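/*
 * Legacy I/O port layout: eight command-block registers at iobase (with
 * 16- and 32-bit data transfers at offset 0) and a single control/status
 * register at iobase2, e.g. 0x1f0/0x3f6 for the primary ISA channel.
 */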
static const MemoryRegionPortio ide_portio_list[] = {
    { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
    { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
    { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionPortio ide_portio2_list[] = {
    { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
    PORTIO_END_OF_LIST(),
};

void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
    /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
       bridge has been set up properly to always register with ISA.  */
    isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");

    if (iobase2) {
        isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
    }
}

static bool is_identify_set(void *opaque, int version_id)
{
    IDEState *s = opaque;

    return s->identify_set != 0;
}

static EndTransferFunc* transfer_end_table[] = {
        ide_sector_read,
        ide_sector_write,
        ide_transfer_stop,
        ide_atapi_cmd_reply_end,
        ide_atapi_cmd,
        ide_dummy_transfer_stop,
};

static int transfer_end_table_idx(EndTransferFunc *fn)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
        if (transfer_end_table[i] == fn)
            return i;

    return -1;
}

static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}

static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}

static void ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                        __func__);
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }
}

static bool ide_drive_pio_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return ((s->status & DRQ_STAT) != 0)
        || (s->bus->error_status & IDE_RETRY_PIO);
}

static bool ide_tray_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->tray_open || s->tray_locked;
}

static bool ide_atapi_gesn_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->events.new_media || s->events.eject_request;
}

static bool ide_error_needed(void *opaque)
{
    IDEBus *bus = opaque;

    return (bus->error_status != 0);
}

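/*
 * The predicates above gate the optional subsections below: a subsection is
 * only put on the wire when its .needed callback returns true, so migration
 * streams stay compatible with destinations that predate it.
 */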
/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name = "ide_drive/atapi/gesn_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

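/*
 * Main per-drive state.  identify_data is only transferred once IDENTIFY
 * data has been generated (is_identify_set), and cdrom_changed was added in
 * version 3 of this description.
 */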
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_ide_drive_pio_state,
            .needed = ide_drive_pio_state_needed,
        }, {
            .vmsd = &vmstate_ide_tray_state,
            .needed = ide_tray_state_needed,
        }, {
            .vmsd = &vmstate_ide_atapi_gesn_state,
            .needed = ide_atapi_gesn_needed,
        }, {
            /* empty */
        }
    }
};

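/*
 * Per-bus error/retry state.  The retry_* fields are tagged with version 2
 * so streams from older QEMUs that only carried error_status still load.
 */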
static const VMStateDescription vmstate_ide_error_status = {
    .name = "ide_bus/error",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(error_status, IDEBus),
        VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
        VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
        VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_ide_error_status,
            .needed = ide_error_needed,
        }, {
            /* empty */
        }
    }
};

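/*
 * Fill the caller-provided hd[] array with the IF_IDE -drive definitions,
 * indexed by the flat drive index (bus * max_devs + unit); slots with no
 * drive configured are left NULL by drive_get_by_index().
 */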
void ide_drive_get(DriveInfo **hd, int n)
{
    int i;
    int highest_bus = drive_get_max_bus(IF_IDE) + 1;
    int max_devs = drive_get_max_devs(IF_IDE);
    int n_buses = max_devs ? (n / max_devs) : n;

    /*
     * Note: The number of actual buses available is not known.
     * We compute this based on the size of the DriveInfo* array, n.
     * If it is less than max_devs * <num_real_buses>,
     * we will stop looking for drives prematurely instead of overfilling
     * the array.
     */

    if (highest_bus > n_buses) {
        error_report("Too many IDE buses defined (%d > %d)",
                     highest_bus, n_buses);
        exit(1);
    }

    for (i = 0; i < n; i++) {
        hd[i] = drive_get_by_index(IF_IDE, i);
    }
}