src/add-ons/kernel/bus_managers/ide/ata.c
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/

/*
	Part of Open IDE bus manager

	ATA command protocol
*/

#include "ide_internal.h"

#include "ide_sim.h"
#include "ide_cmds.h"

/** verify that device is ready for further PIO transmission */

static bool
check_rw_status(ide_device_info *device, bool drqStatus)
{
	ide_bus_info *bus = device->bus;
	int status;

	status = bus->controller->get_altstatus(bus->channel_cookie);

	if ((status & ide_status_bsy) != 0) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	if (drqStatus != ((status & ide_status_drq) != 0)) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	return true;
}

/** DPC called at
 *	- begin of each PIO read/write block
 *	- end of PIO write transmission
 */

void
ata_dpc_PIO(ide_qrequest *qrequest)
{
	ide_device_info *device = qrequest->device;
	uint32 timeout = qrequest->request->timeout > 0 ?
		qrequest->request->timeout : IDE_STD_TIMEOUT;

	SHOW_FLOW0(3, "");

	if (check_rw_error(device, qrequest)
		|| !check_rw_status(device, qrequest->is_write ? device->left_blocks > 0 : true))
	{
		// failure reported by device
		SHOW_FLOW0(3, "command finished unsuccessfully");

		finish_checksense(qrequest);
		return;
	}

	if (qrequest->is_write) {
		if (device->left_blocks == 0) {
			// this was the end-of-transmission IRQ
			SHOW_FLOW0(3, "write access finished");
			if (!wait_for_drqdown(device)) {
				SHOW_ERROR0(3, "device wants to transmit data though command is finished");
				goto finish;
			}
			goto finish;
		}

		// wait until device requests data
		SHOW_FLOW0(3, "Waiting for device ready to transmit");
		if (!wait_for_drq(device)) {
			SHOW_FLOW0(3, "device not ready for data transmission - abort");
			goto finish;
		}

		// start async waiting for next block/end of command;
		// we should start that once the block is transmitted, but with bad
		// luck the IRQ fires exactly between transmission and start of waiting,
		// so we better start waiting too early; as we are in the service thread,
		// a DPC initiated by an IRQ cannot overtake us, so there is no need to
		// block IRQs during the send
		start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);

		// having a too short data buffer shouldn't happen here,
		// but we are prepared for it anyway
		SHOW_FLOW0(3, "Writing one block");
		if (write_PIO_block(qrequest, 512) == B_ERROR)
			goto finish_cancel_timeout;

		--device->left_blocks;
	} else {
		if (device->left_blocks > 1) {
			// start async waiting for next command (see above)
			start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
		}

		// see write
		SHOW_FLOW0(3, "Reading one block");
		if (read_PIO_block(qrequest, 512) == B_ERROR)
			goto finish_cancel_timeout;

		--device->left_blocks;

		if (device->left_blocks == 0) {
			// at end of transmission, wait for data request going low
			SHOW_FLOW0(3, "Waiting for device to finish transmission");

			if (!wait_for_drqdown(device))
				SHOW_FLOW0(3, "Device continues data transmission - abort command");

			// we don't cancel the timeout as no timeout is started during the last block
			goto finish;
		}
	}

	return;

finish_cancel_timeout:
	cancel_irq_timeout(device->bus);

finish:
	finish_checksense(qrequest);
}

/** DPC called when IRQ was fired at end of DMA transmission */

void
ata_dpc_DMA(ide_qrequest *qrequest)
{
	ide_device_info *device = qrequest->device;
	bool dma_success, dev_err;

	dma_success = finish_dma(device);
	dev_err = check_rw_error(device, qrequest);

	if (dma_success && !dev_err) {
		// reset error count if DMA worked
		device->DMA_failures = 0;
		device->CQ_failures = 0;
		qrequest->request->data_resid = 0;
		finish_checksense(qrequest);
	} else {
		SHOW_ERROR0(2, "Error in DMA transmission");

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);

		if (++device->DMA_failures >= MAX_DMA_FAILURES) {
			SHOW_ERROR0(2, "Disabled DMA because of too many errors");
			device->DMA_enabled = false;
		}

		// reset queue in case queuing is active
		finish_reset_queue(qrequest);
	}
}

// list of LBA48 opcodes
static uint8 cmd_48[2][2] = {
	{ IDE_CMD_READ_SECTORS_EXT, IDE_CMD_WRITE_SECTORS_EXT },
	{ IDE_CMD_READ_DMA_EXT, IDE_CMD_WRITE_DMA_EXT }
};

// list of normal LBA opcodes
static uint8 cmd_28[2][2] = {
	{ IDE_CMD_READ_SECTORS, IDE_CMD_WRITE_SECTORS },
	{ IDE_CMD_READ_DMA, IDE_CMD_WRITE_DMA }
};
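
// both tables are indexed as cmd[uses_dma][is_write], matching the
// cmd_48[qrequest->uses_dma][write] lookups in create_rw_taskfile() below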

/** create IDE read/write command */

static bool
create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
	uint64 pos, size_t length, bool write)
{
	SHOW_FLOW0(3, "");

	// XXX disable any writes
/*	if (write)
		goto err;*/

	if (device->use_LBA) {
		if (device->use_48bits && (pos + length > 0xfffffff || length > 0x100)) {
			// use LBA48 only if necessary
			SHOW_FLOW0(3, "using LBA48");

			if (length > 0xffff)
				goto err;

			if (qrequest->queuable) {
				// queued LBA48
				device->tf_param_mask = ide_mask_features_48
					| ide_mask_sector_count
					| ide_mask_LBA_low_48
					| ide_mask_LBA_mid_48
					| ide_mask_LBA_high_48;

				device->tf.queued48.sector_count_0_7 = length & 0xff;
				device->tf.queued48.sector_count_8_15 = (length >> 8) & 0xff;
				device->tf.queued48.tag = qrequest->tag;
				device->tf.queued48.lba_0_7 = pos & 0xff;
				device->tf.queued48.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.queued48.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.queued48.lba_24_31 = (pos >> 24) & 0xff;
				device->tf.queued48.lba_32_39 = (pos >> 32) & 0xff;
				device->tf.queued48.lba_40_47 = (pos >> 40) & 0xff;
				device->tf.queued48.command = write ? IDE_CMD_WRITE_DMA_QUEUED_EXT
					: IDE_CMD_READ_DMA_QUEUED_EXT;
				return true;
			} else {
				// non-queued LBA48
				device->tf_param_mask = ide_mask_sector_count_48
					| ide_mask_LBA_low_48
					| ide_mask_LBA_mid_48
					| ide_mask_LBA_high_48;

				device->tf.lba48.sector_count_0_7 = length & 0xff;
				device->tf.lba48.sector_count_8_15 = (length >> 8) & 0xff;
				device->tf.lba48.lba_0_7 = pos & 0xff;
				device->tf.lba48.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.lba48.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.lba48.lba_24_31 = (pos >> 24) & 0xff;
				device->tf.lba48.lba_32_39 = (pos >> 32) & 0xff;
				device->tf.lba48.lba_40_47 = (pos >> 40) & 0xff;
				device->tf.lba48.command = cmd_48[qrequest->uses_dma][write];
				return true;
			}
		} else {
			// normal LBA
			SHOW_FLOW0(3, "using LBA");

			if (length > 0x100)
				goto err;

			if (qrequest->queuable) {
				// queued LBA
				SHOW_FLOW(3, "creating DMA queued command, tag=%d", qrequest->tag);
				device->tf_param_mask = ide_mask_features
					| ide_mask_sector_count
					| ide_mask_LBA_low
					| ide_mask_LBA_mid
					| ide_mask_LBA_high
					| ide_mask_device_head;

				device->tf.queued.sector_count = length & 0xff;
				device->tf.queued.tag = qrequest->tag;
				device->tf.queued.lba_0_7 = pos & 0xff;
				device->tf.queued.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.queued.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.queued.lba_24_27 = (pos >> 24) & 0xf;
				device->tf.queued.command = write ? IDE_CMD_WRITE_DMA_QUEUED
					: IDE_CMD_READ_DMA_QUEUED;
				return true;
			} else {
				// non-queued LBA
				SHOW_FLOW0(3, "creating normal DMA/PIO command");
				device->tf_param_mask = ide_mask_sector_count
					| ide_mask_LBA_low
					| ide_mask_LBA_mid
					| ide_mask_LBA_high
					| ide_mask_device_head;

				device->tf.lba.sector_count = length & 0xff;
				device->tf.lba.lba_0_7 = pos & 0xff;
				device->tf.lba.lba_8_15 = (pos >> 8) & 0xff;
				device->tf.lba.lba_16_23 = (pos >> 16) & 0xff;
				device->tf.lba.lba_24_27 = (pos >> 24) & 0xf;
				device->tf.lba.command = cmd_28[qrequest->uses_dma][write];
				return true;
			}
		}
	} else {
		// CHS mode
		// (probably, no one would notice if we dropped support)
		uint32 track_size, cylinder_offset, cylinder;
		ide_device_infoblock *infoblock = &device->infoblock;

		if (length > 0x100)
			goto err;

		device->tf.chs.mode = ide_mode_chs;

		device->tf_param_mask = ide_mask_sector_count
			| ide_mask_sector_number
			| ide_mask_cylinder_low
			| ide_mask_cylinder_high
			| ide_mask_device_head;

		device->tf.chs.sector_count = length & 0xff;

		track_size = infoblock->current_heads * infoblock->current_sectors;

		if (track_size == 0) {
			set_sense(device,
				SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_MEDIUM_FORMAT_CORRUPTED);
			return false;
		}

		cylinder = pos / track_size;

		device->tf.chs.cylinder_0_7 = cylinder & 0xff;
		device->tf.chs.cylinder_8_15 = (cylinder >> 8) & 0xff;

		cylinder_offset = pos - cylinder * track_size;

		device->tf.chs.sector_number = (cylinder_offset % infoblock->current_sectors + 1) & 0xff;
		device->tf.chs.head = cylinder_offset / infoblock->current_sectors;
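		// e.g. with 16 heads and 63 sectors per track (track_size = 1008),
		// pos 10000 yields cylinder 9, head 14, sector_number 47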

		device->tf.chs.command = cmd_28[qrequest->uses_dma][write];
		return true;
	}

	return true;

err:
	set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
	return false;
}

/** execute read/write command
 *	pos - first block
 *	length - number of blocks
 */

void
ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
	uint64 pos, size_t length, bool write)
{
	ide_bus_info *bus = device->bus;
	uint32 timeout;

	// make a copy first, as settings may get changed by the user during execution
	qrequest->is_write = write;
	qrequest->uses_dma = device->DMA_enabled;

	if (qrequest->uses_dma) {
		if (!prepare_dma(device, qrequest)) {
			// fall back to PIO on error

			// if command queueing is used and there is another command
			// already running, we cannot fall back to PIO immediately -> declare
			// the command as not queuable and resubmit it, so the SCSI bus manager
			// will block other requests on retry
			// (XXX this is not fine if the caller wants to recycle the CCB)
			if (device->num_running_reqs > 1) {
				qrequest->request->flags &= ~SCSI_ORDERED_QTAG;
				finish_retry(qrequest);
				return;
			}

			qrequest->uses_dma = false;
		}
	}

	if (!qrequest->uses_dma) {
		prep_PIO_transfer(device, qrequest);
		device->left_blocks = length;
	}

	// compose command
	if (!create_rw_taskfile(device, qrequest, pos, length, write))
		goto err_setup;

	// if no timeout is specified, use standard
	timeout = qrequest->request->timeout > 0 ?
		qrequest->request->timeout : IDE_STD_TIMEOUT;

	// in DMA mode, we continue with "accessing";
	// on PIO read, we continue with "async waiting";
	// on PIO write, we continue with "accessing"
	if (!send_command(device, qrequest, !device->is_atapi, timeout,
		(!qrequest->uses_dma && !qrequest->is_write) ?
			ide_state_async_waiting : ide_state_accessing))
		goto err_send;

	if (qrequest->uses_dma) {
		// if queuing is used, we have to ask the device first whether it wants
		// to postpone the command
		// XXX: using the bus release IRQ we don't have to busy wait for
		// a response, but I heard that IBM drives have problems with
		// that IRQ; to be evaluated
		if (qrequest->queuable) {
			if (!wait_for_drdy(device))
				goto err_send;

			if (check_rw_error(device, qrequest))
				goto err_send;

			if (device_released_bus(device)) {
				// device enqueued the command, so we have to wait;
				// in access_finished, we'll ask the device whether it wants to
				// continue some other command
				bus->active_qrequest = NULL;

				access_finished(bus, device);
				// we may have rejected commands meanwhile, so tell
				// the SIM that it can resend them now
				scsi->cont_send_bus(bus->scsi_cookie);
				return;
			}

			//SHOW_ERROR0(2, "device executes command instantly");
		}

		start_dma_wait_no_lock(device, qrequest);
	} else {
		// on PIO read, we start with waiting; on PIO write, we can
		// transmit data immediately; we let the service thread do
		// the writing, so the caller can issue the next command
		// immediately (this optimisation really pays on SMP systems
		// only)
		SHOW_FLOW0(3, "Ready for PIO");
		if (qrequest->is_write) {
			SHOW_FLOW0(3, "Scheduling write DPC");
			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
		}
	}

	return;

err_setup:
	// error during setup
	if (qrequest->uses_dma)
		abort_dma(device, qrequest);

	finish_checksense(qrequest);
	return;

err_send:
	// error during/after send;
	// in this case, the device discards queued requests automatically
	if (qrequest->uses_dma)
		abort_dma(device, qrequest);

	finish_reset_queue(qrequest);
}

/** check for errors reported by read/write command
 *	return: true, if an error occurred
 */

bool
check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
{
	ide_bus_info *bus = device->bus;
	uint8 status;

	status = bus->controller->get_altstatus(bus->channel_cookie);

	if ((status & ide_status_err) != 0) {
		uint8 error;

		if (bus->controller->read_command_block_regs(bus->channel_cookie,
				&device->tf, ide_mask_error) != B_OK) {
			device->subsys_status = SCSI_HBA_ERR;
			return true;
		}

		error = device->tf.read.error;

		if ((error & ide_error_icrc) != 0) {
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_CRC);
			return true;
		}

		if (qrequest->is_write) {
			if ((error & ide_error_wp) != 0) {
				set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
				return true;
			}
		} else {
			if ((error & ide_error_unc) != 0) {
				set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_UNREC_READ_ERR);
				return true;
			}
		}

		if ((error & ide_error_mc) != 0) {
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_MEDIUM_CHANGED);
			return true;
		}

		if ((error & ide_error_idnf) != 0) {
			// ID not found - invalid CHS mapping (was: seek error?)
			set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_RANDOM_POS_ERROR);
			return true;
		}

		if ((error & ide_error_mcr) != 0) {
			// XXX proper sense key?
			// for TUR this case is not defined!?
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_REMOVAL_REQUESTED);
			return true;
		}

		if ((error & ide_error_nm) != 0) {
			set_sense(device, SCSIS_KEY_NOT_READY, SCSIS_ASC_NO_MEDIUM);
			return true;
		}

		if ((error & ide_error_abrt) != 0) {
			set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
			return true;
		}

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
		return true;
	}

	return false;
}
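
// note: check_output() below decodes the same error bits, but is the general
// variant used after non-R/W commands (see device_set_feature() and
// configure_rmsn()); it additionally honours an explicit error mask and
// verifies BSY/DRDY as well as the synchronous IRQ timeout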

/** check result of ATA command
 *	drdy_required - true if drdy must be set by device
 *	error_mask - bits to be checked in error register
 *	is_write - true, if command was a write command
 */

bool
check_output(ide_device_info *device, bool drdy_required,
	int error_mask, bool is_write)
{
	ide_bus_info *bus = device->bus;
	uint8 status;

	// check IRQ timeout
	if (bus->sync_wait_timeout) {
		bus->sync_wait_timeout = false;

		device->subsys_status = SCSI_CMD_TIMEOUT;
		return false;
	}

	status = bus->controller->get_altstatus(bus->channel_cookie);

	// if device is busy, other flags are indeterminate
	if ((status & ide_status_bsy) != 0) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	if (drdy_required && ((status & ide_status_drdy) == 0)) {
		device->subsys_status = SCSI_SEQUENCE_FAIL;
		return false;
	}

	if ((status & ide_status_err) != 0) {
		uint8 error;

		if (bus->controller->read_command_block_regs(bus->channel_cookie,
				&device->tf, ide_mask_error) != B_OK) {
			device->subsys_status = SCSI_HBA_ERR;
			return false;
		}

		error = device->tf.read.error & error_mask;

		if ((error & ide_error_icrc) != 0) {
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_CRC);
			return false;
		}

		if (is_write) {
			if ((error & ide_error_wp) != 0) {
				set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
				return false;
			}
		} else {
			if ((error & ide_error_unc) != 0) {
				set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_UNREC_READ_ERR);
				return false;
			}
		}

		if ((error & ide_error_mc) != 0) {
			// XXX proper sense key?
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_MEDIUM_CHANGED);
			return false;
		}

		if ((error & ide_error_idnf) != 0) {
			// XXX strange error code, don't really know what it means
			set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_RANDOM_POS_ERROR);
			return false;
		}

		if ((error & ide_error_mcr) != 0) {
			// XXX proper sense key?
			set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_REMOVAL_REQUESTED);
			return false;
		}

		if ((error & ide_error_nm) != 0) {
			set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_NO_MEDIUM);
			return false;
		}

		if ((error & ide_error_abrt) != 0) {
			set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
			return false;
		}

		// either there was no error bit set or it was masked out
		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
		return false;
	}

	return true;
}

/** execute SET FEATURES command
 *	feature - subcommand to be written to the features register
 */

static bool
device_set_feature(ide_device_info *device, int feature)
{
	device->tf_param_mask = ide_mask_features;

	device->tf.write.features = feature;
	device->tf.write.command = IDE_CMD_SET_FEATURES;

	if (!send_command(device, NULL, true, 1, ide_state_sync_waiting))
		return false;

	wait_for_sync(device->bus);

	return check_output(device, true, ide_error_abrt, false);
}
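
// device_set_feature() is used by the configure_*() helpers below to toggle
// optional features such as media status notification and the queue-related
// release/service IRQs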

/** configure removable media status notification (RMSN) */

static bool
configure_rmsn(ide_device_info *device)
{
	ide_bus_info *bus = device->bus;
	int i;

	if (!device->infoblock.RMSN_supported
		|| device->infoblock._127_RMSN_support != 1)
		return true;

	if (!device_set_feature(device, IDE_CMD_SET_FEATURES_ENABLE_MSN))
		return false;

	bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
		ide_mask_LBA_mid | ide_mask_LBA_high);

	for (i = 0; i < 5; ++i) {
		// don't use TUR, as it wouldn't check ide_error_mcr | ide_error_mc | ide_error_wp
		// (but note that we don't check wp here either)
		device->combined_sense = 0;

		device->tf_param_mask = 0;
		device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;

		if (!send_command(device, NULL, true, 15, ide_state_sync_waiting))
			continue;

		if (check_output(device, true,
				ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
				true)
			|| decode_sense_asc_ascq(device->combined_sense) == SCSIS_ASC_NO_MEDIUM)
			return true;
	}

	return false;
}

static bool
configure_command_queueing(ide_device_info *device)
{
	device->CQ_enabled = device->CQ_supported = false;

	if (!device->bus->can_CQ
		|| !device->infoblock.DMA_QUEUED_supported)
		return initialize_qreq_array(device, 1);

	if (device->infoblock.RELEASE_irq_supported
		&& !device_set_feature(device, IDE_CMD_SET_FEATURES_DISABLE_REL_INT))
		dprintf("Cannot disable release irq\n");

	if (device->infoblock.SERVICE_irq_supported
		&& !device_set_feature(device, IDE_CMD_SET_FEATURES_DISABLE_SERV_INT))
		dprintf("Cannot disable service irq\n");

	device->CQ_enabled = device->CQ_supported = true;

	SHOW_INFO0(2, "Enabled command queueing");

	// official IBM docs talk about 31 queue entries, though
	// their disks report 32; let's hope their docs are wrong
	return initialize_qreq_array(device, device->infoblock.queue_depth + 1);
}

bool
prep_ata(ide_device_info *device)
{
	ide_device_infoblock *infoblock = &device->infoblock;
	uint32 chs_capacity;

	SHOW_FLOW0(3, "");

	device->is_atapi = false;
	device->exec_io = ata_exec_io;
	device->last_lun = 0;

	// warning: ata == 0 means "this is ata"...
	if (infoblock->_0.ata.ATA != 0) {
		// CF has either the magic header or the CFA bit set;
		// we merge it to "CFA bit set" for easier (later) testing
		if (*(uint16 *)infoblock == 0x848a)
			infoblock->CFA_supported = true;
		else
			return false;
	}

	SHOW_FLOW0(3, "1");

	if (!infoblock->_54_58_valid) {
		// normally, current_xxx contains the active CHS mapping,
		// but if the BIOS didn't call INITIALIZE DEVICE PARAMETERS,
		// the default mapping is used
		infoblock->current_sectors = infoblock->sectors;
		infoblock->current_cylinders = infoblock->cylinders;
		infoblock->current_heads = infoblock->heads;
	}

	// just in case capacity_xxx isn't initialized - calculate it manually
	// (seems that this information is really redundant; hopefully)
	chs_capacity = infoblock->current_sectors * infoblock->current_cylinders *
		infoblock->current_heads;

	infoblock->capacity_low = chs_capacity & 0xff;
	infoblock->capacity_high = chs_capacity >> 8;

	// checking the LBA_supported flag should be sufficient, but it seems
	// that checking LBA_total_sectors is a good idea too
	device->use_LBA = infoblock->LBA_supported && infoblock->LBA_total_sectors != 0;

	if (device->use_LBA) {
		device->total_sectors = infoblock->LBA_total_sectors;
		device->tf.lba.mode = ide_mode_lba;
	} else {
		device->total_sectors = chs_capacity;
		device->tf.chs.mode = ide_mode_chs;
	}

	device->use_48bits = infoblock->_48_bit_addresses_supported;

	if (device->use_48bits)
		device->total_sectors = infoblock->LBA48_total_sectors;

	SHOW_FLOW0(3, "2");

	if (!configure_dma(device)
		|| !configure_command_queueing(device)
		|| !configure_rmsn(device))
		return false;

	SHOW_FLOW0(3, "3");

	return true;
}

void
enable_CQ(ide_device_info *device, bool enable)