/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/

/*
	Part of Open IDE bus manager

	Command queuing functions
*/

#include "ide_internal.h"

// for malloc()/free() and memset() below
#include <malloc.h>
#include <string.h>

// maximum number of errors until command queuing is disabled
#define MAX_CQ_FAILURES 3
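// (once exceeded, reconnect_timeout_worker() disables command queuing
// and shrinks the queue to a single entry via change_qreq_array())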

/** convert tag to request */

static inline ide_qrequest *
tag2request(ide_device_info *device, int tag)
{
	ide_qrequest *qrequest = &device->qreq_array[tag];

	if (qrequest->running)
		return qrequest;

	return NULL;
}

/**	service the device
 *	(expects locked bus and bus in "accessing" state)
 *	returns true if servicing a command (implies having bus unlocked)
 *	returns false on error
 */

static bool
service_device(ide_device_info *device)
{
	ide_qrequest *qrequest;
	int tag;

	SHOW_FLOW0( 3, "Start servicing" );

	// delete timeout first

	// we must unlock the bus before cancelling the timer: if the timeout
	// has just fired, we have to wait for it, but it in turn waits
	// for the IDE bus -> deadlock
	IDE_UNLOCK(device->bus);

	cancel_timer(&device->reconnect_timer.te);

	// between IDE_UNLOCK and cancel_timer the request may have been
	// discarded due to a timeout, so a failure to service it is not
	// necessarily a hardware problem

	// further, the device discards the entire queue if anything goes
	// wrong, thus we call send_abort_queue on each error
	// (we could also discard the queue without telling the device,
	// but we prefer setting the device into a safe state)

	// ask the device to continue
	if (!device_start_service(device, &tag)) {
		send_abort_queue(device);
		goto err;
	}

	SHOW_FLOW0( 3, "device starts service" );

	qrequest = tag2request(device, tag);
	if (qrequest == NULL) {
		send_abort_queue(device);
		goto err;
	}

	SHOW_FLOW( 3, "continue request %p with tag %d", qrequest, tag );

	device->bus->active_qrequest = qrequest;

	// from here on, queuing is ATA read/write specific, so you have to
	// modify that if you want to support ATAPI queuing!
	if (check_rw_error(device, qrequest)) {
		// if a read/write error occurred, the request really failed
		finish_reset_queue(qrequest);
		goto err;
	}

	// all ATA commands continue with a DMA request
	if (!prepare_dma(device, qrequest)) {
		// this is effectively impossible: before the command was initially
		// sent, prepare_dma had been called and obviously didn't fail,
		// so why should it fail now?
		device->subsys_status = SCSI_HBA_ERR;
		finish_reset_queue(qrequest);
		goto err;
	}

	SHOW_FLOW0( 3, "launch DMA" );

	start_dma_wait_no_lock(device, qrequest);

	return true;

err:
	// don't start the timeout - all requests have been discarded at this point
	IDE_LOCK(device->bus);
	return false;
}

/**	check if some device on the bus wants to continue queued requests
 *	(expects locked bus and bus in "accessing" state)
 *	returns true if servicing a command (implies having bus unlocked)
 *	returns false if there was nothing to service
 */

bool
try_service(ide_device_info *device)
{
	bool this_device_needs_service;
	ide_device_info *other_device;

	other_device = device->other_device;

	// first check whether the current device requests service
	// (the current device is selected anyway, so asking it is fast)
	this_device_needs_service = check_service_req(device);

	// service the other device first as it has certainly been waiting
	// longer than the current device
	if (other_device != device && check_service_req(other_device)) {
		if (service_device(other_device)) {
			// we handed over control; start the timeout for our device
			// (see below about fairness)
			if (device->num_running_reqs > 0) {
				if (!device->reconnect_timer_installed) {
					device->reconnect_timer_installed = true;
					add_timer(&device->reconnect_timer.te, reconnect_timeout,
						IDE_RELEASE_TIMEOUT, B_ONE_SHOT_RELATIVE_TIMER);
				}
			}
			return true;
		}
	}

	// service our device second
	if (this_device_needs_service) {
		if (service_device(device))
			return true;
	}

	// if the device has pending requests, start the timeout.
	// this may sound strange, as we cannot be blamed if the
	// other device blocks us. But: the timeout is delayed until
	// the bus is idle, so once the other device finishes its
	// access, we have a chance of servicing all the pending
	// commands before the timeout handler is executed
	if (device->num_running_reqs > 0) {
		if (!device->reconnect_timer_installed) {
			device->reconnect_timer_installed = true;
			add_timer(&device->reconnect_timer.te, reconnect_timeout,
				IDE_RELEASE_TIMEOUT, B_ONE_SHOT_RELATIVE_TIMER);
		}
	}

	return false;
}
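
/**	create the request queue of a device;
 *	there is one entry per tag, so queue_depth is also the maximum
 *	number of concurrently running requests
 */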
bool
initialize_qreq_array(ide_device_info *device, int queue_depth)
{
	int i;

	device->queue_depth = queue_depth;

	SHOW_FLOW( 3, "queue depth=%d", device->queue_depth );

	device->qreq_array = (ide_qrequest *)malloc(queue_depth * sizeof(ide_qrequest));
	if (device->qreq_array == NULL)
		return false;

	memset(device->qreq_array, 0, queue_depth * sizeof(ide_qrequest));

	device->free_qrequests = NULL;
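
	// build the free list back to front, so that qreq_array[0] ends up
	// at the head and requests are handed out in array (i.e. tag) order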
	for (i = queue_depth - 1; i >= 0; --i) {
		ide_qrequest *qrequest = &device->qreq_array[i];

		qrequest->next = device->free_qrequests;
		device->free_qrequests = qrequest;

		qrequest->running = false;
		qrequest->device = device;
		qrequest->tag = i;
		qrequest->request = NULL;
	}

	return true;
}
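
/**	free the request queue of a device and reset queue bookkeeping */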
void
destroy_qreq_array(ide_device_info *device)
{
	if (device->qreq_array) {
		free(device->qreq_array);
		device->qreq_array = NULL;
	}

	device->num_running_reqs = 0;
	device->queue_depth = 0;
	device->free_qrequests = NULL;
}

/** change the maximum number of queuable requests */

static bool
change_qreq_array(ide_device_info *device, int queue_depth)
{
	ide_qrequest *qreq_array = device->qreq_array;
	ide_qrequest *old_free_qrequests = device->free_qrequests;
	int old_queue_depth = device->queue_depth;

	// be very cautious - even if no queuing is supported, we still need
	// one queue entry; if this allocation fails, we have a device that
	// cannot accept any command, which would be odd
	if (initialize_qreq_array(device, queue_depth)) {
		free(qreq_array);
		return true;
	}

	// restore the old queue state
	device->qreq_array = qreq_array;
	device->num_running_reqs = 0;
	device->queue_depth = old_queue_depth;
	device->free_qrequests = old_free_qrequests;
	return false;
}
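
// A reconnect timeout passes through three stages: the timer hook
// (reconnect_timeout) merely schedules a DPC; the DPC
// (reconnect_timeout_dpc) blocks the bus and schedules a synced
// procedure call; the synced PC (reconnect_timeout_worker) finally
// aborts the queue while the bus is safely allocated to us.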

/**	reconnect timeout worker;
 *	must be called as a synced procedure call, i.e.
 *	the bus is allocated for us
 */

void
reconnect_timeout_worker(ide_bus_info *bus, void *arg)
{
	ide_device_info *device = (ide_device_info *)arg;

	// perhaps all requests have been successfully finished
	// while the synced pc was waiting; in this case, everything's fine
	// (this is _very_ important if the other device blocks the bus
	// for a long time - if this leads to a reconnect timeout, the
	// device has a last chance by servicing all requests without
	// delay, in which case this function gets delayed until all
	// pending requests are finished and the following test
	// makes sure that this false alarm gets ignored)
	if (device->num_running_reqs > 0) {
		// if one queued command fails, all of them fail
		send_abort_queue(device);

		// if too many timeouts occur, disable CQ
		if (++device->CQ_failures > MAX_CQ_FAILURES) {
			device->CQ_enabled = false;

			change_qreq_array(device, 1);
		}
	}

	// we've blocked the bus in the dpc - undo that
	scsi->unblock_bus(device->bus->scsi_cookie);
}

/** dpc callback for reconnect timeout */

static void
reconnect_timeout_dpc(void *arg)
{
	ide_device_info *device = (ide_device_info *)arg;

	// even though we are in the service thread,
	// the bus can be in use (e.g. by an ongoing PIO command),
	// so we have to issue a synced procedure call which
	// waits for the command to be finished

	// meanwhile, we don't want any command to be issued to this device
	// as we are going to discard the entire device queue;
	// sadly, we don't have a reliable XPT device handle, so we block
	// the bus instead (this is an error handler, so performance is
	// not crucial)
	scsi->block_bus(device->bus->scsi_cookie);
	schedule_synced_pc(device->bus, &device->reconnect_timeout_synced_pc, device);
}

/** timer function for reconnect timeout */

int32
reconnect_timeout(timer *arg)
{
	ide_device_info *device = ((ide_device_timer_info *)arg)->device;
	ide_bus_info *bus = device->bus;

	// we are polite and let the service thread do the job
	scsi->schedule_dpc(bus->scsi_cookie, device->reconnect_timeout_dpc,
		reconnect_timeout_dpc, device);

	return B_INVOKE_SCHEDULER;
}

/**	tell the device to abort all queued requests
 *	(tells the XPT to resubmit these requests)
 *	returns: true - abort successful
 *	         false - abort failed (in this case, nothing can be done)
 */

bool
send_abort_queue(ide_device_info *device)
{
	int status;
	ide_bus_info *bus = device->bus;

	device->tf.write.command = IDE_CMD_NOP;
	// = discard outstanding commands
	device->tf.write.features = IDE_CMD_NOP_NOP;

	device->tf_param_mask = ide_mask_features;
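	// (tf_param_mask presumably tells send_command() which taskfile
	// registers to write in addition to the command register)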

	if (!send_command(device, NULL, true, 0, ide_state_accessing))
		goto err;

	if (!wait_for_drdy(device))
		goto err;

	// the device must answer "command rejected" and discard outstanding commands
	status = bus->controller->get_altstatus(bus->channel_cookie);

	if ((status & ide_status_err) == 0)
		goto err;

	if (!bus->controller->read_command_block_regs(bus->channel_cookie,
			&device->tf, ide_mask_error)) {
		// don't bother trying a bus_reset as the controller has disappeared
		device->subsys_status = SCSI_HBA_ERR;
		return false;
	}

	if ((device->tf.read.error & ide_error_abrt) == 0)
		goto err;

	finish_all_requests(device, NULL, 0, true);
	return true;

err:
	// ouch! the device didn't react - we have to reset it
	return reset_device(device, NULL);
}