/*
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

/*
	Part of Open IDE bus manager

	Handling of passive waiting and synchronized procedure calls.
	The latter are calls that get delayed until the bus is idle.
*/
14 #include "ide_internal.h"
22 # define TRACE(x) { dprintf("%s(): ", __FUNCTION__); dprintf x ; }
28 /** DPC handler for IRQs */
33 ide_bus_info
*bus
= (ide_bus_info
*)arg
;
34 ide_qrequest
*qrequest
;
35 ide_device_info
*device
;
41 // IRQ handler doesn't tell us whether this bus was in async_wait or
42 // in idle state, so we just check whether there is an active request,
43 // which means that we were async_waiting
44 if (bus
->active_qrequest
!= NULL
) {
45 TRACE(("continue command\n"));
48 cancel_timer(&bus
->timer
.te
);
50 qrequest
= bus
->active_qrequest
;
51 device
= qrequest
->device
;
53 // not perfect but simple: we simply know who is waiting why
57 if (qrequest
->uses_dma
)
58 ata_dpc_DMA(qrequest
);
60 ata_dpc_PIO(qrequest
);
63 // no request active, so this must be a service request or
64 // a spurious IRQ; access_finished will take care of testing
65 // for service requests
66 TRACE(("irq in idle mode - possible service request\n"));
68 device
= get_current_device(bus
);
70 // got an interrupt from a non-existing device
71 // either this is a spurious interrupt or there *is* a device
72 // but we haven't detected it - we better ignore it silently
73 access_finished(bus
, bus
->first_device
);
75 // access_finished always checks the other device first, but as
76 // we do have a service request, we negate the negation
77 access_finished(bus
, device
->other_device
);
80 // let XPT resend commands that got blocked
81 scsi
->cont_send_bus(bus
->scsi_cookie
);
87 xpt->cont_send( bus->xpt_cookie );*/
91 /** handler for IDE IRQs */
94 ide_irq_handler(ide_bus_info
*bus
, uint8 status
)
96 ide_device_info
*device
;
98 // we need to lock bus to have a solid bus state
99 // (side effect: we lock out the timeout handler and get
100 // delayed if the IRQ happens at the same time as a command is
101 // issued; in the latter case, we have no official way to determine
102 // whether the command was issued before or afterwards; if it was
103 // afterwards, the device must not be busy; if it was before,
104 // the device is either busy because of the sent command, or it's
105 // not busy as the command has already been finished, i.e. there
106 // was a second IRQ which we've overlooked as we didn't acknowledge
110 device
= bus
->active_device
;
112 if (device
== NULL
) {
115 TRACE(("IRQ though there is no active device\n"));
116 return B_UNHANDLED_INTERRUPT
;
119 if ((status
& ide_status_bsy
) != 0) {
120 // the IRQ seems to be fired before the last command was sent,
121 // i.e. it's not the one that signals finishing of command
124 TRACE(("IRQ though device is busy\n"));
125 return B_UNHANDLED_INTERRUPT
;
128 switch (bus
->state
) {
129 case ide_state_async_waiting
:
130 TRACE(("state: async waiting\n"));
132 bus
->state
= ide_state_accessing
;
136 scsi
->schedule_dpc(bus
->scsi_cookie
, bus
->irq_dpc
, ide_dpc
, bus
);
137 return B_INVOKE_SCHEDULER
;
140 TRACE(("state: idle, num_running_reqs %d\n", bus
->num_running_reqs
));
142 // this must be a service request;
143 // if no request is pending, the IRQ was fired wrongly
144 if (bus
->num_running_reqs
== 0) {
146 return B_UNHANDLED_INTERRUPT
;
149 bus
->state
= ide_state_accessing
;
153 scsi
->schedule_dpc(bus
->scsi_cookie
, bus
->irq_dpc
, ide_dpc
, bus
);
154 return B_INVOKE_SCHEDULER
;
156 case ide_state_sync_waiting
:
157 TRACE(("state: sync waiting\n"));
159 bus
->state
= ide_state_accessing
;
160 bus
->sync_wait_timeout
= false;
164 release_sem_etc(bus
->sync_wait_sem
, 1, B_DO_NOT_RESCHEDULE
);
165 return B_INVOKE_SCHEDULER
;
167 case ide_state_accessing
:
168 TRACE(("state: spurious IRQ - there is a command being executed\n"));
171 return B_UNHANDLED_INTERRUPT
;
174 dprintf("BUG: unknown state (%d)\n", bus
->state
);
178 return B_UNHANDLED_INTERRUPT
;
183 /** cancel IRQ timeout
184 * it doesn't matter whether there really was a timout running;
185 * on return, bus state is set to _accessing_
189 cancel_irq_timeout(ide_bus_info
*bus
)
192 bus
->state
= ide_state_accessing
;
195 cancel_timer(&bus
->timer
.te
);
199 /** start waiting for IRQ with bus lock hold
200 * new_state must be either sync_wait or async_wait
204 start_waiting(ide_bus_info
*bus
, uint32 timeout
, int new_state
)
208 TRACE(("timeout = %u\n", (uint
)timeout
));
210 bus
->state
= new_state
;
212 res
= add_timer(&bus
->timer
.te
, ide_timeout
,
213 (bigtime_t
)timeout
* 1000000, B_ONE_SHOT_RELATIVE_TIMER
);
216 panic("Error setting timeout (%s)", strerror(res
));
222 /** start waiting for IRQ with bus lock not hold */
225 start_waiting_nolock(ide_bus_info
*bus
, uint32 timeout
, int new_state
)
228 start_waiting(bus
, timeout
, new_state
);
232 /** wait for sync IRQ */
235 wait_for_sync(ide_bus_info
*bus
)
237 acquire_sem(bus
->sync_wait_sem
);
238 cancel_timer(&bus
->timer
.te
);
242 /** timeout dpc handler */
245 ide_timeout_dpc(void *arg
)
247 ide_bus_info
*bus
= (ide_bus_info
*)arg
;
248 ide_qrequest
*qrequest
;
249 ide_device_info
*device
;
251 qrequest
= bus
->active_qrequest
;
252 device
= qrequest
->device
;
254 dprintf("ide: ide_timeout_dpc() bus %p, device %p\n", bus
, device
);
256 // this also resets overlapped commands
257 reset_device(device
, qrequest
);
259 device
->subsys_status
= SCSI_CMD_TIMEOUT
;
261 if (qrequest
->uses_dma
) {
262 if (++device
->DMA_failures
>= MAX_DMA_FAILURES
) {
263 dprintf("Disabling DMA because of too many errors\n");
265 device
->DMA_enabled
= false;
269 // let upper layer do the retry
270 finish_checksense(qrequest
);
274 /** timeout handler, called by system timer */
277 ide_timeout(timer
*arg
)
279 ide_bus_info
*bus
= ((ide_bus_timer_info
*)arg
)->bus
;
281 TRACE(("ide_timeout(): %p\n", bus
));
283 dprintf("ide: ide_timeout() bus %p\n", bus
);
285 // we need to lock bus to have a solid bus state
286 // (side effect: we lock out the IRQ handler)
289 switch (bus
->state
) {
290 case ide_state_async_waiting
:
291 TRACE(("async waiting\n"));
293 bus
->state
= ide_state_accessing
;
297 scsi
->schedule_dpc(bus
->scsi_cookie
, bus
->irq_dpc
, ide_timeout_dpc
, bus
);
298 return B_INVOKE_SCHEDULER
;
300 case ide_state_sync_waiting
:
301 TRACE(("sync waiting\n"));
303 bus
->state
= ide_state_accessing
;
304 bus
->sync_wait_timeout
= true;
308 release_sem_etc(bus
->sync_wait_sem
, 1, B_DO_NOT_RESCHEDULE
);
309 return B_INVOKE_SCHEDULER
;
311 case ide_state_accessing
:
312 TRACE(("came too late - IRQ occured already\n"));
315 return B_DO_NOT_RESCHEDULE
;
318 // this case also happens if a timeout fires too late;
319 // unless there is a bug, the timeout should always be canceled
320 // before declaring bus as being idle
321 dprintf("BUG: unknown state (%d)\n", (int)bus
->state
);
324 return B_DO_NOT_RESCHEDULE
;
330 init_synced_pc(ide_synced_pc
*pc
, ide_synced_pc_func func
)
333 pc
->registered
= false;
338 uninit_synced_pc(ide_synced_pc
*pc
)
341 panic("Tried to clean up pending synced PC\n");
345 /** schedule a synced pc
346 * a synced pc gets executed as soon as the bus becomes idle
350 schedule_synced_pc(ide_bus_info
*bus
, ide_synced_pc
*pc
, void *arg
)
356 if (pc
->registered
) {
357 // spc cannot be registered twice
358 TRACE(("already registered\n"));
360 } else if( bus
->state
!= ide_state_idle
) {
361 // bus isn't idle - spc must be added to pending list
362 TRACE(("adding to pending list\n"));
364 pc
->next
= bus
->synced_pc_list
;
365 bus
->synced_pc_list
= pc
;
367 pc
->registered
= true;
373 // we have luck - bus is idle, so grab it before
374 // releasing the lock
376 TRACE(("exec immediately\n"));
378 bus
->state
= ide_state_accessing
;
384 TRACE(("finished\n"));
385 access_finished(bus
, bus
->first_device
);
387 // meanwhile, we may have rejected SCSI commands;
388 // usually, the XPT resends them once a command
389 // has finished, but in this case XPT doesn't know
390 // about our "private" command, so we have to tell about
392 TRACE(("tell SCSI bus manager about idle bus\n"));
393 scsi
->cont_send_bus(bus
->scsi_cookie
);
398 /** execute list of synced pcs */
401 exec_synced_pcs(ide_bus_info
*bus
, ide_synced_pc
*pc_list
)
405 // noone removes items from pc_list, so we don't need lock
407 for (pc
= pc_list
; pc
; pc
= pc
->next
) {
408 pc
->func(bus
, pc
->arg
);
411 // need lock now as items can be added to pc_list again as soon
412 // as <registered> is reset
415 for (pc
= pc_list
; pc
; pc
= pc
->next
) {
416 pc
->registered
= false;
423 /** finish bus access;
424 * check if any device wants to service pending commands + execute synced_pc
428 access_finished(ide_bus_info
*bus
, ide_device_info
*device
)
430 TRACE(("bus = %p, device = %p\n", bus
, device
));
433 ide_synced_pc
*synced_pc_list
;
437 // normally, there is always an device; only exception is a
438 // bus without devices, not sure whether this can really happen though
440 if (try_service(device
))
444 // noone wants it, so execute pending synced_pc
445 if (bus
->synced_pc_list
== NULL
) {
446 bus
->state
= ide_state_idle
;
451 synced_pc_list
= bus
->synced_pc_list
;
452 bus
->synced_pc_list
= NULL
;
456 exec_synced_pcs(bus
, synced_pc_list
);
458 // executed synced_pc may have generated other sync_pc,