/*
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
5 #ifndef __IDE_INTERNAL_H__
6 #define __IDE_INTERNAL_H__
13 #include "ide_device_infoblock.h"
14 #include <ide_types.h>
15 #include <device_manager.h>
// verbosity levels for the driver's tracing macros
#define debug_level_error 2
#define debug_level_info 1
#define debug_level_flow 0

#define DEBUG_MSG_PREFIX "IDE -- "

// standard command timeout — presumably in seconds, matching the
// "timeout in seconds" convention used elsewhere in this header (confirm)
#define IDE_STD_TIMEOUT 10
// timeout for a device to release the bus — magnitude suggests
// microseconds (bigtime_t); TODO(review): confirm against users
#define IDE_RELEASE_TIMEOUT 10000000

// number of timeouts before we disable DMA automatically
#define MAX_DMA_FAILURES 3

// name of pnp generator of channel ids
#define IDE_CHANNEL_ID_GENERATOR "ide/channel_id"
// node item containing channel id (uint32)
#define IDE_CHANNEL_ID_ITEM "ide/channel_id"
// device manager interface, used to talk to the PnP/device-node subsystem
extern device_manager_info *pnp;

// forward declaration; full definition appears further down in this header
typedef struct ide_bus_info ide_bus_info;

// callback type executed in a synchronized context
// (scheduled via schedule_synced_pc, see below)
typedef void (*ide_synced_pc_func)(ide_bus_info *bus, void *arg);
44 typedef struct ide_synced_pc
{
45 struct ide_synced_pc
*next
;
46 ide_synced_pc_func func
;
51 // structure for device time-outs
52 typedef struct ide_device_timer_info
{
54 struct ide_device_info
*device
;
55 } ide_device_timer_info
;
57 // structure for bus time-outs
58 typedef struct ide_bus_timer_info
{
60 struct ide_bus_info
*bus
;
64 typedef struct ide_device_info
{
65 struct ide_bus_info
*bus
;
67 uint8 use_LBA
: 1; // true for LBA, false for CHS
68 uint8 use_48bits
: 1; // true for LBA48
69 uint8 is_atapi
: 1; // true for ATAPI, false for ATA
70 uint8 CQ_supported
: 1; // Command Queuing supported
71 uint8 CQ_enabled
: 1; // Command Queuing enabled
72 uint8 DMA_supported
: 1; // DMA supported
73 uint8 DMA_enabled
: 1; // DMA enabled
74 uint8 is_device1
: 1; // true for slave, false for master
76 uint8 queue_depth
; // maximum Command Queueing depth
78 uint8 last_lun
; // last LUN
80 uint8 DMA_failures
; // DMA failures in a row
81 uint8 CQ_failures
; // Command Queuing failures during _last_ command
82 uint8 num_failed_send
; // number of consequetive send problems
84 // next two error codes are copied to request on finish_request & co.
85 uint8 subsys_status
; // subsystem status of current request
86 uint32 new_combined_sense
; // emulated sense of current request
88 // pending error codes
89 uint32 combined_sense
; // emulated sense of device
91 struct ide_qrequest
*qreq_array
; // array of ide requests
92 struct ide_qrequest
*free_qrequests
; // free list
93 int num_running_reqs
; // number of running requests
95 struct ide_device_info
*other_device
; // other device on same bus
97 // entry for scsi's exec_io request
98 void (*exec_io
)( struct ide_device_info
*device
, struct ide_qrequest
*qrequest
);
100 int target_id
; // target id (currently, same as is_device1)
102 ide_reg_mask tf_param_mask
; // flag of valid bytes in task file
103 ide_task_file tf
; // task file
106 uint64 total_sectors
; // size in sectors
108 // atapi from here on
109 uint8 packet
[12]; // atapi command packet
112 uint8 packet_irq
: 1; // true, if command packet irq required
113 bigtime_t packet_irq_timeout
; // timeout for it
116 uint8 device_type
; // atapi device type
118 bool reconnect_timer_installed
; // true, if reconnect timer is running
119 ide_device_timer_info reconnect_timer
; // reconnect timeout
120 scsi_dpc_cookie reconnect_timeout_dpc
; // dpc fired by timeout
121 ide_synced_pc reconnect_timeout_synced_pc
; // spc fired by dpc
124 int left_sg_elem
; // remaining sg elements
125 const physical_entry
*cur_sg_elem
; // active sg element
126 int cur_sg_ofs
; // offset in active sg element
128 int left_blocks
; // remaining blocks
130 bool has_odd_byte
; // remaining odd byte
131 int odd_byte
; // content off odd byte
133 ide_device_infoblock infoblock
; // infoblock of device
137 /*// state as stored in sim_state of scsi_ccb
139 ide_request_normal = 0, // this must be zero as this is initial value
140 ide_request_start_autosense = 1,
141 ide_request_autosense = 2
142 } ide_request_state;*/
145 typedef struct ide_qrequest
{
146 struct ide_qrequest
*next
;
147 ide_device_info
*device
;
148 scsi_ccb
*request
; // basic request
150 uint8 is_write
: 1; // true for write request
151 uint8 running
: 1; // true if "on bus"
152 uint8 uses_dma
: 1; // true if using dma
153 uint8 packet_irq
: 1; // true if waiting for command packet irq
154 uint8 queuable
: 1; // true if command queuing is used
155 uint8 tag
; // command queuing tag
161 ide_state_idle
, // noone is using it, but overlapped
162 // commands may be pending
163 ide_state_accessing
, // bus is in use
164 ide_state_async_waiting
, // waiting for IRQ, to be reported via irq_dpc
165 ide_state_sync_waiting
, // waiting for IRQ, to be reported via sync_wait_sem
168 struct ide_bus_info
{
169 ide_qrequest
*active_qrequest
;
172 ide_controller_interface
*controller
;
173 void *channel_cookie
;
175 // lock, used for changes of bus state
177 cpu_status prev_irq_state
;
179 ide_bus_state state
; // current state of bus
181 mutex status_report_ben
; // to lock when you report XPT about bus state
182 // i.e. during requeue, resubmit or finished
184 bool disconnected
; // true, if controller is lost
185 int num_running_reqs
; // total number of running requests
187 scsi_bus scsi_cookie
; // cookie for scsi bus
189 ide_bus_timer_info timer
; // timeout
190 scsi_dpc_cookie irq_dpc
;
191 ide_synced_pc
*synced_pc_list
;
193 sem_id sync_wait_sem
; // released when sync_wait finished
194 bool sync_wait_timeout
; // true, if timeout occured
196 ide_device_info
*active_device
;
197 ide_device_info
*devices
[2];
198 ide_device_info
*first_device
;
200 ide_synced_pc scan_bus_syncinfo
; // used to start bus scan
201 sem_id scan_device_sem
; // released when device has been scanned
203 ide_synced_pc disconnect_syncinfo
; // used to handle lost controller
207 device_node
*node
; // our pnp node
209 // restrictions, read from controller node
// call this before you change bus state
// Disables interrupts, takes the bus spinlock, and stashes the previous
// interrupt state in the bus so IDE_UNLOCK can restore it.
// Wrapped in do { } while (0) so the macro behaves as a single statement
// (safe in unbraced if/else); the original extraction had also lost the
// closing brace, which is restored here.
#define IDE_LOCK(bus) do { \
	cpu_status prev_irq_state = disable_interrupts(); \
	acquire_spinlock(&(bus)->lock); \
	(bus)->prev_irq_state = prev_irq_state; \
} while (0)

// call this after you changed bus state
// Releases the bus spinlock and restores the interrupt state saved by
// IDE_LOCK. Must be called exactly once per IDE_LOCK.
#define IDE_UNLOCK(bus) do { \
	cpu_status prev_irq_state = (bus)->prev_irq_state; \
	release_spinlock(&(bus)->lock); \
	restore_interrupts(prev_irq_state); \
} while (0)
// published module name of the IDE SCSI interface module (SIM)
#define IDE_SIM_MODULE_NAME "bus_managers/ide/sim/v1"
237 ev_ide_send_command
= 1,
238 ev_ide_device_start_service
,
239 ev_ide_device_start_service2
,
243 ev_ide_cancel_irq_timeout
,
244 ev_ide_start_waiting
,
252 ev_ide_scsi_io_invalid_device
,
253 ev_ide_scsi_io_bus_busy
,
254 ev_ide_scsi_io_device_busy
,
255 ev_ide_scsi_io_disconnected
,
256 ev_ide_finish_request
,
257 ev_ide_finish_norelease
,
259 ev_ide_scan_device_int
,
260 ev_ide_scan_device_int_cant_send
,
261 ev_ide_scan_device_int_keeps_busy
,
262 ev_ide_scan_device_int_found
267 // get selected device
268 static inline ide_device_info
*
269 get_current_device(ide_bus_info
*bus
)
273 bus
->controller
->read_command_block_regs(bus
->channel_cookie
, &tf
,
274 ide_mask_device_head
);
276 return bus
->devices
[tf
.lba
.device
];
280 // check if device has released the bus
281 // return: true, if bus was released
283 device_released_bus(ide_device_info
*device
)
285 ide_bus_info
*bus
= device
->bus
;
287 bus
->controller
->read_command_block_regs(bus
->channel_cookie
,
288 &device
->tf
, ide_mask_sector_count
);
290 return device
->tf
.queued
.release
;
// error checking after ATA commands
// (presumably implemented in ata.c — confirm)

// check the outcome of an ATA read/write request
bool check_rw_error(ide_device_info *device, ide_qrequest *qrequest);
// check device status after a command;
// drdy_required - whether DRDY must be set, error_mask - error bits to test
// (exact return semantics live in the implementation — confirm there)
bool check_output(ide_device_info *device, bool drdy_required, int error_mask,
	bool is_write);
// ATA command execution

// prepare an ATA device after scan (setup of modes etc. — confirm in impl)
bool prep_ata(ide_device_info *device);
// enable/disable Command Queuing on the device
void enable_CQ(ide_device_info *device, bool enable);
// issue an ATA read/write; pos is the start sector, length in sectors
// (units presumed from total_sectors usage — confirm)
void ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
	uint64 pos, size_t length, bool write);

// deferred procedure calls, run after the IRQ of a DMA resp. PIO transfer
void ata_dpc_DMA(ide_qrequest *qrequest);
void ata_dpc_PIO(ide_qrequest *qrequest);

// execute a SCSI request on an ATA device (entry mirrored by atapi_exec_io)
void ata_exec_io(ide_device_info *device, ide_qrequest *qrequest);
// ATAPI command execution

// prepare an ATAPI device after scan
bool prep_atapi(ide_device_info *device);

// send the 12-byte ATAPI command packet (see ide_device_info::packet)
void send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write);
// deferred procedure call for ATAPI transfers
void packet_dpc(ide_qrequest *qrequest);
// execute a SCSI request on an ATAPI device
void atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest);
324 bool ide_wait(ide_device_info
*device
, int mask
, int not_mask
, bool check_err
,
// basic device protocol: status waits, command submission, resets

// wait until DRQ is set / cleared, resp. DRDY is set
bool wait_for_drq(ide_device_info *device);
bool wait_for_drqdown(ide_device_info *device);
bool wait_for_drdy(ide_device_info *device);

// timeout in seconds
bool send_command(ide_device_info *device, ide_qrequest *qrequest,
	bool need_drdy, uint32 timeout, ide_bus_state new_state);
// ask a queuing device which command to service; tag is returned
bool device_start_service(ide_device_info *device, int *tag);
// device resp. bus reset; the qrequest argument is unused ("ignore")
bool reset_device(ide_device_info *device, ide_qrequest *ignore);
bool reset_bus(ide_device_info *device, ide_qrequest *ignore);

// check whether a queued device requests service
bool check_service_req(ide_device_info *device);
// interface exported to IDE controller drivers
extern ide_for_controller_interface ide_for_controller_module;

// worker scanning devices on a bus; runs as a synced pc
// (presumably scheduled via scan_bus_syncinfo — confirm)
void scan_device_worker(ide_bus_info *bus, void *arg);
// DMA engine handling

// set up the controller's DMA engine for a request
bool prepare_dma(ide_device_info *device, ide_qrequest *qrequest);
// start the DMA transfer; the *_wait variants also start the IRQ timeout
// (nolock version for callers already holding the bus lock — confirm)
void start_dma(ide_device_info *device, ide_qrequest *qrequest);
void start_dma_wait(ide_device_info *device, ide_qrequest *qrequest);
void start_dma_wait_no_lock(ide_device_info *device, ide_qrequest *qrequest);
// finish resp. abort a running DMA transfer
bool finish_dma(ide_device_info *device);
void abort_dma(ide_device_info *device, ide_qrequest *qrequest);

// negotiate/configure the DMA mode of the device
bool configure_dma(ide_device_info *device);
int get_device_dma_mode(ide_device_info *device);
// SCSI emulation helpers

// copy data between a request's scatter/gather list and a linear buffer;
// to_buffer selects the direction
bool copy_sg_data(scsi_ccb *request, uint offset, uint req_size_limit,
	void *buffer, int size, bool to_buffer);
// deliver the emulated sense data of the device (see combined_sense)
void ide_request_sense(ide_device_info *device, ide_qrequest *qrequest);

// PIO data transfers (operate on the device's sg state: cur_sg_elem etc.)

void prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest);
status_t read_PIO_block(ide_qrequest *qrequest, int length);
status_t write_PIO_block(ide_qrequest *qrequest, int length);
// command queuing support

// abort all queued commands of the device
bool send_abort_queue(ide_device_info *device);
// try to service a device that reconnected (overlapped commands)
bool try_service(ide_device_info *device);

// handle reconnection timeout; worker runs as synced pc, the other is the
// raw timer hook (fired from interrupt context — confirm)
void reconnect_timeout_worker(ide_bus_info *bus, void *arg);
int32 reconnect_timeout(timer *arg);

// allocate resp. free the per-device request array
// (qreq_array / free_qrequests, sized by queue_depth)
bool initialize_qreq_array(ide_device_info *device, int queue_depth);
void destroy_qreq_array(ide_device_info *device);
// synchronization and IRQ handling

// timeout in seconds (according to CAM)
void start_waiting(ide_bus_info *bus, uint32 timeout, int new_state);
void start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state);
// block until a sync_wait finishes (released via sync_wait_sem)
void wait_for_sync(ide_bus_info *bus);
// cancel a pending IRQ timeout
void cancel_irq_timeout(ide_bus_info *bus);

// schedule a synchronized procedure call (queued on synced_pc_list)
status_t schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg);
void init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func);
void uninit_synced_pc(ide_synced_pc *pc);

// deferred procedure call dispatched after an IRQ (see irq_dpc)
void ide_dpc(void *arg);
// report that the bus access of a device has finished
void access_finished(ide_bus_info *bus, ide_device_info *device);

// interrupt handler, called by the controller driver with the status byte
status_t ide_irq_handler(ide_bus_info *bus, uint8 status);
// timer hook for command timeouts
status_t ide_timeout(timer *arg);
410 #endif /* __IDE_INTERNAL_H__ */