/* include/scsi/scsi_device.h (Linux 2.6.25.20) */
#ifndef _SCSI_SCSI_DEVICE_H
#define _SCSI_SCSI_DEVICE_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <asm/atomic.h>

struct request_queue;
struct scsi_cmnd;
struct scsi_lun;
struct scsi_sense_hdr;
struct scsi_mode_data {
	__u32	length;
	__u16	block_descriptor_length;
	__u8	medium_type;
	__u8	device_specific;
	__u8	header_length;
	__u8	longlba:1;
};
/*
 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_lib:scsi_device_set_state().
 */
enum scsi_device_state {
	SDEV_CREATED = 1,	/* device created but not added to sysfs
				 * Only internal commands allowed (for inq) */
	SDEV_RUNNING,		/* device properly configured
				 * All commands allowed */
	SDEV_CANCEL,		/* beginning to delete device
				 * Only error handler commands allowed */
	SDEV_DEL,		/* device deleted
				 * no commands allowed */
	SDEV_QUIESCE,		/* Device quiescent.  No block commands
				 * will be accepted, only specials (which
				 * originate in the mid-layer) */
	SDEV_OFFLINE,		/* Device offlined (by error handling or
				 * user request) */
	SDEV_BLOCK,		/* Device blocked by scsi lld.  No scsi
				 * commands from user or midlayer should be issued
				 * to the scsi lld. */
};
enum scsi_device_event {
	SDEV_EVT_MEDIA_CHANGE	= 1,	/* media has changed */

	SDEV_EVT_LAST		= SDEV_EVT_MEDIA_CHANGE,
	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
};
struct scsi_event {
	enum scsi_device_event	evt_type;
	struct list_head	node;

	/* put union of data structures, for non-simple event types,
	 * here
	 */
};
struct scsi_device {
	struct Scsi_Host *host;
	struct request_queue *request_queue;

	/* the next two are protected by the host->host_lock */
	struct list_head    siblings;   /* list of all devices on this host */
	struct list_head    same_target_siblings; /* just the devices sharing same target id */

	/* this is now protected by the request_queue->queue_lock */
	unsigned int device_busy;	/* commands actually active on
					 * low-level. protected by queue_lock. */
	spinlock_t list_lock;
	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
	struct list_head starved_entry;
	struct scsi_cmnd *current_cmnd;	/* currently active command */
	unsigned short queue_depth;	/* How deep of a queue we want */
	unsigned short last_queue_full_depth; /* These two are used by */
	unsigned short last_queue_full_count; /* scsi_track_queue_full() */
	unsigned long last_queue_full_time;   /* don't let QUEUE_FULLs on the same
					       * jiffie count on our counter, they
					       * could all be from the same event. */

	unsigned int id, lun, channel;

	unsigned int manufacturer;	/* Manufacturer of device, for using
					 * vendor-specific cmd's */
	unsigned sector_size;		/* size in bytes */

	void *hostdata;			/* available to low-level driver */
	char type;
	char scsi_level;
	char inq_periph_qual;		/* PQ from INQUIRY data */
	unsigned char inquiry_len;	/* valid bytes in 'inquiry' */
	unsigned char *inquiry;		/* INQUIRY response data */
	const char *vendor;		/* [back_compat] point into 'inquiry' ... */
	const char *model;		/* ... after scan; point to static string */
	const char *rev;		/* ... "nullnullnullnull" before scan */
	unsigned char current_tag;	/* current tag */
	struct scsi_target *sdev_target; /* used only for single_lun */

	unsigned int sdev_bflags;	/* black/white flags as also found in
					 * scsi_devinfo.[hc]. For now used only to
					 * pass settings from slave_alloc to scsi
					 * core. */
	unsigned writeable:1;
	unsigned removable:1;
	unsigned changed:1;		/* Data invalid due to media change */
	unsigned busy:1;		/* Used to prevent races */
	unsigned lockable:1;		/* Able to prevent media removal */
	unsigned locked:1;		/* Media removal disabled */
	unsigned borken:1;		/* Tell the Seagate driver to be
					 * painfully slow on this device */
	unsigned disconnect:1;		/* can disconnect */
	unsigned soft_reset:1;		/* Uses soft reset option */
	unsigned sdtr:1;		/* Device supports SDTR messages */
	unsigned wdtr:1;		/* Device supports WDTR messages */
	unsigned ppr:1;			/* Device supports PPR messages */
	unsigned tagged_supported:1;	/* Supports SCSI-II tagged queuing */
	unsigned simple_tags:1;		/* simple queue tag messages are enabled */
	unsigned ordered_tags:1;	/* ordered queue tag messages are enabled */
	unsigned was_reset:1;		/* There was a bus reset on the bus for
					 * this device */
	unsigned expecting_cc_ua:1;	/* Expecting a CHECK_CONDITION/UNIT_ATTN
					 * because we did a bus reset. */
	unsigned use_10_for_rw:1;	/* first try 10-byte read / write */
	unsigned use_10_for_ms:1;	/* first try 10-byte mode sense/select */
	unsigned skip_ms_page_8:1;	/* do not use MODE SENSE page 0x08 */
	unsigned skip_ms_page_3f:1;	/* do not use MODE SENSE page 0x3f */
	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
	unsigned no_start_on_add:1;	/* do not issue start on add */
	unsigned allow_restart:1;	/* issue START_UNIT in error handler */
	unsigned manage_start_stop:1;	/* Let HLD (sd) manage start/stop */
	unsigned no_uld_attach:1;	/* disable connecting to upper level drivers */
	unsigned select_no_atn:1;
	unsigned fix_capacity:1;	/* READ_CAPACITY is too high by 1 */
	unsigned guess_capacity:1;	/* READ_CAPACITY might be too high by 1 */
	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */
	unsigned last_sector_bug:1;	/* Always read last sector in a 1 sector read */

	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
	struct list_head event_list;	/* asserted events */
	struct work_struct event_work;

	unsigned int device_blocked;	/* Device returned QUEUE_FULL. */

	unsigned int max_device_blocked; /* what device_blocked counts down from */
#define SCSI_DEFAULT_DEVICE_BLOCKED	3

	atomic_t iorequest_cnt;
	atomic_t iodone_cnt;
	atomic_t ioerr_cnt;

	int timeout;

	struct device		sdev_gendev;
	struct class_device	sdev_classdev;

	struct execute_work	ew; /* used to get process context on put */

	enum scsi_device_state sdev_state;
	unsigned long		sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
#define to_scsi_device(d)	\
	container_of(d, struct scsi_device, sdev_gendev)
#define class_to_sdev(d)	\
	container_of(d, struct scsi_device, sdev_classdev)
#define transport_class_to_sdev(class_dev) \
	to_scsi_device(class_dev->dev)

#define sdev_printk(prefix, sdev, fmt, a...)	\
	dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)

#define scmd_printk(prefix, scmd, fmt, a...)				\
	(scmd)->request->rq_disk ?					\
	sdev_printk(prefix, (scmd)->device, "[%s] " fmt,		\
		    (scmd)->request->rq_disk->disk_name, ##a) :		\
	sdev_printk(prefix, (scmd)->device, fmt, ##a)
enum scsi_target_state {
	STARGET_RUNNING = 1,
	STARGET_DEL,
};
/*
 * scsi_target: representation of a scsi target, for now, this is only
 * used for single_lun devices. If no one has active IO to the target,
 * starget_sdev_user is NULL, else it points to the active sdev.
 */
struct scsi_target {
	struct scsi_device	*starget_sdev_user;
	struct list_head	siblings;
	struct list_head	devices;
	struct device		dev;
	unsigned int		reap_ref; /* protected by the host lock */
	unsigned int		channel;
	unsigned int		id; /* target id ... replace
				     * scsi_device.id eventually */
	unsigned int		create:1; /* signal that it needs to be added */
	unsigned int		single_lun:1;	/* Indicates we should only
						 * allow I/O to one of the luns
						 * for the device at a time. */
	unsigned int		pdt_1f_for_no_lun;	/* PDT = 0x1f */
							/* means no lun present */

	char			scsi_level;
	struct execute_work	ew;
	enum scsi_target_state	state;
	void			*hostdata; /* available to low-level driver */
	unsigned long		starget_data[0]; /* for the transport */
	/* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));
#define to_scsi_target(d)	container_of(d, struct scsi_target, dev)
static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
{
	return to_scsi_target(sdev->sdev_gendev.parent);
}
#define transport_class_to_starget(class_dev) \
	to_scsi_target(class_dev->dev)

#define starget_printk(prefix, starget, fmt, a...)	\
	dev_printk(prefix, &(starget)->dev, fmt, ##a)
extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
		uint, uint, uint, void *hostdata);
extern int scsi_add_device(struct Scsi_Host *host, uint channel,
			   uint target, uint lun);
extern void scsi_remove_device(struct scsi_device *);

extern int scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
					      uint, uint, uint);
extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *,
						uint, uint, uint);
extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *,
							uint);
extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *,
							   uint);
extern void starget_for_each_device(struct scsi_target *, void *,
		     void (*fn)(struct scsi_device *, void *));
extern void __starget_for_each_device(struct scsi_target *, void *,
				      void (*fn)(struct scsi_device *,
						 void *));

/* only exposed to implement shost_for_each_device */
extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
						  struct scsi_device *);
/**
 * shost_for_each_device - iterate over all devices of a host
 * @sdev: the &struct scsi_device to use as a cursor
 * @shost: the &struct scsi_host to iterate over
 *
 * Iterator that returns each device attached to @shost.  This loop
 * takes a reference on each device and releases it at the end.  If
 * you break out of the loop, you must call scsi_device_put(sdev).
 */
#define shost_for_each_device(sdev, shost) \
	for ((sdev) = __scsi_iterate_devices((shost), NULL); \
	     (sdev); \
	     (sdev) = __scsi_iterate_devices((shost), (sdev)))
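
/*
 * A minimal usage sketch (not part of the original header): walk every
 * device on a host and stop at the first one that is offline.  "shost"
 * is assumed to be a valid, already-registered Scsi_Host.  Note the
 * explicit scsi_device_put() before break, as required by the rule
 * documented above.
 *
 *	struct scsi_device *sdev;
 *
 *	shost_for_each_device(sdev, shost) {
 *		if (!scsi_device_online(sdev)) {
 *			sdev_printk(KERN_INFO, sdev, "device is offline\n");
 *			scsi_device_put(sdev);
 *			break;
 *		}
 *	}
 */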
/**
 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
 * @sdev: the &struct scsi_device to use as a cursor
 * @shost: the &struct scsi_host to iterate over
 *
 * Iterator that returns each device attached to @shost.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note: The only reason to use this is because you need to access the
 * device list in interrupt context.  Otherwise you really want to use
 * shost_for_each_device instead.
 */
#define __shost_for_each_device(sdev, shost) \
	list_for_each_entry((sdev), &((shost)->__devices), siblings)
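
/*
 * A minimal usage sketch (illustrative only): count the devices on a
 * host from a context that cannot sleep.  The whole loop is protected
 * by shost->host_lock, as required above; "shost" is assumed to be a
 * valid Scsi_Host.
 *
 *	struct scsi_device *sdev;
 *	unsigned long flags;
 *	unsigned int count = 0;
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	__shost_for_each_device(sdev, shost)
 *		count++;
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */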
extern void scsi_adjust_queue_depth(struct scsi_device *, int, int);
extern int scsi_track_queue_full(struct scsi_device *, int);

extern int scsi_set_medium_removal(struct scsi_device *, char);

extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
			   unsigned char *buffer, int len, int timeout,
			   int retries, struct scsi_mode_data *data,
			   struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
			    int modepage, unsigned char *buffer, int len,
			    int timeout, int retries,
			    struct scsi_mode_data *data,
			    struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
				int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_device_set_state(struct scsi_device *sdev,
				 enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
					 gfp_t gfpflags);
extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt);
extern void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags);
extern int scsi_device_quiesce(struct scsi_device *sdev);
extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);
extern void scsi_scan_target(struct device *parent, unsigned int channel,
			     unsigned int id, unsigned int lun, int rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *);
extern void scsi_remove_target(struct device *);
extern void int_to_scsilun(unsigned int, struct scsi_lun *);
extern int scsilun_to_int(struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
			int data_direction, void *buffer, unsigned bufflen,
			unsigned char *sense, int timeout, int retries,
			int flag);
extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
			    int data_direction, void *buffer, unsigned bufflen,
			    struct scsi_sense_hdr *, int timeout, int retries);
extern int scsi_execute_async(struct scsi_device *sdev,
			      const unsigned char *cmd, int cmd_len, int data_direction,
			      void *buffer, unsigned bufflen, int use_sg,
			      int timeout, int retries, void *privdata,
			      void (*done)(void *, char *, int, int),
			      gfp_t gfp);
static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
{
	return device_reprobe(&sdev->sdev_gendev);
}

static inline unsigned int sdev_channel(struct scsi_device *sdev)
{
	return sdev->channel;
}

static inline unsigned int sdev_id(struct scsi_device *sdev)
{
	return sdev->id;
}

#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)

static inline int scsi_device_online(struct scsi_device *sdev)
{
	return sdev->sdev_state != SDEV_OFFLINE;
}

/* accessor functions for the SCSI parameters */
static inline int scsi_device_sync(struct scsi_device *sdev)
{
	return sdev->sdtr;
}
static inline int scsi_device_wide(struct scsi_device *sdev)
{
	return sdev->wdtr;
}
static inline int scsi_device_dt(struct scsi_device *sdev)
{
	return sdev->ppr;
}
static inline int scsi_device_dt_only(struct scsi_device *sdev)
{
	if (sdev->inquiry_len < 57)
		return 0;
	return (sdev->inquiry[56] & 0x0c) == 0x04;
}
static inline int scsi_device_ius(struct scsi_device *sdev)
{
	if (sdev->inquiry_len < 57)
		return 0;
	return sdev->inquiry[56] & 0x01;
}
static inline int scsi_device_qas(struct scsi_device *sdev)
{
	if (sdev->inquiry_len < 57)
		return 0;
	return sdev->inquiry[56] & 0x02;
}
static inline int scsi_device_enclosure(struct scsi_device *sdev)
{
	return sdev->inquiry[6] & (1<<6);
}
#define MODULE_ALIAS_SCSI_DEVICE(type) \
	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"

#endif /* _SCSI_SCSI_DEVICE_H */