/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
 *
 * This work is a part of the Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 * -- Kill first_open (Al Viro fixed the block layer now)
 * -- Do resets with usb_device_reset (needs a thread context, use khubd)
 * -- set readonly flag for CDs, set removable flag for CF readers
 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
 * -- verify the 13 conditions and do bulk resets
 * -- kill last_pipe and simply do two-state clearing on both pipes
 * -- verify protocol (bulk) from USB descriptors (maybe...)
 * -- move top_sense and work_bcs into separate allocations (if they survive)
 *    for cache purists and esoteric architectures.
 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 * -- prune comments, they are too voluminous
 * -- Exterminate P3 printks
 * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/blkdev.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/timer.h>
#include <scsi/scsi.h>

#define DEVFS_NAME DRV_NAME
/*
 * The command state machine is the key model for understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 * ub_scsi_cmd_start fails ->--------------------------------------\
 * was -EPIPE -->-------------------------------->! CLEAR ! !
 * was error -->------------------------------------- ! --------->\
 * /--<-- cmd->dir == NONE ? ! !
 * ! was -EPIPE -->--------------->! CLR2STS ! ! !
 * ! ! was error -->---- ! --------->\
 * ! was error -->--------------------- ! ------------- ! --------->\
 * \--->+--------+ ! ! !
 * ! STAT !<--------------------------/ ! !
 * [C] was -EPIPE -->-----------\ ! !
 * +<---- len == 0 ! ! !
 * ! was error -->--------------------------------------!---------->\
 * +<---- bad CSW ! ! !
 * +<---- bad tag ! ! !
 * \------- ! --------------------[C]--------\ ! !
 * cmd->error---\ +--------+ ! !
 * ! +--------------->! SENSE !<----------/ !
 * STAT_FAIL----/ +--------+ !
 * \--------------------------------\--------------------->! DONE !
 */
/*
 * Definitions which have to be scattered once we understand the layout better.
 */

/* Transport (despite PR in the name) */
#define US_PR_BULK	0x50		/* bulk only */

#define US_SC_SCSI	0x06		/* Transparent */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

#define UB_MINORS_PER_MAJOR	8

#define UB_MAX_CDB_SIZE      16		/* Corresponds to Bulk */

#define UB_SENSE_SIZE  18
/* command block wrapper */
struct bulk_cb_wrap {
	__le32	Signature;		/* contains 'USBC' */
	u32	Tag;			/* unique per command id */
	__le32	DataTransferLength;	/* size of data */
	u8	Flags;			/* direction in bit 0 */
	u8	Lun;			/* LUN (normally 0) */
	u8	Length;			/* of the CDB */
	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
};

#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0

/* command status wrapper */
struct bulk_cs_wrap {
	__le32	Signature;		/* should = 'USBS' */
	u32	Tag;			/* same as original command */
	__le32	Residue;		/* amount not transferred */
	u8	Status;			/* see below */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe
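/*
 * A quick sketch of one Bulk-Only transaction, as this driver performs it
 * (see the state diagram above): a 31-byte CBW goes out on the bulk-out
 * pipe, the data phase (if any) follows on the bulk-in or bulk-out pipe,
 * and the device answers with a 13-byte CSW whose Tag must match the CBW
 * and whose Residue says how much data was not transferred.
 */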
#define UB_MAX_REQ_SG	9	/* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */
/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2
#define UB_DIR_WRITE	3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))
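/*
 * For example, UB_DIR_CHAR(UB_DIR_READ) evaluates to 'r'; ub_diag_show()
 * below uses it to print the direction of each traced command.
 */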
enum ub_scsi_cmd_state {
	UB_CMDST_INIT,			/* Initial state */
	UB_CMDST_CMD,			/* Command submitted */
	UB_CMDST_DATA,			/* Data phase */
	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
	UB_CMDST_STAT,			/* Status phase */
	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
	UB_CMDST_CLRRS,			/* Clearing before retrying status */
	UB_CMDST_SENSE,			/* Sending Request Sense */
	UB_CMDST_DONE			/* Final state */
};

static char *ub_scsi_cmd_stname[] = {
	unsigned char cdb[UB_MAX_CDB_SIZE];
	unsigned char cdb_len;

	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
	unsigned char trace_index;
	enum ub_scsi_cmd_state state;

	struct ub_scsi_cmd *next;

	int error;			/* Return code - valid upon done */
	unsigned int act_len;		/* Return size */
	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */

	int stat_count;			/* Retries getting status. */

	unsigned int len;		/* Requested length */
	unsigned int current_sg;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];

	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);

	unsigned long nsec;		/* Linux size - 512 byte sectors */
	unsigned int bsize;		/* Linux hardsect_size */
	unsigned int bshift;		/* Shift between 512 and hard sects */
/*
 * The SCSI command tracing structure.
 */

#define SCMD_ST_HIST_SZ   8
#define SCMD_TRACE_SZ    63	/* Less than 4KB of 61-byte lines */

struct ub_scsi_cmd_trace {
	unsigned int req_size, act_size;
	unsigned char key, asc, ascq;
	char st_hst[SCMD_ST_HIST_SZ];

struct ub_scsi_trace {
	struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes a struct instead of a pointer to
 * a struct is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {

static inline void ub_init_completion(struct ub_completion *x)
	spin_lock_init(&x->lock);

#define UB_INIT_COMPLETION(x) ((x).done = 0)
static void ub_complete(struct ub_completion *x)
	spin_lock_irqsave(&x->lock, flags);
	spin_unlock_irqrestore(&x->lock, flags);

static int ub_is_completed(struct ub_completion *x)
	spin_lock_irqsave(&x->lock, flags);
	spin_unlock_irqrestore(&x->lock, flags);
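/*
 * The pattern used with the work URB below, in short: UB_INIT_COMPLETION()
 * on sc->work_done before the URB is submitted, ub_complete() from the URB
 * callback, and ub_is_completed() polled by the dispatch code to decide
 * whether the URB has finished.
 */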
struct ub_scsi_cmd_queue {
	struct ub_scsi_cmd *head, *tail;
328 * The block device instance (one per LUN).
332 struct list_head link
;
333 struct gendisk
*disk
;
334 int id
; /* Host index */
335 int num
; /* LUN number */
338 int changed
; /* Media was changed */
341 int first_open
; /* Kludge. See ub_bd_open. */
343 /* Use Ingo's mempool if or when we have more than one command. */
345 * Currently we never need more than one command for the whole device.
346 * However, giving every LUN a command is a cheap and automatic way
347 * to enforce fairness between them.
350 struct ub_scsi_cmd cmdv
[1];
352 struct ub_capacity capacity
;
356 * The USB device instance.
360 atomic_t poison
; /* The USB device is disconnected */
361 int openc
; /* protected by ub_lock! */
362 /* kref is too implicit for our taste */
365 struct usb_device
*dev
;
366 struct usb_interface
*intf
;
368 struct list_head luns
;
370 unsigned int send_bulk_pipe
; /* cached pipe values */
371 unsigned int recv_bulk_pipe
;
372 unsigned int send_ctrl_pipe
;
373 unsigned int recv_ctrl_pipe
;
375 struct tasklet_struct tasklet
;
377 struct ub_scsi_cmd_queue cmd_queue
;
378 struct ub_scsi_cmd top_rqs_cmd
; /* REQUEST SENSE */
379 unsigned char top_sense
[UB_SENSE_SIZE
];
381 struct ub_completion work_done
;
383 struct timer_list work_timer
;
384 int last_pipe
; /* What might need clearing */
385 __le32 signature
; /* Learned signature */
386 struct bulk_cb_wrap work_bcb
;
387 struct bulk_cs_wrap work_bcs
;
388 struct usb_ctrlrequest work_cr
;
391 struct ub_scsi_trace tr
;
396 static void ub_cleanup(struct ub_dev
*sc
);
397 static int ub_request_fn_1(struct ub_lun
*lun
, struct request
*rq
);
398 static int ub_cmd_build_block(struct ub_dev
*sc
, struct ub_lun
*lun
,
399 struct ub_scsi_cmd
*cmd
, struct request
*rq
);
400 static int ub_cmd_build_packet(struct ub_dev
*sc
, struct ub_lun
*lun
,
401 struct ub_scsi_cmd
*cmd
, struct request
*rq
);
402 static void ub_rw_cmd_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
403 static void ub_end_rq(struct request
*rq
, int uptodate
);
404 static int ub_submit_scsi(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
405 static void ub_urb_complete(struct urb
*urb
, struct pt_regs
*pt
);
406 static void ub_scsi_action(unsigned long _dev
);
407 static void ub_scsi_dispatch(struct ub_dev
*sc
);
408 static void ub_scsi_urb_compl(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
409 static void ub_data_start(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
410 static void ub_state_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
, int rc
);
411 static int __ub_state_stat(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
412 static void ub_state_stat(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
413 static void ub_state_stat_counted(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
414 static void ub_state_sense(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
);
415 static int ub_submit_clear_stall(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
,
417 static void ub_top_sense_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*scmd
);
418 static int ub_sync_tur(struct ub_dev
*sc
, struct ub_lun
*lun
);
419 static int ub_sync_read_cap(struct ub_dev
*sc
, struct ub_lun
*lun
,
420 struct ub_capacity
*ret
);
421 static int ub_probe_lun(struct ub_dev
*sc
, int lnum
);
425 static struct usb_device_id ub_usb_ids
[] = {
426 // { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) }, /* SDDR-31 */
427 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE
, US_SC_SCSI
, US_PR_BULK
) },
431 MODULE_DEVICE_TABLE(usb
, ub_usb_ids
);
/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOSTS was 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];
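/*
 * 26 hosts covers the disk names ub'a' through ub'z'; ub_probe_lun() below
 * builds the name as DRV_NAME "%c" with lun->id + 'a'.
 */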
static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */
446 * The SCSI command tracing procedures.
449 static void ub_cmdtr_new(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
452 struct ub_scsi_cmd_trace
*t
;
454 if ((n
= sc
->tr
.cur
+ 1) == SCMD_TRACE_SZ
) n
= 0;
457 memset(t
, 0, sizeof(struct ub_scsi_cmd_trace
));
461 t
->req_size
= cmd
->len
;
462 t
->st_hst
[0] = cmd
->state
;
465 cmd
->trace_index
= n
;
468 static void ub_cmdtr_state(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
471 struct ub_scsi_cmd_trace
*t
;
473 t
= &sc
->tr
.vec
[cmd
->trace_index
];
474 if (t
->tag
== cmd
->tag
) {
475 if ((n
= t
->hcur
+ 1) == SCMD_ST_HIST_SZ
) n
= 0;
476 t
->st_hst
[n
] = cmd
->state
;
481 static void ub_cmdtr_act_len(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
483 struct ub_scsi_cmd_trace
*t
;
485 t
= &sc
->tr
.vec
[cmd
->trace_index
];
486 if (t
->tag
== cmd
->tag
)
487 t
->act_size
= cmd
->act_len
;
490 static void ub_cmdtr_sense(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
,
491 unsigned char *sense
)
493 struct ub_scsi_cmd_trace
*t
;
495 t
= &sc
->tr
.vec
[cmd
->trace_index
];
496 if (t
->tag
== cmd
->tag
) {
497 t
->key
= sense
[2] & 0x0F;
503 static ssize_t
ub_diag_show(struct device
*dev
, struct device_attribute
*attr
,
506 struct usb_interface
*intf
;
514 struct ub_scsi_cmd_trace
*t
;
516 intf
= to_usb_interface(dev
);
517 sc
= usb_get_intfdata(intf
);
522 spin_lock_irqsave(&sc
->lock
, flags
);
524 cnt
+= sprintf(page
+ cnt
,
526 sc
->cmd_queue
.qlen
, sc
->cmd_queue
.qmax
);
527 cnt
+= sprintf(page
+ cnt
,
528 "sg %d %d %d %d %d .. %d\n",
536 list_for_each (p
, &sc
->luns
) {
537 lun
= list_entry(p
, struct ub_lun
, link
);
538 cnt
+= sprintf(page
+ cnt
,
539 "lun %u changed %d removable %d readonly %d\n",
540 lun
->num
, lun
->changed
, lun
->removable
, lun
->readonly
);
543 if ((nc
= sc
->tr
.cur
+ 1) == SCMD_TRACE_SZ
) nc
= 0;
544 for (j
= 0; j
< SCMD_TRACE_SZ
; j
++) {
547 cnt
+= sprintf(page
+ cnt
, "%08x %02x", t
->tag
, t
->op
);
548 if (t
->op
== REQUEST_SENSE
) {
549 cnt
+= sprintf(page
+ cnt
, " [sense %x %02x %02x]",
550 t
->key
, t
->asc
, t
->ascq
);
552 cnt
+= sprintf(page
+ cnt
, " %c", UB_DIR_CHAR(t
->dir
));
553 cnt
+= sprintf(page
+ cnt
, " [%5d %5d]",
554 t
->req_size
, t
->act_size
);
556 if ((nh
= t
->hcur
+ 1) == SCMD_ST_HIST_SZ
) nh
= 0;
557 for (i
= 0; i
< SCMD_ST_HIST_SZ
; i
++) {
558 cnt
+= sprintf(page
+ cnt
, " %s",
559 ub_scsi_cmd_stname
[(int)t
->st_hst
[nh
]]);
560 if (++nh
== SCMD_ST_HIST_SZ
) nh
= 0;
562 cnt
+= sprintf(page
+ cnt
, "\n");
564 if (++nc
== SCMD_TRACE_SZ
) nc
= 0;
567 spin_unlock_irqrestore(&sc
->lock
, flags
);
571 static DEVICE_ATTR(diag
, S_IRUGO
, ub_diag_show
, NULL
); /* N.B. World readable */
576 * This also stores the host for indexing by minor, which is somewhat dirty.
578 static int ub_id_get(void)
583 spin_lock_irqsave(&ub_lock
, flags
);
584 for (i
= 0; i
< UB_MAX_HOSTS
; i
++) {
585 if (ub_hostv
[i
] == 0) {
587 spin_unlock_irqrestore(&ub_lock
, flags
);
591 spin_unlock_irqrestore(&ub_lock
, flags
);
595 static void ub_id_put(int id
)
599 if (id
< 0 || id
>= UB_MAX_HOSTS
) {
600 printk(KERN_ERR DRV_NAME
": bad host ID %d\n", id
);
604 spin_lock_irqsave(&ub_lock
, flags
);
605 if (ub_hostv
[id
] == 0) {
606 spin_unlock_irqrestore(&ub_lock
, flags
);
607 printk(KERN_ERR DRV_NAME
": freeing free host ID %d\n", id
);
611 spin_unlock_irqrestore(&ub_lock
, flags
);
615 * Downcount for deallocation. This rides on two assumptions:
616 * - once something is poisoned, its refcount cannot grow
617 * - opens cannot happen at this time (del_gendisk was done)
618 * If the above is true, we can drop the lock, which we need for
619 * blk_cleanup_queue(): the silly thing may attempt to sleep.
620 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
622 static void ub_put(struct ub_dev
*sc
)
626 spin_lock_irqsave(&ub_lock
, flags
);
628 if (sc
->openc
== 0 && atomic_read(&sc
->poison
)) {
629 spin_unlock_irqrestore(&ub_lock
, flags
);
632 spin_unlock_irqrestore(&ub_lock
, flags
);
637 * Final cleanup and deallocation.
639 static void ub_cleanup(struct ub_dev
*sc
)
645 while (!list_empty(&sc
->luns
)) {
647 lun
= list_entry(p
, struct ub_lun
, link
);
650 /* I don't think queue can be NULL. But... Stolen from sx8.c */
651 if ((q
= lun
->disk
->queue
) != NULL
)
652 blk_cleanup_queue(q
);
654 * If we zero disk->private_data BEFORE put_disk, we have
655 * to check for NULL all over the place in open, release,
656 * check_media and revalidate, because the block level
657 * semaphore is well inside the put_disk.
658 * But we cannot zero after the call, because *disk is gone.
659 * The sd.c is blatantly racy in this area.
661 /* disk->private_data = NULL; */
673 * The "command allocator".
675 static struct ub_scsi_cmd
*ub_get_cmd(struct ub_lun
*lun
)
677 struct ub_scsi_cmd
*ret
;
686 static void ub_put_cmd(struct ub_lun
*lun
, struct ub_scsi_cmd
*cmd
)
688 if (cmd
!= &lun
->cmdv
[0]) {
689 printk(KERN_WARNING
"%s: releasing a foreign cmd %p\n",
694 printk(KERN_WARNING
"%s: releasing a free cmd\n", lun
->name
);
703 static void ub_cmdq_add(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
705 struct ub_scsi_cmd_queue
*t
= &sc
->cmd_queue
;
707 if (t
->qlen
++ == 0) {
715 if (t
->qlen
> t
->qmax
)
719 static void ub_cmdq_insert(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
721 struct ub_scsi_cmd_queue
*t
= &sc
->cmd_queue
;
723 if (t
->qlen
++ == 0) {
731 if (t
->qlen
> t
->qmax
)
735 static struct ub_scsi_cmd
*ub_cmdq_pop(struct ub_dev
*sc
)
737 struct ub_scsi_cmd_queue
*t
= &sc
->cmd_queue
;
738 struct ub_scsi_cmd
*cmd
;
750 #define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
753 * The request function is our main entry point
756 static void ub_request_fn(request_queue_t
*q
)
758 struct ub_lun
*lun
= q
->queuedata
;
761 while ((rq
= elv_next_request(q
)) != NULL
) {
762 if (ub_request_fn_1(lun
, rq
) != 0) {
769 static int ub_request_fn_1(struct ub_lun
*lun
, struct request
*rq
)
771 struct ub_dev
*sc
= lun
->udev
;
772 struct ub_scsi_cmd
*cmd
;
775 if (atomic_read(&sc
->poison
) || lun
->changed
) {
776 blkdev_dequeue_request(rq
);
781 if ((cmd
= ub_get_cmd(lun
)) == NULL
)
783 memset(cmd
, 0, sizeof(struct ub_scsi_cmd
));
785 blkdev_dequeue_request(rq
);
786 if (blk_pc_request(rq
)) {
787 rc
= ub_cmd_build_packet(sc
, lun
, cmd
, rq
);
789 rc
= ub_cmd_build_block(sc
, lun
, cmd
, rq
);
792 ub_put_cmd(lun
, cmd
);
796 cmd
->state
= UB_CMDST_INIT
;
798 cmd
->done
= ub_rw_cmd_done
;
801 cmd
->tag
= sc
->tagcnt
++;
802 if (ub_submit_scsi(sc
, cmd
) != 0) {
803 ub_put_cmd(lun
, cmd
);
811 static int ub_cmd_build_block(struct ub_dev
*sc
, struct ub_lun
*lun
,
812 struct ub_scsi_cmd
*cmd
, struct request
*rq
)
816 unsigned int block
, nblks
;
818 if (rq_data_dir(rq
) == WRITE
)
819 ub_dir
= UB_DIR_WRITE
;
821 ub_dir
= UB_DIR_READ
;
825 * get scatterlist from block layer
827 n_elem
= blk_rq_map_sg(lun
->disk
->queue
, rq
, &cmd
->sgv
[0]);
829 printk(KERN_INFO
"%s: failed request map (%d)\n",
830 sc
->name
, n_elem
); /* P3 */
831 return -1; /* request with no s/g entries? */
833 if (n_elem
> UB_MAX_REQ_SG
) { /* Paranoia */
834 printk(KERN_WARNING
"%s: request with %d segments\n",
839 sc
->sg_stat
[n_elem
< 5 ? n_elem
: 5]++;
	 * The call to blk_queue_hardsect_size() guarantees that the request
	 * is aligned, but it is given in terms of 512-byte units, always.
	block = rq->sector >> lun->capacity.bshift;
	nblks = rq->nr_sectors >> lun->capacity.bshift;
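	/*
	 * For example, with a 2048-byte hard sector bshift is 2, so a
	 * request starting at 512-byte sector 8 for 16 sectors becomes
	 * device block 2 for 4 blocks.
	 */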
	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[7] = nblks >> 8;

	cmd->len = rq->nr_sectors * 512;
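	/*
	 * In the READ(10)/WRITE(10) CDB built above, bytes 2..5 carry the
	 * big-endian logical block address and bytes 7..8 the transfer
	 * length in device blocks; cmd->len stays in bytes for the CBW.
	 */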
865 static int ub_cmd_build_packet(struct ub_dev
*sc
, struct ub_lun
*lun
,
866 struct ub_scsi_cmd
*cmd
, struct request
*rq
)
870 if (rq
->data_len
== 0) {
871 cmd
->dir
= UB_DIR_NONE
;
873 if (rq_data_dir(rq
) == WRITE
)
874 cmd
->dir
= UB_DIR_WRITE
;
876 cmd
->dir
= UB_DIR_READ
;
881 * get scatterlist from block layer
883 n_elem
= blk_rq_map_sg(lun
->disk
->queue
, rq
, &cmd
->sgv
[0]);
885 printk(KERN_INFO
"%s: failed request map (%d)\n",
886 sc
->name
, n_elem
); /* P3 */
889 if (n_elem
> UB_MAX_REQ_SG
) { /* Paranoia */
890 printk(KERN_WARNING
"%s: request with %d segments\n",
895 sc
->sg_stat
[n_elem
< 5 ? n_elem
: 5]++;
897 memcpy(&cmd
->cdb
, rq
->cmd
, rq
->cmd_len
);
898 cmd
->cdb_len
= rq
->cmd_len
;
900 cmd
->len
= rq
->data_len
;
905 static void ub_rw_cmd_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
907 struct request
*rq
= cmd
->back
;
908 struct ub_lun
*lun
= cmd
->lun
;
911 if (cmd
->error
== 0) {
914 if (blk_pc_request(rq
)) {
915 if (cmd
->act_len
>= rq
->data_len
)
918 rq
->data_len
-= cmd
->act_len
;
923 if (blk_pc_request(rq
)) {
924 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
925 memcpy(rq
->sense
, sc
->top_sense
, UB_SENSE_SIZE
);
926 rq
->sense_len
= UB_SENSE_SIZE
;
927 if (sc
->top_sense
[0] != 0)
928 rq
->errors
= SAM_STAT_CHECK_CONDITION
;
930 rq
->errors
= DID_ERROR
<< 16;
934 ub_put_cmd(lun
, cmd
);
935 ub_end_rq(rq
, uptodate
);
936 blk_start_queue(lun
->disk
->queue
);
939 static void ub_end_rq(struct request
*rq
, int uptodate
)
943 rc
= end_that_request_first(rq
, uptodate
, rq
->hard_nr_sectors
);
945 end_that_request_last(rq
);
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 *
 * Host is assumed locked.
 *
 * XXX We only support Bulk for the moment.
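 *
 * The caller's side of that contract, as ub_request_fn_1() applies it:
 * if ub_submit_scsi() returns nonzero, the caller still owns the command
 * and must release it with ub_put_cmd() and fail the request itself; on a
 * zero return the command's ->done callback performs the completion.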
959 static int ub_submit_scsi(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
962 if (cmd
->state
!= UB_CMDST_INIT
||
963 (cmd
->dir
!= UB_DIR_NONE
&& cmd
->len
== 0)) {
967 ub_cmdq_add(sc
, cmd
);
969 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
970 * safer to jump to a tasklet, in case upper layers do something silly.
972 tasklet_schedule(&sc
->tasklet
);
977 * Submit the first URB for the queued command.
978 * This function does not deal with queueing in any way.
980 static int ub_scsi_cmd_start(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
982 struct bulk_cb_wrap
*bcb
;
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
996 memset(&sc
->top_sense
, 0, UB_SENSE_SIZE
);
998 /* set up the command wrapper */
999 bcb
->Signature
= cpu_to_le32(US_BULK_CB_SIGN
);
1000 bcb
->Tag
= cmd
->tag
; /* Endianness is not important */
1001 bcb
->DataTransferLength
= cpu_to_le32(cmd
->len
);
1002 bcb
->Flags
= (cmd
->dir
== UB_DIR_READ
) ? 0x80 : 0;
1003 bcb
->Lun
= (cmd
->lun
!= NULL
) ? cmd
->lun
->num
: 0;
1004 bcb
->Length
= cmd
->cdb_len
;
1006 /* copy the command payload */
1007 memcpy(bcb
->CDB
, cmd
->cdb
, UB_MAX_CDB_SIZE
);
1009 UB_INIT_COMPLETION(sc
->work_done
);
1011 sc
->last_pipe
= sc
->send_bulk_pipe
;
1012 usb_fill_bulk_urb(&sc
->work_urb
, sc
->dev
, sc
->send_bulk_pipe
,
1013 bcb
, US_BULK_CB_WRAP_LEN
, ub_urb_complete
, sc
);
1015 /* Fill what we shouldn't be filling, because usb-storage did so. */
1016 sc
->work_urb
.actual_length
= 0;
1017 sc
->work_urb
.error_count
= 0;
1018 sc
->work_urb
.status
= 0;
1020 if ((rc
= usb_submit_urb(&sc
->work_urb
, GFP_ATOMIC
)) != 0) {
1021 /* XXX Clear stalls */
1022 ub_complete(&sc
->work_done
);
1026 sc
->work_timer
.expires
= jiffies
+ UB_URB_TIMEOUT
;
1027 add_timer(&sc
->work_timer
);
1029 cmd
->state
= UB_CMDST_CMD
;
1030 ub_cmdtr_state(sc
, cmd
);
1037 static void ub_urb_timeout(unsigned long arg
)
1039 struct ub_dev
*sc
= (struct ub_dev
*) arg
;
1040 unsigned long flags
;
1042 spin_lock_irqsave(&sc
->lock
, flags
);
1043 usb_unlink_urb(&sc
->work_urb
);
1044 spin_unlock_irqrestore(&sc
->lock
, flags
);
1048 * Completion routine for the work URB.
1050 * This can be called directly from usb_submit_urb (while we have
1051 * the sc->lock taken) and from an interrupt (while we do NOT have
1052 * the sc->lock taken). Therefore, bounce this off to a tasklet.
1054 static void ub_urb_complete(struct urb
*urb
, struct pt_regs
*pt
)
1056 struct ub_dev
*sc
= urb
->context
;
1058 ub_complete(&sc
->work_done
);
1059 tasklet_schedule(&sc
->tasklet
);
1062 static void ub_scsi_action(unsigned long _dev
)
1064 struct ub_dev
*sc
= (struct ub_dev
*) _dev
;
1065 unsigned long flags
;
1067 spin_lock_irqsave(&sc
->lock
, flags
);
1068 del_timer(&sc
->work_timer
);
1069 ub_scsi_dispatch(sc
);
1070 spin_unlock_irqrestore(&sc
->lock
, flags
);
1073 static void ub_scsi_dispatch(struct ub_dev
*sc
)
1075 struct ub_scsi_cmd
*cmd
;
1078 while ((cmd
= ub_cmdq_peek(sc
)) != NULL
) {
1079 if (cmd
->state
== UB_CMDST_DONE
) {
1081 (*cmd
->done
)(sc
, cmd
);
1082 } else if (cmd
->state
== UB_CMDST_INIT
) {
1083 ub_cmdtr_new(sc
, cmd
);
1084 if ((rc
= ub_scsi_cmd_start(sc
, cmd
)) == 0)
1087 cmd
->state
= UB_CMDST_DONE
;
1088 ub_cmdtr_state(sc
, cmd
);
1090 if (!ub_is_completed(&sc
->work_done
))
1092 ub_scsi_urb_compl(sc
, cmd
);
1097 static void ub_scsi_urb_compl(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1099 struct urb
*urb
= &sc
->work_urb
;
1100 struct bulk_cs_wrap
*bcs
;
1103 if (atomic_read(&sc
->poison
)) {
1104 /* A little too simplistic, I feel... */
1108 if (cmd
->state
== UB_CMDST_CLEAR
) {
1109 if (urb
->status
== -EPIPE
) {
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 * XXX Might try to reset the device here and retry.
1115 printk(KERN_NOTICE
"%s: stall on control pipe\n",
1121 * We ignore the result for the halt clear.
1124 /* reset the endpoint toggle */
1125 usb_settoggle(sc
->dev
, usb_pipeendpoint(sc
->last_pipe
),
1126 usb_pipeout(sc
->last_pipe
), 0);
1128 ub_state_sense(sc
, cmd
);
1130 } else if (cmd
->state
== UB_CMDST_CLR2STS
) {
1131 if (urb
->status
== -EPIPE
) {
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 * XXX Might try to reset the device here and retry.
1137 printk(KERN_NOTICE
"%s: stall on control pipe\n",
1143 * We ignore the result for the halt clear.
1146 /* reset the endpoint toggle */
1147 usb_settoggle(sc
->dev
, usb_pipeendpoint(sc
->last_pipe
),
1148 usb_pipeout(sc
->last_pipe
), 0);
1150 ub_state_stat(sc
, cmd
);
1152 } else if (cmd
->state
== UB_CMDST_CLRRS
) {
1153 if (urb
->status
== -EPIPE
) {
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 * XXX Might try to reset the device here and retry.
1159 printk(KERN_NOTICE
"%s: stall on control pipe\n",
1165 * We ignore the result for the halt clear.
1168 /* reset the endpoint toggle */
1169 usb_settoggle(sc
->dev
, usb_pipeendpoint(sc
->last_pipe
),
1170 usb_pipeout(sc
->last_pipe
), 0);
1172 ub_state_stat_counted(sc
, cmd
);
1174 } else if (cmd
->state
== UB_CMDST_CMD
) {
1175 if (urb
->status
== -EPIPE
) {
1176 rc
= ub_submit_clear_stall(sc
, cmd
, sc
->last_pipe
);
1178 printk(KERN_NOTICE
"%s: "
1179 "unable to submit clear (%d)\n",
1182 * This is typically ENOMEM or some other such shit.
1183 * Retrying is pointless. Just do Bad End on it...
1187 cmd
->state
= UB_CMDST_CLEAR
;
1188 ub_cmdtr_state(sc
, cmd
);
1191 if (urb
->status
!= 0) {
1194 if (urb
->actual_length
!= US_BULK_CB_WRAP_LEN
) {
1195 /* XXX Must do reset here to unconfuse the device */
1199 if (cmd
->dir
== UB_DIR_NONE
|| cmd
->nsg
< 1) {
1200 ub_state_stat(sc
, cmd
);
1204 // udelay(125); // usb-storage has this
1205 ub_data_start(sc
, cmd
);
1207 } else if (cmd
->state
== UB_CMDST_DATA
) {
1208 if (urb
->status
== -EPIPE
) {
1209 rc
= ub_submit_clear_stall(sc
, cmd
, sc
->last_pipe
);
1211 printk(KERN_NOTICE
"%s: "
1212 "unable to submit clear (%d)\n",
1215 * This is typically ENOMEM or some other such shit.
1216 * Retrying is pointless. Just do Bad End on it...
1220 cmd
->state
= UB_CMDST_CLR2STS
;
1221 ub_cmdtr_state(sc
, cmd
);
1224 if (urb
->status
== -EOVERFLOW
) {
1226 * A babble? Failure, but we must transfer CSW now.
1227 * XXX This is going to end in perpetual babble. Reset.
1229 cmd
->error
= -EOVERFLOW
; /* A cheap trick... */
1230 ub_state_stat(sc
, cmd
);
1233 if (urb
->status
!= 0)
1236 cmd
->act_len
+= urb
->actual_length
;
1237 ub_cmdtr_act_len(sc
, cmd
);
1239 if (++cmd
->current_sg
< cmd
->nsg
) {
1240 ub_data_start(sc
, cmd
);
1243 ub_state_stat(sc
, cmd
);
1245 } else if (cmd
->state
== UB_CMDST_STAT
) {
1246 if (urb
->status
== -EPIPE
) {
1247 rc
= ub_submit_clear_stall(sc
, cmd
, sc
->last_pipe
);
1249 printk(KERN_NOTICE
"%s: "
1250 "unable to submit clear (%d)\n",
1253 * This is typically ENOMEM or some other such shit.
1254 * Retrying is pointless. Just do Bad End on it...
			 * Having a stall when getting CSW is an error, so
			 * make sure upper levels are not oblivious to it.
1263 cmd
->error
= -EIO
; /* A cheap trick... */
1265 cmd
->state
= UB_CMDST_CLRRS
;
1266 ub_cmdtr_state(sc
, cmd
);
1269 if (urb
->status
== -EOVERFLOW
) {
1271 * XXX We are screwed here. Retrying is pointless,
1272 * because the pipelined data will not get in until
1273 * we read with a big enough buffer. We must reset XXX.
1277 if (urb
->status
!= 0)
1280 if (urb
->actual_length
== 0) {
1281 ub_state_stat_counted(sc
, cmd
);
1286 * Check the returned Bulk protocol status.
1287 * The status block has to be validated first.
1290 bcs
= &sc
->work_bcs
;
1292 if (sc
->signature
== cpu_to_le32(0)) {
1294 * This is the first reply, so do not perform the check.
1295 * Instead, remember the signature the device uses
1296 * for future checks. But do not allow a nul.
1298 sc
->signature
= bcs
->Signature
;
1299 if (sc
->signature
== cpu_to_le32(0)) {
1300 ub_state_stat_counted(sc
, cmd
);
1304 if (bcs
->Signature
!= sc
->signature
) {
1305 ub_state_stat_counted(sc
, cmd
);
1310 if (bcs
->Tag
!= cmd
->tag
) {
			 * This usually happens when we disagree with the
			 * device's microcode about something. For instance,
			 * a few of them throw this after timeouts. They buffer
			 * commands and reply to commands we timed out before.
			 * Without flushing these replies we loop forever.
1318 ub_state_stat_counted(sc
, cmd
);
1322 rc
= le32_to_cpu(bcs
->Residue
);
1323 if (rc
!= cmd
->len
- cmd
->act_len
) {
1325 * It is all right to transfer less, the caller has
1326 * to check. But it's not all right if the device
1327 * counts disagree with our counts.
1329 /* P3 */ printk("%s: resid %d len %d act %d\n",
1330 sc
->name
, rc
, cmd
->len
, cmd
->act_len
);
1334 switch (bcs
->Status
) {
1335 case US_BULK_STAT_OK
:
1337 case US_BULK_STAT_FAIL
:
1338 ub_state_sense(sc
, cmd
);
1340 case US_BULK_STAT_PHASE
:
1341 /* XXX We must reset the transport here */
1342 /* P3 */ printk("%s: status PHASE\n", sc
->name
);
1345 printk(KERN_INFO
"%s: unknown CSW status 0x%x\n",
1346 sc
->name
, bcs
->Status
);
1350 /* Not zeroing error to preserve a babble indicator */
1351 if (cmd
->error
!= 0) {
1352 ub_state_sense(sc
, cmd
);
1355 cmd
->state
= UB_CMDST_DONE
;
1356 ub_cmdtr_state(sc
, cmd
);
1358 (*cmd
->done
)(sc
, cmd
);
1360 } else if (cmd
->state
== UB_CMDST_SENSE
) {
1361 ub_state_done(sc
, cmd
, -EIO
);
1364 printk(KERN_WARNING
"%s: "
1365 "wrong command state %d\n",
1366 sc
->name
, cmd
->state
);
1371 Bad_End
: /* Little Excel is dead */
1372 ub_state_done(sc
, cmd
, -EIO
);
1376 * Factorization helper for the command state machine:
1377 * Initiate a data segment transfer.
1379 static void ub_data_start(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1381 struct scatterlist
*sg
= &cmd
->sgv
[cmd
->current_sg
];
1385 UB_INIT_COMPLETION(sc
->work_done
);
1387 if (cmd
->dir
== UB_DIR_READ
)
1388 pipe
= sc
->recv_bulk_pipe
;
1390 pipe
= sc
->send_bulk_pipe
;
1391 sc
->last_pipe
= pipe
;
1392 usb_fill_bulk_urb(&sc
->work_urb
, sc
->dev
, pipe
,
1393 page_address(sg
->page
) + sg
->offset
, sg
->length
,
1394 ub_urb_complete
, sc
);
1395 sc
->work_urb
.actual_length
= 0;
1396 sc
->work_urb
.error_count
= 0;
1397 sc
->work_urb
.status
= 0;
1399 if ((rc
= usb_submit_urb(&sc
->work_urb
, GFP_ATOMIC
)) != 0) {
1400 /* XXX Clear stalls */
1401 ub_complete(&sc
->work_done
);
1402 ub_state_done(sc
, cmd
, rc
);
1406 sc
->work_timer
.expires
= jiffies
+ UB_DATA_TIMEOUT
;
1407 add_timer(&sc
->work_timer
);
1409 cmd
->state
= UB_CMDST_DATA
;
1410 ub_cmdtr_state(sc
, cmd
);
1414 * Factorization helper for the command state machine:
1415 * Finish the command.
1417 static void ub_state_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
, int rc
)
1421 cmd
->state
= UB_CMDST_DONE
;
1422 ub_cmdtr_state(sc
, cmd
);
1424 (*cmd
->done
)(sc
, cmd
);
1428 * Factorization helper for the command state machine:
1429 * Submit a CSW read.
1431 static int __ub_state_stat(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1435 UB_INIT_COMPLETION(sc
->work_done
);
1437 sc
->last_pipe
= sc
->recv_bulk_pipe
;
1438 usb_fill_bulk_urb(&sc
->work_urb
, sc
->dev
, sc
->recv_bulk_pipe
,
1439 &sc
->work_bcs
, US_BULK_CS_WRAP_LEN
, ub_urb_complete
, sc
);
1440 sc
->work_urb
.actual_length
= 0;
1441 sc
->work_urb
.error_count
= 0;
1442 sc
->work_urb
.status
= 0;
1444 if ((rc
= usb_submit_urb(&sc
->work_urb
, GFP_ATOMIC
)) != 0) {
1445 /* XXX Clear stalls */
1446 ub_complete(&sc
->work_done
);
1447 ub_state_done(sc
, cmd
, rc
);
1451 sc
->work_timer
.expires
= jiffies
+ UB_STAT_TIMEOUT
;
1452 add_timer(&sc
->work_timer
);
1457 * Factorization helper for the command state machine:
1458 * Submit a CSW read and go to STAT state.
1460 static void ub_state_stat(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1463 if (__ub_state_stat(sc
, cmd
) != 0)
1466 cmd
->stat_count
= 0;
1467 cmd
->state
= UB_CMDST_STAT
;
1468 ub_cmdtr_state(sc
, cmd
);
1472 * Factorization helper for the command state machine:
1473 * Submit a CSW read and go to STAT state with counter (along [C] path).
1475 static void ub_state_stat_counted(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1478 if (++cmd
->stat_count
>= 4) {
1479 ub_state_sense(sc
, cmd
);
1483 if (__ub_state_stat(sc
, cmd
) != 0)
1486 cmd
->state
= UB_CMDST_STAT
;
1487 ub_cmdtr_state(sc
, cmd
);
1491 * Factorization helper for the command state machine:
1492 * Submit a REQUEST SENSE and go to SENSE state.
1494 static void ub_state_sense(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1496 struct ub_scsi_cmd
*scmd
;
1497 struct scatterlist
*sg
;
1500 if (cmd
->cdb
[0] == REQUEST_SENSE
) {
1505 scmd
= &sc
->top_rqs_cmd
;
1506 memset(scmd
, 0, sizeof(struct ub_scsi_cmd
));
1507 scmd
->cdb
[0] = REQUEST_SENSE
;
1508 scmd
->cdb
[4] = UB_SENSE_SIZE
;
1510 scmd
->dir
= UB_DIR_READ
;
1511 scmd
->state
= UB_CMDST_INIT
;
1514 sg
->page
= virt_to_page(sc
->top_sense
);
1515 sg
->offset
= (unsigned long)sc
->top_sense
& (PAGE_SIZE
-1);
1516 sg
->length
= UB_SENSE_SIZE
;
1517 scmd
->len
= UB_SENSE_SIZE
;
1518 scmd
->lun
= cmd
->lun
;
1519 scmd
->done
= ub_top_sense_done
;
1522 scmd
->tag
= sc
->tagcnt
++;
1524 cmd
->state
= UB_CMDST_SENSE
;
1525 ub_cmdtr_state(sc
, cmd
);
1527 ub_cmdq_insert(sc
, scmd
);
1531 ub_state_done(sc
, cmd
, rc
);
1535 * A helper for the command's state machine:
1536 * Submit a stall clear.
1538 static int ub_submit_clear_stall(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
,
1542 struct usb_ctrlrequest
*cr
;
1545 endp
= usb_pipeendpoint(stalled_pipe
);
1546 if (usb_pipein (stalled_pipe
))
1550 cr
->bRequestType
= USB_RECIP_ENDPOINT
;
1551 cr
->bRequest
= USB_REQ_CLEAR_FEATURE
;
1552 cr
->wValue
= cpu_to_le16(USB_ENDPOINT_HALT
);
1553 cr
->wIndex
= cpu_to_le16(endp
);
1554 cr
->wLength
= cpu_to_le16(0);
1556 UB_INIT_COMPLETION(sc
->work_done
);
1558 usb_fill_control_urb(&sc
->work_urb
, sc
->dev
, sc
->send_ctrl_pipe
,
1559 (unsigned char*) cr
, NULL
, 0, ub_urb_complete
, sc
);
1560 sc
->work_urb
.actual_length
= 0;
1561 sc
->work_urb
.error_count
= 0;
1562 sc
->work_urb
.status
= 0;
1564 if ((rc
= usb_submit_urb(&sc
->work_urb
, GFP_ATOMIC
)) != 0) {
1565 ub_complete(&sc
->work_done
);
1569 sc
->work_timer
.expires
= jiffies
+ UB_CTRL_TIMEOUT
;
1570 add_timer(&sc
->work_timer
);
1576 static void ub_top_sense_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*scmd
)
1578 unsigned char *sense
= sc
->top_sense
;
1579 struct ub_scsi_cmd
*cmd
;
1582 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1584 ub_cmdtr_sense(sc
, scmd
, sense
);
1587 * Find the command which triggered the unit attention or a check,
1588 * save the sense into it, and advance its state machine.
1590 if ((cmd
= ub_cmdq_peek(sc
)) == NULL
) {
1591 printk(KERN_WARNING
"%s: sense done while idle\n", sc
->name
);
1594 if (cmd
!= scmd
->back
) {
1595 printk(KERN_WARNING
"%s: "
1596 "sense done for wrong command 0x%x\n",
1597 sc
->name
, cmd
->tag
);
1600 if (cmd
->state
!= UB_CMDST_SENSE
) {
1601 printk(KERN_WARNING
"%s: "
1602 "sense done with bad cmd state %d\n",
1603 sc
->name
, cmd
->state
);
1607 cmd
->key
= sense
[2] & 0x0F;
1608 cmd
->asc
= sense
[12];
1609 cmd
->ascq
= sense
[13];
1611 ub_scsi_urb_compl(sc
, cmd
);
1615 * This is called from a process context.
1617 static void ub_revalidate(struct ub_dev
*sc
, struct ub_lun
*lun
)
1620 lun
->readonly
= 0; /* XXX Query this from the device */
1622 lun
->capacity
.nsec
= 0;
1623 lun
->capacity
.bsize
= 512;
1624 lun
->capacity
.bshift
= 0;
1626 if (ub_sync_tur(sc
, lun
) != 0)
1627 return; /* Not ready */
1630 if (ub_sync_read_cap(sc
, lun
, &lun
->capacity
) != 0) {
1632 * The retry here means something is wrong, either with the
1633 * device, with the transport, or with our code.
1634 * We keep this because sd.c has retries for capacity.
1636 if (ub_sync_read_cap(sc
, lun
, &lun
->capacity
) != 0) {
1637 lun
->capacity
.nsec
= 0;
1638 lun
->capacity
.bsize
= 512;
1639 lun
->capacity
.bshift
= 0;
1646 * This is mostly needed to keep refcounting, but also to support
1647 * media checks on removable media drives.
1649 static int ub_bd_open(struct inode
*inode
, struct file
*filp
)
1651 struct gendisk
*disk
= inode
->i_bdev
->bd_disk
;
1654 unsigned long flags
;
1657 if ((lun
= disk
->private_data
) == NULL
)
1661 spin_lock_irqsave(&ub_lock
, flags
);
1662 if (atomic_read(&sc
->poison
)) {
1663 spin_unlock_irqrestore(&ub_lock
, flags
);
1667 spin_unlock_irqrestore(&ub_lock
, flags
);
	 * This is a workaround for a specific problem in our block layer.
	 * In 2.6.9, register_disk duplicates the code from rescan_partitions.
	 * However, if we do add_disk with a device which persistently reports
	 * a changed media, add_disk calls register_disk, which does do_open,
	 * which will call rescan_partitions for changed media. After that,
	 * register_disk attempts to do it all again and causes a double kobject
	 * registration and eventually an oops on module removal.
	 *
	 * The bottom line is, Al Viro says that we should not allow
	 * bdev->bd_invalidated to be set when doing add_disk no matter what.
1681 if (lun
->first_open
) {
1682 lun
->first_open
= 0;
1689 if (lun
->removable
|| lun
->readonly
)
1690 check_disk_change(inode
->i_bdev
);
1693 * The sd.c considers ->media_present and ->changed not equivalent,
1694 * under some pretty murky conditions (a failure of READ CAPACITY).
1695 * We may need it one day.
1697 if (lun
->removable
&& lun
->changed
&& !(filp
->f_flags
& O_NDELAY
)) {
1702 if (lun
->readonly
&& (filp
->f_mode
& FMODE_WRITE
)) {
1716 static int ub_bd_release(struct inode
*inode
, struct file
*filp
)
1718 struct gendisk
*disk
= inode
->i_bdev
->bd_disk
;
1719 struct ub_lun
*lun
= disk
->private_data
;
1720 struct ub_dev
*sc
= lun
->udev
;
1727 * The ioctl interface.
1729 static int ub_bd_ioctl(struct inode
*inode
, struct file
*filp
,
1730 unsigned int cmd
, unsigned long arg
)
1732 struct gendisk
*disk
= inode
->i_bdev
->bd_disk
;
1733 void __user
*usermem
= (void __user
*) arg
;
1735 return scsi_cmd_ioctl(filp
, disk
, cmd
, usermem
);
 * This is called once a new disk was seen by the block layer or by ub_probe().
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
1748 static int ub_bd_revalidate(struct gendisk
*disk
)
1750 struct ub_lun
*lun
= disk
->private_data
;
1752 ub_revalidate(lun
->udev
, lun
);
1754 /* XXX Support sector size switching like in sr.c */
1755 blk_queue_hardsect_size(disk
->queue
, lun
->capacity
.bsize
);
1756 set_capacity(disk
, lun
->capacity
.nsec
);
1757 // set_disk_ro(sdkp->disk, lun->readonly);
1763 * The check is called by the block layer to verify if the media
1764 * is still available. It is supposed to be harmless, lightweight and
1765 * non-intrusive in case the media was not changed.
1767 * This call can sleep.
1769 * The return code is bool!
1771 static int ub_bd_media_changed(struct gendisk
*disk
)
1773 struct ub_lun
*lun
= disk
->private_data
;
1775 if (!lun
->removable
)
	 * We always clean checks after every command, so this is not
	 * as dangerous as it looks. If TEST_UNIT_READY fails here,
	 * the device really is not ready, and operator or software
	 * intervention is required. One dangerous item might be a drive which
	 * spins itself down, and come the time to write dirty pages, this
	 * will fail, then the block layer discards the data. Since we never
	 * spin drives up, such devices simply cannot be used with ub anyway.
1787 if (ub_sync_tur(lun
->udev
, lun
) != 0) {
1792 return lun
->changed
;
1795 static struct block_device_operations ub_bd_fops
= {
1796 .owner
= THIS_MODULE
,
1798 .release
= ub_bd_release
,
1799 .ioctl
= ub_bd_ioctl
,
1800 .media_changed
= ub_bd_media_changed
,
1801 .revalidate_disk
= ub_bd_revalidate
,
1805 * Common ->done routine for commands executed synchronously.
1807 static void ub_probe_done(struct ub_dev
*sc
, struct ub_scsi_cmd
*cmd
)
1809 struct completion
*cop
= cmd
->back
;
1814 * Test if the device has a check condition on it, synchronously.
1816 static int ub_sync_tur(struct ub_dev
*sc
, struct ub_lun
*lun
)
1818 struct ub_scsi_cmd
*cmd
;
1819 enum { ALLOC_SIZE
= sizeof(struct ub_scsi_cmd
) };
1820 unsigned long flags
;
1821 struct completion
compl;
1824 init_completion(&compl);
1827 if ((cmd
= kmalloc(ALLOC_SIZE
, GFP_KERNEL
)) == NULL
)
1829 memset(cmd
, 0, ALLOC_SIZE
);
1831 cmd
->cdb
[0] = TEST_UNIT_READY
;
1833 cmd
->dir
= UB_DIR_NONE
;
1834 cmd
->state
= UB_CMDST_INIT
;
1835 cmd
->lun
= lun
; /* This may be NULL, but that's ok */
1836 cmd
->done
= ub_probe_done
;
1839 spin_lock_irqsave(&sc
->lock
, flags
);
1840 cmd
->tag
= sc
->tagcnt
++;
1842 rc
= ub_submit_scsi(sc
, cmd
);
1843 spin_unlock_irqrestore(&sc
->lock
, flags
);
1846 printk("ub: testing ready: submit error (%d)\n", rc
); /* P3 */
1850 wait_for_completion(&compl);
1854 if (rc
== -EIO
&& cmd
->key
!= 0) /* Retries for benh's key */
1864 * Read the SCSI capacity synchronously (for probing).
1866 static int ub_sync_read_cap(struct ub_dev
*sc
, struct ub_lun
*lun
,
1867 struct ub_capacity
*ret
)
1869 struct ub_scsi_cmd
*cmd
;
1870 struct scatterlist
*sg
;
1872 enum { ALLOC_SIZE
= sizeof(struct ub_scsi_cmd
) + 8 };
1873 unsigned long flags
;
1874 unsigned int bsize
, shift
;
1876 struct completion
compl;
1879 init_completion(&compl);
1882 if ((cmd
= kmalloc(ALLOC_SIZE
, GFP_KERNEL
)) == NULL
)
1884 memset(cmd
, 0, ALLOC_SIZE
);
1885 p
= (char *)cmd
+ sizeof(struct ub_scsi_cmd
);
1889 cmd
->dir
= UB_DIR_READ
;
1890 cmd
->state
= UB_CMDST_INIT
;
1893 sg
->page
= virt_to_page(p
);
1894 sg
->offset
= (unsigned long)p
& (PAGE_SIZE
-1);
1898 cmd
->done
= ub_probe_done
;
1901 spin_lock_irqsave(&sc
->lock
, flags
);
1902 cmd
->tag
= sc
->tagcnt
++;
1904 rc
= ub_submit_scsi(sc
, cmd
);
1905 spin_unlock_irqrestore(&sc
->lock
, flags
);
1908 printk("ub: reading capacity: submit error (%d)\n", rc
); /* P3 */
1912 wait_for_completion(&compl);
1914 if (cmd
->error
!= 0) {
1915 printk("ub: reading capacity: error %d\n", cmd
->error
); /* P3 */
1919 if (cmd
->act_len
!= 8) {
1920 printk("ub: reading capacity: size %d\n", cmd
->act_len
); /* P3 */
	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
		printk("ub: Bad sector size %u\n", bsize);	/* P3 */

	ret->bshift = shift;
	ret->nsec = nsec << shift;
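	/*
	 * READ CAPACITY(10) returns the last LBA and the block size, so
	 * nsec is last LBA + 1 in device blocks; shifting by 'shift'
	 * converts that into the 512-byte sectors which the block layer
	 * expects (a 2048-byte-block device reports 4x as many sectors).
	 */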
1954 static void ub_probe_urb_complete(struct urb
*urb
, struct pt_regs
*pt
)
1956 struct completion
*cop
= urb
->context
;
1960 static void ub_probe_timeout(unsigned long arg
)
1962 struct completion
*cop
= (struct completion
*) arg
;
1967 * Get number of LUNs by the way of Bulk GetMaxLUN command.
1969 static int ub_sync_getmaxlun(struct ub_dev
*sc
)
1971 int ifnum
= sc
->intf
->cur_altsetting
->desc
.bInterfaceNumber
;
1973 enum { ALLOC_SIZE
= 1 };
1974 struct usb_ctrlrequest
*cr
;
1975 struct completion
compl;
1976 struct timer_list timer
;
1980 init_completion(&compl);
1983 if ((p
= kmalloc(ALLOC_SIZE
, GFP_KERNEL
)) == NULL
)
1988 cr
->bRequestType
= USB_DIR_IN
| USB_TYPE_CLASS
| USB_RECIP_INTERFACE
;
1989 cr
->bRequest
= US_BULK_GET_MAX_LUN
;
1990 cr
->wValue
= cpu_to_le16(0);
1991 cr
->wIndex
= cpu_to_le16(ifnum
);
1992 cr
->wLength
= cpu_to_le16(1);
1994 usb_fill_control_urb(&sc
->work_urb
, sc
->dev
, sc
->recv_ctrl_pipe
,
1995 (unsigned char*) cr
, p
, 1, ub_probe_urb_complete
, &compl);
1996 sc
->work_urb
.actual_length
= 0;
1997 sc
->work_urb
.error_count
= 0;
1998 sc
->work_urb
.status
= 0;
2000 if ((rc
= usb_submit_urb(&sc
->work_urb
, GFP_KERNEL
)) != 0) {
2002 printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
2006 "%s: Unable to submit GetMaxLUN (%d)\n",
2013 timer
.function
= ub_probe_timeout
;
2014 timer
.data
= (unsigned long) &compl;
2015 timer
.expires
= jiffies
+ UB_CTRL_TIMEOUT
;
2018 wait_for_completion(&compl);
2020 del_timer_sync(&timer
);
2021 usb_kill_urb(&sc
->work_urb
);
2023 if ((rc
= sc
->work_urb
.status
) < 0) {
2025 printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
2029 "%s: Error at GetMaxLUN (%d)\n",
2035 if (sc
->work_urb
.actual_length
!= 1) {
2036 printk("%s: GetMaxLUN returned %d bytes\n", sc
->name
,
2037 sc
->work_urb
.actual_length
); /* P3 */
2040 if ((nluns
= *p
) == 55) {
2043 /* GetMaxLUN returns the maximum LUN number */
2045 if (nluns
> UB_MAX_LUNS
)
2046 nluns
= UB_MAX_LUNS
;
2048 printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc
->name
,
2049 *p
, nluns
); /* P3 */
2063 * Clear initial stalls.
2065 static int ub_probe_clear_stall(struct ub_dev
*sc
, int stalled_pipe
)
2068 struct usb_ctrlrequest
*cr
;
2069 struct completion
compl;
2070 struct timer_list timer
;
2073 init_completion(&compl);
2075 endp
= usb_pipeendpoint(stalled_pipe
);
2076 if (usb_pipein (stalled_pipe
))
2080 cr
->bRequestType
= USB_RECIP_ENDPOINT
;
2081 cr
->bRequest
= USB_REQ_CLEAR_FEATURE
;
2082 cr
->wValue
= cpu_to_le16(USB_ENDPOINT_HALT
);
2083 cr
->wIndex
= cpu_to_le16(endp
);
2084 cr
->wLength
= cpu_to_le16(0);
2086 usb_fill_control_urb(&sc
->work_urb
, sc
->dev
, sc
->send_ctrl_pipe
,
2087 (unsigned char*) cr
, NULL
, 0, ub_probe_urb_complete
, &compl);
2088 sc
->work_urb
.actual_length
= 0;
2089 sc
->work_urb
.error_count
= 0;
2090 sc
->work_urb
.status
= 0;
2092 if ((rc
= usb_submit_urb(&sc
->work_urb
, GFP_KERNEL
)) != 0) {
2094 "%s: Unable to submit a probe clear (%d)\n", sc
->name
, rc
);
2099 timer
.function
= ub_probe_timeout
;
2100 timer
.data
= (unsigned long) &compl;
2101 timer
.expires
= jiffies
+ UB_CTRL_TIMEOUT
;
2104 wait_for_completion(&compl);
2106 del_timer_sync(&timer
);
2107 usb_kill_urb(&sc
->work_urb
);
2109 /* reset the endpoint toggle */
2110 usb_settoggle(sc
->dev
, endp
, usb_pipeout(sc
->last_pipe
), 0);
2116 * Get the pipe settings.
2118 static int ub_get_pipes(struct ub_dev
*sc
, struct usb_device
*dev
,
2119 struct usb_interface
*intf
)
2121 struct usb_host_interface
*altsetting
= intf
->cur_altsetting
;
2122 struct usb_endpoint_descriptor
*ep_in
= NULL
;
2123 struct usb_endpoint_descriptor
*ep_out
= NULL
;
2124 struct usb_endpoint_descriptor
*ep
;
2128 * Find the endpoints we need.
2129 * We are expecting a minimum of 2 endpoints - in and out (bulk).
2130 * We will ignore any others.
2132 for (i
= 0; i
< altsetting
->desc
.bNumEndpoints
; i
++) {
2133 ep
= &altsetting
->endpoint
[i
].desc
;
2135 /* Is it a BULK endpoint? */
2136 if ((ep
->bmAttributes
& USB_ENDPOINT_XFERTYPE_MASK
)
2137 == USB_ENDPOINT_XFER_BULK
) {
2138 /* BULK in or out? */
2139 if (ep
->bEndpointAddress
& USB_DIR_IN
)
2146 if (ep_in
== NULL
|| ep_out
== NULL
) {
2147 printk(KERN_NOTICE
"%s: failed endpoint check\n",
	/* Calculate and store the pipe values */
	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
		ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
		ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
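	/*
	 * Each pipe value packs the device, the endpoint number and the
	 * direction into one integer, so the transfer code only ever hands
	 * these cached values to usb_fill_bulk_urb() / usb_fill_control_urb()
	 * and never has to look the endpoints up again.
	 */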
2164 * Probing is done in the process context, which allows us to cheat
2165 * and not to build a state machine for the discovery.
2167 static int ub_probe(struct usb_interface
*intf
,
2168 const struct usb_device_id
*dev_id
)
2176 if ((sc
= kmalloc(sizeof(struct ub_dev
), GFP_KERNEL
)) == NULL
)
2178 memset(sc
, 0, sizeof(struct ub_dev
));
2179 spin_lock_init(&sc
->lock
);
2180 INIT_LIST_HEAD(&sc
->luns
);
2181 usb_init_urb(&sc
->work_urb
);
2182 tasklet_init(&sc
->tasklet
, ub_scsi_action
, (unsigned long)sc
);
2183 atomic_set(&sc
->poison
, 0);
2185 init_timer(&sc
->work_timer
);
2186 sc
->work_timer
.data
= (unsigned long) sc
;
2187 sc
->work_timer
.function
= ub_urb_timeout
;
2189 ub_init_completion(&sc
->work_done
);
2190 sc
->work_done
.done
= 1; /* A little yuk, but oh well... */
2192 sc
->dev
= interface_to_usbdev(intf
);
2194 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2195 usb_set_intfdata(intf
, sc
);
2196 usb_get_dev(sc
->dev
);
2197 // usb_get_intf(sc->intf); /* Do we need this? */
2199 snprintf(sc
->name
, 12, DRV_NAME
"(%d.%d)",
2200 sc
->dev
->bus
->busnum
, sc
->dev
->devnum
);
2202 /* XXX Verify that we can handle the device (from descriptors) */
2204 ub_get_pipes(sc
, sc
->dev
, intf
);
2206 if (device_create_file(&sc
->intf
->dev
, &dev_attr_diag
) != 0)
2210 * At this point, all USB initialization is done, do upper layer.
2211 * We really hate halfway initialized structures, so from the
2212 * invariants perspective, this ub_dev is fully constructed at
2217 * This is needed to clear toggles. It is a problem only if we do
2218 * `rmmod ub && modprobe ub` without disconnects, but we like that.
2220 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2221 ub_probe_clear_stall(sc
, sc
->recv_bulk_pipe
);
2222 ub_probe_clear_stall(sc
, sc
->send_bulk_pipe
);
	 * The way this is used by the startup code is a little specific.
	 * A SCSI check causes a USB stall. Our common case code sees it
	 * and clears the check, after which the device is ready for use.
	 * But if a check was not present, any command other than
	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
	 *
	 * If we neglect to clear the SCSI check, the first real command fails
	 * (which is the capacity readout). We clear that and retry, but why
	 * cause spurious retries for no reason?
	 *
	 * Revalidation may start with its own TEST_UNIT_READY, but that one
	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
2240 for (i
= 0; i
< 3; i
++) { /* Retries for benh's key */
2241 if ((rc
= ub_sync_tur(sc
, NULL
)) <= 0) break;
2242 if (rc
!= 0x6) break;
2247 for (i
= 0; i
< 3; i
++) {
2248 if ((rc
= ub_sync_getmaxlun(sc
)) < 0) {
2250 * This segment is taken from usb-storage. They say
2251 * that ZIP-100 needs this, but my own ZIP-100 works
2252 * fine without this.
2253 * Still, it does not seem to hurt anything.
2256 ub_probe_clear_stall(sc
, sc
->recv_bulk_pipe
);
2257 ub_probe_clear_stall(sc
, sc
->send_bulk_pipe
);
2268 for (i
= 0; i
< nluns
; i
++) {
2269 ub_probe_lun(sc
, i
);
2273 /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
2275 usb_set_intfdata(intf
, NULL
);
2276 // usb_put_intf(sc->intf);
2277 usb_put_dev(sc
->dev
);
2283 static int ub_probe_lun(struct ub_dev
*sc
, int lnum
)
2287 struct gendisk
*disk
;
2291 if ((lun
= kmalloc(sizeof(struct ub_lun
), GFP_KERNEL
)) == NULL
)
2293 memset(lun
, 0, sizeof(struct ub_lun
));
2297 if ((lun
->id
= ub_id_get()) == -1)
2301 list_add(&lun
->link
, &sc
->luns
);
2303 snprintf(lun
->name
, 16, DRV_NAME
"%c(%d.%d.%d)",
2304 lun
->id
+ 'a', sc
->dev
->bus
->busnum
, sc
->dev
->devnum
, lun
->num
);
2306 lun
->removable
= 1; /* XXX Query this from the device */
2307 lun
->changed
= 1; /* ub_revalidate clears only */
2308 lun
->first_open
= 1;
2309 ub_revalidate(sc
, lun
);
2312 if ((disk
= alloc_disk(UB_MINORS_PER_MAJOR
)) == NULL
)
2316 sprintf(disk
->disk_name
, DRV_NAME
"%c", lun
->id
+ 'a');
2317 sprintf(disk
->devfs_name
, DEVFS_NAME
"/%c", lun
->id
+ 'a');
2318 disk
->major
= UB_MAJOR
;
2319 disk
->first_minor
= lun
->id
* UB_MINORS_PER_MAJOR
;
2320 disk
->fops
= &ub_bd_fops
;
2321 disk
->private_data
= lun
;
2322 disk
->driverfs_dev
= &sc
->intf
->dev
;
2325 if ((q
= blk_init_queue(ub_request_fn
, &sc
->lock
)) == NULL
)
2330 blk_queue_bounce_limit(q
, BLK_BOUNCE_HIGH
);
2331 blk_queue_max_hw_segments(q
, UB_MAX_REQ_SG
);
2332 blk_queue_max_phys_segments(q
, UB_MAX_REQ_SG
);
2333 blk_queue_segment_boundary(q
, 0xffffffff); /* Dubious. */
2334 blk_queue_max_sectors(q
, UB_MAX_SECTORS
);
2335 blk_queue_hardsect_size(q
, lun
->capacity
.bsize
);
2339 set_capacity(disk
, lun
->capacity
.nsec
);
2341 disk
->flags
|= GENHD_FL_REMOVABLE
;
2350 list_del(&lun
->link
);
2358 static void ub_disconnect(struct usb_interface
*intf
)
2360 struct ub_dev
*sc
= usb_get_intfdata(intf
);
2361 struct list_head
*p
;
2363 struct gendisk
*disk
;
2364 unsigned long flags
;
2367 * Prevent ub_bd_release from pulling the rug from under us.
2368 * XXX This is starting to look like a kref.
2369 * XXX Why not to take this ref at probe time?
2371 spin_lock_irqsave(&ub_lock
, flags
);
2373 spin_unlock_irqrestore(&ub_lock
, flags
);
	 * Fence stall clearings, operations triggered by unlinkings and so on.
	 * We do not attempt to unlink any URBs, because we do not trust the
	 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2380 atomic_set(&sc
->poison
, 1);
2383 * Blow away queued commands.
2385 * Actually, this never works, because before we get here
2386 * the HCD terminates outstanding URB(s). It causes our
2387 * SCSI command queue to advance, commands fail to submit,
2388 * and the whole queue drains. So, we just use this code to
2391 spin_lock_irqsave(&sc
->lock
, flags
);
2393 struct ub_scsi_cmd
*cmd
;
2395 while ((cmd
= ub_cmdq_pop(sc
)) != NULL
) {
2396 cmd
->error
= -ENOTCONN
;
2397 cmd
->state
= UB_CMDST_DONE
;
2398 ub_cmdtr_state(sc
, cmd
);
2400 (*cmd
->done
)(sc
, cmd
);
2404 printk(KERN_WARNING
"%s: "
2405 "%d was queued after shutdown\n", sc
->name
, cnt
);
2408 spin_unlock_irqrestore(&sc
->lock
, flags
);
2411 * Unregister the upper layer.
2413 list_for_each (p
, &sc
->luns
) {
2414 lun
= list_entry(p
, struct ub_lun
, link
);
2416 if (disk
->flags
& GENHD_FL_UP
)
2419 * I wish I could do:
2420 * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2421 * As it is, we rely on our internal poisoning and let
2422 * the upper levels to spin furiously failing all the I/O.
	 * Taking a lock on a structure which is about to be freed
	 * is very nonsensical. Here it is largely a way to do a debug freeze,
	 * and a bracket which shows where the nonsensical code segment ends.
	 *
	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
2434 spin_lock_irqsave(&sc
->lock
, flags
);
2435 if (sc
->work_urb
.status
== -EINPROGRESS
) { /* janitors: ignore */
2436 printk(KERN_WARNING
"%s: "
2437 "URB is active after disconnect\n", sc
->name
);
2439 spin_unlock_irqrestore(&sc
->lock
, flags
);
	 * There is virtually no chance that another CPU runs this long
	 * after ub_urb_complete should have called del_timer, but only if the
	 * HCD didn't forget to deliver a callback on unlink.
2446 del_timer_sync(&sc
->work_timer
);
2449 * At this point there must be no commands coming from anyone
2450 * and no URBs left in transit.
2453 device_remove_file(&sc
->intf
->dev
, &dev_attr_diag
);
2454 usb_set_intfdata(intf
, NULL
);
2455 // usb_put_intf(sc->intf);
2457 usb_put_dev(sc
->dev
);
2463 static struct usb_driver ub_driver
= {
2464 .owner
= THIS_MODULE
,
2467 .disconnect
= ub_disconnect
,
2468 .id_table
= ub_usb_ids
,
2471 static int __init
ub_init(void)
2475 if ((rc
= register_blkdev(UB_MAJOR
, DRV_NAME
)) != 0)
2477 devfs_mk_dir(DEVFS_NAME
);
2479 if ((rc
= usb_register(&ub_driver
)) != 0)
2485 devfs_remove(DEVFS_NAME
);
2486 unregister_blkdev(UB_MAJOR
, DRV_NAME
);
2491 static void __exit
ub_exit(void)
2493 usb_deregister(&ub_driver
);
2495 devfs_remove(DEVFS_NAME
);
2496 unregister_blkdev(UB_MAJOR
, DRV_NAME
);
2499 module_init(ub_init
);
2500 module_exit(ub_exit
);
2502 MODULE_LICENSE("GPL");