#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>
#include <linux/usb.h>
/*-------------------------------------------------------------------------*/

/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
	/* inputs */
	unsigned		test_num;	/* 0..(TEST_CASES-1) */
	unsigned		iterations;
	unsigned		length;
	unsigned		vary;
	unsigned		sglen;

	/* outputs */
	struct timeval		duration;
};
#define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
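
/* A minimal user-space sketch of driving these tests (hedged: assumes the
 * usbfs ioctl pass-through from <linux/usbdevice_fs.h>; the field values
 * and the "usbfs_fd" descriptor below are illustrative only):
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	ioctl(usbfs_fd, USBDEVFS_IOCTL, &wrapper);
 *
 * On success, param.duration reports how long the test ran.
 */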
/*-------------------------------------------------------------------------*/

#define	GENERIC		/* let probe() bind using module params */

/* Some devices that can be used for testing will have "real" drivers.
 * Entries for those need to be enabled here by hand, after disabling
 * their "real" drivers.
 */
//#define	IBOT2		/* grab iBOT2 webcams */
//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */

/*-------------------------------------------------------------------------*/
struct usbtest_info {
	const char		*name;
	u8			ep_in;		/* bulk/intr source */
	u8			ep_out;		/* bulk/intr sink */
	unsigned		autoconf:1;
	unsigned		ctrl_out:1;
	unsigned		iso:1;		/* try iso in/out */
	int			alt;
};

/* this is accessed only through usbfs ioctl calls.
 * one ioctl to issue a test ... one lock per device.
 * tests create other threads if they need them.
 * urbs and buffers are allocated dynamically,
 * and data generated deterministically.
 */
struct usbtest_dev {
	struct usb_interface	*intf;
	struct usbtest_info	*info;
	int			in_pipe;
	int			out_pipe;
	int			in_iso_pipe;
	int			out_iso_pipe;
	struct usb_endpoint_descriptor	*iso_in, *iso_out;
	struct mutex		lock;

#define TBUF_SIZE	256
	u8			*buf;
};
static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
	return interface_to_usbdev(test->intf);
}
/* set up all urbs so they can be used with either bulk or interrupt */
#define	INTERRUPT_RATE		1	/* msec/transfer */

#define ERROR(tdev, fmt, args...) \
	dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
	dev_warn(&(tdev)->intf->dev , fmt , ## args)

#define GUARD_BYTE	0xA5
/*-------------------------------------------------------------------------*/

static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt;
	struct usb_host_endpoint	*in, *out;
	struct usb_host_endpoint	*iso_in, *iso_out;
	struct usb_device		*udev;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = NULL;
		iso_in = iso_out = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_BULK:
				break;
			case USB_ENDPOINT_XFER_ISOC:
				if (dev->info->iso)
					goto try_iso;
				/* FALLTHROUGH */
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!in)
					in = e;
			} else {
				if (!out)
					out = e;
			}
			continue;
try_iso:
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!iso_in)
					iso_in = e;
			} else {
				if (!iso_out)
					iso_out = e;
			}
		}
		if ((in && out) || iso_in || iso_out)
			goto found;
	}
	return -EINVAL;

found:
	udev = testdev_to_usbdev(dev);
	if (alt->desc.bAlternateSetting != 0) {
		tmp = usb_set_interface(udev,
				alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	if (in) {
		dev->in_pipe = usb_rcvbulkpipe(udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		dev->out_pipe = usb_sndbulkpipe(udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	}
	if (iso_in) {
		dev->iso_in = &iso_in->desc;
		dev->in_iso_pipe = usb_rcvisocpipe(udev,
				iso_in->desc.bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK);
	}
	if (iso_out) {
		dev->iso_out = &iso_out->desc;
		dev->out_iso_pipe = usb_sndisocpipe(udev,
				iso_out->desc.bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK);
	}
	return 0;
}
/*-------------------------------------------------------------------------*/

/* Support for testing basic non-queued I/O streams.
 *
 * These just package urbs as requests that can be easily canceled.
 * Each urb's data buffer is dynamically allocated; callers can fill
 * them with non-zero test data (or test for it) when appropriate.
 */
static void simple_callback(struct urb *urb)
{
	complete(urb->context);
}
static struct urb *usbtest_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes,
	unsigned		transfer_flags,
	unsigned		offset)
{
	struct urb		*urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return urb;
	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
	urb->interval = (udev->speed == USB_SPEED_HIGH)
			? (INTERRUPT_RATE << 3)
			: INTERRUPT_RATE;
	urb->transfer_flags = transfer_flags;
	if (usb_pipein(pipe))
		urb->transfer_flags |= URB_SHORT_NOT_OK;

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
			GFP_KERNEL, &urb->transfer_dma);
	else
		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);

	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}

	/* To test unaligned transfers add an offset and fill the
		unused memory with a guard value */
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			urb->transfer_dma += offset;
	}

	/* For inbound transfers use guard byte so that test fails if
		data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);
	return urb;
}

static struct urb *simple_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes)
{
	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
}
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
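
/* Since the parameter is writable (S_IWUSR), the pattern can also be
 * changed at run time; assuming the module is loaded as "usbtest":
 *
 *	echo 1 > /sys/module/usbtest/parameters/pattern
 *
 * 0 sends/expects all zeroes, 1 uses the mod63 sequence filled in below.
 */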
static inline void simple_fill_buf(struct urb *urb)
{
	unsigned	i;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->transfer_buffer_length;

	switch (pattern) {
	default:
		/* FALLTHROUGH */
	case 0:
		memset(buf, 0, len);
		break;
	case 1:			/* mod63 */
		for (i = 0; i < len; i++)
			*buf++ = (u8) (i % 63);
		break;
	}
}
static inline unsigned long buffer_offset(void *buf)
{
	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}
static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
	u8		*buf = urb->transfer_buffer;
	u8		*guard = buf - buffer_offset(buf);
	unsigned	i;

	for (i = 0; guard < buf; i++, guard++) {
		if (*guard != GUARD_BYTE) {
			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
				i, *guard, GUARD_BYTE);
			return -EINVAL;
		}
	}
	return 0;
}
static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
	unsigned	i;
	u8		expected;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->actual_length;

	int ret = check_guard_bytes(tdev, urb);
	if (ret)
		return ret;

	for (i = 0; i < len; i++, buf++) {
		switch (pattern) {
		/* all-zeroes has no synchronization issues */
		case 0:
			expected = 0;
			break;
		/* mod63 stays in sync with short-terminated transfers,
		 * or otherwise when host and gadget agree on how large
		 * each usb transfer request should be.  resync is done
		 * with set_interface or set_config.
		 */
		case 1:			/* mod63 */
			expected = i % 63;
			break;
		/* always fail unsupported patterns */
		default:
			expected = !*buf;
			break;
		}
		if (*buf == expected)
			continue;
		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
		return -EINVAL;
	}
	return 0;
}
static void simple_free_urb(struct urb *urb)
{
	unsigned long offset = buffer_offset(urb->transfer_buffer);

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		usb_free_coherent(
			urb->dev,
			urb->transfer_buffer_length + offset,
			urb->transfer_buffer - offset,
			urb->transfer_dma - offset);
	else
		kfree(urb->transfer_buffer - offset);
	usb_free_urb(urb);
}
static int simple_io(
	struct usbtest_dev	*tdev,
	struct urb		*urb,
	int			iterations,
	int			vary,
	int			expected,
	const char		*label
)
{
	struct usb_device	*udev = urb->dev;
	int			max = urb->transfer_buffer_length;
	struct completion	completion;
	int			retval = 0;

	urb->context = &completion;
	while (retval == 0 && iterations-- > 0) {
		init_completion(&completion);
		if (usb_pipeout(urb->pipe)) {
			simple_fill_buf(urb);
			urb->transfer_flags |= URB_ZERO_PACKET;
		}
		retval = usb_submit_urb(urb, GFP_KERNEL);
		if (retval != 0)
			break;

		/* NOTE:  no timeouts; can't be broken out of by interrupt */
		wait_for_completion(&completion);
		retval = urb->status;
		if (retval == 0 && usb_pipein(urb->pipe))
			retval = simple_check_buf(tdev, urb);

		if (vary) {
			int	len = urb->transfer_buffer_length;

			len += vary;
			len %= max;
			if (len == 0)
				len = (vary < max) ? vary : max;
			urb->transfer_buffer_length = len;
		}

		/* FIXME if endpoint halted, clear halt (and log) */
	}
	urb->transfer_buffer_length = max;

	if (expected != retval)
		dev_err(&udev->dev,
			"%s failed, iterations left %d, status %d (not %d)\n",
				label, iterations, retval, expected);
	return retval;
}
/*-------------------------------------------------------------------------*/

/* We use scatterlist primitives to test queued I/O.
 * Yes, this also tests the scatterlist primitives.
 */

static void free_sglist(struct scatterlist *sg, int nents)
{
	unsigned	i;

	if (!sg)
		return;
	for (i = 0; i < nents; i++) {
		if (!sg_page(&sg[i]))
			continue;
		kfree(sg_virt(&sg[i]));
	}
	kfree(sg);
}
static struct scatterlist *
alloc_sglist(int nents, int max, int vary)
{
	struct scatterlist	*sg;
	unsigned		i;
	unsigned		size = max;

	sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
	if (!sg)
		return NULL;
	sg_init_table(sg, nents);

	for (i = 0; i < nents; i++) {
		char		*buf;
		unsigned	j;

		buf = kzalloc(size, GFP_KERNEL);
		if (!buf) {
			free_sglist(sg, i);
			return NULL;
		}

		/* kmalloc pages are always physically contiguous! */
		sg_set_buf(&sg[i], buf, size);

		switch (pattern) {
		case 0:
			/* already zeroed by kzalloc */
			break;
		case 1:
			for (j = 0; j < size; j++)
				*buf++ = (u8) (j % 63);
			break;
		}

		if (vary) {
			size += vary;
			size %= max;
			if (size == 0)
				size = (vary < max) ? vary : max;
		}
	}

	return sg;
}
static int perform_sglist(
	struct usbtest_dev	*tdev,
	unsigned		iterations,
	int			pipe,
	struct usb_sg_request	*req,
	struct scatterlist	*sg,
	int			nents
)
{
	struct usb_device	*udev = testdev_to_usbdev(tdev);
	int			retval = 0;

	while (retval == 0 && iterations-- > 0) {
		retval = usb_sg_init(req, udev, pipe,
				(udev->speed == USB_SPEED_HIGH)
					? (INTERRUPT_RATE << 3)
					: INTERRUPT_RATE,
				sg, nents, 0, GFP_KERNEL);

		if (retval)
			break;
		usb_sg_wait(req);
		retval = req->status;

		/* FIXME check resulting data pattern */

		/* FIXME if endpoint halted, clear halt (and log) */
	}

	/* FIXME for unlink or fault handling tests, don't report
	 * failure if retval is as we expected ...
	 */
	if (retval)
		ERROR(tdev, "perform_sglist failed, "
				"iterations left %d, status %d\n",
				iterations, retval);
	return retval;
}
/*-------------------------------------------------------------------------*/

/* unqueued control message testing
 *
 * there's a nice set of device functional requirements in chapter 9 of the
 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
 * special test firmware.
 *
 * we know the device is configured (or suspended) by the time it's visible
 * through usbfs.  we can't change that, so we won't test enumeration (which
 * worked 'well enough' to get here, this time), power management (ditto),
 * or remote wakeup (which needs human interaction).
 */

static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
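
/* For stricter chapter 9 compliance checking, clear this at load time,
 * e.g. "modprobe usbtest realworld=0" (illustrative invocation).
 */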
static int get_altsetting(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			retval;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
			0, iface->altsetting[0].desc.bInterfaceNumber,
			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
	switch (retval) {
	case 1:
		return dev->buf[0];
	case 0:
		retval = -ERANGE;
		/* FALLTHROUGH */
	default:
		return retval;
	}
}
static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev;

	if (alternate < 0 || alternate >= 256)
		return -EINVAL;

	udev = interface_to_usbdev(iface);
	return usb_set_interface(udev,
			iface->altsetting[0].desc.bInterfaceNumber,
			alternate);
}
static int is_good_config(struct usbtest_dev *tdev, int len)
{
	struct usb_config_descriptor	*config;

	if (len < sizeof *config)
		return 0;
	config = (struct usb_config_descriptor *) tdev->buf;

	switch (config->bDescriptorType) {
	case USB_DT_CONFIG:
	case USB_DT_OTHER_SPEED_CONFIG:
		if (config->bLength != 9) {
			ERROR(tdev, "bogus config descriptor length\n");
			return 0;
		}
		/* this bit 'must be 1' but often isn't */
		if (!realworld && !(config->bmAttributes & 0x80)) {
			ERROR(tdev, "high bit of config attributes not set\n");
			return 0;
		}
		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
			ERROR(tdev, "reserved config bits set\n");
			return 0;
		}
		break;
	default:
		return 0;
	}

	if (le16_to_cpu(config->wTotalLength) == len)		/* read it all */
		return 1;
	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
		return 1;
	ERROR(tdev, "bogus config descriptor read size\n");
	return 0;
}
/* sanity test for standard requests working with usb_control_msg() and some
 * of the utility functions which use it.
 *
 * this doesn't test how endpoint halts behave or data toggles get set, since
 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
 * halt or toggle).  toggle testing is impractical without support from hcds.
 *
 * this avoids failing devices linux would normally work with, by not testing
 * config/altsetting operations for devices that only support their defaults.
 * such devices rarely support those needless operations.
 *
 * NOTE that since this is a sanity test, it's not examining boundary cases
 * to see if usbcore, hcd, and device all behave right.  such testing would
 * involve varied read sizes and other operation sequences.
 */
static int ch9_postconfig(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			i, alt, retval;

	/* [9.2.3] if there's more than one altsetting, we need to be able to
	 * set and get each one.  mostly trusts the descriptors from usbcore.
	 */
	for (i = 0; i < iface->num_altsetting; i++) {

		/* 9.2.3 constrains the range here */
		alt = iface->altsetting[i].desc.bAlternateSetting;
		if (alt < 0 || alt >= iface->num_altsetting) {
			dev_err(&iface->dev,
					"invalid alt [%d].bAltSetting = %d\n",
					i, alt);
		}

		/* [real world] get/set unimplemented if there's only one */
		if (realworld && iface->num_altsetting == 1)
			continue;

		/* [9.4.10] set_interface */
		retval = set_altsetting(dev, alt);
		if (retval) {
			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
					alt, retval);
			return retval;
		}

		/* [9.4.4] get_interface always works */
		retval = get_altsetting(dev);
		if (retval != alt) {
			dev_err(&iface->dev, "get alt should be %d, was %d\n",
					alt, retval);
			return (retval < 0) ? retval : -EDOM;
		}

	}

	/* [real world] get_config unimplemented if there's only one */
	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
		int	expected = udev->actconfig->desc.bConfigurationValue;

		/* [9.4.2] get_configuration always works
		 * ... although some cheap devices (like one TI Hub I've got)
		 * won't return config descriptors except before set_config.
		 */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				USB_REQ_GET_CONFIGURATION,
				USB_DIR_IN | USB_RECIP_DEVICE,
				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
		if (retval != 1 || dev->buf[0] != expected) {
			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
				retval, dev->buf[0], expected);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* there's always [9.4.3] a device descriptor [9.6.1] */
	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
			dev->buf, sizeof udev->descriptor);
	if (retval != sizeof udev->descriptor) {
		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
				dev->buf, TBUF_SIZE);
		if (!is_good_config(dev, retval)) {
			dev_err(&iface->dev,
					"config [%d] descriptor --> %d\n",
					i, retval);
			return (retval < 0) ? retval : -EDOM;
		}

		/* FIXME cross-checking udev->config[i] to make sure usbcore
		 * parsed it right (etc) would be good testing paranoia
		 */
	}

	/* and sometimes [9.2.6.6] speed dependent descriptors */
	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
		struct usb_qualifier_descriptor *d = NULL;

		/* device qualifier [9.6.2] */
		retval = usb_get_descriptor(udev,
				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
				sizeof(struct usb_qualifier_descriptor));
		if (retval == -EPIPE) {
			if (udev->speed == USB_SPEED_HIGH) {
				dev_err(&iface->dev,
						"hs dev qualifier --> %d\n",
						retval);
				return (retval < 0) ? retval : -EDOM;
			}
			/* usb2.0 but not high-speed capable; fine */
		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
			return (retval < 0) ? retval : -EDOM;
		} else
			d = (struct usb_qualifier_descriptor *) dev->buf;

		/* might not have [9.6.2] any other-speed configs [9.6.4] */
		if (d) {
			unsigned max = d->bNumConfigurations;
			for (i = 0; i < max; i++) {
				retval = usb_get_descriptor(udev,
					USB_DT_OTHER_SPEED_CONFIG, i,
					dev->buf, TBUF_SIZE);
				if (!is_good_config(dev, retval)) {
					dev_err(&iface->dev,
						"other speed config --> %d\n",
						retval);
					return (retval < 0) ? retval : -EDOM;
				}
			}
		}
	}
	/* FIXME fetch strings from at least the device descriptor */

	/* [9.4.5] get_status always works */
	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
	if (retval < 0) {
		dev_err(&iface->dev, "get dev status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/* FIXME configuration.bmAttributes says if we could try to set/clear
	 * the device's remote wakeup feature ... if we can, test that here
	 */

	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
	if (retval < 0) {
		dev_err(&iface->dev, "get interface status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}
	/* FIXME get status for each endpoint in the interface */

	return 0;
}
/*-------------------------------------------------------------------------*/

/* use ch9 requests to test whether:
 *   (a) queues work for control, keeping N subtests queued and
 *       active (auto-resubmit) for M loops through the queue.
 *   (b) protocol stalls (control-only) will autorecover.
 *       it's not like bulk/intr; no halt clearing.
 *   (c) short control reads are reported and handled.
 *   (d) queues are always processed in-order
 */

struct ctrl_ctx {
	spinlock_t		lock;
	struct usbtest_dev	*dev;
	struct completion	complete;
	unsigned		count;
	unsigned		pending;
	int			status;
	struct urb		**urb;
	struct usbtest_param	*param;
	int			last;
};

#define NUM_SUBCASES	15		/* how many test subcases here? */

struct subcase {
	struct usb_ctrlrequest	setup;
	int			number;
	int			expected;
};
static void ctrl_complete(struct urb *urb)
{
	struct ctrl_ctx		*ctx = urb->context;
	struct usb_ctrlrequest	*reqp;
	struct subcase		*subcase;
	int			status = urb->status;

	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
	subcase = container_of(reqp, struct subcase, setup);

	spin_lock(&ctx->lock);
	ctx->count--;
	ctx->pending--;

	/* queue must transfer and complete in fifo order, unless
	 * usb_unlink_urb() is used to unlink something not at the
	 * physical queue head (not tested).
	 */
	if (subcase->number > 0) {
		if ((subcase->number - ctx->last) != 1) {
			ERROR(ctx->dev,
				"subcase %d completed out of order, last %d\n",
				subcase->number, ctx->last);
			status = -EDOM;
			ctx->last = subcase->number;
			goto error;
		}
	}
	ctx->last = subcase->number;

	/* succeed or fault in only one way? */
	if (status == subcase->expected)
		status = 0;

	/* async unlink for cleanup? */
	else if (status != -ECONNRESET) {

		/* some faults are allowed, not required */
		if (subcase->expected > 0 && (
			  ((status == -subcase->expected	/* happened */
			   || status == 0))))			/* didn't */
			status = 0;
		/* sometimes more than one fault is allowed */
		else if (subcase->number == 12 && status == -EPIPE)
			status = 0;
		else
			ERROR(ctx->dev, "subtest %d error, status %d\n",
					subcase->number, status);
	}

	/* unexpected status codes mean errors; ideally, in hardware */
	if (status) {
error:
		if (ctx->status == 0) {
			int		i;

			ctx->status = status;
			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
					"%d left, subcase %d, len %d/%d\n",
					reqp->bRequestType, reqp->bRequest,
					status, ctx->count, subcase->number,
					urb->actual_length,
					urb->transfer_buffer_length);

			/* FIXME this "unlink everything" exit route should
			 * be a separate test case.
			 */

			/* unlink whatever's still pending */
			for (i = 1; i < ctx->param->sglen; i++) {
				struct urb	*u = ctx->urb[
							(i + subcase->number)
							% ctx->param->sglen];

				if (u == urb || !u->dev)
					continue;
				spin_unlock(&ctx->lock);
				status = usb_unlink_urb(u);
				spin_lock(&ctx->lock);
				switch (status) {
				case -EINPROGRESS:
				case -EBUSY:
				case -EIDRM:
					continue;
				default:
					ERROR(ctx->dev, "urb unlink --> %d\n",
							status);
				}
			}
			status = ctx->status;
		}
	}

	/* resubmit if we need to, else mark this as done */
	if ((status == 0) && (ctx->pending < ctx->count)) {
		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status != 0) {
			ERROR(ctx->dev,
				"can't resubmit ctrl %02x.%02x, err %d\n",
				reqp->bRequestType, reqp->bRequest, status);
			urb->dev = NULL;
		} else
			ctx->pending++;
	} else
		urb->dev = NULL;

	/* signal completion when nothing's queued */
	if (ctx->pending == 0)
		complete(&ctx->complete);
	spin_unlock(&ctx->lock);
}
static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
{
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct urb		**urb;
	struct ctrl_ctx		context;
	int			i;

	spin_lock_init(&context.lock);
	context.dev = dev;
	init_completion(&context.complete);
	context.count = param->sglen * param->iterations;
	context.pending = 0;
	context.status = -ENOMEM;
	context.param = param;
	context.last = -1;

	/* allocate and init the urbs we'll queue.
	 * as with bulk/intr sglists, sglen is the queue depth; it also
	 * controls which subtests run (more tests than sglen) or rerun.
	 */
	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
	if (!urb)
		return -ENOMEM;
	for (i = 0; i < param->sglen; i++) {
		int			pipe = usb_rcvctrlpipe(udev, 0);
		unsigned		len;
		struct urb		*u;
		struct usb_ctrlrequest	req;
		struct subcase		*reqp;

		/* sign of this variable means:
		 *  -: tested code must return this (negative) error code
		 *  +: tested code may return this (negative too) error code
		 */
		int			expected = 0;

		/* requests here are mostly expected to succeed on any
		 * device, but some are chosen to trigger protocol stalls
		 * or short reads.
		 */
		memset(&req, 0, sizeof req);
		req.bRequest = USB_REQ_GET_DESCRIPTOR;
		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;

		switch (i % NUM_SUBCASES) {
		case 0:		/* get device descriptor */
			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
			len = sizeof(struct usb_device_descriptor);
			break;
		case 1:		/* get first config descriptor (only) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			break;
		case 2:		/* get altsetting (OFTEN STALLS) */
			req.bRequest = USB_REQ_GET_INTERFACE;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* index = 0 means first interface */
			len = 1;
			expected = EPIPE;
			break;
		case 3:		/* get interface status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* interface 0 */
			len = 2;
			break;
		case 4:		/* get device status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
			len = 2;
			break;
		case 5:		/* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
			len = sizeof(struct usb_qualifier_descriptor);
			if (udev->speed != USB_SPEED_HIGH)
				expected = EPIPE;
			break;
		case 6:		/* get first config descriptor, plus interface */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			len += sizeof(struct usb_interface_descriptor);
			break;
		case 7:		/* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
			/* interface == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = -EPIPE;
			break;
		/* NOTE: two consecutive stalls in the queue here.
		 *  that tests fault recovery a bit more aggressively. */
		case 8:		/* clear endpoint halt (MAY STALL) */
			req.bRequest = USB_REQ_CLEAR_FEATURE;
			req.bRequestType = USB_RECIP_ENDPOINT;
			/* wValue 0 == ep halt */
			/* wIndex 0 == ep0 (shouldn't halt!) */
			len = 0;
			pipe = usb_sndctrlpipe(udev, 0);
			expected = EPIPE;
			break;
		case 9:		/* get endpoint status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
			/* endpoint 0 */
			len = 2;
			break;
		case 10:	/* trigger short read (EREMOTEIO) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = 1024;
			expected = -EREMOTEIO;
			break;
		/* NOTE: two consecutive _different_ faults in the queue. */
		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
			/* endpoint == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = EPIPE;
			break;
		/* NOTE: sometimes even a third fault in the queue! */
		case 12:	/* get string 0 descriptor (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
			/* string == 0, for language IDs */
			len = sizeof(struct usb_interface_descriptor);
			/* may succeed when > 4 languages */
			expected = EREMOTEIO;	/* or EPIPE, if no strings */
			break;
		case 13:	/* short read, resembling case 10 */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			/* last data packet "should" be DATA1, not DATA0 */
			len = 1024 - udev->descriptor.bMaxPacketSize0;
			expected = -EREMOTEIO;
			break;
		case 14:	/* short read; try to fill the last packet */
			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
			/* device descriptor size == 18 bytes */
			len = udev->descriptor.bMaxPacketSize0;
			if (udev->speed == USB_SPEED_SUPER)
				len = 512;
			expected = -EREMOTEIO;
			break;
		default:
			ERROR(dev, "bogus number of ctrl queue testcases!\n");
			context.status = -EINVAL;
			goto cleanup;
		}
		req.wLength = cpu_to_le16(len);
		urb[i] = u = simple_alloc_urb(udev, pipe, len);
		if (!u)
			goto cleanup;

		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
		if (!reqp)
			goto cleanup;
		reqp->setup = req;
		reqp->number = i % NUM_SUBCASES;
		reqp->expected = expected;
		u->setup_packet = (char *) &reqp->setup;

		u->context = &context;
		u->complete = ctrl_complete;
	}

	/* queue the urbs */
	context.urb = urb;
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
		if (context.status != 0) {
			ERROR(dev, "can't submit urb[%d], status %d\n",
					i, context.status);
			context.count = context.pending;
			break;
		}
		context.pending++;
	}
	spin_unlock_irq(&context.lock);

	/* FIXME  set timer and time out; provide a disconnect hook */

	/* wait for the last one to complete */
	if (context.pending > 0)
		wait_for_completion(&context.complete);

cleanup:
	for (i = 0; i < param->sglen; i++) {
		if (!urb[i])
			continue;
		urb[i]->dev = udev;
		kfree(urb[i]->setup_packet);
		simple_free_urb(urb[i]);
	}
	kfree(urb);
	return context.status;
}
/*-------------------------------------------------------------------------*/

static void unlink1_callback(struct urb *urb)
{
	int	status = urb->status;

	/* we "know" -EPIPE (stall) never happens */
	if (!status)
		status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		urb->status = status;
		complete(urb->context);
	}
}

static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
	struct urb		*urb;
	struct completion	completion;
	int			retval = 0;

	init_completion(&completion);
	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
	if (!urb)
		return -ENOMEM;
	urb->context = &completion;
	urb->complete = unlink1_callback;

	/* keep the endpoint busy.  there are lots of hc/hcd-internal
	 * states, and testing should get to all of them over time.
	 *
	 * FIXME want additional tests for when endpoint is STALLing
	 * due to errors, or is just NAKing requests.
	 */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval != 0) {
		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
		return retval;
	}

	/* unlinking that should always work.  variable delay tests more
	 * hcd states and code paths, even with little other system load.
	 */
	msleep(jiffies % (2 * INTERRUPT_RATE));
	if (async) {
		while (!completion_done(&completion)) {
			retval = usb_unlink_urb(urb);

			switch (retval) {
			case -EBUSY:
			case -EIDRM:
				/* we can't unlink urbs while they're completing
				 * or if they've completed, and we haven't
				 * resubmitted. "normal" drivers would prevent
				 * resubmission, but since we're testing unlink
				 * paths, we can't.
				 */
				ERROR(dev, "unlink retry\n");
				continue;
			case 0:
			case -EINPROGRESS:
				break;
			default:
				dev_err(&dev->intf->dev,
					"unlink fail %d\n", retval);
				return retval;
			}
			break;
		}
	} else
		usb_kill_urb(urb);

	wait_for_completion(&completion);
	retval = urb->status;
	simple_free_urb(urb);

	if (async)
		return (retval == -ECONNRESET) ? 0 : retval - 1000;
	else
		return (retval == -ENOENT || retval == -EPERM) ?
				0 : retval - 2000;
}
static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
{
	int			retval = 0;

	/* test sync and async paths */
	retval = unlink1(dev, pipe, len, 1);
	if (!retval)
		retval = unlink1(dev, pipe, len, 0);
	return retval;
}

/*-------------------------------------------------------------------------*/
struct queued_ctx {
	struct completion	complete;
	atomic_t		pending;
	unsigned		num;
	int			status;
	struct urb		**urbs;
};

static void unlink_queued_callback(struct urb *urb)
{
	int			status = urb->status;
	struct queued_ctx	*ctx = urb->context;

	if (ctx->status)
		goto done;
	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
		if (status == -ECONNRESET)
			goto done;
		/* What error should we report if the URB completed normally? */
	}
	if (status != 0)
		ctx->status = status;

 done:
	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->complete);
}
static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
		unsigned size)
{
	struct queued_ctx	ctx;
	struct usb_device	*udev = testdev_to_usbdev(dev);
	void			*buf;
	dma_addr_t		buf_dma;
	int			i;
	int			retval = -ENOMEM;

	init_completion(&ctx.complete);
	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
	ctx.num = num;
	ctx.status = 0;

	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
	if (!buf)
		return retval;
	memset(buf, 0, size);

	/* Allocate and init the urbs we'll queue */
	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
	if (!ctx.urbs)
		goto free_buf;
	for (i = 0; i < num; i++) {
		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!ctx.urbs[i])
			goto free_urbs;
		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
				unlink_queued_callback, &ctx);
		ctx.urbs[i]->transfer_dma = buf_dma;
		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
	}

	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
	for (i = 0; i < num; i++) {
		atomic_inc(&ctx.pending);
		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
		if (retval != 0) {
			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
					i, retval);
			atomic_dec(&ctx.pending);
			ctx.status = retval;
			break;
		}
	}
	if (i == num) {
		usb_unlink_urb(ctx.urbs[num - 4]);
		usb_unlink_urb(ctx.urbs[num - 2]);
	} else {
		while (--i >= 0)
			usb_unlink_urb(ctx.urbs[i]);
	}

	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
		complete(&ctx.complete);
	wait_for_completion(&ctx.complete);
	retval = ctx.status;

 free_urbs:
	for (i = 0; i < num; i++)
		usb_free_urb(ctx.urbs[i]);
	kfree(ctx.urbs);
 free_buf:
	usb_free_coherent(udev, size, buf, buf_dma);
	return retval;
}
/*-------------------------------------------------------------------------*/

static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* shouldn't look or act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 0) {
		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
	if (retval != 0)
		return -EINVAL;
	return 0;
}

static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* should look and act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
				ep, retval);
		return retval;
	}
	le16_to_cpus(&status);
	if (status != 1) {
		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
	if (retval != -EPIPE)
		return -EINVAL;
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
	if (retval != -EPIPE)
		return -EINVAL;
	return 0;
}
static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;

	/* shouldn't look or act halted now */
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* set halt (protocol test only), verify it worked */
	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
			USB_ENDPOINT_HALT, ep,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* clear halt (tests API + protocol), verify it worked */
	retval = usb_clear_halt(urb->dev, urb->pipe);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
		return retval;
	}

	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* NOTE:  could also verify SET_INTERFACE clear halts ... */

	return 0;
}
static int halt_simple(struct usbtest_dev *dev)
{
	int		ep;
	int		retval = 0;
	struct urb	*urb;

	urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
	if (urb == NULL)
		return -ENOMEM;

	if (dev->in_pipe) {
		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
		urb->pipe = dev->in_pipe;
		retval = test_halt(dev, ep, urb);
		if (retval < 0)
			goto done;
	}

	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	}
done:
	simple_free_urb(urb);
	return retval;
}
1415 /* Control OUT tests use the vendor control requests from Intel's
1416 * USB 2.0 compliance test device: write a buffer, read it back.
1418 * Intel's spec only _requires_ that it work for one packet, which
1419 * is pretty weak. Some HCDs place limits here; most devices will
1420 * need to be able to handle more than one OUT data packet. We'll
1421 * try whatever we're told to try.
1423 static int ctrl_out(struct usbtest_dev
*dev
,
1424 unsigned count
, unsigned length
, unsigned vary
, unsigned offset
)
1430 struct usb_device
*udev
;
1432 if (length
< 1 || length
> 0xffff || vary
>= length
)
1435 buf
= kmalloc(length
+ offset
, GFP_KERNEL
);
1440 udev
= testdev_to_usbdev(dev
);
1444 /* NOTE: hardware might well act differently if we pushed it
1445 * with lots back-to-back queued requests.
1447 for (i
= 0; i
< count
; i
++) {
1448 /* write patterned data */
1449 for (j
= 0; j
< len
; j
++)
1451 retval
= usb_control_msg(udev
, usb_sndctrlpipe(udev
, 0),
1452 0x5b, USB_DIR_OUT
|USB_TYPE_VENDOR
,
1453 0, 0, buf
, len
, USB_CTRL_SET_TIMEOUT
);
1454 if (retval
!= len
) {
1457 ERROR(dev
, "ctrl_out, wlen %d (expected %d)\n",
1464 /* read it back -- assuming nothing intervened!! */
1465 retval
= usb_control_msg(udev
, usb_rcvctrlpipe(udev
, 0),
1466 0x5c, USB_DIR_IN
|USB_TYPE_VENDOR
,
1467 0, 0, buf
, len
, USB_CTRL_GET_TIMEOUT
);
1468 if (retval
!= len
) {
1471 ERROR(dev
, "ctrl_out, rlen %d (expected %d)\n",
1478 /* fail if we can't verify */
1479 for (j
= 0; j
< len
; j
++) {
1480 if (buf
[j
] != (u8
) (i
+ j
)) {
1481 ERROR(dev
, "ctrl_out, byte %d is %d not %d\n",
1482 j
, buf
[j
], (u8
) i
+ j
);
1494 /* [real world] the "zero bytes IN" case isn't really used.
1495 * hardware can easily trip up in this weird case, since its
1496 * status stage is IN, not OUT like other ep0in transfers.
1499 len
= realworld
? 1 : 0;
1503 ERROR(dev
, "ctrl_out %s failed, code %d, count %d\n",
1506 kfree(buf
- offset
);
/*-------------------------------------------------------------------------*/

/* ISO tests ... mimics common usage
 *  - buffer length is split into N packets (mostly maxpacket sized)
 *  - multi-buffers according to sglen
 */

struct iso_context {
	unsigned		count;
	unsigned		pending;
	spinlock_t		lock;
	struct completion	done;
	int			submit_error;
	unsigned long		errors;
	unsigned long		packet_count;
	struct usbtest_dev	*dev;
};

static void iso_callback(struct urb *urb)
{
	struct iso_context	*ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += urb->number_of_packets;
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:			/* disconnected */
		case -ESHUTDOWN:		/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"iso test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}
static struct urb *iso_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	struct usb_endpoint_descriptor	*desc,
	long			bytes,
	unsigned		offset
)
{
	struct urb		*urb;
	unsigned		i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use guard byte so that test fails if
		data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}
static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context	context;
	struct usb_device	*udev;
	unsigned		i;
	unsigned long		packets = 0;
	int			status = 0;
	struct urb		*urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof context);
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof urbs);
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		if (!urbs[i]) {
			status = -ENOMEM;
			goto fail;
		}
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);

	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		++context.pending;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	/*
	 * Isochronous transfers are expected to fail sometimes.  As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}
static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}
/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test.  So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests.  Use them along with
 * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
 * video capture, and so on.  Run different tests at different times, in
 * different sequences.  Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion.  But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths.  Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect().  To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct usbtest_param	*param = buf;
	int			retval = -EOPNOTSUPP;
	struct urb		*urb;
	struct scatterlist	*sg;
	struct usb_sg_request	req;
	struct timeval		start;
	unsigned		i;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (code != USBTEST_REQUEST)
		return -EOPNOTSUPP;

	if (param->iterations <= 0)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints.  some tests change
	 * altsettings; force a default so most tests don't need to check.
	 */
	if (dev->info->alt >= 0) {
		int	res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests!  cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0:  NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1:  write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;
	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2:  read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;
	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3:  write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test3");
		simple_free_urb(urb);
		break;
	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4:  read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test4");
		simple_free_urb(urb);
		break;

	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5:  write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6:  read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9:  ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		if (param->sglen == 0)
			break;
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10:  queue %d control calls, %d times\n",
				param->sglen,
				param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
				"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
				"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);

		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;

	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15:  write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE:  iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16:  read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE:  iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;

	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17:  write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18:  read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19:  write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20:  read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;

	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;

	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;

	}
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}
/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef	GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device	*udev;
	struct usbtest_dev	*dev;
	struct usbtest_info	*info;
	char			*rtest, *wtest;
	char			*irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef	GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of difference that are
	 * visible with high speed interrupts:  bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}


static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}
/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips.  There's a default vendor ID which can be overridden
 * by (very) small config EEPROMS, but otherwise all these devices act
 * identically until firmware is loaded:  only EP0 works.  It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior.  For now, we expect that kind of firmware.
 */

/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name		= "EZ-USB device",
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name		= "FX2 device",
};

/* ezusb family device with dedicated usb test firmware,
 */
static struct usbtest_info fw_info = {
	.name		= "usb test device",
	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
	.iso		= 1,		/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin.  different versions of this use
 * different hardware with the same vendor/product codes.
 * host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name		= "Linux gadget zero",
};

static struct usbtest_info um_info = {
	.name		= "Linux user mode test driver",
};

static struct usbtest_info um2_info = {
	.name		= "Linux user mode ISO test driver",
};

#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name		= "iBOT2 webcam",
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name		= "Generic USB device",
};
#endif
static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver usbtest_driver = {
	.name =		"usbtest",
	.id_table =	id_table,
	.probe =	usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect =	usbtest_disconnect,
	.suspend =	usbtest_suspend,
	.resume =	usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");