#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>

#include <linux/usb.h>

/*-------------------------------------------------------------------------*/

/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
	/* inputs */
	unsigned		test_num;	/* 0..(TEST_CASES-1) */
	unsigned		iterations;
	unsigned		length;
	unsigned		vary;
	unsigned		sglen;

	/* outputs */
	struct timeval		duration;
};
#define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
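
/*
 * Illustrative sketch (editorial addition, not part of the driver): one way
 * a user-mode tool might issue USBTEST_REQUEST through usbfs, in the spirit
 * of the kernel's testusb tool.  The device path, interface number, and test
 * parameters below are made-up examples; struct usbtest_param itself would
 * have to be copied into user space, as the FIXME above notes.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usbdevice_fs.h>
 *
 *	int run_usbtest(const char *devpath, int ifnum)
 *	{
 *		struct usbtest_param	param;
 *		struct usbdevfs_ioctl	wrapper;
 *		int fd = open(devpath, O_RDWR);	// e.g. /dev/bus/usb/001/002
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&param, 0, sizeof param);
 *		param.test_num = 1;		// simple bulk write test
 *		param.iterations = 1000;
 *		param.length = 512;
 *		param.vary = 0;
 *		param.sglen = 32;
 *
 *		wrapper.ifno = ifnum;		// interface bound to usbtest
 *		wrapper.ioctl_code = USBTEST_REQUEST;
 *		wrapper.data = &param;
 *		return ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 *	}
 */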

/*-------------------------------------------------------------------------*/

#define GENERIC		/* let probe() bind using module params */

/* Some devices that can be used for testing will have "real" drivers.
 * Entries for those need to be enabled here by hand, after disabling
 * their "real" drivers.
 */
//#define	IBOT2		/* grab iBOT2 webcams */
//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */

/*-------------------------------------------------------------------------*/

struct usbtest_info {
	const char		*name;
	u8			ep_in;		/* bulk/intr source */
	u8			ep_out;		/* bulk/intr sink */
	unsigned		autoconf:1;
	unsigned		ctrl_out:1;
	unsigned		iso:1;		/* try iso in/out */
	int			alt;
};

/* this is accessed only through usbfs ioctl calls.
 * one ioctl to issue a test ... one lock per device.
 * tests create other threads if they need them.
 * urbs and buffers are allocated dynamically,
 * and data generated deterministically.
 */
struct usbtest_dev {
	struct usb_interface	*intf;
	struct usbtest_info	*info;
	int			in_pipe;
	int			out_pipe;
	int			in_iso_pipe;
	int			out_iso_pipe;
	struct usb_endpoint_descriptor	*iso_in, *iso_out;
	struct mutex		lock;
	u8			*buf;
};

static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
	return interface_to_usbdev(test->intf);
}

/* set up all urbs so they can be used with either bulk or interrupt */
#define INTERRUPT_RATE		1	/* msec/transfer */

#define ERROR(tdev, fmt, args...) \
	dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
	dev_warn(&(tdev)->intf->dev , fmt , ## args)

#define GUARD_BYTE	0xA5
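
/*
 * Editorial note (not in the original source): the unaligned-transfer tests
 * below over-allocate each buffer by a small 'offset', fill those leading
 * bytes with GUARD_BYTE, then advance transfer_buffer/transfer_dma past
 * them.  check_guard_bytes() later verifies the 0xA5 prefix is untouched,
 * and IN buffers are pre-filled with GUARD_BYTE so stale data that was never
 * overwritten by the transfer is also detected.
 */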

/*-------------------------------------------------------------------------*/

static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
	struct usb_host_interface	*alt;
	struct usb_host_endpoint	*in, *out;
	struct usb_host_endpoint	*iso_in, *iso_out;
	struct usb_device		*udev;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {

		iso_in = iso_out = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_BULK:

			case USB_ENDPOINT_XFER_ISOC:

			if (usb_endpoint_dir_in(&e->desc)) {

			if (usb_endpoint_dir_in(&e->desc)) {

		if ((in && out) || iso_in || iso_out)

	udev = testdev_to_usbdev(dev);
	if (alt->desc.bAlternateSetting != 0) {
		tmp = usb_set_interface(udev,
				alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);

	dev->in_pipe = usb_rcvbulkpipe(udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out_pipe = usb_sndbulkpipe(udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->iso_in = &iso_in->desc;
	dev->in_iso_pipe = usb_rcvisocpipe(udev,
			iso_in->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);

	dev->iso_out = &iso_out->desc;
	dev->out_iso_pipe = usb_sndisocpipe(udev,
			iso_out->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	return 0;
}

/*-------------------------------------------------------------------------*/

/* Support for testing basic non-queued I/O streams.
 *
 * These just package urbs as requests that can be easily canceled.
 * Each urb's data buffer is dynamically allocated; callers can fill
 * them with non-zero test data (or test for it) when appropriate.
 */

static void simple_callback(struct urb *urb)
{
	complete(urb->context);
}

static struct urb *usbtest_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes,
	unsigned		transfer_flags,
	unsigned		offset)
{
	struct urb		*urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
	urb->interval = (udev->speed == USB_SPEED_HIGH)
			? (INTERRUPT_RATE << 3)
			: INTERRUPT_RATE;
	urb->transfer_flags = transfer_flags;
	if (usb_pipein(pipe))
		urb->transfer_flags |= URB_SHORT_NOT_OK;

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
			GFP_KERNEL, &urb->transfer_dma);
	else
		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);

	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}

	/* To test unaligned transfers add an offset and fill the
		unused memory with a guard value */
	memset(urb->transfer_buffer, GUARD_BYTE, offset);
	urb->transfer_buffer += offset;
	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		urb->transfer_dma += offset;

	/* For inbound transfers use guard byte so that test fails if
		data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);
	return urb;
}

static struct urb *simple_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes)
{
	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
}
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");

static inline void simple_fill_buf(struct urb *urb)
{
	unsigned	i;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->transfer_buffer_length;

	for (i = 0; i < len; i++)
		*buf++ = (u8) (i % 63);
}

static inline unsigned long buffer_offset(void *buf)
{
	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}

static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
	u8 *buf = urb->transfer_buffer;
	u8 *guard = buf - buffer_offset(buf);
	unsigned i;

	for (i = 0; guard < buf; i++, guard++) {
		if (*guard != GUARD_BYTE) {
			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
				i, *guard, GUARD_BYTE);
			return -EINVAL;
		}
	}
	return 0;
}

static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
	unsigned	i;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->actual_length;
	int		ret = check_guard_bytes(tdev, urb);

	for (i = 0; i < len; i++, buf++) {

		/* all-zeroes has no synchronization issues */

		/* mod63 stays in sync with short-terminated transfers,
		 * or otherwise when host and gadget agree on how large
		 * each usb transfer request should be.  resync is done
		 * with set_interface or set_config.
		 */

		/* always fail unsupported patterns */

		if (*buf == expected)
			continue;
		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
		return -EINVAL;
	}
	return ret;
}

static void simple_free_urb(struct urb *urb)
{
	unsigned long offset = buffer_offset(urb->transfer_buffer);

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		usb_free_coherent(urb->dev,
			urb->transfer_buffer_length + offset,
			urb->transfer_buffer - offset,
			urb->transfer_dma - offset);
	else
		kfree(urb->transfer_buffer - offset);
	usb_free_urb(urb);
}

static int simple_io(
	struct usbtest_dev	*tdev,
	struct urb		*urb,
	int			iterations,
	int			vary,
	int			expected,
	const char		*label
)
{
	struct usb_device	*udev = urb->dev;
	int			max = urb->transfer_buffer_length;
	struct completion	completion;

	urb->context = &completion;
	while (retval == 0 && iterations-- > 0) {
		init_completion(&completion);
		if (usb_pipeout(urb->pipe)) {
			simple_fill_buf(urb);
			urb->transfer_flags |= URB_ZERO_PACKET;
		}
		retval = usb_submit_urb(urb, GFP_KERNEL);

		/* NOTE:  no timeouts; can't be broken out of by interrupt */
		wait_for_completion(&completion);
		retval = urb->status;
		if (retval == 0 && usb_pipein(urb->pipe))
			retval = simple_check_buf(tdev, urb);

		int len = urb->transfer_buffer_length;

		len = (vary < max) ? vary : max;
		urb->transfer_buffer_length = len;

		/* FIXME if endpoint halted, clear halt (and log) */
	}
	urb->transfer_buffer_length = max;

	if (expected != retval)
		ERROR(tdev,
			"%s failed, iterations left %d, status %d (not %d)\n",
			label, iterations, retval, expected);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We use scatterlist primitives to test queued I/O.
 * Yes, this also tests the scatterlist primitives.
 */

static void free_sglist(struct scatterlist *sg, int nents)
{
	for (i = 0; i < nents; i++) {
		if (!sg_page(&sg[i]))
			continue;
		kfree(sg_virt(&sg[i]));
	}
	kfree(sg);
}

static struct scatterlist *
alloc_sglist(int nents, int max, int vary)
{
	struct scatterlist	*sg;

	sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
	sg_init_table(sg, nents);

	for (i = 0; i < nents; i++) {

		buf = kzalloc(size, GFP_KERNEL);

		/* kmalloc pages are always physically contiguous! */
		sg_set_buf(&sg[i], buf, size);

		for (j = 0; j < size; j++)
			*buf++ = (u8) (j % 63);

		size = (vary < max) ? vary : max;
	}
	return sg;
}

static int perform_sglist(
	struct usbtest_dev	*tdev,
	unsigned		iterations,
	int			pipe,
	struct usb_sg_request	*req,
	struct scatterlist	*sg,
	int			nents
)
{
	struct usb_device	*udev = testdev_to_usbdev(tdev);

	while (retval == 0 && iterations-- > 0) {
		retval = usb_sg_init(req, udev, pipe,
				(udev->speed == USB_SPEED_HIGH)
					? (INTERRUPT_RATE << 3)
					: INTERRUPT_RATE,
				sg, nents, 0, GFP_KERNEL);

		retval = req->status;

		/* FIXME check resulting data pattern */

		/* FIXME if endpoint halted, clear halt (and log) */
	}

	/* FIXME for unlink or fault handling tests, don't report
	 * failure if retval is as we expected ...
	 */
	if (retval)
		ERROR(tdev, "perform_sglist failed, "
				"iterations left %d, status %d\n",
				iterations, retval);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* unqueued control message testing
 *
 * there's a nice set of device functional requirements in chapter 9 of the
 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
 * special test firmware.
 *
 * we know the device is configured (or suspended) by the time it's visible
 * through usbfs.  we can't change that, so we won't test enumeration (which
 * worked 'well enough' to get here, this time), power management (ditto),
 * or remote wakeup (which needs human interaction).
 */

static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");

static int get_altsetting(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			retval;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
			0, iface->altsetting[0].desc.bInterfaceNumber,
			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
	switch (retval) {
	case 1:
		return dev->buf[0];
	case 0:
		retval = -ERANGE;
		/* FALLTHROUGH */
	default:
		return retval;
	}
}

static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev;

	if (alternate < 0 || alternate >= 256)
		return -EINVAL;

	udev = interface_to_usbdev(iface);
	return usb_set_interface(udev,
			iface->altsetting[0].desc.bInterfaceNumber,
			alternate);
}

static int is_good_config(struct usbtest_dev *tdev, int len)
{
	struct usb_config_descriptor	*config;

	if (len < sizeof *config)
		return 0;
	config = (struct usb_config_descriptor *) tdev->buf;

	switch (config->bDescriptorType) {
	case USB_DT_CONFIG:
	case USB_DT_OTHER_SPEED_CONFIG:
		if (config->bLength != 9) {
			ERROR(tdev, "bogus config descriptor length\n");
			return 0;
		}
		/* this bit 'must be 1' but often isn't */
		if (!realworld && !(config->bmAttributes & 0x80)) {
			ERROR(tdev, "high bit of config attributes not set\n");
			return 0;
		}
		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
			ERROR(tdev, "reserved config bits set\n");
			return 0;
		}
	}

	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
		return 1;
	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
		return 1;
	ERROR(tdev, "bogus config descriptor read size\n");
	return 0;
}

/* sanity test for standard requests working with usb_control_msg() and some
 * of the utility functions which use it.
 *
 * this doesn't test how endpoint halts behave or data toggles get set, since
 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
 * halt or toggle).  toggle testing is impractical without support from hcds.
 *
 * this avoids failing devices linux would normally work with, by not testing
 * config/altsetting operations for devices that only support their defaults.
 * such devices rarely support those needless operations.
 *
 * NOTE that since this is a sanity test, it's not examining boundary cases
 * to see if usbcore, hcd, and device all behave right.  such testing would
 * involve varied read sizes and other operation sequences.
 */
static int ch9_postconfig(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);

	/* [9.2.3] if there's more than one altsetting, we need to be able to
	 * set and get each one.  mostly trusts the descriptors from usbcore.
	 */
	for (i = 0; i < iface->num_altsetting; i++) {

		/* 9.2.3 constrains the range here */
		alt = iface->altsetting[i].desc.bAlternateSetting;
		if (alt < 0 || alt >= iface->num_altsetting) {
			dev_err(&iface->dev,
					"invalid alt [%d].bAltSetting = %d\n",
					i, alt);
			return -EDOM;
		}

		/* [real world] get/set unimplemented if there's only one */
		if (realworld && iface->num_altsetting == 1)
			continue;

		/* [9.4.10] set_interface */
		retval = set_altsetting(dev, alt);
		if (retval) {
			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
					alt, retval);
			return retval;
		}

		/* [9.4.4] get_interface always works */
		retval = get_altsetting(dev);
		if (retval != alt) {
			dev_err(&iface->dev, "get alt should be %d, was %d\n",
					alt, retval);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* [real world] get_config unimplemented if there's only one */
	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
		int	expected = udev->actconfig->desc.bConfigurationValue;

		/* [9.4.2] get_configuration always works
		 * ... although some cheap devices (like one TI Hub I've got)
		 * won't return config descriptors except before set_config.
		 */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				USB_REQ_GET_CONFIGURATION,
				USB_DIR_IN | USB_RECIP_DEVICE,
				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
		if (retval != 1 || dev->buf[0] != expected) {
			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
					retval, dev->buf[0], expected);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* there's always [9.4.3] a device descriptor [9.6.1] */
	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
			dev->buf, sizeof udev->descriptor);
	if (retval != sizeof udev->descriptor) {
		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
				dev->buf, TBUF_SIZE);
		if (!is_good_config(dev, retval)) {
			dev_err(&iface->dev,
					"config [%d] descriptor --> %d\n",
					i, retval);
			return (retval < 0) ? retval : -EDOM;
		}

		/* FIXME cross-checking udev->config[i] to make sure usbcore
		 * parsed it right (etc) would be good testing paranoia
		 */
	}

	/* and sometimes [9.2.6.6] speed dependent descriptors */
	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
		struct usb_qualifier_descriptor *d = NULL;

		/* device qualifier [9.6.2] */
		retval = usb_get_descriptor(udev,
				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
				sizeof(struct usb_qualifier_descriptor));
		if (retval == -EPIPE) {
			if (udev->speed == USB_SPEED_HIGH) {
				dev_err(&iface->dev,
						"hs dev qualifier --> %d\n",
						retval);
				return (retval < 0) ? retval : -EDOM;
			}
			/* usb2.0 but not high-speed capable; fine */
		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
			return (retval < 0) ? retval : -EDOM;
		} else
			d = (struct usb_qualifier_descriptor *) dev->buf;

		/* might not have [9.6.2] any other-speed configs [9.6.4] */
		if (d) {
			unsigned max = d->bNumConfigurations;

			for (i = 0; i < max; i++) {
				retval = usb_get_descriptor(udev,
					USB_DT_OTHER_SPEED_CONFIG, i,
					dev->buf, TBUF_SIZE);
				if (!is_good_config(dev, retval)) {
					dev_err(&iface->dev,
						"other speed config --> %d\n",
						retval);
					return (retval < 0) ? retval : -EDOM;
				}
			}
		}
	}
	/* FIXME fetch strings from at least the device descriptor */

	/* [9.4.5] get_status always works */
	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
	if (retval < 0) {
		dev_err(&iface->dev, "get dev status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/* FIXME configuration.bmAttributes says if we could try to set/clear
	 * the device's remote wakeup feature ... if we can, test that here
	 */

	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
	if (retval < 0) {
		dev_err(&iface->dev, "get interface status --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}
	/* FIXME get status for each endpoint in the interface */

	return 0;
}

/*-------------------------------------------------------------------------*/

/* use ch9 requests to test whether:
 *   (a) queues work for control, keeping N subtests queued and
 *       active (auto-resubmit) for M loops through the queue.
 *   (b) protocol stalls (control-only) will autorecover.
 *       it's not like bulk/intr; no halt clearing.
 *   (c) short control reads are reported and handled.
 *   (d) queues are always processed in-order
 */

struct ctrl_ctx {
	spinlock_t		lock;
	struct usbtest_dev	*dev;
	struct completion	complete;
	unsigned		count;
	unsigned		pending;
	int			status;
	struct urb		**urb;
	struct usbtest_param	*param;
	int			last;
};

#define NUM_SUBCASES	15	/* how many test subcases here? */

struct subcase {
	struct usb_ctrlrequest	setup;
	int			number;
	int			expected;
};

static void ctrl_complete(struct urb *urb)
{
	struct ctrl_ctx		*ctx = urb->context;
	struct usb_ctrlrequest	*reqp;
	struct subcase		*subcase;
	int			status = urb->status;

	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
	subcase = container_of(reqp, struct subcase, setup);

	spin_lock(&ctx->lock);
	ctx->count--;
	ctx->pending--;

	/* queue must transfer and complete in fifo order, unless
	 * usb_unlink_urb() is used to unlink something not at the
	 * physical queue head (not tested).
	 */
	if (subcase->number > 0) {
		if ((subcase->number - ctx->last) != 1) {
			ERROR(ctx->dev,
				"subcase %d completed out of order, last %d\n",
				subcase->number, ctx->last);
			status = -EDOM;
			ctx->last = subcase->number;
			goto error;
		}
	}
	ctx->last = subcase->number;

	/* succeed or fault in only one way? */
	if (status == subcase->expected)
		status = 0;

	/* async unlink for cleanup? */
	else if (status != -ECONNRESET) {

		/* some faults are allowed, not required */
		if (subcase->expected > 0 && (
			  ((status == -subcase->expected	/* happened */
			   || status == 0))))			/* didn't */
			status = 0;
		/* sometimes more than one fault is allowed */
		else if (subcase->number == 12 && status == -EPIPE)
			status = 0;
		else
			ERROR(ctx->dev, "subtest %d error, status %d\n",
					subcase->number, status);
	}

	/* unexpected status codes mean errors; ideally, in hardware */
	if (status != 0) {
error:
		if (ctx->status == 0) {
			int		i;

			ctx->status = status;
			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
					"%d left, subcase %d, len %d/%d\n",
					reqp->bRequestType, reqp->bRequest,
					status, ctx->count, subcase->number,
					urb->actual_length,
					urb->transfer_buffer_length);

			/* FIXME this "unlink everything" exit route should
			 * be a separate test case.
			 */

			/* unlink whatever's still pending */
			for (i = 1; i < ctx->param->sglen; i++) {
				struct urb *u = ctx->urb[
							(i + subcase->number)
							% ctx->param->sglen];

				if (u == urb || !u->dev)
					continue;
				spin_unlock(&ctx->lock);
				status = usb_unlink_urb(u);
				spin_lock(&ctx->lock);
				switch (status) {
				case -EINPROGRESS:
				case -EBUSY:
				case -EIDRM:
					continue;
				default:
					ERROR(ctx->dev, "urb unlink --> %d\n",
							status);
				}
			}
			status = ctx->status;
		}
	}

	/* resubmit if we need to, else mark this as done */
	if ((status == 0) && (ctx->pending < ctx->count)) {
		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status != 0) {
			ERROR(ctx->dev,
				"can't resubmit ctrl %02x.%02x, err %d\n",
				reqp->bRequestType, reqp->bRequest, status);
			urb->dev = NULL;
		} else
			ctx->pending++;
	} else
		urb->dev = NULL;

	/* signal completion when nothing's queued */
	if (ctx->pending == 0)
		complete(&ctx->complete);
	spin_unlock(&ctx->lock);
}

static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
{
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct urb		**urb;
	struct ctrl_ctx		context;
	int			i;

	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
		return -EOPNOTSUPP;

	spin_lock_init(&context.lock);
	context.dev = dev;
	init_completion(&context.complete);
	context.count = param->sglen * param->iterations;
	context.pending = 0;
	context.status = -ENOMEM;
	context.param = param;
	context.last = -1;

	/* allocate and init the urbs we'll queue.
	 * as with bulk/intr sglists, sglen is the queue depth; it also
	 * controls which subtests run (more tests than sglen) or rerun.
	 */
	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);

	for (i = 0; i < param->sglen; i++) {
		int			pipe = usb_rcvctrlpipe(udev, 0);
		unsigned		len;
		struct urb		*u;
		struct usb_ctrlrequest	req;
		struct subcase		*reqp;

		/* sign of this variable means:
		 *  -: tested code must return this (negative) error code
		 *  +: tested code may return this (negative too) error code
		 */
		int			expected = 0;

		/* requests here are mostly expected to succeed on any
		 * device, but some are chosen to trigger protocol stalls
		 * or short reads.
		 */
		memset(&req, 0, sizeof req);
		req.bRequest = USB_REQ_GET_DESCRIPTOR;
		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;

		switch (i % NUM_SUBCASES) {
		case 0:		/* get device descriptor */
			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
			len = sizeof(struct usb_device_descriptor);
			break;
		case 1:		/* get first config descriptor (only) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			break;
		case 2:		/* get altsetting (OFTEN STALLS) */
			req.bRequest = USB_REQ_GET_INTERFACE;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* index = 0 means first interface */
			break;
		case 3:		/* get interface status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			break;
		case 4:		/* get device status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
			break;
		case 5:		/* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
			len = sizeof(struct usb_qualifier_descriptor);
			if (udev->speed != USB_SPEED_HIGH)
				expected = EPIPE;
			break;
		case 6:		/* get first config descriptor, plus interface */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			len += sizeof(struct usb_interface_descriptor);
			break;
		case 7:		/* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
			len = sizeof(struct usb_interface_descriptor);
			break;
		/* NOTE: two consecutive stalls in the queue here.
		 * that tests fault recovery a bit more aggressively. */
		case 8:		/* clear endpoint halt (MAY STALL) */
			req.bRequest = USB_REQ_CLEAR_FEATURE;
			req.bRequestType = USB_RECIP_ENDPOINT;
			/* wValue 0 == ep halt */
			/* wIndex 0 == ep0 (shouldn't halt!) */
			pipe = usb_sndctrlpipe(udev, 0);
			break;
		case 9:		/* get endpoint status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
			break;
		case 10:	/* trigger short read (EREMOTEIO) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			expected = -EREMOTEIO;
			break;
		/* NOTE: two consecutive _different_ faults in the queue. */
		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
			len = sizeof(struct usb_interface_descriptor);
			break;
		/* NOTE: sometimes even a third fault in the queue! */
		case 12:	/* get string 0 descriptor (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
			/* string == 0, for language IDs */
			len = sizeof(struct usb_interface_descriptor);
			/* may succeed when > 4 languages */
			expected = EREMOTEIO;	/* or EPIPE, if no strings */
			break;
		case 13:	/* short read, resembling case 10 */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			/* last data packet "should" be DATA1, not DATA0 */
			if (udev->speed == USB_SPEED_SUPER)
				len = 1024 - 512;
			else
				len = 1024 - udev->descriptor.bMaxPacketSize0;
			expected = -EREMOTEIO;
			break;
		case 14:	/* short read; try to fill the last packet */
			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
			/* device descriptor size == 18 bytes */
			len = udev->descriptor.bMaxPacketSize0;
			if (udev->speed == USB_SPEED_SUPER)
				len = 512;
			expected = -EREMOTEIO;
			break;
		default:
			ERROR(dev, "bogus number of ctrl queue testcases!\n");
			context.status = -EINVAL;
			break;
		}
		req.wLength = cpu_to_le16(len);
		urb[i] = u = simple_alloc_urb(udev, pipe, len);

		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
		reqp->setup = req;
		reqp->number = i % NUM_SUBCASES;
		reqp->expected = expected;
		u->setup_packet = (char *) &reqp->setup;

		u->context = &context;
		u->complete = ctrl_complete;
	}

	/* queue the urbs */
	context.urb = urb;
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
		if (context.status != 0) {
			ERROR(dev, "can't submit urb[%d], status %d\n",
					i, context.status);
			context.count = context.pending;
			break;
		}
		context.pending++;
	}
	spin_unlock_irq(&context.lock);

	/* FIXME set timer and time out; provide a disconnect hook */

	/* wait for the last one to complete */
	if (context.pending > 0)
		wait_for_completion(&context.complete);

	for (i = 0; i < param->sglen; i++) {
		kfree(urb[i]->setup_packet);
		simple_free_urb(urb[i]);
	}
	kfree(urb);
	return context.status;
}

/*-------------------------------------------------------------------------*/

static void unlink1_callback(struct urb *urb)
{
	int	status = urb->status;

	/* we "know" -EPIPE (stall) never happens */
	if (!status)
		status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		urb->status = status;
		complete(urb->context);
	}
}

static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
	struct urb		*urb;
	struct completion	completion;
	int			retval = 0;

	init_completion(&completion);
	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
	urb->context = &completion;
	urb->complete = unlink1_callback;

	/* keep the endpoint busy.  there are lots of hc/hcd-internal
	 * states, and testing should get to all of them over time.
	 *
	 * FIXME want additional tests for when endpoint is STALLing
	 * due to errors, or is just NAKing requests.
	 */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval != 0) {
		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
		return retval;
	}

	/* unlinking that should always work.  variable delay tests more
	 * hcd states and code paths, even with little other system load.
	 */
	msleep(jiffies % (2 * INTERRUPT_RATE));
	if (async) {
		while (!completion_done(&completion)) {
			retval = usb_unlink_urb(urb);

			switch (retval) {
			case -EBUSY:
			case -EIDRM:
				/* we can't unlink urbs while they're completing
				 * or if they've completed, and we haven't
				 * resubmitted. "normal" drivers would prevent
				 * resubmission, but since we're testing unlink
				 * paths, we can't.
				 */
				ERROR(dev, "unlink retry\n");
				continue;
			case 0:
			case -EINPROGRESS:
				break;
			default:
				dev_err(&dev->intf->dev,
					"unlink fail %d\n", retval);
				return retval;
			}
			break;
		}
	} else
		usb_kill_urb(urb);

	wait_for_completion(&completion);
	retval = urb->status;
	simple_free_urb(urb);

	if (async)
		return (retval == -ECONNRESET) ? 0 : retval - 1000;
	return (retval == -ENOENT || retval == -EPERM) ?
			0 : retval - 2000;
}

static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
{
	int retval = 0;

	/* test sync and async paths */
	retval = unlink1(dev, pipe, len, 1);
	if (!retval)
		retval = unlink1(dev, pipe, len, 0);
	return retval;
}

/*-------------------------------------------------------------------------*/
struct queued_ctx {
	struct completion	complete;
	atomic_t		pending;
	unsigned		num;
	int			status;
	struct urb		**urbs;
};

static void unlink_queued_callback(struct urb *urb)
{
	int			status = urb->status;
	struct queued_ctx	*ctx = urb->context;

	if (ctx->status)
		goto done;
	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
		if (status == -ECONNRESET)
			goto done;
		/* What error should we report if the URB completed normally? */
	}
	if (status != 0)
		ctx->status = status;

 done:
	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->complete);
}

static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
		unsigned size)
{
	struct queued_ctx	ctx;
	struct usb_device	*udev = testdev_to_usbdev(dev);
	void			*buf;
	dma_addr_t		buf_dma;
	int			i;
	int			retval = -ENOMEM;

	init_completion(&ctx.complete);
	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
	ctx.num = num;
	ctx.status = 0;

	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
	if (!buf)
		return retval;
	memset(buf, 0, size);

	/* Allocate and init the urbs we'll queue */
	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
	for (i = 0; i < num; i++) {
		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
				unlink_queued_callback, &ctx);
		ctx.urbs[i]->transfer_dma = buf_dma;
		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
	}

	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
	for (i = 0; i < num; i++) {
		atomic_inc(&ctx.pending);
		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
		if (retval != 0) {
			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
					i, retval);
			atomic_dec(&ctx.pending);
			ctx.status = retval;
			break;
		}
	}
	if (retval == 0) {
		usb_unlink_urb(ctx.urbs[num - 4]);
		usb_unlink_urb(ctx.urbs[num - 2]);
	} else {
		while (--i >= 0)
			usb_unlink_urb(ctx.urbs[i]);
	}

	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
		complete(&ctx.complete);
	wait_for_completion(&ctx.complete);
	retval = ctx.status;

	for (i = 0; i < num; i++)
		usb_free_urb(ctx.urbs[i]);
	kfree(ctx.urbs);
	usb_free_coherent(udev, size, buf, buf_dma);
	return retval;
}

/*-------------------------------------------------------------------------*/

static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* shouldn't look or act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 0) {
		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
	return retval;
}

static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* should look and act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
				ep, retval);
		return retval;
	}
	le16_to_cpus(&status);
	if (status != 1) {
		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
	if (retval != -EPIPE)
		return -EINVAL;
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
	if (retval != -EPIPE)
		return -EINVAL;
	return 0;
}

static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;

	/* shouldn't look or act halted now */
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* set halt (protocol test only), verify it worked */
	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
			USB_ENDPOINT_HALT, ep,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* clear halt (tests API + protocol), verify it worked */
	retval = usb_clear_halt(urb->dev, urb->pipe);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* NOTE:  could also verify SET_INTERFACE clear halts ... */

	return 0;
}

static int halt_simple(struct usbtest_dev *dev)
{
	int			ep;
	int			retval = 0;
	struct urb		*urb;
	struct usb_device	*udev = testdev_to_usbdev(dev);

	if (udev->speed == USB_SPEED_SUPER)
		urb = simple_alloc_urb(udev, 0, 1024);
	else
		urb = simple_alloc_urb(udev, 0, 512);

	ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
	urb->pipe = dev->in_pipe;
	retval = test_halt(dev, ep, urb);

	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	}

	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* Control OUT tests use the vendor control requests from Intel's
 * USB 2.0 compliance test device:  write a buffer, read it back.
 *
 * Intel's spec only _requires_ that it work for one packet, which
 * is pretty weak.   Some HCDs place limits here; most devices will
 * need to be able to handle more than one OUT data packet.  We'll
 * try whatever we're told to try.
 */
static int ctrl_out(struct usbtest_dev *dev,
		unsigned count, unsigned length, unsigned vary, unsigned offset)
{
	unsigned		i, j, len;
	int			retval;
	u8			*buf;
	struct usb_device	*udev;

	if (length < 1 || length > 0xffff || vary >= length)
		return -EINVAL;

	buf = kmalloc(length + offset, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf += offset;
	udev = testdev_to_usbdev(dev);
	len = length;

	/* NOTE:  hardware might well act differently if we pushed it
	 * with lots back-to-back queued requests.
	 */
	for (i = 0; i < count; i++) {
		/* write patterned data */
		for (j = 0; j < len; j++)
			buf[j] = (u8) (i + j);
		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
		if (retval != len) {
			ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
					retval, len);
			break;
		}

		/* read it back -- assuming nothing intervened!!  */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
		if (retval != len) {
			ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
					retval, len);
			break;
		}

		/* fail if we can't verify */
		for (j = 0; j < len; j++) {
			if (buf[j] != (u8) (i + j)) {
				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
						j, buf[j], (u8) i + j);
				break;
			}
		}

		/* [real world] the "zero bytes IN" case isn't really used.
		 * hardware can easily trip up in this weird case, since its
		 * status stage is IN, not OUT like other ep0in transfers.
		 */
		if (len > length)
			len = realworld ? 1 : 0;
	}

	if (retval < 0)
		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",

	kfree(buf - offset);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* ISO tests ... mimics common usage
 *  - buffer length is split into N packets (mostly maxpacket sized)
 *  - multi-buffers according to sglen
 */

struct iso_context {
	unsigned		count;
	unsigned		pending;
	spinlock_t		lock;
	struct completion	done;
	int			submit_error;
	unsigned long		errors;
	unsigned long		packet_count;
	struct usbtest_dev	*dev;
};

static void iso_callback(struct urb *urb)
{
	struct iso_context	*ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += urb->number_of_packets;
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:			/* disconnected */
		case -ESHUTDOWN:		/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"iso test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}

static struct urb *iso_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	struct usb_endpoint_descriptor	*desc,
	long			bytes,
	unsigned		offset
)
{
	struct urb		*urb;
	unsigned		i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;

	/* Decode wMaxPacketSize: bits 10..0 give the packet size; for
	 * high-speed high-bandwidth endpoints, bits 12..11 give the extra
	 * transactions per microframe (USB 2.0, section 9.6.6).  For example,
	 * 0x1400 means 3 x 1024 = 3072 bytes per microframe.
	 * (This note is an editorial addition, not original source text.)
	 */
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}

	memset(urb->transfer_buffer, GUARD_BYTE, offset);
	urb->transfer_buffer += offset;
	urb->transfer_dma += offset;

	/* For inbound transfers use guard byte so that test fails if
		data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}

static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context	context;
	struct usb_device	*udev;
	unsigned		i;
	unsigned long		packets = 0;
	int			status = 0;
	struct urb		*urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof context);
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof urbs);
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);

	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.pending++;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	/*
	 * Isochronous transfers are expected to fail sometimes.  As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}

static int test_unaligned_bulk(
	struct usbtest_dev	*tdev,
	int			pipe,
	unsigned		length,
	int			iterations,
	unsigned		transfer_flags,
	const char		*label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test.  So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests.  Use them along with
 * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
 * video capture, and so on.  Run different tests at different times, in
 * different sequences.  Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion.  But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths.  Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect().  To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */

static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct usbtest_param	*param = buf;
	int			retval = -EOPNOTSUPP;
	struct urb		*urb;
	struct scatterlist	*sg;
	struct usb_sg_request	req;
	struct timeval		start;
	unsigned		i;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (code != USBTEST_REQUEST)
		return -EOPNOTSUPP;

	if (param->iterations <= 0)
		return -EINVAL;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints.  some tests change
	 * altsettings; force a default so most tests don't need to check.
	 */
	if (dev->info->alt >= 0) {
		int	res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests!  cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0:  NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1:  write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);

		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;
	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2:  read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);

		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;
	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3:  write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);

		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
				0, "test3");
		simple_free_urb(urb);
		break;
	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4:  read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);

		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
				0, "test4");
		simple_free_urb(urb);
		break;

	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5:  write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);

		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6:  read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);

		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);

		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);

		/* FIRMWARE:  bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9:  ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10:  queue %d control calls, %d times\n",
				param->sglen, param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
				"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
				"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);

		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;

	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15:  write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE:  iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16:  read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE:  iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;

	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17:  write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18:  read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19:  write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20:  read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);
		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;

	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;

	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 17: unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;
	}
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}

/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif

static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device	*udev;
	struct usbtest_dev	*dev;
	struct usbtest_info	*info;
	char			*rtest, *wtest;
	char			*irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of difference that are
	 * visible with high speed interrupts:  bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				kfree(dev->buf);
				kfree(dev);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}

static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}

static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}

/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips.  There's a default vendor ID which can be overridden
 * by (very) small config EEPROMS, but otherwise all these devices act
 * identically until firmware is loaded:  only EP0 works.  It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior.  For now, we expect that kind of firmware.
 */

/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name		= "EZ-USB device",
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name		= "FX2 device",
};

/* ezusb family device with dedicated usb test firmware,
 */
static struct usbtest_info fw_info = {
	.name		= "usb test device",
	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
	.iso		= 1,		/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin.  different versions of this use
 * different hardware with the same vendor/product codes.
 * host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name		= "Linux gadget zero",
};

static struct usbtest_info um_info = {
	.name		= "Linux user mode test driver",
};

static struct usbtest_info um2_info = {
	.name		= "Linux user mode ISO test driver",
};

#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name		= "iBOT2 webcam",
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name		= "Generic USB device",
};
#endif

static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver usbtest_driver = {
	.name =		"usbtest",
	.id_table =	id_table,
	.probe =	usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect =	usbtest_disconnect,
	.suspend =	usbtest_suspend,
	.resume =	usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");