/*
 * Host Controller Driver for the Elan Digital Systems U132 adapter
 *
 * Copyright(C) 2006 Elan Digital Systems Limited
 * http://www.elandigitalsystems.com
 *
 * Author and Maintainer - Tony Olech - Elan Digital Systems
 * tony.olech@elandigitalsystems.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver was written by Tony Olech (tony.olech@elandigitalsystems.com)
 * based on various USB host drivers in the 2.6.15 linux kernel
 * with constant reference to the 3rd Edition of Linux Device Drivers
 * published by O'Reilly.
 *
 * The U132 adapter is a USB to CardBus adapter specifically designed
 * for PC cards that contain an OHCI host controller. A typical PC card
 * is the Orange Mobile 3G Option GlobeTrotter Fusion card.
 *
 * The U132 adapter will *NOT* work with PC cards that do not contain
 * an OHCI controller. A simple way to test whether a PC card has an
 * OHCI controller as an interface is to insert the PC card directly
 * into a laptop (or desktop) with a CardBus slot: if "lspci" shows
 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller,
 * then there is a good chance that the U132 adapter will support the
 * PC card. (You also need the specific client driver for the PC card.)
 *
 * Please inform the Author and Maintainer about any PC cards that
 * contain an OHCI Host Controller and work when directly connected to
 * an embedded CardBus slot but do not work when they are connected
 * via an ELAN U132 adapter.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <asm/system.h>
#include <asm/byteorder.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
 * If you're going to try stuff like this, you need to split
 * out shareable stuff (register declarations?) into its own
 * file, maybe name <linux/usb/ohci.h>
 */
#include "ohci.h"
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
        OHCI_INTR_WDH)
MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
MODULE_DESCRIPTION("U132 USB Host Controller Driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent */
static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware,
        "true to distrust firmware power/overcurrent setup");
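/*
 * Illustrative usage (not from the original source): both module
 * parameters can be given at load time, e.g.
 *
 *      modprobe u132_hcd distrust_firmware=0 testing=1
 *
 * "testing" is exposed read-only in sysfs (mode 0444), while
 * "distrust_firmware" (mode 0) is settable only at load time.
 */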
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
 * u132_module_lock exists to protect access to global variables
 */
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
static struct list_head u132_static_list;
/*
 * end of the global variables protected by u132_module_lock
 */
static struct workqueue_struct *workqueue;
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
static const char *cc_to_text[16] = {
        struct usb_device *usb_device;
        u8 endp_number_in[16];
        u8 endp_number_out[16];
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
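/*
 * A sketch (not part of the original source) of how these constants are
 * meant to be used: each endpoint keeps a small power-of-two ring of URB
 * pointers indexed by free-running counters that are masked on every
 * access, with overflow spilling into the urb_more list.  Assuming the
 * queue_last/queue_next counters used elsewhere in this file:
 *
 *      endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;   (push)
 *      urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next++];   (pop)
 *
 * Because ENDP_QUEUE_SIZE is a power of two, masking keeps both indices
 * wrapped without a modulo.
 */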
        struct list_head urb_more;
        struct list_head endp_ring;
        struct u132_ring *ring;
        unsigned toggle_bits:2;
        unsigned dequeueing:1;
        unsigned edset_flush:1;
        unsigned spare_bits:14;
        unsigned long jiffies;
        struct usb_host_endpoint *hep;
        struct u132_spin queue_lock;
        struct urb *urb_list[ENDP_QUEUE_SIZE];
        struct list_head urb_more;
        struct delayed_work scheduler;
        struct u132_endp *curr_endp;
        struct delayed_work scheduler;
        struct list_head u132_list;
        struct mutex sw_lock;
        struct mutex scheduler_lock;
        struct u132_platform_data *board;
        struct platform_device *platform_dev;
        struct u132_ring ring[MAX_U132_RINGS];
        u32 hc_roothub_status;
        u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
        unsigned long next_statechange;
        struct delayed_work monitor;
        struct u132_addr addr[MAX_U132_ADDRS];
        struct u132_udev udev[MAX_U132_UDEVS];
        struct u132_port port[MAX_U132_PORTS];
        struct u132_endp *endp[MAX_U132_ENDPS];
/*
 * these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
        offsetof(struct ohci_regs, member), 0, data);
#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
        offsetof(struct ohci_regs, member), 0, data);
#define u132_read_pcimem(u132, member, data) \
        usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
        ohci_regs, member), 0, data);
#define u132_write_pcimem(u132, member, data) \
        usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
        ohci_regs, member), 0, data);
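/*
 * Illustrative call (a sketch mirroring how the driver uses the macro
 * later in this file): the macros above turn a named ohci_regs member
 * into a register offset for the FTDI transport, so reading the OHCI
 * control register looks like
 *
 *      u32 control;
 *      int retval;
 *      retval = u132_read_pcimem(u132, control, &control);
 *      if (retval)
 *              return retval;
 *
 * Note the trailing semicolon baked into each macro body.
 */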
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
        u8 udev_number = udev->udev_number;
        return container_of(udev, struct u132, udev[udev_number]);
}
static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
        return (struct u132 *)(hcd->hcd_priv);
}

static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
        return container_of((void *)u132, struct usb_hcd, hcd_priv);
}

static inline void u132_disable(struct u132 *u132)
{
        u132_to_hcd(u132)->state = HC_STATE_HALT;
}
#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
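/*
 * These helpers rely on container_of(): given a pointer to an embedded
 * member (here the struct kref), recover the enclosing object.  A minimal
 * sketch of the pattern (illustrative only):
 *
 *      struct u132_endp *endp = kref_to_u132_endp(kref);
 *      (expands to container_of(kref, struct u132_endp, kref))
 *
 * which is why the release functions handed to kref_put() need nothing
 * more than the kref pointer itself.
 */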
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
        USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
        USB_PORT_STAT_C_RESET) << 16)
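/*
 * PORT_C_MASK collects the change bits of a root-hub port status word:
 * the port status request reports status in the low 16 bits and the
 * change flags in the high 16 bits, so the USB_PORT_STAT_C_* values are
 * shifted up by 16 to line up with that layout.  A sketch of the
 * intended test (illustrative, assuming the cached hc_roothub_portstatus
 * array declared above):
 *
 *      if (u132->hc_roothub_portstatus[i] & PORT_C_MASK)
 *              ;       (port i has a connect/enable/suspend/overcurrent/
 *                       reset change waiting to be reported)
 */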
static void u132_hcd_delete(struct kref *kref)
{
        struct u132 *u132 = kref_to_u132(kref);
        struct platform_device *pdev = u132->platform_dev;
        struct usb_hcd *hcd = u132_to_hcd(u132);
        mutex_lock(&u132_module_lock);
        list_del_init(&u132->u132_list);
        mutex_unlock(&u132_module_lock);
        dev_warn(&u132->platform_dev->dev,
                "FREEING the hcd=%p and thus the u132=%p going=%d pdev=%p\n",
                hcd, u132, u132->going, pdev);
}
static inline void u132_u132_put_kref(struct u132 *u132)
{
        kref_put(&u132->kref, u132_hcd_delete);
}

static inline void u132_u132_init_kref(struct u132 *u132)
{
        kref_init(&u132->kref);
}
static void u132_udev_delete(struct kref *kref)
{
        struct u132_udev *udev = kref_to_u132_udev(kref);
        udev->udev_number = 0;
        udev->usb_device = NULL;
        udev->enumeration = 0;
}
static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
{
        kref_put(&udev->kref, u132_udev_delete);
}

static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
{
        kref_get(&udev->kref);
}

static inline void u132_udev_init_kref(struct u132 *u132,
        struct u132_udev *udev)
{
        kref_init(&udev->kref);
}
static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
{
        kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
        unsigned int delta)
{
        if (delta > 0) {
                if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                        return;
        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                return;
        kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
        unsigned int delta)
{
        kref_get(&u132->kref);
        u132_ring_requeue_work(u132, ring, delta);
}
static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
        if (cancel_delayed_work(&ring->scheduler))
                kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_endp_delete(struct kref *kref)
{
        struct u132_endp *endp = kref_to_u132_endp(kref);
        struct u132 *u132 = endp->u132;
        u8 usb_addr = endp->usb_addr;
        u8 usb_endp = endp->usb_endp;
        u8 address = u132->addr[usb_addr].address;
        struct u132_udev *udev = &u132->udev[address];
        u8 endp_number = endp->endp_number;
        struct usb_host_endpoint *hep = endp->hep;
        struct u132_ring *ring = endp->ring;
        struct list_head *head = &endp->endp_ring;
        if (endp == ring->curr_endp) {
                if (list_empty(head)) {
                        ring->curr_endp = NULL;
                } else {
                        struct u132_endp *next_endp = list_entry(head->next,
                                struct u132_endp, endp_ring);
                        ring->curr_endp = next_endp;
                }
        }
        list_del(head);
        if (endp->input) {
                udev->endp_number_in[usb_endp] = 0;
                u132_udev_put_kref(u132, udev);
        }
        if (endp->output) {
                udev->endp_number_out[usb_endp] = 0;
                u132_udev_put_kref(u132, udev);
        }
        u132->endp[endp_number - 1] = NULL;
        hep->hcpriv = NULL;
        kfree(endp);
        u132_u132_put_kref(u132);
}
static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
{
        kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
{
        kref_get(&endp->kref);
}

static inline void u132_endp_init_kref(struct u132 *u132,
        struct u132_endp *endp)
{
        kref_init(&endp->kref);
        kref_get(&u132->kref);
}
static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
        unsigned int delta)
{
        if (queue_delayed_work(workqueue, &endp->scheduler, delta))
                kref_get(&endp->kref);
}

static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
{
        if (cancel_delayed_work(&endp->scheduler))
                kref_put(&endp->kref, u132_endp_delete);
}
static inline void u132_monitor_put_kref(struct u132 *u132)
{
        kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
        if (queue_delayed_work(workqueue, &u132->monitor, delta))
                kref_get(&u132->kref);
}

static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
        if (!queue_delayed_work(workqueue, &u132->monitor, delta))
                kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_cancel_work(struct u132 *u132)
{
        if (cancel_delayed_work(&u132->monitor))
                kref_put(&u132->kref, u132_hcd_delete);
}
426 static int read_roothub_info(struct u132
*u132
)
430 retval
= u132_read_pcimem(u132
, revision
, &revision
);
432 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device co"
435 } else if ((revision
& 0xFF) == 0x10) {
436 } else if ((revision
& 0xFF) == 0x11) {
438 dev_err(&u132
->platform_dev
->dev
, "device revision is not valid"
439 " %08X\n", revision
);
442 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
444 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device co"
448 retval
= u132_read_pcimem(u132
, roothub
.status
,
449 &u132
->hc_roothub_status
);
451 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device re"
452 "g roothub.status\n", retval
);
455 retval
= u132_read_pcimem(u132
, roothub
.a
, &u132
->hc_roothub_a
);
457 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device re"
458 "g roothub.a\n", retval
);
462 int I
= u132
->num_ports
;
465 retval
= u132_read_pcimem(u132
, roothub
.portstatus
[i
],
466 &u132
->hc_roothub_portstatus
[i
]);
468 dev_err(&u132
->platform_dev
->dev
, "error %d acc"
469 "essing device roothub.portstatus[%d]\n"
479 static void u132_hcd_monitor_work(struct work_struct
*work
)
481 struct u132
*u132
= container_of(work
, struct u132
, monitor
.work
);
482 if (u132
->going
> 1) {
483 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
485 u132_monitor_put_kref(u132
);
487 } else if (u132
->going
> 0) {
488 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
489 u132_monitor_put_kref(u132
);
493 mutex_lock(&u132
->sw_lock
);
494 retval
= read_roothub_info(u132
);
496 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
499 mutex_unlock(&u132
->sw_lock
);
501 ftdi_elan_gone_away(u132
->platform_dev
);
502 u132_monitor_put_kref(u132
);
505 u132_monitor_requeue_work(u132
, 500);
506 mutex_unlock(&u132
->sw_lock
);
512 static void u132_hcd_giveback_urb(struct u132
*u132
, struct u132_endp
*endp
,
513 struct urb
*urb
, int status
)
515 struct u132_ring
*ring
;
517 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
518 urb
->error_count
= 0;
519 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
520 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
521 endp
->queue_next
+= 1;
522 if (ENDP_QUEUE_SIZE
> --endp
->queue_size
) {
524 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
526 struct list_head
*next
= endp
->urb_more
.next
;
527 struct u132_urbq
*urbq
= list_entry(next
, struct u132_urbq
,
530 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
533 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
536 mutex_lock(&u132
->scheduler_lock
);
539 u132_ring_cancel_work(u132
, ring
);
540 u132_ring_queue_work(u132
, ring
, 0);
541 mutex_unlock(&u132
->scheduler_lock
);
542 u132_endp_put_kref(u132
, endp
);
543 usb_hcd_giveback_urb(hcd
, urb
, status
);
546 static void u132_hcd_forget_urb(struct u132
*u132
, struct u132_endp
*endp
,
547 struct urb
*urb
, int status
)
549 u132_endp_put_kref(u132
, endp
);
552 static void u132_hcd_abandon_urb(struct u132
*u132
, struct u132_endp
*endp
,
553 struct urb
*urb
, int status
)
556 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
557 urb
->error_count
= 0;
558 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
559 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
560 endp
->queue_next
+= 1;
561 if (ENDP_QUEUE_SIZE
> --endp
->queue_size
) {
563 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
565 struct list_head
*next
= endp
->urb_more
.next
;
566 struct u132_urbq
*urbq
= list_entry(next
, struct u132_urbq
,
569 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
572 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
575 usb_hcd_giveback_urb(hcd
, urb
, status
);
578 static inline int edset_input(struct u132
*u132
, struct u132_ring
*ring
,
579 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
580 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
581 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
582 int halted
, int skipped
, int actual
, int non_null
))
584 return usb_ftdi_elan_edset_input(u132
->platform_dev
, ring
->number
, endp
,
585 urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
588 static inline int edset_setup(struct u132
*u132
, struct u132_ring
*ring
,
589 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
590 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
591 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
592 int halted
, int skipped
, int actual
, int non_null
))
594 return usb_ftdi_elan_edset_setup(u132
->platform_dev
, ring
->number
, endp
,
595 urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
598 static inline int edset_single(struct u132
*u132
, struct u132_ring
*ring
,
599 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
600 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
601 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
602 int halted
, int skipped
, int actual
, int non_null
))
604 return usb_ftdi_elan_edset_single(u132
->platform_dev
, ring
->number
,
605 endp
, urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
608 static inline int edset_output(struct u132
*u132
, struct u132_ring
*ring
,
609 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
610 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
611 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
612 int halted
, int skipped
, int actual
, int non_null
))
614 return usb_ftdi_elan_edset_output(u132
->platform_dev
, ring
->number
,
615 endp
, urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
620 * must not LOCK sw_lock
623 static void u132_hcd_interrupt_recv(void *data
, struct urb
*urb
, u8
*buf
,
624 int len
, int toggle_bits
, int error_count
, int condition_code
,
625 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
627 struct u132_endp
*endp
= data
;
628 struct u132
*u132
= endp
->u132
;
629 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
630 struct u132_udev
*udev
= &u132
->udev
[address
];
631 mutex_lock(&u132
->scheduler_lock
);
632 if (u132
->going
> 1) {
633 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
635 mutex_unlock(&u132
->scheduler_lock
);
636 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
638 } else if (endp
->dequeueing
) {
639 endp
->dequeueing
= 0;
640 mutex_unlock(&u132
->scheduler_lock
);
641 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
643 } else if (u132
->going
> 0) {
644 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
646 mutex_unlock(&u132
->scheduler_lock
);
647 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
649 } else if (!urb
->unlinked
) {
650 struct u132_ring
*ring
= endp
->ring
;
651 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
658 urb
->actual_length
+= len
;
659 if ((condition_code
== TD_CC_NOERROR
) &&
660 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
661 endp
->toggle_bits
= toggle_bits
;
662 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
664 if (urb
->actual_length
> 0) {
666 mutex_unlock(&u132
->scheduler_lock
);
667 retval
= edset_single(u132
, ring
, endp
, urb
,
668 address
, endp
->toggle_bits
,
669 u132_hcd_interrupt_recv
);
671 u132_hcd_giveback_urb(u132
, endp
, urb
,
676 endp
->jiffies
= jiffies
+
677 msecs_to_jiffies(urb
->interval
);
678 u132_ring_cancel_work(u132
, ring
);
679 u132_ring_queue_work(u132
, ring
, 0);
680 mutex_unlock(&u132
->scheduler_lock
);
681 u132_endp_put_kref(u132
, endp
);
684 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
685 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
686 endp
->toggle_bits
= toggle_bits
;
687 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
689 mutex_unlock(&u132
->scheduler_lock
);
690 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
693 if (condition_code
== TD_CC_NOERROR
) {
694 endp
->toggle_bits
= toggle_bits
;
695 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
697 } else if (condition_code
== TD_CC_STALL
) {
698 endp
->toggle_bits
= 0x2;
699 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
702 endp
->toggle_bits
= 0x2;
703 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
705 dev_err(&u132
->platform_dev
->dev
, "urb=%p givin"
706 "g back INTERRUPT %s\n", urb
,
707 cc_to_text
[condition_code
]);
709 mutex_unlock(&u132
->scheduler_lock
);
710 u132_hcd_giveback_urb(u132
, endp
, urb
,
711 cc_to_error
[condition_code
]);
715 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
716 "unlinked=%d\n", urb
, urb
->unlinked
);
717 mutex_unlock(&u132
->scheduler_lock
);
718 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
723 static void u132_hcd_bulk_output_sent(void *data
, struct urb
*urb
, u8
*buf
,
724 int len
, int toggle_bits
, int error_count
, int condition_code
,
725 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
727 struct u132_endp
*endp
= data
;
728 struct u132
*u132
= endp
->u132
;
729 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
730 mutex_lock(&u132
->scheduler_lock
);
731 if (u132
->going
> 1) {
732 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
734 mutex_unlock(&u132
->scheduler_lock
);
735 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
737 } else if (endp
->dequeueing
) {
738 endp
->dequeueing
= 0;
739 mutex_unlock(&u132
->scheduler_lock
);
740 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
742 } else if (u132
->going
> 0) {
743 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
745 mutex_unlock(&u132
->scheduler_lock
);
746 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
748 } else if (!urb
->unlinked
) {
749 struct u132_ring
*ring
= endp
->ring
;
750 urb
->actual_length
+= len
;
751 endp
->toggle_bits
= toggle_bits
;
752 if (urb
->transfer_buffer_length
> urb
->actual_length
) {
754 mutex_unlock(&u132
->scheduler_lock
);
755 retval
= edset_output(u132
, ring
, endp
, urb
, address
,
756 endp
->toggle_bits
, u132_hcd_bulk_output_sent
);
758 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
761 mutex_unlock(&u132
->scheduler_lock
);
762 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
766 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
767 "unlinked=%d\n", urb
, urb
->unlinked
);
768 mutex_unlock(&u132
->scheduler_lock
);
769 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
774 static void u132_hcd_bulk_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
775 int len
, int toggle_bits
, int error_count
, int condition_code
,
776 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
778 struct u132_endp
*endp
= data
;
779 struct u132
*u132
= endp
->u132
;
780 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
781 struct u132_udev
*udev
= &u132
->udev
[address
];
782 mutex_lock(&u132
->scheduler_lock
);
783 if (u132
->going
> 1) {
784 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
786 mutex_unlock(&u132
->scheduler_lock
);
787 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
789 } else if (endp
->dequeueing
) {
790 endp
->dequeueing
= 0;
791 mutex_unlock(&u132
->scheduler_lock
);
792 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
794 } else if (u132
->going
> 0) {
795 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
797 mutex_unlock(&u132
->scheduler_lock
);
798 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
800 } else if (!urb
->unlinked
) {
801 struct u132_ring
*ring
= endp
->ring
;
802 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
809 urb
->actual_length
+= len
;
810 if ((condition_code
== TD_CC_NOERROR
) &&
811 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
813 endp
->toggle_bits
= toggle_bits
;
814 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
816 mutex_unlock(&u132
->scheduler_lock
);
817 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
818 ring
->number
, endp
, urb
, address
,
819 endp
->usb_endp
, endp
->toggle_bits
,
820 u132_hcd_bulk_input_recv
);
822 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
824 } else if (condition_code
== TD_CC_NOERROR
) {
825 endp
->toggle_bits
= toggle_bits
;
826 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
828 mutex_unlock(&u132
->scheduler_lock
);
829 u132_hcd_giveback_urb(u132
, endp
, urb
,
830 cc_to_error
[condition_code
]);
832 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
833 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
834 endp
->toggle_bits
= toggle_bits
;
835 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
837 mutex_unlock(&u132
->scheduler_lock
);
838 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
840 } else if (condition_code
== TD_DATAUNDERRUN
) {
841 endp
->toggle_bits
= toggle_bits
;
842 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
844 dev_warn(&u132
->platform_dev
->dev
, "urb=%p(SHORT NOT OK"
845 ") giving back BULK IN %s\n", urb
,
846 cc_to_text
[condition_code
]);
847 mutex_unlock(&u132
->scheduler_lock
);
848 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
850 } else if (condition_code
== TD_CC_STALL
) {
851 endp
->toggle_bits
= 0x2;
852 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
853 mutex_unlock(&u132
->scheduler_lock
);
854 u132_hcd_giveback_urb(u132
, endp
, urb
,
855 cc_to_error
[condition_code
]);
858 endp
->toggle_bits
= 0x2;
859 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
860 dev_err(&u132
->platform_dev
->dev
, "urb=%p giving back B"
861 "ULK IN code=%d %s\n", urb
, condition_code
,
862 cc_to_text
[condition_code
]);
863 mutex_unlock(&u132
->scheduler_lock
);
864 u132_hcd_giveback_urb(u132
, endp
, urb
,
865 cc_to_error
[condition_code
]);
869 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
870 "unlinked=%d\n", urb
, urb
->unlinked
);
871 mutex_unlock(&u132
->scheduler_lock
);
872 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
877 static void u132_hcd_configure_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
878 int len
, int toggle_bits
, int error_count
, int condition_code
,
879 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
881 struct u132_endp
*endp
= data
;
882 struct u132
*u132
= endp
->u132
;
883 mutex_lock(&u132
->scheduler_lock
);
884 if (u132
->going
> 1) {
885 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
887 mutex_unlock(&u132
->scheduler_lock
);
888 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
890 } else if (endp
->dequeueing
) {
891 endp
->dequeueing
= 0;
892 mutex_unlock(&u132
->scheduler_lock
);
893 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
895 } else if (u132
->going
> 0) {
896 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
898 mutex_unlock(&u132
->scheduler_lock
);
899 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
901 } else if (!urb
->unlinked
) {
902 mutex_unlock(&u132
->scheduler_lock
);
903 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
906 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
907 "unlinked=%d\n", urb
, urb
->unlinked
);
908 mutex_unlock(&u132
->scheduler_lock
);
909 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
914 static void u132_hcd_configure_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
915 int len
, int toggle_bits
, int error_count
, int condition_code
,
916 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
918 struct u132_endp
*endp
= data
;
919 struct u132
*u132
= endp
->u132
;
920 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
921 mutex_lock(&u132
->scheduler_lock
);
922 if (u132
->going
> 1) {
923 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
925 mutex_unlock(&u132
->scheduler_lock
);
926 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
928 } else if (endp
->dequeueing
) {
929 endp
->dequeueing
= 0;
930 mutex_unlock(&u132
->scheduler_lock
);
931 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
933 } else if (u132
->going
> 0) {
934 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
936 mutex_unlock(&u132
->scheduler_lock
);
937 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
939 } else if (!urb
->unlinked
) {
940 struct u132_ring
*ring
= endp
->ring
;
941 u8
*u
= urb
->transfer_buffer
;
948 urb
->actual_length
= len
;
949 if ((condition_code
== TD_CC_NOERROR
) || ((condition_code
==
950 TD_DATAUNDERRUN
) && ((urb
->transfer_flags
&
951 URB_SHORT_NOT_OK
) == 0))) {
953 mutex_unlock(&u132
->scheduler_lock
);
954 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
955 ring
->number
, endp
, urb
, address
,
957 u132_hcd_configure_empty_sent
);
959 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
961 } else if (condition_code
== TD_CC_STALL
) {
962 mutex_unlock(&u132
->scheduler_lock
);
963 dev_warn(&u132
->platform_dev
->dev
, "giving back SETUP I"
964 "NPUT STALL urb %p\n", urb
);
965 u132_hcd_giveback_urb(u132
, endp
, urb
,
966 cc_to_error
[condition_code
]);
969 mutex_unlock(&u132
->scheduler_lock
);
970 dev_err(&u132
->platform_dev
->dev
, "giving back SETUP IN"
971 "PUT %s urb %p\n", cc_to_text
[condition_code
],
973 u132_hcd_giveback_urb(u132
, endp
, urb
,
974 cc_to_error
[condition_code
]);
978 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
979 "unlinked=%d\n", urb
, urb
->unlinked
);
980 mutex_unlock(&u132
->scheduler_lock
);
981 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
986 static void u132_hcd_configure_empty_recv(void *data
, struct urb
*urb
, u8
*buf
,
987 int len
, int toggle_bits
, int error_count
, int condition_code
,
988 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
990 struct u132_endp
*endp
= data
;
991 struct u132
*u132
= endp
->u132
;
992 mutex_lock(&u132
->scheduler_lock
);
993 if (u132
->going
> 1) {
994 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
996 mutex_unlock(&u132
->scheduler_lock
);
997 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
999 } else if (endp
->dequeueing
) {
1000 endp
->dequeueing
= 0;
1001 mutex_unlock(&u132
->scheduler_lock
);
1002 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1004 } else if (u132
->going
> 0) {
1005 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1007 mutex_unlock(&u132
->scheduler_lock
);
1008 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1010 } else if (!urb
->unlinked
) {
1011 mutex_unlock(&u132
->scheduler_lock
);
1012 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1015 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1016 "unlinked=%d\n", urb
, urb
->unlinked
);
1017 mutex_unlock(&u132
->scheduler_lock
);
1018 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1023 static void u132_hcd_configure_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1024 int len
, int toggle_bits
, int error_count
, int condition_code
,
1025 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1027 struct u132_endp
*endp
= data
;
1028 struct u132
*u132
= endp
->u132
;
1029 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1030 mutex_lock(&u132
->scheduler_lock
);
1031 if (u132
->going
> 1) {
1032 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1034 mutex_unlock(&u132
->scheduler_lock
);
1035 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1037 } else if (endp
->dequeueing
) {
1038 endp
->dequeueing
= 0;
1039 mutex_unlock(&u132
->scheduler_lock
);
1040 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1042 } else if (u132
->going
> 0) {
1043 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1045 mutex_unlock(&u132
->scheduler_lock
);
1046 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1048 } else if (!urb
->unlinked
) {
1049 if (usb_pipein(urb
->pipe
)) {
1051 struct u132_ring
*ring
= endp
->ring
;
1052 mutex_unlock(&u132
->scheduler_lock
);
1053 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1054 ring
->number
, endp
, urb
, address
,
1056 u132_hcd_configure_input_recv
);
1058 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1062 struct u132_ring
*ring
= endp
->ring
;
1063 mutex_unlock(&u132
->scheduler_lock
);
1064 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1065 ring
->number
, endp
, urb
, address
,
1067 u132_hcd_configure_empty_recv
);
1069 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1073 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1074 "unlinked=%d\n", urb
, urb
->unlinked
);
1075 mutex_unlock(&u132
->scheduler_lock
);
1076 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1081 static void u132_hcd_enumeration_empty_recv(void *data
, struct urb
*urb
,
1082 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1083 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1085 struct u132_endp
*endp
= data
;
1086 struct u132
*u132
= endp
->u132
;
1087 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1088 struct u132_udev
*udev
= &u132
->udev
[address
];
1089 mutex_lock(&u132
->scheduler_lock
);
1090 if (u132
->going
> 1) {
1091 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1093 mutex_unlock(&u132
->scheduler_lock
);
1094 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1096 } else if (endp
->dequeueing
) {
1097 endp
->dequeueing
= 0;
1098 mutex_unlock(&u132
->scheduler_lock
);
1099 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1101 } else if (u132
->going
> 0) {
1102 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1104 mutex_unlock(&u132
->scheduler_lock
);
1105 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1107 } else if (!urb
->unlinked
) {
1108 u132
->addr
[0].address
= 0;
1109 endp
->usb_addr
= udev
->usb_addr
;
1110 mutex_unlock(&u132
->scheduler_lock
);
1111 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1114 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1115 "unlinked=%d\n", urb
, urb
->unlinked
);
1116 mutex_unlock(&u132
->scheduler_lock
);
1117 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1122 static void u132_hcd_enumeration_address_sent(void *data
, struct urb
*urb
,
1123 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1124 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1126 struct u132_endp
*endp
= data
;
1127 struct u132
*u132
= endp
->u132
;
1128 mutex_lock(&u132
->scheduler_lock
);
1129 if (u132
->going
> 1) {
1130 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1132 mutex_unlock(&u132
->scheduler_lock
);
1133 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1135 } else if (endp
->dequeueing
) {
1136 endp
->dequeueing
= 0;
1137 mutex_unlock(&u132
->scheduler_lock
);
1138 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1140 } else if (u132
->going
> 0) {
1141 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1143 mutex_unlock(&u132
->scheduler_lock
);
1144 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1146 } else if (!urb
->unlinked
) {
1148 struct u132_ring
*ring
= endp
->ring
;
1149 mutex_unlock(&u132
->scheduler_lock
);
1150 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1151 ring
->number
, endp
, urb
, 0, endp
->usb_endp
, 0,
1152 u132_hcd_enumeration_empty_recv
);
1154 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1157 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1158 "unlinked=%d\n", urb
, urb
->unlinked
);
1159 mutex_unlock(&u132
->scheduler_lock
);
1160 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1165 static void u132_hcd_initial_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
1166 int len
, int toggle_bits
, int error_count
, int condition_code
,
1167 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1169 struct u132_endp
*endp
= data
;
1170 struct u132
*u132
= endp
->u132
;
1171 mutex_lock(&u132
->scheduler_lock
);
1172 if (u132
->going
> 1) {
1173 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1175 mutex_unlock(&u132
->scheduler_lock
);
1176 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1178 } else if (endp
->dequeueing
) {
1179 endp
->dequeueing
= 0;
1180 mutex_unlock(&u132
->scheduler_lock
);
1181 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1183 } else if (u132
->going
> 0) {
1184 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1186 mutex_unlock(&u132
->scheduler_lock
);
1187 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1189 } else if (!urb
->unlinked
) {
1190 mutex_unlock(&u132
->scheduler_lock
);
1191 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1194 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1195 "unlinked=%d\n", urb
, urb
->unlinked
);
1196 mutex_unlock(&u132
->scheduler_lock
);
1197 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1202 static void u132_hcd_initial_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
1203 int len
, int toggle_bits
, int error_count
, int condition_code
,
1204 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1206 struct u132_endp
*endp
= data
;
1207 struct u132
*u132
= endp
->u132
;
1208 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1209 mutex_lock(&u132
->scheduler_lock
);
1210 if (u132
->going
> 1) {
1211 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1213 mutex_unlock(&u132
->scheduler_lock
);
1214 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1216 } else if (endp
->dequeueing
) {
1217 endp
->dequeueing
= 0;
1218 mutex_unlock(&u132
->scheduler_lock
);
1219 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1221 } else if (u132
->going
> 0) {
1222 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1224 mutex_unlock(&u132
->scheduler_lock
);
1225 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1227 } else if (!urb
->unlinked
) {
1229 struct u132_ring
*ring
= endp
->ring
;
1230 u8
*u
= urb
->transfer_buffer
;
1237 urb
->actual_length
= len
;
1238 mutex_unlock(&u132
->scheduler_lock
);
1239 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
1240 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0x3,
1241 u132_hcd_initial_empty_sent
);
1243 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1246 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1247 "unlinked=%d\n", urb
, urb
->unlinked
);
1248 mutex_unlock(&u132
->scheduler_lock
);
1249 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1254 static void u132_hcd_initial_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1255 int len
, int toggle_bits
, int error_count
, int condition_code
,
1256 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1258 struct u132_endp
*endp
= data
;
1259 struct u132
*u132
= endp
->u132
;
1260 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1261 mutex_lock(&u132
->scheduler_lock
);
1262 if (u132
->going
> 1) {
1263 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1265 mutex_unlock(&u132
->scheduler_lock
);
1266 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1268 } else if (endp
->dequeueing
) {
1269 endp
->dequeueing
= 0;
1270 mutex_unlock(&u132
->scheduler_lock
);
1271 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1273 } else if (u132
->going
> 0) {
1274 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1276 mutex_unlock(&u132
->scheduler_lock
);
1277 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1279 } else if (!urb
->unlinked
) {
1281 struct u132_ring
*ring
= endp
->ring
;
1282 mutex_unlock(&u132
->scheduler_lock
);
1283 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1284 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0,
1285 u132_hcd_initial_input_recv
);
1287 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1290 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1291 "unlinked=%d\n", urb
, urb
->unlinked
);
1292 mutex_unlock(&u132
->scheduler_lock
);
1293 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1299 * this work function is only executed from the work queue
1302 static void u132_hcd_ring_work_scheduler(struct work_struct
*work
)
1304 struct u132_ring
*ring
=
1305 container_of(work
, struct u132_ring
, scheduler
.work
);
1306 struct u132
*u132
= ring
->u132
;
1307 mutex_lock(&u132
->scheduler_lock
);
1309 mutex_unlock(&u132
->scheduler_lock
);
1310 u132_ring_put_kref(u132
, ring
);
1312 } else if (ring
->curr_endp
) {
1313 struct u132_endp
*last_endp
= ring
->curr_endp
;
1314 struct list_head
*scan
;
1315 struct list_head
*head
= &last_endp
->endp_ring
;
1316 unsigned long wakeup
= 0;
1317 list_for_each(scan
, head
) {
1318 struct u132_endp
*endp
= list_entry(scan
,
1319 struct u132_endp
, endp_ring
);
1320 if (endp
->queue_next
== endp
->queue_last
) {
1321 } else if ((endp
->delayed
== 0)
1322 || time_after_eq(jiffies
, endp
->jiffies
)) {
1323 ring
->curr_endp
= endp
;
1324 u132_endp_cancel_work(u132
, last_endp
);
1325 u132_endp_queue_work(u132
, last_endp
, 0);
1326 mutex_unlock(&u132
->scheduler_lock
);
1327 u132_ring_put_kref(u132
, ring
);
1330 unsigned long delta
= endp
->jiffies
- jiffies
;
1335 if (last_endp
->queue_next
== last_endp
->queue_last
) {
1336 } else if ((last_endp
->delayed
== 0) || time_after_eq(jiffies
,
1337 last_endp
->jiffies
)) {
1338 u132_endp_cancel_work(u132
, last_endp
);
1339 u132_endp_queue_work(u132
, last_endp
, 0);
1340 mutex_unlock(&u132
->scheduler_lock
);
1341 u132_ring_put_kref(u132
, ring
);
1344 unsigned long delta
= last_endp
->jiffies
- jiffies
;
1349 u132_ring_requeue_work(u132
, ring
, wakeup
);
1350 mutex_unlock(&u132
->scheduler_lock
);
1353 mutex_unlock(&u132
->scheduler_lock
);
1354 u132_ring_put_kref(u132
, ring
);
1358 mutex_unlock(&u132
->scheduler_lock
);
1359 u132_ring_put_kref(u132
, ring
);
1364 static void u132_hcd_endp_work_scheduler(struct work_struct
*work
)
1366 struct u132_ring
*ring
;
1367 struct u132_endp
*endp
=
1368 container_of(work
, struct u132_endp
, scheduler
.work
);
1369 struct u132
*u132
= endp
->u132
;
1370 mutex_lock(&u132
->scheduler_lock
);
1372 if (endp
->edset_flush
) {
1373 endp
->edset_flush
= 0;
1374 if (endp
->dequeueing
)
1375 usb_ftdi_elan_edset_flush(u132
->platform_dev
,
1376 ring
->number
, endp
);
1377 mutex_unlock(&u132
->scheduler_lock
);
1378 u132_endp_put_kref(u132
, endp
);
1380 } else if (endp
->active
) {
1381 mutex_unlock(&u132
->scheduler_lock
);
1382 u132_endp_put_kref(u132
, endp
);
1384 } else if (ring
->in_use
) {
1385 mutex_unlock(&u132
->scheduler_lock
);
1386 u132_endp_put_kref(u132
, endp
);
1388 } else if (endp
->queue_next
== endp
->queue_last
) {
1389 mutex_unlock(&u132
->scheduler_lock
);
1390 u132_endp_put_kref(u132
, endp
);
1392 } else if (endp
->pipetype
== PIPE_INTERRUPT
) {
1393 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1395 mutex_unlock(&u132
->scheduler_lock
);
1396 u132_endp_put_kref(u132
, endp
);
1400 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1403 ring
->curr_endp
= endp
;
1405 mutex_unlock(&u132
->scheduler_lock
);
1406 retval
= edset_single(u132
, ring
, endp
, urb
, address
,
1407 endp
->toggle_bits
, u132_hcd_interrupt_recv
);
1409 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1412 } else if (endp
->pipetype
== PIPE_CONTROL
) {
1413 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1415 mutex_unlock(&u132
->scheduler_lock
);
1416 u132_endp_put_kref(u132
, endp
);
1418 } else if (address
== 0) {
1420 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1423 ring
->curr_endp
= endp
;
1425 mutex_unlock(&u132
->scheduler_lock
);
1426 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1427 0x2, u132_hcd_initial_setup_sent
);
1429 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1431 } else if (endp
->usb_addr
== 0) {
1433 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1436 ring
->curr_endp
= endp
;
1438 mutex_unlock(&u132
->scheduler_lock
);
1439 retval
= edset_setup(u132
, ring
, endp
, urb
, 0, 0x2,
1440 u132_hcd_enumeration_address_sent
);
1442 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1446 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1448 address
= u132
->addr
[endp
->usb_addr
].address
;
1450 ring
->curr_endp
= endp
;
1452 mutex_unlock(&u132
->scheduler_lock
);
1453 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1454 0x2, u132_hcd_configure_setup_sent
);
1456 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1461 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1463 mutex_unlock(&u132
->scheduler_lock
);
1464 u132_endp_put_kref(u132
, endp
);
1468 struct urb
*urb
= endp
->urb_list
[
1469 ENDP_QUEUE_MASK
& endp
->queue_next
];
1471 ring
->curr_endp
= endp
;
1473 mutex_unlock(&u132
->scheduler_lock
);
1474 retval
= edset_input(u132
, ring
, endp
, urb
,
1475 address
, endp
->toggle_bits
,
1476 u132_hcd_bulk_input_recv
);
1479 u132_hcd_giveback_urb(u132
, endp
, urb
,
1483 } else { /* output pipe */
1484 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1486 mutex_unlock(&u132
->scheduler_lock
);
1487 u132_endp_put_kref(u132
, endp
);
1491 struct urb
*urb
= endp
->urb_list
[
1492 ENDP_QUEUE_MASK
& endp
->queue_next
];
1494 ring
->curr_endp
= endp
;
1496 mutex_unlock(&u132
->scheduler_lock
);
1497 retval
= edset_output(u132
, ring
, endp
, urb
,
1498 address
, endp
->toggle_bits
,
1499 u132_hcd_bulk_output_sent
);
1502 u132_hcd_giveback_urb(u132
, endp
, urb
,
1511 static void port_power(struct u132
*u132
, int pn
, int is_on
)
1513 u132
->port
[pn
].power
= is_on
;
1518 static void u132_power(struct u132
*u132
, int is_on
)
1520 struct usb_hcd
*hcd
= u132_to_hcd(u132
)
1521 ; /* hub is inactive unless the port is powered */
1528 hcd
->state
= HC_STATE_HALT
;
1532 static int u132_periodic_reinit(struct u132
*u132
)
1535 u32 fi
= u132
->hc_fminterval
& 0x03fff;
1538 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1541 fit
= fminterval
& FIT
;
1542 retval
= u132_write_pcimem(u132
, fminterval
,
1543 (fit
^ FIT
) | u132
->hc_fminterval
);
1546 retval
= u132_write_pcimem(u132
, periodicstart
,
1547 ((9 * fi
) / 10) & 0x3fff);
1553 static char *hcfs2string(int state
)
1556 case OHCI_USB_RESET
:
1558 case OHCI_USB_RESUME
:
1561 return "operational";
1562 case OHCI_USB_SUSPEND
:
1568 static int u132_init(struct u132
*u132
)
1573 u132
->next_statechange
= jiffies
;
1574 retval
= u132_write_pcimem(u132
, intrdisable
, OHCI_INTR_MIE
);
1577 retval
= u132_read_pcimem(u132
, control
, &control
);
1580 if (u132
->num_ports
== 0) {
1582 retval
= u132_read_pcimem(u132
, roothub
.a
, &rh_a
);
1585 u132
->num_ports
= rh_a
& RH_A_NDP
;
1586 retval
= read_roothub_info(u132
);
1590 if (u132
->num_ports
> MAX_U132_PORTS
)
1597 /* Start an OHCI controller, set the BUS operational
1598 * resets USB and controller
1601 static int u132_run(struct u132
*u132
)
1610 int mask
= OHCI_INTR_INIT
;
1611 int first
= u132
->hc_fminterval
== 0;
1613 int reset_timeout
= 30; /* ... allow extra time */
1617 retval
= u132_read_pcimem(u132
, fminterval
, &temp
);
1620 u132
->hc_fminterval
= temp
& 0x3fff;
1621 u132
->hc_fminterval
|= FSMP(u132
->hc_fminterval
) << 16;
1623 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
1626 dev_info(&u132
->platform_dev
->dev
, "resetting from state '%s', control "
1627 "= %08X\n", hcfs2string(u132
->hc_control
& OHCI_CTRL_HCFS
),
1629 switch (u132
->hc_control
& OHCI_CTRL_HCFS
) {
1633 case OHCI_USB_SUSPEND
:
1634 case OHCI_USB_RESUME
:
1635 u132
->hc_control
&= OHCI_CTRL_RWC
;
1636 u132
->hc_control
|= OHCI_USB_RESUME
;
1640 u132
->hc_control
&= OHCI_CTRL_RWC
;
1641 u132
->hc_control
|= OHCI_USB_RESET
;
1645 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1648 retval
= u132_read_pcimem(u132
, control
, &control
);
1652 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1655 if (!(roothub_a
& RH_A_NPS
)) {
1656 int temp
; /* power down each port */
1657 for (temp
= 0; temp
< u132
->num_ports
; temp
++) {
1658 retval
= u132_write_pcimem(u132
,
1659 roothub
.portstatus
[temp
], RH_PS_LSDA
);
1664 retval
= u132_read_pcimem(u132
, control
, &control
);
1668 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1671 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_HCR
);
1675 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1678 if (0 != (status
& OHCI_HCR
)) {
1679 if (--reset_timeout
== 0) {
1680 dev_err(&u132
->platform_dev
->dev
, "USB HC reset"
1689 if (u132
->flags
& OHCI_QUIRK_INITRESET
) {
1690 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1693 retval
= u132_read_pcimem(u132
, control
, &control
);
1697 retval
= u132_write_pcimem(u132
, ed_controlhead
, 0x00000000);
1700 retval
= u132_write_pcimem(u132
, ed_bulkhead
, 0x11000000);
1703 retval
= u132_write_pcimem(u132
, hcca
, 0x00000000);
1706 retval
= u132_periodic_reinit(u132
);
1709 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1712 retval
= u132_read_pcimem(u132
, periodicstart
, &periodicstart
);
1715 if (0 == (fminterval
& 0x3fff0000) || 0 == periodicstart
) {
1716 if (!(u132
->flags
& OHCI_QUIRK_INITRESET
)) {
1717 u132
->flags
|= OHCI_QUIRK_INITRESET
;
1720 dev_err(&u132
->platform_dev
->dev
, "init err(%08x %04x)"
1721 "\n", fminterval
, periodicstart
);
1722 } /* start controller operations */
1723 u132
->hc_control
&= OHCI_CTRL_RWC
;
1724 u132
->hc_control
|= OHCI_CONTROL_INIT
| OHCI_CTRL_BLE
| OHCI_USB_OPER
;
1725 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1728 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_BLF
);
1731 retval
= u132_read_pcimem(u132
, cmdstatus
, &cmdstatus
);
1734 retval
= u132_read_pcimem(u132
, control
, &control
);
1737 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
1738 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_DRWE
);
1741 retval
= u132_write_pcimem(u132
, intrstatus
, mask
);
1744 retval
= u132_write_pcimem(u132
, intrdisable
,
1745 OHCI_INTR_MIE
| OHCI_INTR_OC
| OHCI_INTR_RHSC
| OHCI_INTR_FNO
|
1746 OHCI_INTR_UE
| OHCI_INTR_RD
| OHCI_INTR_SF
| OHCI_INTR_WDH
|
1749 return retval
; /* handle root hub init quirks ... */
1750 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1753 roothub_a
&= ~(RH_A_PSM
| RH_A_OCPM
);
1754 if (u132
->flags
& OHCI_QUIRK_SUPERIO
) {
1755 roothub_a
|= RH_A_NOCP
;
1756 roothub_a
&= ~(RH_A_POTPGT
| RH_A_NPS
);
1757 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1760 } else if ((u132
->flags
& OHCI_QUIRK_AMD756
) || distrust_firmware
) {
1761 roothub_a
|= RH_A_NPS
;
1762 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1766 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_LPSC
);
1769 retval
= u132_write_pcimem(u132
, roothub
.b
,
1770 (roothub_a
& RH_A_NPS
) ? 0 : RH_B_PPCM
);
1773 retval
= u132_read_pcimem(u132
, control
, &control
);
1776 mdelay((roothub_a
>> 23) & 0x1fe);
1777 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
1781 static void u132_hcd_stop(struct usb_hcd
*hcd
)
1783 struct u132
*u132
= hcd_to_u132(hcd
);
1784 if (u132
->going
> 1) {
1785 dev_err(&u132
->platform_dev
->dev
, "u132 device %p(hcd=%p) has b"
1786 "een removed %d\n", u132
, hcd
, u132
->going
);
1787 } else if (u132
->going
> 0) {
1788 dev_err(&u132
->platform_dev
->dev
, "device hcd=%p is being remov"
1791 mutex_lock(&u132
->sw_lock
);
1793 u132_power(u132
, 0);
1794 mutex_unlock(&u132
->sw_lock
);
1798 static int u132_hcd_start(struct usb_hcd
*hcd
)
1800 struct u132
*u132
= hcd_to_u132(hcd
);
1801 if (u132
->going
> 1) {
1802 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1805 } else if (u132
->going
> 0) {
1806 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1808 } else if (hcd
->self
.controller
) {
1810 struct platform_device
*pdev
=
1811 to_platform_device(hcd
->self
.controller
);
1812 u16 vendor
= ((struct u132_platform_data
*)
1813 (pdev
->dev
.platform_data
))->vendor
;
1814 u16 device
= ((struct u132_platform_data
*)
1815 (pdev
->dev
.platform_data
))->device
;
1816 mutex_lock(&u132
->sw_lock
);
1818 if (vendor
== PCI_VENDOR_ID_AMD
&& device
== 0x740c) {
1819 u132
->flags
= OHCI_QUIRK_AMD756
;
1820 } else if (vendor
== PCI_VENDOR_ID_OPTI
&& device
== 0xc861) {
1821 dev_err(&u132
->platform_dev
->dev
, "WARNING: OPTi workar"
1822 "ounds unavailable\n");
1823 } else if (vendor
== PCI_VENDOR_ID_COMPAQ
&& device
== 0xa0f8)
1824 u132
->flags
|= OHCI_QUIRK_ZFMICRO
;
1825 retval
= u132_run(u132
);
1831 mutex_unlock(&u132
->sw_lock
);
1834 dev_err(&u132
->platform_dev
->dev
, "platform_device missing\n");
1839 static int u132_hcd_reset(struct usb_hcd
*hcd
)
1841 struct u132
*u132
= hcd_to_u132(hcd
);
1842 if (u132
->going
> 1) {
1843 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1846 } else if (u132
->going
> 0) {
1847 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1851 mutex_lock(&u132
->sw_lock
);
1852 retval
= u132_init(u132
);
1857 mutex_unlock(&u132
->sw_lock
);
1862 static int create_endpoint_and_queue_int(struct u132
*u132
,
1863 struct u132_udev
*udev
, struct urb
*urb
,
1864 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1867 struct u132_ring
*ring
;
1871 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1876 spin_lock_init(&endp
->queue_lock
.slock
);
1877 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1878 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1880 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1885 endp_number
= ++u132
->num_endpoints
;
1886 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1887 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1888 INIT_LIST_HEAD(&endp
->urb_more
);
1889 ring
= endp
->ring
= &u132
->ring
[0];
1890 if (ring
->curr_endp
) {
1891 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
1893 INIT_LIST_HEAD(&endp
->endp_ring
);
1894 ring
->curr_endp
= endp
;
1897 endp
->dequeueing
= 0;
1898 endp
->edset_flush
= 0;
1901 endp
->endp_number
= endp_number
;
1903 endp
->hep
= urb
->ep
;
1904 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1905 u132_endp_init_kref(u132
, endp
);
1906 if (usb_pipein(urb
->pipe
)) {
1907 endp
->toggle_bits
= 0x2;
1908 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
1911 udev
->endp_number_in
[usb_endp
] = endp_number
;
1912 u132_udev_get_kref(u132
, udev
);
1914 endp
->toggle_bits
= 0x2;
1915 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
1918 udev
->endp_number_out
[usb_endp
] = endp_number
;
1919 u132_udev_get_kref(u132
, udev
);
1923 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1924 endp
->udev_number
= address
;
1925 endp
->usb_addr
= usb_addr
;
1926 endp
->usb_endp
= usb_endp
;
1927 endp
->queue_size
= 1;
1928 endp
->queue_last
= 0;
1929 endp
->queue_next
= 0;
1930 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1931 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1932 u132_endp_queue_work(u132
, endp
, msecs_to_jiffies(urb
->interval
));
1936 static int queue_int_on_old_endpoint(struct u132
*u132
,
1937 struct u132_udev
*udev
, struct urb
*urb
,
1938 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
1939 u8 usb_endp
, u8 address
)
1943 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1944 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
1945 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1947 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
1950 endp
->queue_size
-= 1;
1953 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
1960 static int create_endpoint_and_queue_bulk(struct u132
*u132
,
1961 struct u132_udev
*udev
, struct urb
*urb
,
1962 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1966 struct u132_ring
*ring
;
1970 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1975 spin_lock_init(&endp
->queue_lock
.slock
);
1976 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1977 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1979 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1984 endp_number
= ++u132
->num_endpoints
;
1985 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1986 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1987 INIT_LIST_HEAD(&endp
->urb_more
);
1988 endp
->dequeueing
= 0;
1989 endp
->edset_flush
= 0;
1992 endp
->endp_number
= endp_number
;
1994 endp
->hep
= urb
->ep
;
1995 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1996 u132_endp_init_kref(u132
, endp
);
1997 if (usb_pipein(urb
->pipe
)) {
1998 endp
->toggle_bits
= 0x2;
1999 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
2003 udev
->endp_number_in
[usb_endp
] = endp_number
;
2004 u132_udev_get_kref(u132
, udev
);
2006 endp
->toggle_bits
= 0x2;
2007 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
2011 udev
->endp_number_out
[usb_endp
] = endp_number
;
2012 u132_udev_get_kref(u132
, udev
);
2014 ring
= endp
->ring
= &u132
->ring
[ring_number
- 1];
2015 if (ring
->curr_endp
) {
2016 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
2018 INIT_LIST_HEAD(&endp
->endp_ring
);
2019 ring
->curr_endp
= endp
;
2023 endp
->udev_number
= address
;
2024 endp
->usb_addr
= usb_addr
;
2025 endp
->usb_endp
= usb_endp
;
2026 endp
->queue_size
= 1;
2027 endp
->queue_last
= 0;
2028 endp
->queue_next
= 0;
2029 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2030 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2031 u132_endp_queue_work(u132
, endp
, 0);
2035 static int queue_bulk_on_old_endpoint(struct u132
*u132
, struct u132_udev
*udev
,
2037 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
2038 u8 usb_endp
, u8 address
)
2041 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2042 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2044 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
2047 endp
->queue_size
-= 1;
2050 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->endp_number = endp_number;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	u132_endp_get_kref(u132, endp);
	if (usb_addr == 0) {
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	} else {		/* usb_addr > 0 */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_get_kref(u132, udev);
		udev->enumeration = 2;
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	}
}
static int queue_control_on_old_endpoint(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp, u8 address)
{
	if (usb_addr == 0) {
		if (usb_pipein(urb->pipe)) {
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				}
				urbq->urb = urb;
				list_add_tail(&urbq->urb_more,
					&endp->urb_more);
			}
			return 0;
		} else {	/* usb_pipeout(urb->pipe) */
			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
			int I = MAX_U132_UDEVS;
			int i = 0;
			while (--I > 0) {
				struct u132_udev *udev = &u132->udev[++i];
				if (udev->usb_device) {
					continue;
				} else {
					udev->enumeration = 1;
					u132->addr[0].address = i;
					endp->udev_number = i;
					udev->udev_number = i;
					udev->usb_addr = usb_dev->devnum;
					u132_udev_init_kref(u132, udev);
					udev->endp_number_in[usb_endp] =
						endp->endp_number;
					u132_udev_get_kref(u132, udev);
					udev->endp_number_out[usb_endp] =
						endp->endp_number;
					udev->usb_device = usb_dev;
					((u8 *) (urb->setup_packet))[2] =
						addr->address = i;
					u132_udev_get_kref(u132, udev);
					break;
				}
			}
			if (I == 0) {
				dev_err(&u132->platform_dev->dev, "run out of d"
					"evice space\n");
				return -EINVAL;
			}
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				}
				urbq->urb = urb;
				list_add_tail(&urbq->urb_more,
					&endp->urb_more);
			}
			return 0;
		}
	} else {		/* usb_addr > 0 */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (udev->enumeration != 2)
			udev->enumeration = 2;
		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
				urb;
		} else {
			struct u132_urbq *urbq =
				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
			if (urbq == NULL) {
				endp->queue_size -= 1;
				return -ENOMEM;
			}
			urbq->urb = urb;
			list_add_tail(&urbq->urb_more, &endp->urb_more);
		}
		return 0;
	}
}
static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
	gfp_t mem_flags)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (irqs_disabled()) {
		if (__GFP_WAIT & mem_flags) {
			printk(KERN_ERR "invalid context for function that migh"
				"t sleep\n");
			return -EINVAL;
		}
	}
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		struct usb_device *usb_dev = urb->dev;
		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_int_on_old_endpoint(
						u132, udev, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval)
					return retval;
				u132_endp_queue_work(u132, endp,
					msecs_to_jiffies(urb->interval));
				return 0;
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else {	/* endp == NULL */
				return create_endpoint_and_queue_int(u132, udev,
					urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
			}
		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			dev_err(&u132->platform_dev->dev, "the hardware does no"
				"t support PIPE_ISOCHRONOUS\n");
			return -EINVAL;
		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_bulk_on_old_endpoint(
						u132, udev, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval)
					return retval;
				u132_endp_queue_work(u132, endp, 0);
				return 0;
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_bulk(u132,
					udev, urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
		} else {	/* PIPE_CONTROL */
			u8 address = u132->addr[usb_addr].address;
			struct u132_endp *endp = urb->ep->hcpriv;
			u16 urb_size = 8;
			u8 *b = urb->setup_packet;
			int i = 0;
			char data[30 * 3 + 4];
			char *d = data;
			int m = (sizeof(data) - 1) / 3;
			data[0] = 0;
			/* format the setup packet for diagnostics */
			while (urb_size-- > 0) {
				if (i > m) {
					/* ... */
				} else if (i++ < m) {
					int w = sprintf(d, " %02X", *b++);
					d += w;
				} else
					d += sprintf(d, " ..");
			}
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_control_on_old_endpoint(
						u132, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval)
					return retval;
				u132_endp_queue_work(u132, endp, 0);
				return 0;
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_control(u132,
					urb, usb_dev, usb_addr, usb_endp,
					mem_flags);
		}
	}
}
static int dequeue_from_overflow_chain(struct u132 *u132,
	struct u132_endp *endp, struct urb *urb)
{
	struct list_head *scan;
	struct list_head *head = &endp->urb_more;
	list_for_each(scan, head) {
		struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
			urb_more);
		if (urbq->urb == urb) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			list_del(scan);
			endp->queue_size -= 1;
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, 0);
			return 0;
		}
	}
	dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
		"[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
		"\n", urb, endp->endp_number, endp, endp->ring->number,
		endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
		endp->usb_endp, endp->usb_addr, endp->queue_size,
		endp->queue_next, endp->queue_last);
	return -EINVAL;
}
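/*
 * Usage note (inferred from the caller below, not original text): this
 * helper runs with endp->queue_lock held; it gives the urb back and
 * returns 0 when the urb is found on the overflow chain, otherwise it
 * logs the error above and returns a negative error code.
 */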
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	int rc;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return rc;
	}
	if (endp->queue_size == 0) {
		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
			endp->endp_number, endp, endp->ring->number,
			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
			endp->usb_endp, endp->usb_addr);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return -EINVAL;
	}
	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
		if (endp->active) {	/* hardware is still working on this urb */
			endp->dequeueing = 1;
			endp->edset_flush = 1;
			u132_endp_queue_work(u132, endp, 0);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return 0;
		} else {
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			u132_hcd_abandon_urb(u132, endp, urb, status);
			return 0;
		}
	} else {
		int queue_list = 0;
		u16 queue_size = endp->queue_size;
		u16 queue_scan = endp->queue_next;
		struct urb **urb_slot = NULL;
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan]) {
				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
					queue_scan];
				break;
			}
		}
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan];
			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
				queue_scan];
		}
		if (urb_slot) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			endp->queue_size -= 1;
			if (list_empty(&endp->urb_more)) {
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
			} else {
				struct list_head *next = endp->urb_more.next;
				struct u132_urbq *urbq = list_entry(next,
					struct u132_urbq, urb_more);
				list_del(next);
				*urb_slot = urbq->urb;
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				kfree(urbq);
			}
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, status);
			return 0;
		} else if (list_empty(&endp->urb_more)) {
			dev_err(&u132->platform_dev->dev, "urb=%p not found in "
				"endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
				"=%d size=%d next=%04X last=%04X\n", urb,
				endp->endp_number, endp, endp->ring->number,
				endp->input ? 'I' : ' ',
				endp->output ? 'O' : ' ', endp->usb_endp,
				endp->usb_addr, endp->queue_size,
				endp->queue_next, endp->queue_last);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return -EINVAL;
		} else {
			int retval;
			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
			retval = dequeue_from_overflow_chain(u132, endp,
				urb);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return retval;
		}
	}
}
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (usb_pipein(urb->pipe)) {
			u8 endp_number = udev->endp_number_in[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
		} else {
			u8 endp_number = udev->endp_number_out[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
		}
	}
}
static void u132_endpoint_disable(struct usb_hcd *hcd,
	struct usb_host_endpoint *hep)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
			") has been removed %d\n", u132, hcd, hep,
			u132->going);
	} else {
		struct u132_endp *endp = hep->hcpriv;
		if (endp)
			u132_endp_put_kref(u132, endp);
	}
}
static int u132_get_frame(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
		return 0;
	}
}
static int u132_roothub_descriptor(struct u132 *u132,
	struct usb_hub_descriptor *desc)
{
	int retval;
	u16 temp;
	u32 rh_a = -1;
	u32 rh_b = -1;
	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
	if (retval)
		return retval;
	desc->bDescriptorType = 0x29;
	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = u132->num_ports;
	temp = 1 + (u132->num_ports / 8);
	desc->bDescLength = 7 + 2 * temp;
	temp = 0;
	if (rh_a & RH_A_NPS)
		temp |= 0x0002;
	if (rh_a & RH_A_PSM)
		temp |= 0x0001;
	if (rh_a & RH_A_NOCP)
		temp |= 0x0010;
	else if (rh_a & RH_A_OCPM)
		temp |= 0x0008;
	desc->wHubCharacteristics = cpu_to_le16(temp);
	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
	if (retval)
		return retval;
	memset(desc->u.hs.DeviceRemovable, 0xff,
		sizeof(desc->u.hs.DeviceRemovable));
	desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
	if (u132->num_ports > 7) {
		desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
		desc->u.hs.DeviceRemovable[2] = 0xff;
	} else
		desc->u.hs.DeviceRemovable[1] = 0xff;
	return 0;
}
static int u132_roothub_status(struct u132 *u132, __le32 *desc)
{
	u32 rh_status = -1;
	int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
	*desc = cpu_to_le32(rh_status);
	return ret_status;
}
static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port = wIndex - 1;
		u32 rh_portstatus = -1;
		int ret_portstatus = u132_read_pcimem(u132,
			roothub.portstatus[port], &rh_portstatus);
		*desc = cpu_to_le32(rh_portstatus);
		if (*(u16 *) (desc + 2)) {
			dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
				"ge = %08X\n", port, *desc);
		}
		return ret_portstatus;
	}
}
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
#define PORT_RESET_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
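/*
 * Worked example (for illustration only): tick_before() compares two
 * 16-bit tick counts modulo 2^16, so it stays correct across counter
 * wrap.  With t1 = 0xFFF0 and t2 = 0x0010 the signed 16-bit difference
 * is -0x20, so tick_before(0xFFF0, 0x0010) is true even though t1 > t2
 * when compared as unsigned values.
 */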
static int u132_roothub_portreset(struct u132 *u132, int port_index)
{
	int retval;
	u32 fmnumber;
	u16 now;
	u16 reset_done;
	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	if (retval)
		return retval;
	now = fmnumber;
	reset_done = now + PORT_RESET_MSEC;
	do {
		u32 portstat;
		do {
			retval = u132_read_pcimem(u132,
				roothub.portstatus[port_index], &portstat);
			if (retval)
				return retval;
			if (RH_PS_PRS & portstat)
				continue;
			else
				break;
		} while (tick_before(now, reset_done));
		if (RH_PS_PRS & portstat)
			return -ENODEV;
		if (RH_PS_CCS & portstat) {
			if (RH_PS_PRSC & portstat) {
				retval = u132_write_pcimem(u132,
					roothub.portstatus[port_index],
					RH_PS_PRSC);
				if (retval)
					return retval;
			}
		} else
			break;	/* start the next reset,
				   sleep till it's probably done */
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			RH_PS_PRS);
		if (retval)
			return retval;
		msleep(PORT_RESET_HW_MSEC);
		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
		if (retval)
			return retval;
		now = fmnumber;
	} while (tick_before(now, reset_done));
	return 0;
}
static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int retval;
		int port_index = wIndex - 1;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PSS);
			return retval;
		case USB_PORT_FEAT_POWER:
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PPS);
			return retval;
		case USB_PORT_FEAT_RESET:
			retval = u132_roothub_portreset(u132, port_index);
			return retval;
		default:
			return -EPIPE;
		}
	}
}
static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		u32 temp;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			temp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			temp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			temp = RH_PS_POCI;
			if ((u132->hc_control & OHCI_CTRL_HCFS)
				!= OHCI_USB_OPER) {
				dev_err(&u132->platform_dev->dev, "TODO resume_"
					/* ... */);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			temp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			temp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			temp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			temp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			temp = RH_PS_PRSC;
			break;
		default:
			return -EPIPE;
		}
		return u132_write_pcimem(u132, roothub.portstatus[port_index],
			temp);
	}
}
/* the virtual root hub timer IRQ checks for hub status */
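/*
 * Note (summarising standard hub behaviour, not text from the original):
 * the buf bitmap follows the USB hub status-change convention - bit 0
 * reports a change on the hub itself and bit N (N >= 1) a change on
 * port N - which is why a change on port index i sets
 * buf[0] |= 1 << (i + 1) and ports with index 7 or more spill into buf[1].
 */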
static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
			"ed %d\n", hcd, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
			"ed\n", hcd);
		return -ESHUTDOWN;
	} else {
		int i, changed = 0, length = 1;
		if (u132->flags & OHCI_QUIRK_AMD756) {
			if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
				dev_err(&u132->platform_dev->dev, "bogus NDP, r"
					"ereads as NDP=%d\n",
					u132->hc_roothub_a & RH_A_NDP);
				return 0;
			}
		}
		if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
			buf[0] = changed = 1;
		else
			buf[0] = 0;
		if (u132->num_ports > 7) {
			buf[1] = 0;
			length++;
		}
		for (i = 0; i < u132->num_ports; i++) {
			if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
				RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
				RH_PS_PRSC)) {
				changed = 1;
				if (i < 7)
					buf[0] |= 1 << (i + 1);
				else
					buf[1] |= 1 << (i - 7);
				continue;
			}
			if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
				continue;
			if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
				continue;
			/* ... */
		}
		return changed ? length : 0;
	}
}
static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
	u16 wIndex, char *buf, u16 wLength)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0;
		mutex_lock(&u132->sw_lock);
		switch (typeReq) {
		case ClearHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;
			default:
				retval = -EPIPE;
			}
			break;
		case SetHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;
			default:
				retval = -EPIPE;
			}
			break;
		case ClearPortFeature:{
				retval = u132_roothub_clearportfeature(u132,
					wValue, wIndex);
				break;
			}
		case GetHubDescriptor:{
				retval = u132_roothub_descriptor(u132,
					(struct usb_hub_descriptor *)buf);
				break;
			}
		case GetHubStatus:{
				retval = u132_roothub_status(u132,
					(__le32 *) buf);
				break;
			}
		case GetPortStatus:{
				retval = u132_roothub_portstatus(u132,
					(__le32 *) buf, wIndex);
				break;
			}
		case SetPortFeature:{
				retval = u132_roothub_setportfeature(u132,
					wValue, wIndex);
				break;
			}
		default:
			retval = -EPIPE;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	}
}
static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}
#ifdef CONFIG_PM
static int u132_bus_suspend(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}

static int u132_bus_resume(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}

#else
#define u132_bus_suspend NULL
#define u132_bus_resume NULL
#endif
static struct hc_driver u132_hc_driver = {
	.description = hcd_name,
	.hcd_priv_size = sizeof(struct u132),
	.flags = HCD_USB11 | HCD_MEMORY,
	.reset = u132_hcd_reset,
	.start = u132_hcd_start,
	.stop = u132_hcd_stop,
	.urb_enqueue = u132_urb_enqueue,
	.urb_dequeue = u132_urb_dequeue,
	.endpoint_disable = u132_endpoint_disable,
	.get_frame_number = u132_get_frame,
	.hub_status_data = u132_hub_status_data,
	.hub_control = u132_hub_control,
	.bus_suspend = u132_bus_suspend,
	.bus_resume = u132_bus_resume,
	.start_port_reset = u132_start_port_reset,
};
/*
 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
 * is held for writing, thus this module must not call usb_remove_hcd()
 * synchronously - but instead should immediately stop activity to the
 * device and asynchronously call usb_remove_hcd()
 */
static int __devexit u132_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	if (hcd) {
		struct u132 *u132 = hcd_to_u132(hcd);
		if (u132->going++ > 1) {
			dev_err(&u132->platform_dev->dev, "already being remove"
				"d\n");
			return -ENODEV;
		} else {
			int rings = MAX_U132_RINGS;
			int endps = MAX_U132_ENDPS;
			dev_err(&u132->platform_dev->dev, "removing device u132"
				".%d\n", u132->sequence_num);
			mutex_lock(&u132->sw_lock);
			u132_monitor_cancel_work(u132);
			while (rings-- > 0) {
				struct u132_ring *ring = &u132->ring[rings];
				u132_ring_cancel_work(u132, ring);
			}
			while (endps-- > 0) {
				struct u132_endp *endp = u132->endp[endps];
				if (endp)
					u132_endp_cancel_work(u132, endp);
			}
			printk(KERN_INFO "removing device u132.%d\n",
				u132->sequence_num);
			mutex_unlock(&u132->sw_lock);
			usb_remove_hcd(hcd);
			u132_u132_put_kref(u132);
			return 0;
		}
	} else
		return 0;
}
static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
{
	int rings = MAX_U132_RINGS;
	int ports = MAX_U132_PORTS;
	int addrs = MAX_U132_ADDRS;
	int udevs = MAX_U132_UDEVS;
	int endps = MAX_U132_ENDPS;
	u132->board = pdev->dev.platform_data;
	u132->platform_dev = pdev;
	/* ... */
	mutex_init(&u132->sw_lock);
	mutex_init(&u132->scheduler_lock);
	while (rings-- > 0) {
		struct u132_ring *ring = &u132->ring[rings];
		ring->number = rings + 1;
		ring->curr_endp = NULL;
		INIT_DELAYED_WORK(&ring->scheduler,
			u132_hcd_ring_work_scheduler);
	}
	mutex_lock(&u132->sw_lock);
	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
	while (ports-- > 0) {
		struct u132_port *port = &u132->port[ports];
		/* ... */
	}
	while (addrs-- > 0) {
		struct u132_addr *addr = &u132->addr[addrs];
		addr->address = 0;
	}
	while (udevs-- > 0) {
		struct u132_udev *udev = &u132->udev[udevs];
		int i = ARRAY_SIZE(udev->endp_number_in);
		int o = ARRAY_SIZE(udev->endp_number_out);
		udev->usb_device = NULL;
		udev->udev_number = 0;
		udev->portnumber = 0;
		while (i-- > 0)
			udev->endp_number_in[i] = 0;
		while (o-- > 0)
			udev->endp_number_out[o] = 0;
	}
	while (endps-- > 0)
		u132->endp[endps] = NULL;
	mutex_unlock(&u132->sw_lock);
}
static int __devinit u132_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	int retval;
	u32 control;
	u32 rh_a = -1;
	u32 num_ports;
	if (u132_exiting > 0)
		return -ENODEV;
	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, control, &control);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
	if (retval)
		return retval;
	num_ports = rh_a & RH_A_NDP;	/* refuse to confuse usbcore */
	if (pdev->dev.dma_mask)
		return -EINVAL;
	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
			);
		ftdi_elan_gone_away(pdev);
		return -ENOMEM;
	} else {
		struct u132 *u132 = hcd_to_u132(hcd);
		hcd->rsrc_start = 0;
		mutex_lock(&u132_module_lock);
		list_add_tail(&u132->u132_list, &u132_static_list);
		u132->sequence_num = ++u132_instances;
		mutex_unlock(&u132_module_lock);
		u132_u132_init_kref(u132);
		u132_initialise(u132, pdev);
		hcd->product_desc = "ELAN U132 Host Controller";
		retval = usb_add_hcd(hcd, 0, 0);
		if (retval != 0) {
			dev_err(&u132->platform_dev->dev, "init error %d\n",
				retval);
			u132_u132_put_kref(u132);
			return retval;
		} else {
			u132_monitor_queue_work(u132, 100);
			return 0;
		}
	}
}
/* for this device there's no useful distinction between the controller
 * and its root hub, except that the root hub only gets direct PM calls
 * when CONFIG_USB_SUSPEND is enabled.
 */
#ifdef CONFIG_PM
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0, ports;
		switch (state.event) {
		case PM_EVENT_FREEZE:
			retval = u132_bus_suspend(hcd);
			break;
		case PM_EVENT_SUSPEND:
		case PM_EVENT_HIBERNATE:
			ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 0);
			}
			break;
		}
		return retval;
	}
}

static int u132_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval;
		if (!u132->port[0].power) {
			int ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 1);
			}
			retval = 0;
		} else {
			retval = u132_bus_resume(hcd);
		}
		return retval;
	}
}

#else
#define u132_suspend NULL
#define u132_resume NULL
#endif
/*
 * this driver is loaded explicitly by ftdi_u132
 *
 * the platform_driver struct is static because it is per type of module
 */
static struct platform_driver u132_platform_driver = {
	.probe = u132_probe,
	.remove = __devexit_p(u132_remove),
	.suspend = u132_suspend,
	.resume = u132_resume,
	.driver = {
		   .name = (char *)hcd_name,
		   .owner = THIS_MODULE,
		   },
};
static int __init u132_hcd_init(void)
{
	int retval;
	INIT_LIST_HEAD(&u132_static_list);
	u132_instances = 0;
	u132_exiting = 0;
	mutex_init(&u132_module_lock);
	if (usb_disabled())
		return -ENODEV;
	printk(KERN_INFO "driver %s\n", hcd_name);
	workqueue = create_singlethread_workqueue("u132");
	if (!workqueue)
		return -ENOMEM;
	retval = platform_driver_register(&u132_platform_driver);
	if (retval)
		destroy_workqueue(workqueue);
	return retval;
}

module_init(u132_hcd_init);

static void __exit u132_hcd_exit(void)
{
	struct u132 *u132;
	struct u132 *temp;
	mutex_lock(&u132_module_lock);
	u132_exiting += 1;
	mutex_unlock(&u132_module_lock);
	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
		platform_device_unregister(u132->platform_dev);
	}
	platform_driver_unregister(&u132_platform_driver);
	printk(KERN_INFO "u132-hcd driver deregistered\n");
	wait_event(u132_hcd_wait, u132_instances == 0);
	flush_workqueue(workqueue);
	destroy_workqueue(workqueue);
}

module_exit(u132_hcd_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:u132_hcd");