/*
 * Host Controller Driver for the Elan Digital Systems U132 adapter
 *
 * Copyright(C) 2006 Elan Digital Systems Limited
 * http://www.elandigitalsystems.com
 *
 * Author and Maintainer - Tony Olech - Elan Digital Systems
 * tony.olech@elandigitalsystems.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver was written by Tony Olech (tony.olech@elandigitalsystems.com)
 * based on various USB host drivers in the 2.6.15 linux kernel
 * with constant reference to the 3rd Edition of Linux Device Drivers
 * published by O'Reilly
 *
 * The U132 adapter is a USB to CardBus adapter specifically designed
 * for PC cards that contain an OHCI host controller. A typical PC card
 * is the Orange Mobile 3G Option GlobeTrotter Fusion card.
 *
 * The U132 adapter will *NOT* work with PC cards that do not contain
 * an OHCI controller. A simple way to test whether a PC card has an
 * OHCI controller as an interface is to insert the PC card directly
 * into a laptop (or desktop) with a CardBus slot: if "lspci" shows
 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller,
 * then there is a good chance that the U132 adapter will support the
 * PC card. (You also need the specific client driver for the PC card.)
 *
 * Please inform the Author and Maintainer about any PC cards that
 * contain an OHCI Host Controller and work when directly connected to
 * an embedded CardBus slot but do not work when they are connected
 * via an ELAN U132 adapter.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
 * If you're going to try stuff like this, you need to split
 * out shareable stuff (register declarations?) into its own
 * file, maybe name <linux/usb/ohci.h>
 */
#include "ohci.h"
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
        OHCI_INTR_WDH)
MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
MODULE_DESCRIPTION("U132 USB Host Controller Driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent */
static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
        "t setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
 * u132_module_lock exists to protect access to global variables
 */
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
static struct list_head u132_static_list;
/*
 * end of the global variables protected by u132_module_lock
 */
static struct workqueue_struct *workqueue;
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
static const char *cc_to_text[16] = {
        struct usb_device *usb_device;
        u8 endp_number_in[16];
        u8 endp_number_out[16];
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
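/*
 * Illustrative note, not part of the original source: ENDP_QUEUE_SIZE is a
 * power of two, so each endpoint's urb_list can serve as a circular buffer
 * whose indices wrap implicitly, as in the pattern used throughout this
 * driver:
 *
 *      endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
 *      urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next];
 *
 * queue_last and queue_next are free-running counters; masking keeps the
 * effective index within the ENDP_QUEUE_SIZE slots.
 */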
        struct list_head urb_more;
        struct list_head endp_ring;
        struct u132_ring *ring;
        unsigned toggle_bits:2;
        unsigned dequeueing:1;
        unsigned edset_flush:1;
        unsigned spare_bits:14;
        unsigned long jiffies;
        struct usb_host_endpoint *hep;
        struct u132_spin queue_lock;
        struct urb *urb_list[ENDP_QUEUE_SIZE];
        struct list_head urb_more;
        struct delayed_work scheduler;
        struct u132_endp *curr_endp;
        struct delayed_work scheduler;
        struct list_head u132_list;
        struct mutex sw_lock;
        struct mutex scheduler_lock;
        struct u132_platform_data *board;
        struct platform_device *platform_dev;
        struct u132_ring ring[MAX_U132_RINGS];
        u32 hc_roothub_status;
        u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
        unsigned long next_statechange;
        struct delayed_work monitor;
        struct u132_addr addr[MAX_U132_ADDRS];
        struct u132_udev udev[MAX_U132_UDEVS];
        struct u132_port port[MAX_U132_PORTS];
        struct u132_endp *endp[MAX_U132_ENDPS];
/*
 * these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
        offsetof(struct ohci_regs, member), 0, data);
#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
        offsetof(struct ohci_regs, member), 0, data);
#define u132_read_pcimem(u132, member, data) \
        usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
        ohci_regs, member), 0, data);
#define u132_write_pcimem(u132, member, data) \
        usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
        ohci_regs, member), 0, data);
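/*
 * Illustrative sketch, not part of the original driver: the macros above
 * let callers name an OHCI register by its struct ohci_regs member and
 * have the access forwarded over the FTDI/ELAN transport. A typical use,
 * mirroring read_roothub_info() later in this file, looks like:
 *
 *      u32 revision;
 *      int retval;
 *
 *      retval = u132_read_pcimem(u132, revision, &revision);
 *      if (retval)
 *              return retval;
 *
 * Note that the macro bodies already end with a ';'.
 */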
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
        u8 udev_number = udev->udev_number;
        return container_of(udev, struct u132, udev[udev_number]);
}

static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
        return (struct u132 *)(hcd->hcd_priv);
}

static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
        return container_of((void *)u132, struct usb_hcd, hcd_priv);
}

static inline void u132_disable(struct u132 *u132)
{
        u132_to_hcd(u132)->state = HC_STATE_HALT;
}

#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
        USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
        USB_PORT_STAT_C_RESET) << 16)
static void u132_hcd_delete(struct kref *kref)
{
        struct u132 *u132 = kref_to_u132(kref);
        struct platform_device *pdev = u132->platform_dev;
        struct usb_hcd *hcd = u132_to_hcd(u132);
        mutex_lock(&u132_module_lock);
        list_del_init(&u132->u132_list);
        mutex_unlock(&u132_module_lock);
        dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
                "2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev);
}

static inline void u132_u132_put_kref(struct u132 *u132)
{
        kref_put(&u132->kref, u132_hcd_delete);
}

static inline void u132_u132_init_kref(struct u132 *u132)
{
        kref_init(&u132->kref);
}

static void u132_udev_delete(struct kref *kref)
{
        struct u132_udev *udev = kref_to_u132_udev(kref);
        udev->udev_number = 0;
        udev->usb_device = NULL;
        udev->enumeration = 0;
}

static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
{
        kref_put(&udev->kref, u132_udev_delete);
}

static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
{
        kref_get(&udev->kref);
}

static inline void u132_udev_init_kref(struct u132 *u132,
        struct u132_udev *udev)
{
        kref_init(&udev->kref);
}

static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
{
        kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
        unsigned int delta)
{
        if (delta > 0) {
                if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                        return;
        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                return;
        kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
        unsigned int delta)
{
        kref_get(&u132->kref);
        u132_ring_requeue_work(u132, ring, delta);
}

static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
        if (cancel_delayed_work(&ring->scheduler))
                kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_endp_delete(struct kref *kref)
{
        struct u132_endp *endp = kref_to_u132_endp(kref);
        struct u132 *u132 = endp->u132;
        u8 usb_addr = endp->usb_addr;
        u8 usb_endp = endp->usb_endp;
        u8 address = u132->addr[usb_addr].address;
        struct u132_udev *udev = &u132->udev[address];
        u8 endp_number = endp->endp_number;
        struct usb_host_endpoint *hep = endp->hep;
        struct u132_ring *ring = endp->ring;
        struct list_head *head = &endp->endp_ring;
        if (endp == ring->curr_endp) {
                if (list_empty(head)) {
                        ring->curr_endp = NULL;
                        list_del(head);
                } else {
                        struct u132_endp *next_endp = list_entry(head->next,
                                struct u132_endp, endp_ring);
                        ring->curr_endp = next_endp;
                        list_del(head);
                }
        } else
                list_del(head);
        if (endp->input) {
                udev->endp_number_in[usb_endp] = 0;
                u132_udev_put_kref(u132, udev);
        }
        if (endp->output) {
                udev->endp_number_out[usb_endp] = 0;
                u132_udev_put_kref(u132, udev);
        }
        u132->endp[endp_number - 1] = NULL;
        hep->hcpriv = NULL;
        kfree(endp);
        u132_u132_put_kref(u132);
}

static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
{
        kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
{
        kref_get(&endp->kref);
}

static inline void u132_endp_init_kref(struct u132 *u132,
        struct u132_endp *endp)
{
        kref_init(&endp->kref);
        kref_get(&u132->kref);
}

static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
        unsigned int delta)
{
        if (queue_delayed_work(workqueue, &endp->scheduler, delta))
                kref_get(&endp->kref);
}

static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
{
        if (cancel_delayed_work(&endp->scheduler))
                kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_monitor_put_kref(struct u132 *u132)
{
        kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
        if (queue_delayed_work(workqueue, &u132->monitor, delta))
                kref_get(&u132->kref);
}

static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
        if (!queue_delayed_work(workqueue, &u132->monitor, delta))
                kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_cancel_work(struct u132 *u132)
{
        if (cancel_delayed_work(&u132->monitor))
                kref_put(&u132->kref, u132_hcd_delete);
}
425 static int read_roothub_info(struct u132
*u132
)
429 retval
= u132_read_pcimem(u132
, revision
, &revision
);
431 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device co"
434 } else if ((revision
& 0xFF) == 0x10) {
435 } else if ((revision
& 0xFF) == 0x11) {
437 dev_err(&u132
->platform_dev
->dev
, "device revision is not valid"
438 " %08X\n", revision
);
441 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
443 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device co"
447 retval
= u132_read_pcimem(u132
, roothub
.status
,
448 &u132
->hc_roothub_status
);
450 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device re"
451 "g roothub.status\n", retval
);
454 retval
= u132_read_pcimem(u132
, roothub
.a
, &u132
->hc_roothub_a
);
456 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device re"
457 "g roothub.a\n", retval
);
461 int I
= u132
->num_ports
;
464 retval
= u132_read_pcimem(u132
, roothub
.portstatus
[i
],
465 &u132
->hc_roothub_portstatus
[i
]);
467 dev_err(&u132
->platform_dev
->dev
, "error %d acc"
468 "essing device roothub.portstatus[%d]\n"
478 static void u132_hcd_monitor_work(struct work_struct
*work
)
480 struct u132
*u132
= container_of(work
, struct u132
, monitor
.work
);
481 if (u132
->going
> 1) {
482 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
484 u132_monitor_put_kref(u132
);
486 } else if (u132
->going
> 0) {
487 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
488 u132_monitor_put_kref(u132
);
492 mutex_lock(&u132
->sw_lock
);
493 retval
= read_roothub_info(u132
);
495 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
498 mutex_unlock(&u132
->sw_lock
);
500 ftdi_elan_gone_away(u132
->platform_dev
);
501 u132_monitor_put_kref(u132
);
504 u132_monitor_requeue_work(u132
, 500);
505 mutex_unlock(&u132
->sw_lock
);
511 static void u132_hcd_giveback_urb(struct u132
*u132
, struct u132_endp
*endp
,
512 struct urb
*urb
, int status
)
514 struct u132_ring
*ring
;
516 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
517 urb
->error_count
= 0;
518 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
519 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
520 endp
->queue_next
+= 1;
521 if (ENDP_QUEUE_SIZE
> --endp
->queue_size
) {
523 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
525 struct list_head
*next
= endp
->urb_more
.next
;
526 struct u132_urbq
*urbq
= list_entry(next
, struct u132_urbq
,
529 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
532 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
535 mutex_lock(&u132
->scheduler_lock
);
538 u132_ring_cancel_work(u132
, ring
);
539 u132_ring_queue_work(u132
, ring
, 0);
540 mutex_unlock(&u132
->scheduler_lock
);
541 u132_endp_put_kref(u132
, endp
);
542 usb_hcd_giveback_urb(hcd
, urb
, status
);
545 static void u132_hcd_forget_urb(struct u132
*u132
, struct u132_endp
*endp
,
546 struct urb
*urb
, int status
)
548 u132_endp_put_kref(u132
, endp
);
551 static void u132_hcd_abandon_urb(struct u132
*u132
, struct u132_endp
*endp
,
552 struct urb
*urb
, int status
)
555 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
556 urb
->error_count
= 0;
557 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
558 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
559 endp
->queue_next
+= 1;
560 if (ENDP_QUEUE_SIZE
> --endp
->queue_size
) {
562 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
564 struct list_head
*next
= endp
->urb_more
.next
;
565 struct u132_urbq
*urbq
= list_entry(next
, struct u132_urbq
,
568 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
571 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
574 usb_hcd_giveback_urb(hcd
, urb
, status
);
577 static inline int edset_input(struct u132
*u132
, struct u132_ring
*ring
,
578 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
579 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
580 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
581 int halted
, int skipped
, int actual
, int non_null
))
583 return usb_ftdi_elan_edset_input(u132
->platform_dev
, ring
->number
, endp
,
584 urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
587 static inline int edset_setup(struct u132
*u132
, struct u132_ring
*ring
,
588 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
589 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
590 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
591 int halted
, int skipped
, int actual
, int non_null
))
593 return usb_ftdi_elan_edset_setup(u132
->platform_dev
, ring
->number
, endp
,
594 urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
597 static inline int edset_single(struct u132
*u132
, struct u132_ring
*ring
,
598 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
599 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
600 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
601 int halted
, int skipped
, int actual
, int non_null
))
603 return usb_ftdi_elan_edset_single(u132
->platform_dev
, ring
->number
,
604 endp
, urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
607 static inline int edset_output(struct u132
*u132
, struct u132_ring
*ring
,
608 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
609 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
610 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
611 int halted
, int skipped
, int actual
, int non_null
))
613 return usb_ftdi_elan_edset_output(u132
->platform_dev
, ring
->number
,
614 endp
, urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
619 * must not LOCK sw_lock
622 static void u132_hcd_interrupt_recv(void *data
, struct urb
*urb
, u8
*buf
,
623 int len
, int toggle_bits
, int error_count
, int condition_code
,
624 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
626 struct u132_endp
*endp
= data
;
627 struct u132
*u132
= endp
->u132
;
628 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
629 struct u132_udev
*udev
= &u132
->udev
[address
];
630 mutex_lock(&u132
->scheduler_lock
);
631 if (u132
->going
> 1) {
632 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
634 mutex_unlock(&u132
->scheduler_lock
);
635 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
637 } else if (endp
->dequeueing
) {
638 endp
->dequeueing
= 0;
639 mutex_unlock(&u132
->scheduler_lock
);
640 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
642 } else if (u132
->going
> 0) {
643 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
645 mutex_unlock(&u132
->scheduler_lock
);
646 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
648 } else if (!urb
->unlinked
) {
649 struct u132_ring
*ring
= endp
->ring
;
650 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
657 urb
->actual_length
+= len
;
658 if ((condition_code
== TD_CC_NOERROR
) &&
659 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
660 endp
->toggle_bits
= toggle_bits
;
661 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
663 if (urb
->actual_length
> 0) {
665 mutex_unlock(&u132
->scheduler_lock
);
666 retval
= edset_single(u132
, ring
, endp
, urb
,
667 address
, endp
->toggle_bits
,
668 u132_hcd_interrupt_recv
);
670 u132_hcd_giveback_urb(u132
, endp
, urb
,
675 endp
->jiffies
= jiffies
+
676 msecs_to_jiffies(urb
->interval
);
677 u132_ring_cancel_work(u132
, ring
);
678 u132_ring_queue_work(u132
, ring
, 0);
679 mutex_unlock(&u132
->scheduler_lock
);
680 u132_endp_put_kref(u132
, endp
);
683 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
684 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
685 endp
->toggle_bits
= toggle_bits
;
686 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
688 mutex_unlock(&u132
->scheduler_lock
);
689 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
692 if (condition_code
== TD_CC_NOERROR
) {
693 endp
->toggle_bits
= toggle_bits
;
694 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
696 } else if (condition_code
== TD_CC_STALL
) {
697 endp
->toggle_bits
= 0x2;
698 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
701 endp
->toggle_bits
= 0x2;
702 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
704 dev_err(&u132
->platform_dev
->dev
, "urb=%p givin"
705 "g back INTERRUPT %s\n", urb
,
706 cc_to_text
[condition_code
]);
708 mutex_unlock(&u132
->scheduler_lock
);
709 u132_hcd_giveback_urb(u132
, endp
, urb
,
710 cc_to_error
[condition_code
]);
714 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
715 "unlinked=%d\n", urb
, urb
->unlinked
);
716 mutex_unlock(&u132
->scheduler_lock
);
717 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
722 static void u132_hcd_bulk_output_sent(void *data
, struct urb
*urb
, u8
*buf
,
723 int len
, int toggle_bits
, int error_count
, int condition_code
,
724 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
726 struct u132_endp
*endp
= data
;
727 struct u132
*u132
= endp
->u132
;
728 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
729 mutex_lock(&u132
->scheduler_lock
);
730 if (u132
->going
> 1) {
731 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
733 mutex_unlock(&u132
->scheduler_lock
);
734 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
736 } else if (endp
->dequeueing
) {
737 endp
->dequeueing
= 0;
738 mutex_unlock(&u132
->scheduler_lock
);
739 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
741 } else if (u132
->going
> 0) {
742 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
744 mutex_unlock(&u132
->scheduler_lock
);
745 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
747 } else if (!urb
->unlinked
) {
748 struct u132_ring
*ring
= endp
->ring
;
749 urb
->actual_length
+= len
;
750 endp
->toggle_bits
= toggle_bits
;
751 if (urb
->transfer_buffer_length
> urb
->actual_length
) {
753 mutex_unlock(&u132
->scheduler_lock
);
754 retval
= edset_output(u132
, ring
, endp
, urb
, address
,
755 endp
->toggle_bits
, u132_hcd_bulk_output_sent
);
757 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
760 mutex_unlock(&u132
->scheduler_lock
);
761 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
765 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
766 "unlinked=%d\n", urb
, urb
->unlinked
);
767 mutex_unlock(&u132
->scheduler_lock
);
768 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
773 static void u132_hcd_bulk_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
774 int len
, int toggle_bits
, int error_count
, int condition_code
,
775 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
777 struct u132_endp
*endp
= data
;
778 struct u132
*u132
= endp
->u132
;
779 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
780 struct u132_udev
*udev
= &u132
->udev
[address
];
781 mutex_lock(&u132
->scheduler_lock
);
782 if (u132
->going
> 1) {
783 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
785 mutex_unlock(&u132
->scheduler_lock
);
786 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
788 } else if (endp
->dequeueing
) {
789 endp
->dequeueing
= 0;
790 mutex_unlock(&u132
->scheduler_lock
);
791 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
793 } else if (u132
->going
> 0) {
794 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
796 mutex_unlock(&u132
->scheduler_lock
);
797 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
799 } else if (!urb
->unlinked
) {
800 struct u132_ring
*ring
= endp
->ring
;
801 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
808 urb
->actual_length
+= len
;
809 if ((condition_code
== TD_CC_NOERROR
) &&
810 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
812 endp
->toggle_bits
= toggle_bits
;
813 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
815 mutex_unlock(&u132
->scheduler_lock
);
816 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
817 ring
->number
, endp
, urb
, address
,
818 endp
->usb_endp
, endp
->toggle_bits
,
819 u132_hcd_bulk_input_recv
);
821 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
823 } else if (condition_code
== TD_CC_NOERROR
) {
824 endp
->toggle_bits
= toggle_bits
;
825 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
827 mutex_unlock(&u132
->scheduler_lock
);
828 u132_hcd_giveback_urb(u132
, endp
, urb
,
829 cc_to_error
[condition_code
]);
831 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
832 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
833 endp
->toggle_bits
= toggle_bits
;
834 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
836 mutex_unlock(&u132
->scheduler_lock
);
837 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
839 } else if (condition_code
== TD_DATAUNDERRUN
) {
840 endp
->toggle_bits
= toggle_bits
;
841 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
843 dev_warn(&u132
->platform_dev
->dev
, "urb=%p(SHORT NOT OK"
844 ") giving back BULK IN %s\n", urb
,
845 cc_to_text
[condition_code
]);
846 mutex_unlock(&u132
->scheduler_lock
);
847 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
849 } else if (condition_code
== TD_CC_STALL
) {
850 endp
->toggle_bits
= 0x2;
851 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
852 mutex_unlock(&u132
->scheduler_lock
);
853 u132_hcd_giveback_urb(u132
, endp
, urb
,
854 cc_to_error
[condition_code
]);
857 endp
->toggle_bits
= 0x2;
858 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
859 dev_err(&u132
->platform_dev
->dev
, "urb=%p giving back B"
860 "ULK IN code=%d %s\n", urb
, condition_code
,
861 cc_to_text
[condition_code
]);
862 mutex_unlock(&u132
->scheduler_lock
);
863 u132_hcd_giveback_urb(u132
, endp
, urb
,
864 cc_to_error
[condition_code
]);
868 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
869 "unlinked=%d\n", urb
, urb
->unlinked
);
870 mutex_unlock(&u132
->scheduler_lock
);
871 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
876 static void u132_hcd_configure_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
877 int len
, int toggle_bits
, int error_count
, int condition_code
,
878 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
880 struct u132_endp
*endp
= data
;
881 struct u132
*u132
= endp
->u132
;
882 mutex_lock(&u132
->scheduler_lock
);
883 if (u132
->going
> 1) {
884 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
886 mutex_unlock(&u132
->scheduler_lock
);
887 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
889 } else if (endp
->dequeueing
) {
890 endp
->dequeueing
= 0;
891 mutex_unlock(&u132
->scheduler_lock
);
892 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
894 } else if (u132
->going
> 0) {
895 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
897 mutex_unlock(&u132
->scheduler_lock
);
898 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
900 } else if (!urb
->unlinked
) {
901 mutex_unlock(&u132
->scheduler_lock
);
902 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
905 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
906 "unlinked=%d\n", urb
, urb
->unlinked
);
907 mutex_unlock(&u132
->scheduler_lock
);
908 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
913 static void u132_hcd_configure_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
914 int len
, int toggle_bits
, int error_count
, int condition_code
,
915 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
917 struct u132_endp
*endp
= data
;
918 struct u132
*u132
= endp
->u132
;
919 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
920 mutex_lock(&u132
->scheduler_lock
);
921 if (u132
->going
> 1) {
922 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
924 mutex_unlock(&u132
->scheduler_lock
);
925 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
927 } else if (endp
->dequeueing
) {
928 endp
->dequeueing
= 0;
929 mutex_unlock(&u132
->scheduler_lock
);
930 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
932 } else if (u132
->going
> 0) {
933 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
935 mutex_unlock(&u132
->scheduler_lock
);
936 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
938 } else if (!urb
->unlinked
) {
939 struct u132_ring
*ring
= endp
->ring
;
940 u8
*u
= urb
->transfer_buffer
;
947 urb
->actual_length
= len
;
948 if ((condition_code
== TD_CC_NOERROR
) || ((condition_code
==
949 TD_DATAUNDERRUN
) && ((urb
->transfer_flags
&
950 URB_SHORT_NOT_OK
) == 0))) {
952 mutex_unlock(&u132
->scheduler_lock
);
953 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
954 ring
->number
, endp
, urb
, address
,
956 u132_hcd_configure_empty_sent
);
958 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
960 } else if (condition_code
== TD_CC_STALL
) {
961 mutex_unlock(&u132
->scheduler_lock
);
962 dev_warn(&u132
->platform_dev
->dev
, "giving back SETUP I"
963 "NPUT STALL urb %p\n", urb
);
964 u132_hcd_giveback_urb(u132
, endp
, urb
,
965 cc_to_error
[condition_code
]);
968 mutex_unlock(&u132
->scheduler_lock
);
969 dev_err(&u132
->platform_dev
->dev
, "giving back SETUP IN"
970 "PUT %s urb %p\n", cc_to_text
[condition_code
],
972 u132_hcd_giveback_urb(u132
, endp
, urb
,
973 cc_to_error
[condition_code
]);
977 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
978 "unlinked=%d\n", urb
, urb
->unlinked
);
979 mutex_unlock(&u132
->scheduler_lock
);
980 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
985 static void u132_hcd_configure_empty_recv(void *data
, struct urb
*urb
, u8
*buf
,
986 int len
, int toggle_bits
, int error_count
, int condition_code
,
987 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
989 struct u132_endp
*endp
= data
;
990 struct u132
*u132
= endp
->u132
;
991 mutex_lock(&u132
->scheduler_lock
);
992 if (u132
->going
> 1) {
993 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
995 mutex_unlock(&u132
->scheduler_lock
);
996 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
998 } else if (endp
->dequeueing
) {
999 endp
->dequeueing
= 0;
1000 mutex_unlock(&u132
->scheduler_lock
);
1001 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1003 } else if (u132
->going
> 0) {
1004 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1006 mutex_unlock(&u132
->scheduler_lock
);
1007 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1009 } else if (!urb
->unlinked
) {
1010 mutex_unlock(&u132
->scheduler_lock
);
1011 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1014 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1015 "unlinked=%d\n", urb
, urb
->unlinked
);
1016 mutex_unlock(&u132
->scheduler_lock
);
1017 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1022 static void u132_hcd_configure_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1023 int len
, int toggle_bits
, int error_count
, int condition_code
,
1024 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1026 struct u132_endp
*endp
= data
;
1027 struct u132
*u132
= endp
->u132
;
1028 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1029 mutex_lock(&u132
->scheduler_lock
);
1030 if (u132
->going
> 1) {
1031 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1033 mutex_unlock(&u132
->scheduler_lock
);
1034 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1036 } else if (endp
->dequeueing
) {
1037 endp
->dequeueing
= 0;
1038 mutex_unlock(&u132
->scheduler_lock
);
1039 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1041 } else if (u132
->going
> 0) {
1042 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1044 mutex_unlock(&u132
->scheduler_lock
);
1045 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1047 } else if (!urb
->unlinked
) {
1048 if (usb_pipein(urb
->pipe
)) {
1050 struct u132_ring
*ring
= endp
->ring
;
1051 mutex_unlock(&u132
->scheduler_lock
);
1052 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1053 ring
->number
, endp
, urb
, address
,
1055 u132_hcd_configure_input_recv
);
1057 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1061 struct u132_ring
*ring
= endp
->ring
;
1062 mutex_unlock(&u132
->scheduler_lock
);
1063 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1064 ring
->number
, endp
, urb
, address
,
1066 u132_hcd_configure_empty_recv
);
1068 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1072 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1073 "unlinked=%d\n", urb
, urb
->unlinked
);
1074 mutex_unlock(&u132
->scheduler_lock
);
1075 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1080 static void u132_hcd_enumeration_empty_recv(void *data
, struct urb
*urb
,
1081 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1082 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1084 struct u132_endp
*endp
= data
;
1085 struct u132
*u132
= endp
->u132
;
1086 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1087 struct u132_udev
*udev
= &u132
->udev
[address
];
1088 mutex_lock(&u132
->scheduler_lock
);
1089 if (u132
->going
> 1) {
1090 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1092 mutex_unlock(&u132
->scheduler_lock
);
1093 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1095 } else if (endp
->dequeueing
) {
1096 endp
->dequeueing
= 0;
1097 mutex_unlock(&u132
->scheduler_lock
);
1098 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1100 } else if (u132
->going
> 0) {
1101 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1103 mutex_unlock(&u132
->scheduler_lock
);
1104 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1106 } else if (!urb
->unlinked
) {
1107 u132
->addr
[0].address
= 0;
1108 endp
->usb_addr
= udev
->usb_addr
;
1109 mutex_unlock(&u132
->scheduler_lock
);
1110 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1113 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1114 "unlinked=%d\n", urb
, urb
->unlinked
);
1115 mutex_unlock(&u132
->scheduler_lock
);
1116 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1121 static void u132_hcd_enumeration_address_sent(void *data
, struct urb
*urb
,
1122 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1123 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1125 struct u132_endp
*endp
= data
;
1126 struct u132
*u132
= endp
->u132
;
1127 mutex_lock(&u132
->scheduler_lock
);
1128 if (u132
->going
> 1) {
1129 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1131 mutex_unlock(&u132
->scheduler_lock
);
1132 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1134 } else if (endp
->dequeueing
) {
1135 endp
->dequeueing
= 0;
1136 mutex_unlock(&u132
->scheduler_lock
);
1137 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1139 } else if (u132
->going
> 0) {
1140 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1142 mutex_unlock(&u132
->scheduler_lock
);
1143 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1145 } else if (!urb
->unlinked
) {
1147 struct u132_ring
*ring
= endp
->ring
;
1148 mutex_unlock(&u132
->scheduler_lock
);
1149 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1150 ring
->number
, endp
, urb
, 0, endp
->usb_endp
, 0,
1151 u132_hcd_enumeration_empty_recv
);
1153 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1156 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1157 "unlinked=%d\n", urb
, urb
->unlinked
);
1158 mutex_unlock(&u132
->scheduler_lock
);
1159 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1164 static void u132_hcd_initial_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
1165 int len
, int toggle_bits
, int error_count
, int condition_code
,
1166 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1168 struct u132_endp
*endp
= data
;
1169 struct u132
*u132
= endp
->u132
;
1170 mutex_lock(&u132
->scheduler_lock
);
1171 if (u132
->going
> 1) {
1172 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1174 mutex_unlock(&u132
->scheduler_lock
);
1175 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1177 } else if (endp
->dequeueing
) {
1178 endp
->dequeueing
= 0;
1179 mutex_unlock(&u132
->scheduler_lock
);
1180 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1182 } else if (u132
->going
> 0) {
1183 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1185 mutex_unlock(&u132
->scheduler_lock
);
1186 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1188 } else if (!urb
->unlinked
) {
1189 mutex_unlock(&u132
->scheduler_lock
);
1190 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1193 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1194 "unlinked=%d\n", urb
, urb
->unlinked
);
1195 mutex_unlock(&u132
->scheduler_lock
);
1196 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1201 static void u132_hcd_initial_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
1202 int len
, int toggle_bits
, int error_count
, int condition_code
,
1203 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1205 struct u132_endp
*endp
= data
;
1206 struct u132
*u132
= endp
->u132
;
1207 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1208 mutex_lock(&u132
->scheduler_lock
);
1209 if (u132
->going
> 1) {
1210 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1212 mutex_unlock(&u132
->scheduler_lock
);
1213 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1215 } else if (endp
->dequeueing
) {
1216 endp
->dequeueing
= 0;
1217 mutex_unlock(&u132
->scheduler_lock
);
1218 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1220 } else if (u132
->going
> 0) {
1221 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1223 mutex_unlock(&u132
->scheduler_lock
);
1224 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1226 } else if (!urb
->unlinked
) {
1228 struct u132_ring
*ring
= endp
->ring
;
1229 u8
*u
= urb
->transfer_buffer
;
1236 urb
->actual_length
= len
;
1237 mutex_unlock(&u132
->scheduler_lock
);
1238 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
1239 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0x3,
1240 u132_hcd_initial_empty_sent
);
1242 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1245 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1246 "unlinked=%d\n", urb
, urb
->unlinked
);
1247 mutex_unlock(&u132
->scheduler_lock
);
1248 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1253 static void u132_hcd_initial_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1254 int len
, int toggle_bits
, int error_count
, int condition_code
,
1255 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1257 struct u132_endp
*endp
= data
;
1258 struct u132
*u132
= endp
->u132
;
1259 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1260 mutex_lock(&u132
->scheduler_lock
);
1261 if (u132
->going
> 1) {
1262 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1264 mutex_unlock(&u132
->scheduler_lock
);
1265 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1267 } else if (endp
->dequeueing
) {
1268 endp
->dequeueing
= 0;
1269 mutex_unlock(&u132
->scheduler_lock
);
1270 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1272 } else if (u132
->going
> 0) {
1273 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1275 mutex_unlock(&u132
->scheduler_lock
);
1276 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1278 } else if (!urb
->unlinked
) {
1280 struct u132_ring
*ring
= endp
->ring
;
1281 mutex_unlock(&u132
->scheduler_lock
);
1282 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1283 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0,
1284 u132_hcd_initial_input_recv
);
1286 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1289 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1290 "unlinked=%d\n", urb
, urb
->unlinked
);
1291 mutex_unlock(&u132
->scheduler_lock
);
1292 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1298 * this work function is only executed from the work queue
1301 static void u132_hcd_ring_work_scheduler(struct work_struct
*work
)
1303 struct u132_ring
*ring
=
1304 container_of(work
, struct u132_ring
, scheduler
.work
);
1305 struct u132
*u132
= ring
->u132
;
1306 mutex_lock(&u132
->scheduler_lock
);
1308 mutex_unlock(&u132
->scheduler_lock
);
1309 u132_ring_put_kref(u132
, ring
);
1311 } else if (ring
->curr_endp
) {
1312 struct u132_endp
*last_endp
= ring
->curr_endp
;
1313 struct list_head
*scan
;
1314 struct list_head
*head
= &last_endp
->endp_ring
;
1315 unsigned long wakeup
= 0;
1316 list_for_each(scan
, head
) {
1317 struct u132_endp
*endp
= list_entry(scan
,
1318 struct u132_endp
, endp_ring
);
1319 if (endp
->queue_next
== endp
->queue_last
) {
1320 } else if ((endp
->delayed
== 0)
1321 || time_after_eq(jiffies
, endp
->jiffies
)) {
1322 ring
->curr_endp
= endp
;
1323 u132_endp_cancel_work(u132
, last_endp
);
1324 u132_endp_queue_work(u132
, last_endp
, 0);
1325 mutex_unlock(&u132
->scheduler_lock
);
1326 u132_ring_put_kref(u132
, ring
);
1329 unsigned long delta
= endp
->jiffies
- jiffies
;
1334 if (last_endp
->queue_next
== last_endp
->queue_last
) {
1335 } else if ((last_endp
->delayed
== 0) || time_after_eq(jiffies
,
1336 last_endp
->jiffies
)) {
1337 u132_endp_cancel_work(u132
, last_endp
);
1338 u132_endp_queue_work(u132
, last_endp
, 0);
1339 mutex_unlock(&u132
->scheduler_lock
);
1340 u132_ring_put_kref(u132
, ring
);
1343 unsigned long delta
= last_endp
->jiffies
- jiffies
;
1348 u132_ring_requeue_work(u132
, ring
, wakeup
);
1349 mutex_unlock(&u132
->scheduler_lock
);
1352 mutex_unlock(&u132
->scheduler_lock
);
1353 u132_ring_put_kref(u132
, ring
);
1357 mutex_unlock(&u132
->scheduler_lock
);
1358 u132_ring_put_kref(u132
, ring
);
1363 static void u132_hcd_endp_work_scheduler(struct work_struct
*work
)
1365 struct u132_ring
*ring
;
1366 struct u132_endp
*endp
=
1367 container_of(work
, struct u132_endp
, scheduler
.work
);
1368 struct u132
*u132
= endp
->u132
;
1369 mutex_lock(&u132
->scheduler_lock
);
1371 if (endp
->edset_flush
) {
1372 endp
->edset_flush
= 0;
1373 if (endp
->dequeueing
)
1374 usb_ftdi_elan_edset_flush(u132
->platform_dev
,
1375 ring
->number
, endp
);
1376 mutex_unlock(&u132
->scheduler_lock
);
1377 u132_endp_put_kref(u132
, endp
);
1379 } else if (endp
->active
) {
1380 mutex_unlock(&u132
->scheduler_lock
);
1381 u132_endp_put_kref(u132
, endp
);
1383 } else if (ring
->in_use
) {
1384 mutex_unlock(&u132
->scheduler_lock
);
1385 u132_endp_put_kref(u132
, endp
);
1387 } else if (endp
->queue_next
== endp
->queue_last
) {
1388 mutex_unlock(&u132
->scheduler_lock
);
1389 u132_endp_put_kref(u132
, endp
);
1391 } else if (endp
->pipetype
== PIPE_INTERRUPT
) {
1392 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1394 mutex_unlock(&u132
->scheduler_lock
);
1395 u132_endp_put_kref(u132
, endp
);
1399 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1402 ring
->curr_endp
= endp
;
1404 mutex_unlock(&u132
->scheduler_lock
);
1405 retval
= edset_single(u132
, ring
, endp
, urb
, address
,
1406 endp
->toggle_bits
, u132_hcd_interrupt_recv
);
1408 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1411 } else if (endp
->pipetype
== PIPE_CONTROL
) {
1412 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1414 mutex_unlock(&u132
->scheduler_lock
);
1415 u132_endp_put_kref(u132
, endp
);
1417 } else if (address
== 0) {
1419 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1422 ring
->curr_endp
= endp
;
1424 mutex_unlock(&u132
->scheduler_lock
);
1425 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1426 0x2, u132_hcd_initial_setup_sent
);
1428 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1430 } else if (endp
->usb_addr
== 0) {
1432 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1435 ring
->curr_endp
= endp
;
1437 mutex_unlock(&u132
->scheduler_lock
);
1438 retval
= edset_setup(u132
, ring
, endp
, urb
, 0, 0x2,
1439 u132_hcd_enumeration_address_sent
);
1441 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1445 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1447 address
= u132
->addr
[endp
->usb_addr
].address
;
1449 ring
->curr_endp
= endp
;
1451 mutex_unlock(&u132
->scheduler_lock
);
1452 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1453 0x2, u132_hcd_configure_setup_sent
);
1455 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1460 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1462 mutex_unlock(&u132
->scheduler_lock
);
1463 u132_endp_put_kref(u132
, endp
);
1467 struct urb
*urb
= endp
->urb_list
[
1468 ENDP_QUEUE_MASK
& endp
->queue_next
];
1470 ring
->curr_endp
= endp
;
1472 mutex_unlock(&u132
->scheduler_lock
);
1473 retval
= edset_input(u132
, ring
, endp
, urb
,
1474 address
, endp
->toggle_bits
,
1475 u132_hcd_bulk_input_recv
);
1478 u132_hcd_giveback_urb(u132
, endp
, urb
,
1482 } else { /* output pipe */
1483 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1485 mutex_unlock(&u132
->scheduler_lock
);
1486 u132_endp_put_kref(u132
, endp
);
1490 struct urb
*urb
= endp
->urb_list
[
1491 ENDP_QUEUE_MASK
& endp
->queue_next
];
1493 ring
->curr_endp
= endp
;
1495 mutex_unlock(&u132
->scheduler_lock
);
1496 retval
= edset_output(u132
, ring
, endp
, urb
,
1497 address
, endp
->toggle_bits
,
1498 u132_hcd_bulk_output_sent
);
1501 u132_hcd_giveback_urb(u132
, endp
, urb
,
1510 static void port_power(struct u132
*u132
, int pn
, int is_on
)
1512 u132
->port
[pn
].power
= is_on
;
1517 static void u132_power(struct u132
*u132
, int is_on
)
1519 struct usb_hcd
*hcd
= u132_to_hcd(u132
)
1520 ; /* hub is inactive unless the port is powered */
1527 hcd
->state
= HC_STATE_HALT
;
1531 static int u132_periodic_reinit(struct u132
*u132
)
1534 u32 fi
= u132
->hc_fminterval
& 0x03fff;
1537 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1540 fit
= fminterval
& FIT
;
1541 retval
= u132_write_pcimem(u132
, fminterval
,
1542 (fit
^ FIT
) | u132
->hc_fminterval
);
1545 return u132_write_pcimem(u132
, periodicstart
,
1546 ((9 * fi
) / 10) & 0x3fff);
1549 static char *hcfs2string(int state
)
1552 case OHCI_USB_RESET
:
1554 case OHCI_USB_RESUME
:
1557 return "operational";
1558 case OHCI_USB_SUSPEND
:
1564 static int u132_init(struct u132
*u132
)
1569 u132
->next_statechange
= jiffies
;
1570 retval
= u132_write_pcimem(u132
, intrdisable
, OHCI_INTR_MIE
);
1573 retval
= u132_read_pcimem(u132
, control
, &control
);
1576 if (u132
->num_ports
== 0) {
1578 retval
= u132_read_pcimem(u132
, roothub
.a
, &rh_a
);
1581 u132
->num_ports
= rh_a
& RH_A_NDP
;
1582 retval
= read_roothub_info(u132
);
1586 if (u132
->num_ports
> MAX_U132_PORTS
)
1593 /* Start an OHCI controller, set the BUS operational
1594 * resets USB and controller
1597 static int u132_run(struct u132
*u132
)
1606 int mask
= OHCI_INTR_INIT
;
1607 int first
= u132
->hc_fminterval
== 0;
1609 int reset_timeout
= 30; /* ... allow extra time */
1613 retval
= u132_read_pcimem(u132
, fminterval
, &temp
);
1616 u132
->hc_fminterval
= temp
& 0x3fff;
1617 u132
->hc_fminterval
|= FSMP(u132
->hc_fminterval
) << 16;
1619 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
1622 dev_info(&u132
->platform_dev
->dev
, "resetting from state '%s', control "
1623 "= %08X\n", hcfs2string(u132
->hc_control
& OHCI_CTRL_HCFS
),
1625 switch (u132
->hc_control
& OHCI_CTRL_HCFS
) {
1629 case OHCI_USB_SUSPEND
:
1630 case OHCI_USB_RESUME
:
1631 u132
->hc_control
&= OHCI_CTRL_RWC
;
1632 u132
->hc_control
|= OHCI_USB_RESUME
;
1636 u132
->hc_control
&= OHCI_CTRL_RWC
;
1637 u132
->hc_control
|= OHCI_USB_RESET
;
1641 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1644 retval
= u132_read_pcimem(u132
, control
, &control
);
1648 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1651 if (!(roothub_a
& RH_A_NPS
)) {
1652 int temp
; /* power down each port */
1653 for (temp
= 0; temp
< u132
->num_ports
; temp
++) {
1654 retval
= u132_write_pcimem(u132
,
1655 roothub
.portstatus
[temp
], RH_PS_LSDA
);
1660 retval
= u132_read_pcimem(u132
, control
, &control
);
1664 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1667 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_HCR
);
1671 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1674 if (0 != (status
& OHCI_HCR
)) {
1675 if (--reset_timeout
== 0) {
1676 dev_err(&u132
->platform_dev
->dev
, "USB HC reset"
1685 if (u132
->flags
& OHCI_QUIRK_INITRESET
) {
1686 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1689 retval
= u132_read_pcimem(u132
, control
, &control
);
1693 retval
= u132_write_pcimem(u132
, ed_controlhead
, 0x00000000);
1696 retval
= u132_write_pcimem(u132
, ed_bulkhead
, 0x11000000);
1699 retval
= u132_write_pcimem(u132
, hcca
, 0x00000000);
1702 retval
= u132_periodic_reinit(u132
);
1705 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1708 retval
= u132_read_pcimem(u132
, periodicstart
, &periodicstart
);
1711 if (0 == (fminterval
& 0x3fff0000) || 0 == periodicstart
) {
1712 if (!(u132
->flags
& OHCI_QUIRK_INITRESET
)) {
1713 u132
->flags
|= OHCI_QUIRK_INITRESET
;
1716 dev_err(&u132
->platform_dev
->dev
, "init err(%08x %04x)"
1717 "\n", fminterval
, periodicstart
);
1718 } /* start controller operations */
1719 u132
->hc_control
&= OHCI_CTRL_RWC
;
1720 u132
->hc_control
|= OHCI_CONTROL_INIT
| OHCI_CTRL_BLE
| OHCI_USB_OPER
;
1721 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1724 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_BLF
);
1727 retval
= u132_read_pcimem(u132
, cmdstatus
, &cmdstatus
);
1730 retval
= u132_read_pcimem(u132
, control
, &control
);
1733 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
1734 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_DRWE
);
1737 retval
= u132_write_pcimem(u132
, intrstatus
, mask
);
1740 retval
= u132_write_pcimem(u132
, intrdisable
,
1741 OHCI_INTR_MIE
| OHCI_INTR_OC
| OHCI_INTR_RHSC
| OHCI_INTR_FNO
|
1742 OHCI_INTR_UE
| OHCI_INTR_RD
| OHCI_INTR_SF
| OHCI_INTR_WDH
|
1745 return retval
; /* handle root hub init quirks ... */
1746 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1749 roothub_a
&= ~(RH_A_PSM
| RH_A_OCPM
);
1750 if (u132
->flags
& OHCI_QUIRK_SUPERIO
) {
1751 roothub_a
|= RH_A_NOCP
;
1752 roothub_a
&= ~(RH_A_POTPGT
| RH_A_NPS
);
1753 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1756 } else if ((u132
->flags
& OHCI_QUIRK_AMD756
) || distrust_firmware
) {
1757 roothub_a
|= RH_A_NPS
;
1758 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1762 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_LPSC
);
1765 retval
= u132_write_pcimem(u132
, roothub
.b
,
1766 (roothub_a
& RH_A_NPS
) ? 0 : RH_B_PPCM
);
1769 retval
= u132_read_pcimem(u132
, control
, &control
);
1772 mdelay((roothub_a
>> 23) & 0x1fe);
1773 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
1777 static void u132_hcd_stop(struct usb_hcd
*hcd
)
1779 struct u132
*u132
= hcd_to_u132(hcd
);
1780 if (u132
->going
> 1) {
1781 dev_err(&u132
->platform_dev
->dev
, "u132 device %p(hcd=%p) has b"
1782 "een removed %d\n", u132
, hcd
, u132
->going
);
1783 } else if (u132
->going
> 0) {
1784 dev_err(&u132
->platform_dev
->dev
, "device hcd=%p is being remov"
1787 mutex_lock(&u132
->sw_lock
);
1789 u132_power(u132
, 0);
1790 mutex_unlock(&u132
->sw_lock
);
1794 static int u132_hcd_start(struct usb_hcd
*hcd
)
1796 struct u132
*u132
= hcd_to_u132(hcd
);
1797 if (u132
->going
> 1) {
1798 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1801 } else if (u132
->going
> 0) {
1802 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1804 } else if (hcd
->self
.controller
) {
1806 struct platform_device
*pdev
=
1807 to_platform_device(hcd
->self
.controller
);
1808 u16 vendor
= ((struct u132_platform_data
*)
1809 dev_get_platdata(&pdev
->dev
))->vendor
;
1810 u16 device
= ((struct u132_platform_data
*)
1811 dev_get_platdata(&pdev
->dev
))->device
;
1812 mutex_lock(&u132
->sw_lock
);
1814 if (vendor
== PCI_VENDOR_ID_AMD
&& device
== 0x740c) {
1815 u132
->flags
= OHCI_QUIRK_AMD756
;
1816 } else if (vendor
== PCI_VENDOR_ID_OPTI
&& device
== 0xc861) {
1817 dev_err(&u132
->platform_dev
->dev
, "WARNING: OPTi workar"
1818 "ounds unavailable\n");
1819 } else if (vendor
== PCI_VENDOR_ID_COMPAQ
&& device
== 0xa0f8)
1820 u132
->flags
|= OHCI_QUIRK_ZFMICRO
;
1821 retval
= u132_run(u132
);
1827 mutex_unlock(&u132
->sw_lock
);
1830 dev_err(&u132
->platform_dev
->dev
, "platform_device missing\n");
1835 static int u132_hcd_reset(struct usb_hcd
*hcd
)
1837 struct u132
*u132
= hcd_to_u132(hcd
);
1838 if (u132
->going
> 1) {
1839 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1842 } else if (u132
->going
> 0) {
1843 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1847 mutex_lock(&u132
->sw_lock
);
1848 retval
= u132_init(u132
);
1853 mutex_unlock(&u132
->sw_lock
);
1858 static int create_endpoint_and_queue_int(struct u132
*u132
,
1859 struct u132_udev
*udev
, struct urb
*urb
,
1860 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1863 struct u132_ring
*ring
;
1867 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1872 spin_lock_init(&endp
->queue_lock
.slock
);
1873 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1874 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1876 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1881 endp_number
= ++u132
->num_endpoints
;
1882 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1883 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1884 INIT_LIST_HEAD(&endp
->urb_more
);
1885 ring
= endp
->ring
= &u132
->ring
[0];
1886 if (ring
->curr_endp
) {
1887 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
1889 INIT_LIST_HEAD(&endp
->endp_ring
);
1890 ring
->curr_endp
= endp
;
1893 endp
->dequeueing
= 0;
1894 endp
->edset_flush
= 0;
1897 endp
->endp_number
= endp_number
;
1899 endp
->hep
= urb
->ep
;
1900 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1901 u132_endp_init_kref(u132
, endp
);
1902 if (usb_pipein(urb
->pipe
)) {
1903 endp
->toggle_bits
= 0x2;
1904 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
1907 udev
->endp_number_in
[usb_endp
] = endp_number
;
1908 u132_udev_get_kref(u132
, udev
);
1910 endp
->toggle_bits
= 0x2;
1911 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
1914 udev
->endp_number_out
[usb_endp
] = endp_number
;
1915 u132_udev_get_kref(u132
, udev
);
1919 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1920 endp
->udev_number
= address
;
1921 endp
->usb_addr
= usb_addr
;
1922 endp
->usb_endp
= usb_endp
;
1923 endp
->queue_size
= 1;
1924 endp
->queue_last
= 0;
1925 endp
->queue_next
= 0;
1926 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1927 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1928 u132_endp_queue_work(u132
, endp
, msecs_to_jiffies(urb
->interval
));
1932 static int queue_int_on_old_endpoint(struct u132
*u132
,
1933 struct u132_udev
*udev
, struct urb
*urb
,
1934 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
1935 u8 usb_endp
, u8 address
)
1939 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1940 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
1941 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1943 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
1946 endp
->queue_size
-= 1;
1949 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
1956 static int create_endpoint_and_queue_bulk(struct u132
*u132
,
1957 struct u132_udev
*udev
, struct urb
*urb
,
1958 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1962 struct u132_ring
*ring
;
1966 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1971 spin_lock_init(&endp
->queue_lock
.slock
);
1972 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1973 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1975 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1980 endp_number
= ++u132
->num_endpoints
;
1981 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1982 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1983 INIT_LIST_HEAD(&endp
->urb_more
);
1984 endp
->dequeueing
= 0;
1985 endp
->edset_flush
= 0;
1988 endp
->endp_number
= endp_number
;
1990 endp
->hep
= urb
->ep
;
1991 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1992 u132_endp_init_kref(u132
, endp
);
1993 if (usb_pipein(urb
->pipe
)) {
1994 endp
->toggle_bits
= 0x2;
1995 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
1999 udev
->endp_number_in
[usb_endp
] = endp_number
;
2000 u132_udev_get_kref(u132
, udev
);
2002 endp
->toggle_bits
= 0x2;
2003 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
2007 udev
->endp_number_out
[usb_endp
] = endp_number
;
2008 u132_udev_get_kref(u132
, udev
);
2010 ring
= endp
->ring
= &u132
->ring
[ring_number
- 1];
2011 if (ring
->curr_endp
) {
2012 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
2014 INIT_LIST_HEAD(&endp
->endp_ring
);
2015 ring
->curr_endp
= endp
;
2019 endp
->udev_number
= address
;
2020 endp
->usb_addr
= usb_addr
;
2021 endp
->usb_endp
= usb_endp
;
2022 endp
->queue_size
= 1;
2023 endp
->queue_last
= 0;
2024 endp
->queue_next
= 0;
2025 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2026 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2027 u132_endp_queue_work(u132
, endp
, 0);
2031 static int queue_bulk_on_old_endpoint(struct u132
*u132
, struct u132_udev
*udev
,
2033 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
2034 u8 usb_endp
, u8 address
)
2037 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2038 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2040 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
2043 endp
->queue_size
-= 1;
2046 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);

	if (!endp)
		return -ENOMEM;

	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->endp_number = endp_number;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	u132_endp_get_kref(u132, endp);
	if (usb_addr == 0) {
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	} else {		/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_get_kref(u132, udev);
		udev->enumeration = 2;
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	}
}
static int queue_control_on_old_endpoint(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp)
{
	if (usb_addr == 0) {
		if (usb_pipein(urb->pipe)) {
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		} else {	/* usb_pipeout(urb->pipe) */
			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
			int I = MAX_U132_UDEVS;
			int i = 0;
			while (--I > 0) {
				struct u132_udev *udev = &u132->udev[++i];
				if (udev->usb_device) {
					continue;
				} else {
					udev->enumeration = 1;
					u132->addr[0].address = i;
					endp->udev_number = i;
					udev->udev_number = i;
					udev->usb_addr = usb_dev->devnum;
					u132_udev_init_kref(u132, udev);
					udev->endp_number_in[usb_endp] =
						endp->endp_number;
					u132_udev_get_kref(u132, udev);
					udev->endp_number_out[usb_endp] =
						endp->endp_number;
					udev->usb_device = usb_dev;
					((u8 *) (urb->setup_packet))[2] =
						addr->address = i;
					u132_udev_get_kref(u132, udev);
					break;
				}
			}
			if (I == 0) {
				dev_err(&u132->platform_dev->dev, "run out of d"
					"evice space\n");
				return -EINVAL;
			}
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		}
	} else {		/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (udev->enumeration != 2)
			udev->enumeration = 2;
		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
				urb;
		} else {
			struct u132_urbq *urbq =
				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
			if (urbq == NULL) {
				endp->queue_size -= 1;
				return -ENOMEM;
			} else {
				list_add_tail(&urbq->urb_more, &endp->urb_more);
				urbq->urb = urb;
			}
		}
		return 0;
	}
}
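/*
 * u132_urb_enqueue() dispatches on the pipe type: interrupt and bulk URBs
 * either join an existing endpoint (urb->ep->hcpriv) or cause one to be
 * created, control URBs get the address-zero enumeration handling above,
 * and isochronous transfers are rejected because the hardware does not
 * support them.  MAX_U132_ENDPS bounds the number of endpoints.
 */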
static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
	gfp_t mem_flags)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (irqs_disabled()) {
		if (gfpflags_allow_blocking(mem_flags)) {
			printk(KERN_ERR
				"invalid context for function that might sleep\n");
			return -EINVAL;
		}
	}
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"\n");
		return -ESHUTDOWN;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		struct usb_device *usb_dev = urb->dev;
		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_int_on_old_endpoint(
						u132, udev, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp,
						msecs_to_jiffies(urb->interval))
						;
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else {	/*(endp == NULL) */
				return create_endpoint_and_queue_int(u132, udev,
					urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
			}
		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			dev_err(&u132->platform_dev->dev, "the hardware does no"
				"t support PIPE_ISOCHRONOUS\n");
			return -EINVAL;
		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_bulk_on_old_endpoint(
						u132, udev, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp, 0);
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_bulk(u132,
					udev, urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
		} else {
			struct u132_endp *endp = urb->ep->hcpriv;
			u16 urb_size = 8;
			u8 *b = urb->setup_packet;
			int i = 0;
			char data[30 * 3 + 4];
			char *d = data;
			int m = (sizeof(data) - 1) / 3;
			int l = 0;
			data[0] = 0;
			while (urb_size-- > 0) {
				if (i > m) {
				} else if (i++ < m) {
					int w = sprintf(d, " %02X", *b++);
					d += w;
					l += w;
				} else
					d += sprintf(d, " ..");
			}
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_control_on_old_endpoint(
						u132, urb, usb_dev, endp,
						usb_addr, usb_endp);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp, 0);
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_control(u132,
					urb, usb_dev, usb_addr, usb_endp,
					mem_flags);
		}
	}
}
static int dequeue_from_overflow_chain(struct u132 *u132,
	struct u132_endp *endp, struct urb *urb)
{
	struct list_head *scan;
	struct list_head *head = &endp->urb_more;
	list_for_each(scan, head) {
		struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
			urb_more);
		if (urbq->urb == urb) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			list_del(scan);
			endp->queue_size -= 1;
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, 0);
			return 0;
		} else
			continue;
	}
	dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
		"[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
		"\n", urb, endp->endp_number, endp, endp->ring->number,
		endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
		endp->usb_endp, endp->usb_addr, endp->queue_size,
		endp->queue_next, endp->queue_last);
	return -EINVAL;
}
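/*
 * Dequeueing an URB that sits at the head of the endpoint queue is handed
 * to the endpoint scheduler (dequeueing/edset_flush) or abandoned
 * directly; an URB further back is removed by compacting the ring buffer,
 * refilling the freed slot from the overflow chain when one is queued
 * there.
 */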
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	int rc;

	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return rc;
	}
	if (endp->queue_size == 0) {
		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
			endp->endp_number, endp, endp->ring->number,
			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
			endp->usb_endp, endp->usb_addr);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return -EINVAL;
	}
	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
		if (endp->active) {
			endp->dequeueing = 1;
			endp->edset_flush = 1;
			u132_endp_queue_work(u132, endp, 0);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return 0;
		} else {
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			u132_hcd_abandon_urb(u132, endp, urb, status);
			return 0;
		}
	} else {
		u16 queue_list = 0;
		u16 queue_size = endp->queue_size;
		u16 queue_scan = endp->queue_next;
		struct urb **urb_slot = NULL;
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan]) {
				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
					queue_scan];
				break;
			}
		}
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan];
			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
				queue_scan];
		}
		if (urb_slot) {
			struct usb_hcd *hcd = u132_to_hcd(u132);

			usb_hcd_unlink_urb_from_ep(hcd, urb);
			endp->queue_size -= 1;
			if (list_empty(&endp->urb_more)) {
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
			} else {
				struct list_head *next = endp->urb_more.next;
				struct u132_urbq *urbq = list_entry(next,
					struct u132_urbq, urb_more);
				list_del(next);
				*urb_slot = urbq->urb;
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				kfree(urbq);
			}
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, status);
			return 0;
		} else if (list_empty(&endp->urb_more)) {
			dev_err(&u132->platform_dev->dev, "urb=%p not found in "
				"endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
				"=%d size=%d next=%04X last=%04X\n", urb,
				endp->endp_number, endp, endp->ring->number,
				endp->input ? 'I' : ' ',
				endp->output ? 'O' : ' ', endp->usb_endp,
				endp->usb_addr, endp->queue_size,
				endp->queue_next, endp->queue_last);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return -EINVAL;
		} else {
			int retval;

			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
			retval = dequeue_from_overflow_chain(u132, endp,
				urb);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return retval;
		}
	}
}
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (usb_pipein(urb->pipe)) {
			u8 endp_number = udev->endp_number_in[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
		} else {
			u8 endp_number = udev->endp_number_out[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
		}
	}
}
static void u132_endpoint_disable(struct usb_hcd *hcd,
	struct usb_host_endpoint *hep)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
			") has been removed %d\n", u132, hcd, hep,
			u132->going);
	} else {
		struct u132_endp *endp = hep->hcpriv;
		if (endp)
			u132_endp_put_kref(u132, endp);
	}
}
static int u132_get_frame(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
		return 0;
	}
}
static int u132_roothub_descriptor(struct u132 *u132,
	struct usb_hub_descriptor *desc)
{
	int retval;
	u16 temp;
	u32 rh_a = -1;
	u32 rh_b = -1;

	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
	if (retval)
		return retval;
	desc->bDescriptorType = USB_DT_HUB;
	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = u132->num_ports;
	temp = 1 + (u132->num_ports / 8);
	desc->bDescLength = 7 + 2 * temp;
	temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
	if (rh_a & RH_A_NPS)
		temp |= HUB_CHAR_NO_LPSM;
	if (rh_a & RH_A_PSM)
		temp |= HUB_CHAR_INDV_PORT_LPSM;
	if (rh_a & RH_A_NOCP)
		temp |= HUB_CHAR_NO_OCPM;
	else if (rh_a & RH_A_OCPM)
		temp |= HUB_CHAR_INDV_PORT_OCPM;
	desc->wHubCharacteristics = cpu_to_le16(temp);
	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
	if (retval)
		return retval;
	memset(desc->u.hs.DeviceRemovable, 0xff,
	       sizeof(desc->u.hs.DeviceRemovable));
	desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
	if (u132->num_ports > 7) {
		desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
		desc->u.hs.DeviceRemovable[2] = 0xff;
	} else
		desc->u.hs.DeviceRemovable[1] = 0xff;
	return 0;
}
static int u132_roothub_status(struct u132 *u132, __le32 *desc)
{
	u32 rh_status = -1;
	int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
	*desc = cpu_to_le32(rh_status);
	return ret_status;
}
static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port = wIndex - 1;
		u32 rh_portstatus = -1;
		int ret_portstatus = u132_read_pcimem(u132,
			roothub.portstatus[port], &rh_portstatus);
		*desc = cpu_to_le32(rh_portstatus);
		if (*(u16 *) (desc + 2)) {
			dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
				"ge = %08X\n", port, *desc);
		}
		return ret_portstatus;
	}
}
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
#define PORT_RESET_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
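/*
 * tick_before() compares 16-bit frame numbers modulo 2^16: for example
 * tick_before(0xfffe, 0x0001) is true because the signed 16-bit
 * difference (-3) is negative, while tick_before(5, 3) is false.
 */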
static int u132_roothub_portreset(struct u132 *u132, int port_index)
{
	int retval;
	u32 fmnumber;
	u16 now;
	u16 reset_done;

	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	if (retval)
		return retval;
	now = fmnumber;
	reset_done = now + PORT_RESET_MSEC;
	do {
		u32 portstat;

		do {
			retval = u132_read_pcimem(u132,
				roothub.portstatus[port_index], &portstat);
			if (retval)
				return retval;
			if (RH_PS_PRS & portstat)
				continue;
			else
				break;
		} while (tick_before(now, reset_done));
		if (RH_PS_PRS & portstat)
			return -ENODEV;
		if (RH_PS_CCS & portstat) {
			if (RH_PS_PRSC & portstat) {
				retval = u132_write_pcimem(u132,
					roothub.portstatus[port_index],
					RH_PS_PRSC);
				if (retval)
					return retval;
			}
		} else
			break;	/* start the next reset,
				sleep till it's probably done */
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			RH_PS_PRS);
		if (retval)
			return retval;
		msleep(PORT_RESET_HW_MSEC);
		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
		if (retval)
			return retval;
		now = fmnumber;
	} while (tick_before(now, reset_done));
	return 0;
}
static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			return u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PSS);
		case USB_PORT_FEAT_POWER:
			return u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PPS);
		case USB_PORT_FEAT_RESET:
			return u132_roothub_portreset(u132, port_index);
		default:
			return -EPIPE;
		}
	}
}
static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		u32 temp;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			temp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			temp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			temp = RH_PS_POCI;
			if ((u132->hc_control & OHCI_CTRL_HCFS)
				!= OHCI_USB_OPER) {
				dev_err(&u132->platform_dev->dev, "TODO resume_"
					"root_hub\n");
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			temp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			temp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			temp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			temp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			temp = RH_PS_PRSC;
			break;
		default:
			return -EPIPE;
		}
		return u132_write_pcimem(u132, roothub.portstatus[port_index],
			temp);
	}
}
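/*
 * Hub status is reported as a bitmap: bit 0 of buf[0] flags a hub-level
 * change, bit N flags a change on port N for ports 1..7, and a second
 * byte carries ports 8 and above, so the returned length is 1 or 2 bytes
 * (0 when nothing changed).
 */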
/* the virtual root hub timer IRQ checks for hub status */
static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
			"ed %d\n", hcd, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
			"ed\n", hcd);
		return -ESHUTDOWN;
	} else {
		int i, changed = 0, length = 1;
		if (u132->flags & OHCI_QUIRK_AMD756) {
			if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
				dev_err(&u132->platform_dev->dev, "bogus NDP, r"
					"ereads as NDP=%d\n",
					u132->hc_roothub_a & RH_A_NDP);
			}
		}
		if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
			buf[0] = changed = 1;
		else
			buf[0] = 0;
		if (u132->num_ports > 7) {
			buf[1] = 0;
			length++;
		}
		for (i = 0; i < u132->num_ports; i++) {
			if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
				RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
				RH_PS_PRSC)) {
				changed = 1;
				if (i < 7)
					buf[0] |= 1 << (i + 1);
				else
					buf[1] |= 1 << (i - 7);
				continue;
			}
			if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
				continue;

			if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
				continue;
		}
		return changed ? length : 0;
	}
}
static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
	u16 wIndex, char *buf, u16 wLength)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0;
		mutex_lock(&u132->sw_lock);
		switch (typeReq) {
		case ClearHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;
			default:
				goto stall;
			}
			break;
		case SetHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;
			default:
				goto stall;
			}
			break;
		case ClearPortFeature:{
				retval = u132_roothub_clearportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		case GetHubDescriptor:{
				retval = u132_roothub_descriptor(u132,
					(struct usb_hub_descriptor *)buf);
				if (retval)
					goto error;
				break;
			}
		case GetHubStatus:{
				retval = u132_roothub_status(u132,
					(__le32 *) buf);
				if (retval)
					goto error;
				break;
			}
		case GetPortStatus:{
				retval = u132_roothub_portstatus(u132,
					(__le32 *) buf, wIndex);
				if (retval)
					goto error;
				break;
			}
		case SetPortFeature:{
				retval = u132_roothub_setportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		default:
			goto stall;
		error:
			u132_disable(u132);
			u132->going = 1;
			break;
		stall:
			retval = -EPIPE;
			break;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	}
}
static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}
#ifdef CONFIG_PM
static int u132_bus_suspend(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}

static int u132_bus_resume(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}

#else
#define u132_bus_suspend NULL
#define u132_bus_resume NULL
#endif
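/*
 * Without CONFIG_PM the bus_suspend/bus_resume methods are left NULL, so
 * the hc_driver below advertises no bus power management support.
 */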
static struct hc_driver u132_hc_driver = {
	.description = hcd_name,
	.hcd_priv_size = sizeof(struct u132),
	.irq = NULL,
	.flags = HCD_USB11 | HCD_MEMORY,
	.reset = u132_hcd_reset,
	.start = u132_hcd_start,
	.stop = u132_hcd_stop,
	.urb_enqueue = u132_urb_enqueue,
	.urb_dequeue = u132_urb_dequeue,
	.endpoint_disable = u132_endpoint_disable,
	.get_frame_number = u132_get_frame,
	.hub_status_data = u132_hub_status_data,
	.hub_control = u132_hub_control,
	.bus_suspend = u132_bus_suspend,
	.bus_resume = u132_bus_resume,
	.start_port_reset = u132_start_port_reset,
};
/*
 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
 * is held for writing, thus this module must not call usb_remove_hcd()
 * synchronously - but instead should immediately stop activity to the
 * device and asynchronously call usb_remove_hcd()
 */
static int u132_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	if (hcd) {
		struct u132 *u132 = hcd_to_u132(hcd);
		if (u132->going++ > 1) {
			dev_err(&u132->platform_dev->dev, "already being remove"
				"d\n");
			return -ENODEV;
		} else {
			int rings = MAX_U132_RINGS;
			int endps = MAX_U132_ENDPS;
			dev_err(&u132->platform_dev->dev, "removing device u132"
				".%d\n", u132->sequence_num);
			mutex_lock(&u132->sw_lock);
			u132_monitor_cancel_work(u132);
			while (rings-- > 0) {
				struct u132_ring *ring = &u132->ring[rings];
				u132_ring_cancel_work(u132, ring);
			}
			while (endps-- > 0) {
				struct u132_endp *endp = u132->endp[endps];
				if (endp)
					u132_endp_cancel_work(u132, endp);
			}
			u132->going += 1;
			printk(KERN_INFO "removing device u132.%d\n",
				u132->sequence_num);
			mutex_unlock(&u132->sw_lock);
			usb_remove_hcd(hcd);
			u132_u132_put_kref(u132);
			return 0;
		}
	} else
		return 0;
}
static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
{
	int rings = MAX_U132_RINGS;
	int ports = MAX_U132_PORTS;
	int addrs = MAX_U132_ADDRS;
	int udevs = MAX_U132_UDEVS;
	int endps = MAX_U132_ENDPS;
	u132->board = dev_get_platdata(&pdev->dev);
	u132->platform_dev = pdev;
	mutex_init(&u132->sw_lock);
	mutex_init(&u132->scheduler_lock);
	while (rings-- > 0) {
		struct u132_ring *ring = &u132->ring[rings];
		ring->number = rings + 1;
		ring->curr_endp = NULL;
		INIT_DELAYED_WORK(&ring->scheduler,
			u132_hcd_ring_work_scheduler);
	}
	mutex_lock(&u132->sw_lock);
	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
	while (ports-- > 0) {
		struct u132_port *port = &u132->port[ports];
		port->power = 0;
		port->Status = 0;
	}
	while (addrs-- > 0) {
		struct u132_addr *addr = &u132->addr[addrs];
		addr->address = 0;
	}
	while (udevs-- > 0) {
		struct u132_udev *udev = &u132->udev[udevs];
		int i = ARRAY_SIZE(udev->endp_number_in);
		int o = ARRAY_SIZE(udev->endp_number_out);
		udev->usb_device = NULL;
		udev->udev_number = 0;
		udev->usb_addr = 0;
		udev->portnumber = 0;
		while (i-- > 0)
			udev->endp_number_in[i] = 0;

		while (o-- > 0)
			udev->endp_number_out[o] = 0;

	}
	while (endps-- > 0)
		u132->endp[endps] = NULL;

	mutex_unlock(&u132->sw_lock);
}
static int u132_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	int retval;
	u32 control;
	u32 rh_a = -1;
	u32 num_ports;

	if (u132_exiting > 0)
		return -ENODEV;
	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, control, &control);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
	if (retval)
		return retval;
	num_ports = rh_a & RH_A_NDP;	/* refuse to confuse usbcore */
	if (pdev->dev.dma_mask)
		return -EINVAL;

	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
			);
		ftdi_elan_gone_away(pdev);
		return -ENOMEM;
	} else {
		struct u132 *u132 = hcd_to_u132(hcd);
		hcd->rsrc_start = 0;
		mutex_lock(&u132_module_lock);
		list_add_tail(&u132->u132_list, &u132_static_list);
		u132->sequence_num = ++u132_instances;
		mutex_unlock(&u132_module_lock);
		u132_u132_init_kref(u132);
		u132_initialise(u132, pdev);
		hcd->product_desc = "ELAN U132 Host Controller";
		retval = usb_add_hcd(hcd, 0, 0);
		if (retval != 0) {
			dev_err(&u132->platform_dev->dev, "init error %d\n",
				retval);
			u132_u132_put_kref(u132);
			return retval;
		} else {
			device_wakeup_enable(hcd->self.controller);
			u132_monitor_queue_work(u132, 100);
			return 0;
		}
	}
}
#ifdef CONFIG_PM
/*
 * for this device there's no useful distinction between the controller
 * and its root hub.
 */
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0, ports;

		switch (state.event) {
		case PM_EVENT_FREEZE:
			retval = u132_bus_suspend(hcd);
			break;
		case PM_EVENT_SUSPEND:
		case PM_EVENT_HIBERNATE:
			ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 0);
			}
			break;
		}
		return retval;
	}
}

static int u132_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval;
		if (!u132->port[0].power) {
			int ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 1);
			}
			retval = 0;
		} else {
			retval = u132_bus_resume(hcd);
		}
		return retval;
	}
}
#else
#define u132_suspend NULL
#define u132_resume NULL
#endif
/*
 * this driver is loaded explicitly by ftdi_u132
 *
 * the platform_driver struct is static because it is per type of module
 */
static struct platform_driver u132_platform_driver = {
	.probe = u132_probe,
	.remove = u132_remove,
	.suspend = u132_suspend,
	.resume = u132_resume,
	.driver = {
		   .name = hcd_name,
		   },
};
static int __init u132_hcd_init(void)
{
	int retval;
	INIT_LIST_HEAD(&u132_static_list);
	u132_instances = 0;
	u132_exiting = 0;
	mutex_init(&u132_module_lock);
	if (usb_disabled())
		return -ENODEV;
	printk(KERN_INFO "driver %s\n", hcd_name);
	workqueue = create_singlethread_workqueue("u132");
	retval = platform_driver_register(&u132_platform_driver);
	return retval;
}

module_init(u132_hcd_init);
static void __exit u132_hcd_exit(void)
{
	struct u132 *u132;
	struct u132 *temp;

	mutex_lock(&u132_module_lock);
	u132_exiting += 1;
	mutex_unlock(&u132_module_lock);
	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
		platform_device_unregister(u132->platform_dev);
	}
	platform_driver_unregister(&u132_platform_driver);
	printk(KERN_INFO "u132-hcd driver deregistered\n");
	wait_event(u132_hcd_wait, u132_instances == 0);
	flush_workqueue(workqueue);
	destroy_workqueue(workqueue);
}

module_exit(u132_hcd_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:u132_hcd");