/*
 * Host Controller Driver for the Elan Digital Systems U132 adapter
 *
 * Copyright (C) 2006 Elan Digital Systems Limited
 * http://www.elandigitalsystems.com
 *
 * Author and Maintainer - Tony Olech - Elan Digital Systems
 * tony.olech@elandigitalsystems.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver was written by Tony Olech (tony.olech@elandigitalsystems.com)
 * based on various USB host drivers in the 2.6.15 Linux kernel
 * with constant reference to the 3rd Edition of Linux Device Drivers
 * published by O'Reilly.
 *
 * The U132 adapter is a USB to CardBus adapter specifically designed
 * for PC cards that contain an OHCI host controller. A typical PC card
 * is the Orange Mobile 3G Option GlobeTrotter Fusion card.
 *
 * The U132 adapter will *NOT* work with PC cards that do not contain
 * an OHCI controller. A simple way to test whether a PC card has an
 * OHCI controller as an interface is to insert the PC card directly
 * into a laptop (or desktop) with a CardBus slot. If "lspci" shows
 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller,
 * then there is a good chance that the U132 adapter will support the
 * PC card. (You also need the specific client driver for the PC card.)
 *
 * Please inform the Author and Maintainer about any PC cards that
 * contain an OHCI Host Controller and work when directly connected to
 * an embedded CardBus slot but do not work when they are connected
 * via an ELAN U132 adapter.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include "../core/hcd.h"
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
 * If you're going to try stuff like this, you need to split
 * out shareable stuff (register declarations?) into its own
 * file, maybe name <linux/usb/ohci.h>
 */
#include "ohci.h"
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
	OHCI_INTR_WDH)
MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
MODULE_DESCRIPTION("U132 USB Host Controller Driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent */
static int distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware,
	"true to distrust firmware power/overcurrent setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
 * u132_module_lock exists to protect access to global variables
 */
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
static struct list_head u132_static_list;
/*
 * end of the global variables protected by u132_module_lock
 */
static struct workqueue_struct *workqueue;
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
static const char *cc_to_text[16] = {
	"No Error ",
	"CRC Error ",
	"Bit Stuff ",
	"Data Togg ",
	"Stall ",
	"DevNotResp ",
	"PIDCheck ",
	"UnExpPID ",
	"DataOver ",
	"DataUnder ",
	"(for hw) ",
	"(for hw) ",
	"BufferOver ",
	"BuffUnder ",
	"(for HCD) ",
	"(for HCD) "
};
struct u132_udev {
	struct kref kref;
	struct usb_device *usb_device;
	u8 enumeration;
	u8 udev_number;
	u8 usb_addr;
	u8 endp_number_in[16];
	u8 endp_number_out[16];
};
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1 << ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE - 1)
struct u132_urbq {
	struct list_head urb_more;
	struct urb *urb;
};
struct u132_spin {
	spinlock_t slock;
};
struct u132_endp {
	struct kref kref;
	u8 udev_number;
	u8 endp_number;
	u8 usb_addr;
	u8 usb_endp;
	struct u132 *u132;
	struct list_head endp_ring;
	struct u132_ring *ring;
	unsigned toggle_bits:2;
	unsigned active:1;
	unsigned delayed:1;
	unsigned input:1;
	unsigned output:1;
	unsigned pipetype:2;
	unsigned dequeueing:1;
	unsigned edset_flush:1;
	unsigned spare_bits:14;
	unsigned long jiffies;
	struct usb_host_endpoint *hep;
	struct u132_spin queue_lock;
	u16 queue_size;
	u16 queue_last;
	u16 queue_next;
	struct urb *urb_list[ENDP_QUEUE_SIZE];
	struct list_head urb_more;
	struct delayed_work scheduler;
};
struct u132_ring {
	unsigned in_use:1;
	u8 number;
	struct u132 *u132;
	struct u132_endp *curr_endp;
	struct delayed_work scheduler;
};
struct u132 {
	struct kref kref;
	struct list_head u132_list;
	struct mutex sw_lock;
	struct mutex scheduler_lock;
	struct u132_platform_data *board;
	struct platform_device *platform_dev;
	struct u132_ring ring[MAX_U132_RINGS];
	int flags;
	int going;
	int power;
	int num_ports;
	int num_endpoints;
	u32 hc_fminterval;
	u32 hc_control;
	u32 hc_roothub_status;
	u32 hc_roothub_a;
	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
	unsigned long next_statechange;
	struct delayed_work monitor;
	struct u132_addr addr[MAX_U132_ADDRS];
	struct u132_udev udev[MAX_U132_UDEVS];
	struct u132_port port[MAX_U132_PORTS];
	struct u132_endp *endp[MAX_U132_ENDPS];
};
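/*
 * Illustrative note on the queue fields above (a sketch inferred from the
 * code in this file, not text from the original author): urb_list[] is a
 * power-of-two circular buffer indexed by the free-running counters
 * queue_last/queue_next, masked on every access:
 *
 *	enqueue:  endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
 *	dequeue:  urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next];
 *
 * URBs that do not fit in the eight slots overflow onto the urb_more list
 * as struct u132_urbq entries and are pulled back into urb_list[] as slots
 * are freed by the giveback paths.
 */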
/*
 * these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data)
#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data)
#define u132_read_pcimem(u132, member, data) \
	usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data)
#define u132_write_pcimem(u132, member, data) \
	usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data)
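/*
 * Illustrative expansion (a sketch, nothing new is defined here): the
 * macros above forward every register access to the ftdi-elan companion
 * module, turning the register name into a compile-time offset, e.g.
 *
 *	u32 control;
 *	retval = u132_read_pcimem(u132, control, &control);
 *
 * expands to
 *
 *	retval = usb_ftdi_elan_read_pcimem(u132->platform_dev,
 *			offsetof(struct ohci_regs, control), 0, &control);
 *
 * which is why plain inline functions cannot be used: the member name must
 * survive into offsetof().
 */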
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
	u8 udev_number = udev->udev_number;
	return container_of(udev, struct u132, udev[udev_number]);
}

static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
	return (struct u132 *)(hcd->hcd_priv);
}

static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
	return container_of((void *)u132, struct usb_hcd, hcd_priv);
}

static inline void u132_disable(struct u132 *u132)
{
	u132_to_hcd(u132)->state = HC_STATE_HALT;
}
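/*
 * A minimal sketch of the assumed memory layout behind hcd_to_u132() and
 * u132_to_hcd() above: usb_create_hcd() allocates struct usb_hcd with a
 * trailing private region (hcd_priv[]) sized by hc_driver->hcd_priv_size,
 * and the struct u132 instance lives in that region:
 *
 *	struct usb_hcd
 *	+------------------+
 *	| core hcd fields  |
 *	| hcd_priv[] ------+--> first byte of struct u132
 *	+------------------+
 *
 * so one direction is a cast of hcd->hcd_priv and the other is the inverse
 * container_of().
 */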
#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
	USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
	USB_PORT_STAT_C_RESET) << 16)
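/*
 * Worked example (assumes the standard USB hub port-status layout; this is
 * commentary, not original driver text): a root-hub port status word packs
 * the status bits in the low 16 bits and the corresponding change bits in
 * the high 16 bits, so PORT_C_MASK selects only the change indicators:
 *
 *	if (u132->hc_roothub_portstatus[i] & PORT_C_MASK)
 *		;	/* some change bit is set on port i + 1 */
 */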
static void u132_hcd_delete(struct kref *kref)
{
	struct u132 *u132 = kref_to_u132(kref);
	struct platform_device *pdev = u132->platform_dev;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	mutex_lock(&u132_module_lock);
	list_del_init(&u132->u132_list);
	u132_instances -= 1;
	mutex_unlock(&u132_module_lock);
	dev_warn(&u132->platform_dev->dev,
		"FREEING the hcd=%p and thus the u132=%p going=%d pdev=%p\n",
		hcd, u132, u132->going, pdev);
	usb_put_hcd(hcd);
}
static inline void u132_u132_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}

static inline void u132_u132_init_kref(struct u132 *u132)
{
	kref_init(&u132->kref);
}

static void u132_udev_delete(struct kref *kref)
{
	struct u132_udev *udev = kref_to_u132_udev(kref);
	udev->udev_number = 0;
	udev->usb_device = NULL;
	udev->enumeration = 0;
}

static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_put(&udev->kref, u132_udev_delete);
}

static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_get(&udev->kref);
}

static inline void u132_udev_init_kref(struct u132 *u132,
	struct u132_udev *udev)
{
	kref_init(&udev->kref);
}

static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
{
	kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	if (delta > 0) {
		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
			return;
	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
		return;
	kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	kref_get(&u132->kref);
	u132_ring_requeue_work(u132, ring, delta);
}

static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
	if (cancel_delayed_work(&ring->scheduler))
		kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_endp_delete(struct kref *kref)
{
	struct u132_endp *endp = kref_to_u132_endp(kref);
	struct u132 *u132 = endp->u132;
	u8 usb_addr = endp->usb_addr;
	u8 usb_endp = endp->usb_endp;
	u8 address = u132->addr[usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	u8 endp_number = endp->endp_number;
	struct usb_host_endpoint *hep = endp->hep;
	struct u132_ring *ring = endp->ring;
	struct list_head *head = &endp->endp_ring;
	hep->hcpriv = NULL;
	if (endp == ring->curr_endp) {
		if (list_empty(head)) {
			ring->curr_endp = NULL;
		} else {
			struct u132_endp *next_endp = list_entry(head->next,
				struct u132_endp, endp_ring);
			ring->curr_endp = next_endp;
		}
	}
	list_del(head);
	if (endp->input) {
		udev->endp_number_in[usb_endp] = 0;
		u132_udev_put_kref(u132, udev);
	}
	if (endp->output) {
		udev->endp_number_out[usb_endp] = 0;
		u132_udev_put_kref(u132, udev);
	}
	u132->endp[endp_number - 1] = NULL;
	kfree(endp);
	u132_u132_put_kref(u132);
}
static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_get(&endp->kref);
}

static inline void u132_endp_init_kref(struct u132 *u132,
	struct u132_endp *endp)
{
	kref_init(&endp->kref);
	kref_get(&u132->kref);
}

static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
	unsigned int delta)
{
	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
		kref_get(&endp->kref);
}

static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
{
	if (cancel_delayed_work(&endp->scheduler))
		kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_monitor_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
	if (queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_get(&u132->kref);
}

static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_cancel_work(struct u132 *u132)
{
	if (cancel_delayed_work(&u132->monitor))
		kref_put(&u132->kref, u132_hcd_delete);
}
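/*
 * Note on the helpers above (an informal summary of the code, not original
 * documentation): every successfully queued delayed-work item holds one
 * kref on the object whose work function it will run, and the matching
 * cancel/completion path drops that reference, roughly:
 *
 *	u132_monitor_queue_work(u132, 500);	/* kref_get() on success */
 *	...
 *	u132_monitor_cancel_work(u132);		/* kref_put() if dequeued */
 *
 * so a u132, endp or ring object cannot be freed while work for it is
 * still pending on the driver workqueue.
 */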
static int read_roothub_info(struct u132 *u132)
{
	u32 revision;
	int retval;
	retval = u132_read_pcimem(u132, revision, &revision);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device control\n", retval);
		return retval;
	} else if ((revision & 0xFF) == 0x10) {
	} else if ((revision & 0xFF) == 0x11) {
	} else {
		dev_err(&u132->platform_dev->dev,
			"device revision is not valid %08X\n", revision);
		return -ENODEV;
	}
	retval = u132_read_pcimem(u132, control, &u132->hc_control);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device control\n", retval);
		return retval;
	}
	retval = u132_read_pcimem(u132, roothub.status,
		&u132->hc_roothub_status);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device reg roothub.status\n",
			retval);
		return retval;
	}
	retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device reg roothub.a\n", retval);
		return retval;
	}
	{
		int I = u132->num_ports;
		int i = 0;
		while (I-- > 0) {
			retval = u132_read_pcimem(u132, roothub.portstatus[i],
				&u132->hc_roothub_portstatus[i]);
			if (retval) {
				dev_err(&u132->platform_dev->dev,
					"error %d accessing device roothub.portstatus[%d]\n",
					retval, i);
				return retval;
			} else
				i += 1;
		}
	}
	return 0;
}
static void u132_hcd_monitor_work(struct work_struct *work)
{
	struct u132 *u132 = container_of(work, struct u132, monitor.work);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		u132_monitor_put_kref(u132);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		u132_monitor_put_kref(u132);
		return;
	} else {
		int retval;
		mutex_lock(&u132->sw_lock);
		retval = read_roothub_info(u132);
		if (retval) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			u132_disable(u132);
			u132->going = 1;
			mutex_unlock(&u132->sw_lock);
			usb_hc_died(hcd);
			ftdi_elan_gone_away(u132->platform_dev);
			u132_monitor_put_kref(u132);
			return;
		} else {
			u132_monitor_requeue_work(u132, 500);
			mutex_unlock(&u132->sw_lock);
			return;
		}
	}
}
static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	struct u132_ring *ring;
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	mutex_lock(&u132->scheduler_lock);
	ring = endp->ring;
	ring->in_use = 0;
	u132_ring_cancel_work(u132, ring);
	u132_ring_queue_work(u132, ring, 0);
	mutex_unlock(&u132->scheduler_lock);
	u132_endp_put_kref(u132, endp);
	usb_hcd_giveback_urb(hcd, urb, status);
}

static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	u132_endp_put_kref(u132, endp);
}

static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	usb_hcd_giveback_urb(hcd, urb, status);
}
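/*
 * A sketch of the bookkeeping done by the two giveback paths above
 * (commentary inferred from this file only): completing an URB advances
 * queue_next, shrinks queue_size and, when URBs have overflowed onto
 * urb_more, promotes the oldest overflow entry back into urb_list[]:
 *
 *	queue_next --> urb_list[8 slots] <-- queue_last
 *	                       ^
 *	                       +-- refilled from the urb_more overflow list
 *
 * u132_hcd_giveback_urb() additionally marks the ring idle and re-kicks
 * the ring scheduler so the next queued endpoint is serviced, while
 * u132_hcd_abandon_urb() skips that step.
 */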
static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}

static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}

static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}

static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}
/*
 * must not LOCK sw_lock
 */
static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer + urb->actual_length;
		u8 *b = buf;
		int L = len;

		while (L-- > 0)		/* copy the received bytes */
			*u++ = *b++;
		urb->actual_length += len;
		if ((condition_code == TD_CC_NOERROR) &&
			(urb->transfer_buffer_length > urb->actual_length)) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			if (urb->actual_length > 0) {
				int retval;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_single(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_interrupt_recv);
				if (retval != 0)
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
			} else {
				ring->in_use = 0;
				endp->active = 0;
				endp->jiffies = jiffies +
					msecs_to_jiffies(urb->interval);
				u132_ring_cancel_work(u132, ring);
				u132_ring_queue_work(u132, ring, 0);
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
			}
			return;
		} else if ((condition_code == TD_DATAUNDERRUN) &&
			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else {
			if (condition_code == TD_CC_NOERROR) {
				endp->toggle_bits = toggle_bits;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 1 & toggle_bits);
			} else if (condition_code == TD_CC_STALL) {
				endp->toggle_bits = 0x2;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 0);
			} else {
				endp->toggle_bits = 0x2;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 0);
				dev_err(&u132->platform_dev->dev,
					"urb=%p giving back INTERRUPT %s\n",
					urb, cc_to_text[condition_code]);
			}
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		urb->actual_length += len;
		endp->toggle_bits = toggle_bits;
		if (urb->transfer_buffer_length > urb->actual_length) {
			int retval;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_output(u132, ring, endp, urb, address,
				endp->toggle_bits, u132_hcd_bulk_output_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer + urb->actual_length;
		u8 *b = buf;
		int L = len;

		while (L-- > 0)		/* copy the received bytes */
			*u++ = *b++;
		urb->actual_length += len;
		if ((condition_code == TD_CC_NOERROR) &&
			(urb->transfer_buffer_length > urb->actual_length)) {
			int retval;
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, endp->toggle_bits,
				u132_hcd_bulk_input_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (condition_code == TD_CC_NOERROR) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else if ((condition_code == TD_DATAUNDERRUN) &&
			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else if (condition_code == TD_DATAUNDERRUN) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			dev_warn(&u132->platform_dev->dev,
				"urb=%p(SHORT NOT OK) giving back BULK IN %s\n",
				urb, cc_to_text[condition_code]);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else if (condition_code == TD_CC_STALL) {
			endp->toggle_bits = 0x2;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else {
			endp->toggle_bits = 0x2;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
			dev_err(&u132->platform_dev->dev,
				"urb=%p giving back BULK IN code=%d %s\n", urb,
				condition_code, cc_to_text[condition_code]);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer;
		u8 *b = buf;
		int L = len;

		while (L-- > 0)		/* copy the received bytes */
			*u++ = *b++;
		urb->actual_length = len;
		if ((condition_code == TD_CC_NOERROR) || ((condition_code ==
			TD_DATAUNDERRUN) && ((urb->transfer_flags &
			URB_SHORT_NOT_OK) == 0))) {
			int retval;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0x3,
				u132_hcd_configure_empty_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (condition_code == TD_CC_STALL) {
			mutex_unlock(&u132->scheduler_lock);
			dev_warn(&u132->platform_dev->dev,
				"giving back SETUP INPUT STALL urb %p\n", urb);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			dev_err(&u132->platform_dev->dev,
				"giving back SETUP INPUT %s urb %p\n",
				cc_to_text[condition_code], urb);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		if (usb_pipein(urb->pipe)) {
			int retval;
			struct u132_ring *ring = endp->ring;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0,
				u132_hcd_configure_input_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			int retval;
			struct u132_ring *ring = endp->ring;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0,
				u132_hcd_configure_empty_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb,
	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		u132->addr[0].address = 0;
		endp->usb_addr = udev->usb_addr;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		mutex_unlock(&u132->scheduler_lock);
		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
			ring->number, endp, urb, 0, endp->usb_endp, 0,
			u132_hcd_enumeration_empty_recv);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer;
		u8 *b = buf;
		int L = len;

		while (L-- > 0)		/* copy the received bytes */
			*u++ = *b++;
		urb->actual_length = len;
		mutex_unlock(&u132->scheduler_lock);
		retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
			ring->number, endp, urb, address, endp->usb_endp, 0x3,
			u132_hcd_initial_empty_sent);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device is being removed urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		mutex_unlock(&u132->scheduler_lock);
		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
			ring->number, endp, urb, address, endp->usb_endp, 0,
			u132_hcd_initial_input_recv);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		dev_err(&u132->platform_dev->dev,
			"CALLBACK called urb=%p unlinked=%d\n",
			urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
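/*
 * Rough map of how the callbacks above chain together for a control
 * transfer (an informal summary of the code in this file, not original
 * documentation). Each stage hands the URB to the ftdi-elan module and
 * names the function to be called when that stage completes:
 *
 *	edset_setup(..., u132_hcd_initial_setup_sent)
 *	    -> u132_hcd_initial_input_recv -> u132_hcd_initial_empty_sent
 *	edset_setup(..., u132_hcd_enumeration_address_sent)
 *	    -> u132_hcd_enumeration_empty_recv   (apparently the SET_ADDRESS leg)
 *	edset_setup(..., u132_hcd_configure_setup_sent)
 *	    -> u132_hcd_configure_input_recv -> u132_hcd_configure_empty_sent
 *	    or -> u132_hcd_configure_empty_recv  (control transfer with no IN data)
 */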
/*
 * this work function is only executed from the work queue
 */
static void u132_hcd_ring_work_scheduler(struct work_struct *work)
{
	struct u132_ring *ring =
		container_of(work, struct u132_ring, scheduler.work);
	struct u132 *u132 = ring->u132;
	mutex_lock(&u132->scheduler_lock);
	if (ring->in_use) {
		mutex_unlock(&u132->scheduler_lock);
		u132_ring_put_kref(u132, ring);
		return;
	} else if (ring->curr_endp) {
		struct u132_endp *last_endp = ring->curr_endp;
		struct list_head *scan;
		struct list_head *head = &last_endp->endp_ring;
		unsigned long wakeup = 0;
		list_for_each(scan, head) {
			struct u132_endp *endp = list_entry(scan,
				struct u132_endp, endp_ring);
			if (endp->queue_next == endp->queue_last) {
			} else if ((endp->delayed == 0)
				|| time_after_eq(jiffies, endp->jiffies)) {
				ring->curr_endp = endp;
				u132_endp_cancel_work(u132, last_endp);
				u132_endp_queue_work(u132, last_endp, 0);
				mutex_unlock(&u132->scheduler_lock);
				u132_ring_put_kref(u132, ring);
				return;
			} else {
				unsigned long delta = endp->jiffies - jiffies;
				if (delta > wakeup)
					wakeup = delta;
			}
		}
		if (last_endp->queue_next == last_endp->queue_last) {
		} else if ((last_endp->delayed == 0) || time_after_eq(jiffies,
			last_endp->jiffies)) {
			u132_endp_cancel_work(u132, last_endp);
			u132_endp_queue_work(u132, last_endp, 0);
			mutex_unlock(&u132->scheduler_lock);
			u132_ring_put_kref(u132, ring);
			return;
		} else {
			unsigned long delta = last_endp->jiffies - jiffies;
			if (delta > wakeup)
				wakeup = delta;
		}
		if (wakeup > 0) {
			u132_ring_requeue_work(u132, ring, wakeup);
			mutex_unlock(&u132->scheduler_lock);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			u132_ring_put_kref(u132, ring);
			return;
		}
	} else {
		mutex_unlock(&u132->scheduler_lock);
		u132_ring_put_kref(u132, ring);
		return;
	}
}
static void u132_hcd_endp_work_scheduler(struct work_struct *work)
{
	struct u132_ring *ring;
	struct u132_endp *endp =
		container_of(work, struct u132_endp, scheduler.work);
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	ring = endp->ring;
	if (endp->edset_flush) {
		endp->edset_flush = 0;
		if (endp->dequeueing)
			usb_ftdi_elan_edset_flush(u132->platform_dev,
				ring->number, endp);
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (endp->active) {
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (ring->in_use) {
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (endp->queue_next == endp->queue_last) {
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (endp->pipetype == PIPE_INTERRUPT) {
		u8 address = u132->addr[endp->usb_addr].address;
		if (ring->in_use) {
			mutex_unlock(&u132->scheduler_lock);
			u132_endp_put_kref(u132, endp);
			return;
		} else {
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_single(u132, ring, endp, urb, address,
				endp->toggle_bits, u132_hcd_interrupt_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		}
	} else if (endp->pipetype == PIPE_CONTROL) {
		u8 address = u132->addr[endp->usb_addr].address;
		if (ring->in_use) {
			mutex_unlock(&u132->scheduler_lock);
			u132_endp_put_kref(u132, endp);
			return;
		} else if (address == 0) {
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_setup(u132, ring, endp, urb, address,
				0x2, u132_hcd_initial_setup_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (endp->usb_addr == 0) {
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
				u132_hcd_enumeration_address_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			int retval;
			u8 address = u132->addr[endp->usb_addr].address;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_setup(u132, ring, endp, urb, address,
				0x2, u132_hcd_configure_setup_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		}
	} else {
		if (endp->input) {
			u8 address = u132->addr[endp->usb_addr].address;
			if (ring->in_use) {
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
				return;
			} else {
				int retval;
				struct urb *urb = endp->urb_list[
					ENDP_QUEUE_MASK & endp->queue_next];
				endp->active = 1;
				ring->curr_endp = endp;
				ring->in_use = 1;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_input(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_bulk_input_recv);
				if (retval != 0)
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
				return;
			}
		} else {	/* output pipe */
			u8 address = u132->addr[endp->usb_addr].address;
			if (ring->in_use) {
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
				return;
			} else {
				int retval;
				struct urb *urb = endp->urb_list[
					ENDP_QUEUE_MASK & endp->queue_next];
				endp->active = 1;
				ring->curr_endp = endp;
				ring->in_use = 1;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_output(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_bulk_output_sent);
				if (retval != 0)
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
				return;
			}
		}
	}
}
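/*
 * Scheduling sketch (an informal summary of the two work functions above,
 * derived only from this file): each u132_ring carries at most one
 * in-flight transfer (ring->in_use). The ring scheduler walks the
 * endp_ring list for an endpoint whose queue is non-empty and whose
 * interrupt delay has expired, then hands over to the endpoint scheduler,
 * which claims the ring and starts the next URB with the edset_* call that
 * matches the endpoint's pipe type.
 */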
static void port_power(struct u132 *u132, int pn, int is_on)
{
	u132->port[pn].power = is_on;
}
static void u132_power(struct u132 *u132, int is_on)
{
	struct usb_hcd *hcd = u132_to_hcd(u132);
			/* hub is inactive unless the port is powered */
	if (is_on) {
		if (u132->power)
			return;
		u132->power = 1;
	} else {
		u132->power = 0;
		hcd->state = HC_STATE_HALT;
	}
}
static int u132_periodic_reinit(struct u132 *u132)
{
	int retval;
	u32 fi = u132->hc_fminterval & 0x03fff;
	u32 fit;
	u32 fminterval;
	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	fit = fminterval & FIT;
	retval = u132_write_pcimem(u132, fminterval,
		(fit ^ FIT) | u132->hc_fminterval);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, periodicstart,
		((9 * fi) / 10) & 0x3fff);
	if (retval)
		return retval;
	return 0;
}
static char *hcfs2string(int state)
{
	switch (state) {
	case OHCI_USB_RESET:
		return "reset";
	case OHCI_USB_RESUME:
		return "resume";
	case OHCI_USB_OPER:
		return "operational";
	case OHCI_USB_SUSPEND:
		return "suspend";
	}
	return "?";
}
static int u132_init(struct u132 *u132)
{
	int retval;
	u32 control;
	u132_disable(u132);
	u132->next_statechange = jiffies;
	retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	if (u132->num_ports == 0) {
		u32 rh_a = -1;
		retval = u132_read_pcimem(u132, roothub.a, &rh_a);
		if (retval)
			return retval;
		u132->num_ports = rh_a & RH_A_NDP;
		retval = read_roothub_info(u132);
		if (retval)
			return retval;
	}
	if (u132->num_ports > MAX_U132_PORTS)
		return -EINVAL;
	return 0;
}
/* Start an OHCI controller, set the BUS operational
 * resets USB and controller
 */
static int u132_run(struct u132 *u132)
{
	int retval;
	u32 control;
	u32 status;
	u32 fminterval;
	u32 periodicstart;
	u32 cmdstatus;
	u32 roothub_a;
	int mask = OHCI_INTR_INIT;
	int first = u132->hc_fminterval == 0;
	int reset_timeout = 30;	/* ... allow extra time */
	u132_disable(u132);
	if (first) {
		u32 temp;
		retval = u132_read_pcimem(u132, fminterval, &temp);
		if (retval)
			return retval;
		u132->hc_fminterval = temp & 0x3fff;
		u132->hc_fminterval |= FSMP(u132->hc_fminterval) << 16;
	}
	retval = u132_read_pcimem(u132, control, &u132->hc_control);
	if (retval)
		return retval;
	dev_info(&u132->platform_dev->dev,
		"resetting from state '%s', control = %08X\n",
		hcfs2string(u132->hc_control & OHCI_CTRL_HCFS),
		u132->hc_control);
	switch (u132->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_OPER:
		break;
	case OHCI_USB_SUSPEND:
	case OHCI_USB_RESUME:
		u132->hc_control &= OHCI_CTRL_RWC;
		u132->hc_control |= OHCI_USB_RESUME;
		break;
	default:
		u132->hc_control &= OHCI_CTRL_RWC;
		u132->hc_control |= OHCI_USB_RESET;
		break;
	}
	retval = u132_write_pcimem(u132, control, u132->hc_control);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	if (!(roothub_a & RH_A_NPS)) {
		int temp;	/* power down each port */
		for (temp = 0; temp < u132->num_ports; temp++) {
			retval = u132_write_pcimem(u132,
				roothub.portstatus[temp], RH_PS_LSDA);
			if (retval)
				return retval;
		}
	}
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
retry:
	retval = u132_read_pcimem(u132, cmdstatus, &status);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, cmdstatus, OHCI_HCR);
	if (retval)
		return retval;
extra:	{
		retval = u132_read_pcimem(u132, cmdstatus, &status);
		if (retval)
			return retval;
		if (0 != (status & OHCI_HCR)) {
			if (--reset_timeout == 0) {
				dev_err(&u132->platform_dev->dev,
					"USB HC reset timed out!\n");
				return -ENODEV;
			} else {
				msleep(5);
				goto extra;
			}
		}
	}
	if (u132->flags & OHCI_QUIRK_INITRESET) {
		retval = u132_write_pcimem(u132, control, u132->hc_control);
		if (retval)
			return retval;
		retval = u132_read_pcimem(u132, control, &control);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, ed_controlhead, 0x00000000);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, ed_bulkhead, 0x11000000);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, hcca, 0x00000000);
	if (retval)
		return retval;
	retval = u132_periodic_reinit(u132);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, periodicstart, &periodicstart);
	if (retval)
		return retval;
	if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) {
		if (!(u132->flags & OHCI_QUIRK_INITRESET)) {
			u132->flags |= OHCI_QUIRK_INITRESET;
			goto retry;
		} else
			dev_err(&u132->platform_dev->dev,
				"init err(%08x %04x)\n",
				fminterval, periodicstart);
	}			/* start controller operations */
	u132->hc_control &= OHCI_CTRL_RWC;
	u132->hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER;
	retval = u132_write_pcimem(u132, control, u132->hc_control);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, cmdstatus, OHCI_BLF);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, cmdstatus, &cmdstatus);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_DRWE);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, intrstatus, mask);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, intrdisable,
		OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO |
		OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH |
		OHCI_INTR_SO);
	if (retval)
		return retval;	/* handle root hub init quirks ... */
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	roothub_a &= ~(RH_A_PSM | RH_A_OCPM);
	if (u132->flags & OHCI_QUIRK_SUPERIO) {
		roothub_a |= RH_A_NOCP;
		roothub_a &= ~(RH_A_POTPGT | RH_A_NPS);
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	} else if ((u132->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
		roothub_a |= RH_A_NPS;
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_LPSC);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, roothub.b,
		(roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	mdelay((roothub_a >> 23) & 0x1fe);
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	return 0;
}
static void u132_hcd_stop(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"u132 device %p(hcd=%p) has been removed %d\n",
			u132, hcd, u132->going);
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev,
			"device hcd=%p is being removed\n", hcd);
	} else {
		mutex_lock(&u132->sw_lock);
		u132_power(u132, 0);
		mutex_unlock(&u132->sw_lock);
	}
}
static int u132_hcd_start(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else if (hcd->self.controller) {
		int retval;
		struct platform_device *pdev =
			to_platform_device(hcd->self.controller);
		u16 vendor = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->vendor;
		u16 device = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->device;
		mutex_lock(&u132->sw_lock);
		if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
			u132->flags = OHCI_QUIRK_AMD756;
		} else if (vendor == PCI_VENDOR_ID_OPTI && device == 0xc861) {
			dev_err(&u132->platform_dev->dev,
				"WARNING: OPTi workarounds unavailable\n");
		} else if (vendor == PCI_VENDOR_ID_COMPAQ && device == 0xa0f8)
			u132->flags |= OHCI_QUIRK_ZFMICRO;
		retval = u132_run(u132);
		if (retval) {
			u132_disable(u132);
			u132->going = 1;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	} else {
		dev_err(&u132->platform_dev->dev, "platform_device missing\n");
		return -ENODEV;
	}
}
static int u132_hcd_reset(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev,
			"device has been removed %d\n", u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval;
		mutex_lock(&u132->sw_lock);
		retval = u132_init(u132);
		if (retval) {
			u132_disable(u132);
			u132->going = 1;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	}
}
static int create_endpoint_and_queue_int(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;

	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}

	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
	return 0;
}
static int queue_int_on_old_endpoint(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp, u8 address)
{
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	} else {
		struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
			GFP_ATOMIC);
		if (urbq == NULL) {
			endp->queue_size -= 1;
			return -ENOMEM;
		} else {
			list_add_tail(&urbq->urb_more, &endp->urb_more);
			urbq->urb = urb;
		}
	}
	return 0;
}
static int create_endpoint_and_queue_bulk(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	int ring_number;
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;

	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}

	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		ring_number = 3;
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		ring_number = 2;
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	ring = endp->ring = &u132->ring[ring_number - 1];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, 0);
	return 0;
}
static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp, u8 address)
{
	if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	} else {
		struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
			GFP_ATOMIC);
		if (urbq == NULL) {
			endp->queue_size -= 1;
			return -ENOMEM;
		} else {
			list_add_tail(&urbq->urb_more, &endp->urb_more);
			urbq->urb = urb;
		}
	}
	return 0;
}
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;

	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}

	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	u132_endp_get_kref(u132, endp);
	if (usb_addr == 0) {
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	} else {	/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_get_kref(u132, udev);
		udev->enumeration = 2;
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	}
}
static int queue_control_on_old_endpoint(struct u132 *u132,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	if (usb_addr == 0) {
		if (usb_pipein(urb->pipe)) {
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
				endp->queue_size -= 1;
				list_add_tail(&urbq->urb_more,
		} else {	/* usb_pipeout(urb->pipe) */
			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
			int I = MAX_U132_UDEVS;
				struct u132_udev *udev = &u132->udev[++i];
				if (udev->usb_device) {
					udev->enumeration = 1;
					u132->addr[0].address = i;
					endp->udev_number = i;
					udev->udev_number = i;
					udev->usb_addr = usb_dev->devnum;
					u132_udev_init_kref(u132, udev);
					udev->endp_number_in[usb_endp] =
					u132_udev_get_kref(u132, udev);
					udev->endp_number_out[usb_endp] =
					udev->usb_device = usb_dev;
					((u8 *) (urb->setup_packet))[2] =
					u132_udev_get_kref(u132, udev);
					dev_err(&u132->platform_dev->dev, "run out of d"
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
				endp->queue_size -= 1;
				list_add_tail(&urbq->urb_more,
	} else {	/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (udev->enumeration != 2)
			udev->enumeration = 2;
		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			struct u132_urbq *urbq =
				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
			endp->queue_size -= 1;
			list_add_tail(&urbq->urb_more, &endp->urb_more);
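/*
 * u132_urb_enqueue() below seems to dispatch on usb_pipetype(urb->pipe):
 * interrupt and bulk urbs are appended to the endpoint already hanging off
 * urb->ep->hcpriv (or a new endpoint is created and queued when none
 * exists), isochronous transfers are rejected because the hardware does
 * not support them, and control transfers additionally handle the
 * address-zero enumeration case by claiming a free u132_udev slot.
 */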
static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
	struct u132 *u132 = hcd_to_u132(hcd);
	if (irqs_disabled()) {
		if (__GFP_WAIT & mem_flags) {
			printk(KERN_ERR "invalid context for function that migh"
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		struct usb_device *usb_dev = urb->dev;
		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
				spin_lock_irqsave(&endp->queue_lock.slock,
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
					retval = queue_int_on_old_endpoint(
						usb_hcd_unlink_urb_from_ep(
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					u132_endp_queue_work(u132, endp,
						msecs_to_jiffies(urb->interval))
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
			} else {	/*(endp == NULL) */
				return create_endpoint_and_queue_int(u132, udev,
					urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			dev_err(&u132->platform_dev->dev, "the hardware does no"
				"t support PIPE_ISOCHRONOUS\n");
		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
				spin_lock_irqsave(&endp->queue_lock.slock,
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
					retval = queue_bulk_on_old_endpoint(
						usb_hcd_unlink_urb_from_ep(
				spin_unlock_irqrestore(&endp->queue_lock.slock,
				u132_endp_queue_work(u132, endp, 0);
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return create_endpoint_and_queue_bulk(u132,
					udev, urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
			struct u132_endp *endp = urb->ep->hcpriv;
			u8 *b = urb->setup_packet;
			char data[30 * 3 + 4];
			int m = (sizeof(data) - 1) / 3;
			while (urb_size-- > 0) {
				} else if (i++ < m) {
					int w = sprintf(d, " %02X", *b++);
					d += sprintf(d, " ..");
				spin_lock_irqsave(&endp->queue_lock.slock,
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
					retval = queue_control_on_old_endpoint(
						usb_hcd_unlink_urb_from_ep(
				spin_unlock_irqrestore(&endp->queue_lock.slock,
				u132_endp_queue_work(u132, endp, 0);
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return create_endpoint_and_queue_control(u132,
					urb, usb_dev, usb_addr, usb_endp,
static int dequeue_from_overflow_chain(struct u132 *u132,
	struct u132_endp *endp, struct urb *urb)
{
	struct list_head *scan;
	struct list_head *head = &endp->urb_more;
	list_for_each(scan, head) {
		struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
		if (urbq->urb == urb) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			endp->queue_size -= 1;
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, 0);
	dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
		"[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
		"\n", urb, endp->endp_number, endp, endp->ring->number,
		endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
		endp->usb_endp, endp->usb_addr, endp->queue_size,
		endp->queue_next, endp->queue_last);
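/*
 * The dequeue path below appears to handle three cases: if the urb is at
 * the head of the ring it is flushed through the endpoint scheduler work
 * (dequeueing/edset_flush), if it sits elsewhere in the ring the later
 * entries are shuffled down over its slot (refilling from urb_more when
 * possible) and the urb is given back immediately, otherwise the overflow
 * chain is searched via dequeue_from_overflow_chain().
 */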
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	if (endp->queue_size == 0) {
		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
			endp->endp_number, endp, endp->ring->number,
			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
			endp->usb_endp, endp->usb_addr);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
		endp->dequeueing = 1;
		endp->edset_flush = 1;
		u132_endp_queue_work(u132, endp, 0);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_hcd_abandon_urb(u132, endp, urb, status);
		u16 queue_size = endp->queue_size;
		u16 queue_scan = endp->queue_next;
		struct urb **urb_slot = NULL;
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
			struct usb_hcd *hcd = u132_to_hcd(u132);
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			endp->queue_size -= 1;
			if (list_empty(&endp->urb_more)) {
				spin_unlock_irqrestore(&endp->queue_lock.slock,
				struct list_head *next = endp->urb_more.next;
				struct u132_urbq *urbq = list_entry(next,
					struct u132_urbq, urb_more);
				*urb_slot = urbq->urb;
				spin_unlock_irqrestore(&endp->queue_lock.slock,
			}
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, status);
		} else if (list_empty(&endp->urb_more)) {
			dev_err(&u132->platform_dev->dev, "urb=%p not found in "
				"endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
				"=%d size=%d next=%04X last=%04X\n", urb,
				endp->endp_number, endp, endp->ring->number,
				endp->input ? 'I' : ' ',
				endp->output ? 'O' : ' ', endp->usb_endp,
				endp->usb_addr, endp->queue_size,
				endp->queue_next, endp->queue_last);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
			retval = dequeue_from_overflow_chain(u132, endp,
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (usb_pipein(urb->pipe)) {
			u8 endp_number = udev->endp_number_in[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
			u8 endp_number = udev->endp_number_out[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
static void u132_endpoint_disable(struct usb_hcd *hcd,
	struct usb_host_endpoint *hep)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
			") has been removed %d\n", u132, hcd, hep,
		struct u132_endp *endp = hep->hcpriv;
		u132_endp_put_kref(u132, endp);
static int u132_get_frame(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
static int u132_roothub_descriptor(struct u132 *u132,
	struct usb_hub_descriptor *desc)
{
	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
	desc->bDescriptorType = 0x29;
	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = u132->num_ports;
	temp = 1 + (u132->num_ports / 8);
	desc->bDescLength = 7 + 2 * temp;
	if (rh_a & RH_A_NPS)
	if (rh_a & RH_A_PSM)
	if (rh_a & RH_A_NOCP)
	else if (rh_a & RH_A_OCPM)
	desc->wHubCharacteristics = cpu_to_le16(temp);
	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
	memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
	desc->bitmap[0] = rh_b & RH_B_DR;
	if (u132->num_ports > 7) {
		desc->bitmap[1] = (rh_b & RH_B_DR) >> 8;
		desc->bitmap[2] = 0xff;
		desc->bitmap[1] = 0xff;
static int u132_roothub_status(struct u132 *u132, __le32 *desc)
{
	int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
	*desc = cpu_to_le32(rh_status);
static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		int port = wIndex - 1;
		u32 rh_portstatus = -1;
		int ret_portstatus = u132_read_pcimem(u132,
			roothub.portstatus[port], &rh_portstatus);
		*desc = cpu_to_le32(rh_portstatus);
		if (*(u16 *) (desc + 2)) {
			dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
				"ge = %08X\n", port, *desc);
		return ret_portstatus;
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
#define PORT_RESET_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
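/*
 * Worked example of the wrap-aware compare (illustration only): with
 * 16-bit frame times now = 0xFFF8 and reset_done = 0x0002 (i.e. just past
 * the wrap), (s16)(0xFFF8 - 0x0002) is negative, so
 * tick_before(now, reset_done) still reports that the deadline has not
 * yet been reached.
 */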
static int u132_roothub_portreset(struct u132 *u132, int port_index)
{
	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	reset_done = now + PORT_RESET_MSEC;
		retval = u132_read_pcimem(u132,
			roothub.portstatus[port_index], &portstat);
		if (RH_PS_PRS & portstat)
	} while (tick_before(now, reset_done));
	if (RH_PS_PRS & portstat)
	if (RH_PS_CCS & portstat) {
		if (RH_PS_PRSC & portstat) {
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index],
		break;	/* start the next reset, sleep till it's probably done */
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
		msleep(PORT_RESET_HW_MSEC);
		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	} while (tick_before(now, reset_done));
static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
	if (wIndex == 0 || wIndex > u132->num_ports) {
		int port_index = wIndex - 1;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		case USB_PORT_FEAT_SUSPEND:
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PSS);
		case USB_PORT_FEAT_POWER:
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PPS);
		case USB_PORT_FEAT_RESET:
			retval = u132_roothub_portreset(u132, port_index);
static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
	if (wIndex == 0 || wIndex > u132->num_ports) {
		int port_index = wIndex - 1;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		case USB_PORT_FEAT_ENABLE:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_SUSPEND:
			if ((u132->hc_control & OHCI_CTRL_HCFS)
				dev_err(&u132->platform_dev->dev, "TODO resume_"
		case USB_PORT_FEAT_C_SUSPEND:
		case USB_PORT_FEAT_POWER:
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
/* the virtual root hub timer IRQ checks for hub status */
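/*
 * The returned bitmap follows the usual hub status-change layout (as the
 * loop below suggests): buf[0] bit 0 reports a hub-level change, bit
 * (i + 1) reports a change on port i for the first seven ports, and
 * buf[1] carries the remaining ports via bit (i - 7).
 */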
static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
			"ed %d\n", hcd, u132->going);
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
		int i, changed = 0, length = 1;
		if (u132->flags & OHCI_QUIRK_AMD756) {
			if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
				dev_err(&u132->platform_dev->dev, "bogus NDP, r"
					"ereads as NDP=%d\n",
					u132->hc_roothub_a & RH_A_NDP);
		if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
			buf[0] = changed = 1;
		if (u132->num_ports > 7) {
		for (i = 0; i < u132->num_ports; i++) {
			if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
				RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
					buf[0] |= 1 << (i + 1);
					buf[1] |= 1 << (i - 7);
			if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
			if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
		return changed ? length : 0;
static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
	u16 wIndex, char *buf, u16 wLength)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		mutex_lock(&u132->sw_lock);
		case ClearHubFeature:
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
		case ClearPortFeature:{
				retval = u132_roothub_clearportfeature(u132,
		case GetHubDescriptor:{
				retval = u132_roothub_descriptor(u132,
					(struct usb_hub_descriptor *)buf);
				retval = u132_roothub_status(u132,
		case GetPortStatus:{
				retval = u132_roothub_portstatus(u132,
					(__le32 *) buf, wIndex);
		case SetPortFeature:{
				retval = u132_roothub_setportfeature(u132,
		mutex_unlock(&u132->sw_lock);
static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
static int u132_bus_suspend(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");

static int u132_bus_resume(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");

#define u132_bus_suspend NULL
#define u132_bus_resume NULL
static struct hc_driver u132_hc_driver = {
	.description = hcd_name,
	.hcd_priv_size = sizeof(struct u132),
	.flags = HCD_USB11 | HCD_MEMORY,
	.reset = u132_hcd_reset,
	.start = u132_hcd_start,
	.stop = u132_hcd_stop,
	.urb_enqueue = u132_urb_enqueue,
	.urb_dequeue = u132_urb_dequeue,
	.endpoint_disable = u132_endpoint_disable,
	.get_frame_number = u132_get_frame,
	.hub_status_data = u132_hub_status_data,
	.hub_control = u132_hub_control,
	.bus_suspend = u132_bus_suspend,
	.bus_resume = u132_bus_resume,
	.start_port_reset = u132_start_port_reset,
/*
 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
 * is held for writing, thus this module must not call usb_remove_hcd()
 * synchronously - but instead should immediately stop activity to the
 * device and asynchronously call usb_remove_hcd()
 */
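/*
 * Throughout this file u132->going appears to act as the teardown state:
 * 0 means running, 1 means removal has started, and larger values mean the
 * device is already gone, which is why most entry points bail out early
 * when going is non-zero.
 */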
static int __devexit u132_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going++ > 1) {
		dev_err(&u132->platform_dev->dev, "already being remove"
		int rings = MAX_U132_RINGS;
		int endps = MAX_U132_ENDPS;
		dev_err(&u132->platform_dev->dev, "removing device u132"
			".%d\n", u132->sequence_num);
		mutex_lock(&u132->sw_lock);
		u132_monitor_cancel_work(u132);
		while (rings-- > 0) {
			struct u132_ring *ring = &u132->ring[rings];
			u132_ring_cancel_work(u132, ring);
		}
		while (endps-- > 0) {
			struct u132_endp *endp = u132->endp[endps];
			u132_endp_cancel_work(u132, endp);
		printk(KERN_INFO "removing device u132.%d\n",
			u132->sequence_num);
		mutex_unlock(&u132->sw_lock);
		usb_remove_hcd(hcd);
		u132_u132_put_kref(u132);
static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
{
	int rings = MAX_U132_RINGS;
	int ports = MAX_U132_PORTS;
	int addrs = MAX_U132_ADDRS;
	int udevs = MAX_U132_UDEVS;
	int endps = MAX_U132_ENDPS;
	u132->board = pdev->dev.platform_data;
	u132->platform_dev = pdev;
	mutex_init(&u132->sw_lock);
	mutex_init(&u132->scheduler_lock);
	while (rings-- > 0) {
		struct u132_ring *ring = &u132->ring[rings];
		ring->number = rings + 1;
		ring->curr_endp = NULL;
		INIT_DELAYED_WORK(&ring->scheduler,
			u132_hcd_ring_work_scheduler);
	mutex_lock(&u132->sw_lock);
	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
	while (ports-- > 0) {
		struct u132_port *port = &u132->port[ports];
	while (addrs-- > 0) {
		struct u132_addr *addr = &u132->addr[addrs];
	while (udevs-- > 0) {
		struct u132_udev *udev = &u132->udev[udevs];
		int i = ARRAY_SIZE(udev->endp_number_in);
		int o = ARRAY_SIZE(udev->endp_number_out);
		udev->usb_device = NULL;
		udev->udev_number = 0;
		udev->portnumber = 0;
			udev->endp_number_in[i] = 0;
			udev->endp_number_out[o] = 0;
		u132->endp[endps] = NULL;
	mutex_unlock(&u132->sw_lock);
static int __devinit u132_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	if (u132_exiting > 0)
	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
	retval = ftdi_read_pcimem(pdev, control, &control);
	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
	num_ports = rh_a & RH_A_NDP;	/* refuse to confuse usbcore */
	if (pdev->dev.dma_mask)
	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
		printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
		ftdi_elan_gone_away(pdev);
		struct u132 *u132 = hcd_to_u132(hcd);
		hcd->rsrc_start = 0;
		mutex_lock(&u132_module_lock);
		list_add_tail(&u132->u132_list, &u132_static_list);
		u132->sequence_num = ++u132_instances;
		mutex_unlock(&u132_module_lock);
		u132_u132_init_kref(u132);
		u132_initialise(u132, pdev);
		hcd->product_desc = "ELAN U132 Host Controller";
		retval = usb_add_hcd(hcd, 0, 0);
			dev_err(&u132->platform_dev->dev, "init error %d\n",
			u132_u132_put_kref(u132);
			u132_monitor_queue_work(u132, 100);
/* for this device there's no useful distinction between the controller
 * and its root hub, except that the root hub only gets direct PM calls
 * when CONFIG_USB_SUSPEND is enabled.
 */
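/*
 * In the suspend handler below, PM_EVENT_FREEZE seems to map onto a plain
 * bus suspend, while PM_EVENT_SUSPEND and PM_EVENT_HIBERNATE additionally
 * power down every root-hub port via port_power(u132, port, 0); resume
 * restores port power before calling u132_bus_resume().
 */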
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		int retval = 0, ports;
		switch (state.event) {
		case PM_EVENT_FREEZE:
			retval = u132_bus_suspend(hcd);
		case PM_EVENT_SUSPEND:
		case PM_EVENT_HIBERNATE:
			ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 0);
static int u132_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		if (!u132->port[0].power) {
			int ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 1);
		retval = u132_bus_resume(hcd);

#define u132_suspend NULL
#define u132_resume NULL
/*
 * this driver is loaded explicitly by ftdi_u132
 *
 * the platform_driver struct is static because it is per type of module
 */
static struct platform_driver u132_platform_driver = {
	.probe = u132_probe,
	.remove = __devexit_p(u132_remove),
	.suspend = u132_suspend,
	.resume = u132_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
static int __init u132_hcd_init(void)
{
	INIT_LIST_HEAD(&u132_static_list);
	mutex_init(&u132_module_lock);
	printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__,
	workqueue = create_singlethread_workqueue("u132");
	retval = platform_driver_register(&u132_platform_driver);

module_init(u132_hcd_init);
static void __exit u132_hcd_exit(void)
{
	mutex_lock(&u132_module_lock);
	mutex_unlock(&u132_module_lock);
	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
		platform_device_unregister(u132->platform_dev);
	platform_driver_unregister(&u132_platform_driver);
	printk(KERN_INFO "u132-hcd driver deregistered\n");
	wait_event(u132_hcd_wait, u132_instances == 0);
	flush_workqueue(workqueue);
	destroy_workqueue(workqueue);

module_exit(u132_hcd_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:u132_hcd");