drivers/usb/host/u132-hcd.c
/*
 * Host Controller Driver for the Elan Digital Systems U132 adapter
 *
 * Copyright (C) 2006 Elan Digital Systems Limited
 * http://www.elandigitalsystems.com
 *
 * Author and Maintainer - Tony Olech - Elan Digital Systems
 * tony.olech@elandigitalsystems.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver was written by Tony Olech (tony.olech@elandigitalsystems.com)
 * based on various USB host drivers in the 2.6.15 linux kernel
 * with constant reference to the 3rd Edition of Linux Device Drivers
 * published by O'Reilly.
 *
 * The U132 adapter is a USB to CardBus adapter specifically designed
 * for PC cards that contain an OHCI host controller. A typical PC card
 * is the Orange Mobile 3G Option GlobeTrotter Fusion card.
 *
 * The U132 adapter will *NOT* work with PC cards that do not contain
 * an OHCI controller. A simple way to test whether a PC card has an
 * OHCI controller as an interface is to insert the PC card directly
 * into a laptop (or desktop) with a CardBus slot: if "lspci" shows
 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller,
 * then there is a good chance that the U132 adapter will support the
 * PC card. (You also need the specific client driver for the PC card.)
 *
 * Please inform the Author and Maintainer about any PC cards that
 * contain an OHCI Host Controller and work when directly connected to
 * an embedded CardBus slot but do not work when they are connected
 * via an ELAN U132 adapter.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include "../core/hcd.h"
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
 * If you're going to try stuff like this, you need to split
 * out shareable stuff (register declarations?) into its own
 * file, maybe name <linux/usb/ohci.h>
 */
#include "ohci.h"
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
	OHCI_INTR_WDH)
MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
MODULE_DESCRIPTION("U132 USB Host Controller Driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent */
static int distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware,
	"true to distrust firmware power/overcurrent setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
 * u132_module_lock exists to protect access to global variables
 */
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
static struct list_head u132_static_list;
/*
 * end of the global variables protected by u132_module_lock
 */
static struct workqueue_struct *workqueue;
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
static const char *cc_to_text[16] = {
	"No Error ",
	"CRC Error ",
	"Bit Stuff ",
	"Data Togg ",
	"Stall ",
	"DevNotResp ",
	"PIDCheck ",
	"UnExpPID ",
	"DataOver ",
	"DataUnder ",
	"(for hw) ",
	"(for hw) ",
	"BufferOver ",
	"BuffUnder ",
	"(for HCD) ",
	"(for HCD) "
};
struct u132_port {
	struct u132 *u132;
	int reset;
	int enable;
	int power;
	int Status;
};
struct u132_addr {
	u8 address;
};
struct u132_udev {
	struct kref kref;
	struct usb_device *usb_device;
	u8 enumeration;
	u8 udev_number;
	u8 usb_addr;
	u8 portnumber;
	u8 endp_number_in[16];
	u8 endp_number_out[16];
};
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1 << ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE - 1)
struct u132_urbq {
	struct list_head urb_more;
	struct urb *urb;
};
struct u132_spin {
	spinlock_t slock;
};
struct u132_endp {
	struct kref kref;
	u8 udev_number;
	u8 endp_number;
	u8 usb_addr;
	u8 usb_endp;
	struct u132 *u132;
	struct list_head endp_ring;
	struct u132_ring *ring;
	unsigned toggle_bits:2;
	unsigned active:1;
	unsigned delayed:1;
	unsigned input:1;
	unsigned output:1;
	unsigned pipetype:2;
	unsigned dequeueing:1;
	unsigned edset_flush:1;
	unsigned spare_bits:14;
	unsigned long jiffies;
	struct usb_host_endpoint *hep;
	struct u132_spin queue_lock;
	u16 queue_size;
	u16 queue_last;
	u16 queue_next;
	struct urb *urb_list[ENDP_QUEUE_SIZE];
	struct list_head urb_more;
	struct delayed_work scheduler;
};
struct u132_ring {
	unsigned in_use:1;
	unsigned length:7;
	u8 number;
	struct u132 *u132;
	struct u132_endp *curr_endp;
	struct delayed_work scheduler;
};
struct u132 {
	struct kref kref;
	struct list_head u132_list;
	struct mutex sw_lock;
	struct mutex scheduler_lock;
	struct u132_platform_data *board;
	struct platform_device *platform_dev;
	struct u132_ring ring[MAX_U132_RINGS];
	int sequence_num;
	int going;
	int power;
	int reset;
	int num_ports;
	u32 hc_control;
	u32 hc_fminterval;
	u32 hc_roothub_status;
	u32 hc_roothub_a;
	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
	int flags;
	unsigned long next_statechange;
	struct delayed_work monitor;
	int num_endpoints;
	struct u132_addr addr[MAX_U132_ADDRS];
	struct u132_udev udev[MAX_U132_UDEVS];
	struct u132_port port[MAX_U132_PORTS];
	struct u132_endp *endp[MAX_U132_ENDPS];
};
/*
 * these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data);
#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data);
#define u132_read_pcimem(u132, member, data) \
	usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data);
#define u132_write_pcimem(u132, member, data) \
	usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data);
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
	u8 udev_number = udev->udev_number;
	return container_of(udev, struct u132, udev[udev_number]);
}

static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
	return (struct u132 *)(hcd->hcd_priv);
}

static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
	return container_of((void *)u132, struct usb_hcd, hcd_priv);
}

static inline void u132_disable(struct u132 *u132)
{
	u132_to_hcd(u132)->state = HC_STATE_HALT;
}

#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
	USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
	USB_PORT_STAT_C_RESET) << 16)
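/*
 * u132_hcd_delete() runs when the last reference to a u132 instance is
 * dropped: it unlinks the instance from the module-wide list, decrements
 * the instance count and releases the embedding usb_hcd, which also
 * frees the u132 state stored in hcd_priv.
 */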
255 static void u132_hcd_delete(struct kref *kref)
257 struct u132 *u132 = kref_to_u132(kref);
258 struct platform_device *pdev = u132->platform_dev;
259 struct usb_hcd *hcd = u132_to_hcd(u132);
260 u132->going += 1;
261 mutex_lock(&u132_module_lock);
262 list_del_init(&u132->u132_list);
263 u132_instances -= 1;
264 mutex_unlock(&u132_module_lock);
265 dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
266 "2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev);
267 usb_put_hcd(hcd);
270 static inline void u132_u132_put_kref(struct u132 *u132)
272 kref_put(&u132->kref, u132_hcd_delete);
275 static inline void u132_u132_init_kref(struct u132 *u132)
277 kref_init(&u132->kref);
280 static void u132_udev_delete(struct kref *kref)
282 struct u132_udev *udev = kref_to_u132_udev(kref);
283 udev->udev_number = 0;
284 udev->usb_device = NULL;
285 udev->usb_addr = 0;
286 udev->enumeration = 0;
289 static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
291 kref_put(&udev->kref, u132_udev_delete);
294 static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
296 kref_get(&udev->kref);
299 static inline void u132_udev_init_kref(struct u132 *u132,
300 struct u132_udev *udev)
302 kref_init(&udev->kref);
305 static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
307 kref_put(&u132->kref, u132_hcd_delete);
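/*
 * The delayed ring-scheduler work owns a reference on the u132:
 * u132_ring_queue_work() takes one before queueing, while
 * u132_ring_requeue_work() and u132_ring_cancel_work() drop it again
 * when the work cannot be queued or is cancelled.
 */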
310 static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
311 unsigned int delta)
313 if (delta > 0) {
314 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
315 return;
316 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
317 return;
318 kref_put(&u132->kref, u132_hcd_delete);
319 return;
322 static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
323 unsigned int delta)
325 kref_get(&u132->kref);
326 u132_ring_requeue_work(u132, ring, delta);
327 return;
330 static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
332 if (cancel_delayed_work(&ring->scheduler))
333 kref_put(&u132->kref, u132_hcd_delete);
336 static void u132_endp_delete(struct kref *kref)
338 struct u132_endp *endp = kref_to_u132_endp(kref);
339 struct u132 *u132 = endp->u132;
340 u8 usb_addr = endp->usb_addr;
341 u8 usb_endp = endp->usb_endp;
342 u8 address = u132->addr[usb_addr].address;
343 struct u132_udev *udev = &u132->udev[address];
344 u8 endp_number = endp->endp_number;
345 struct usb_host_endpoint *hep = endp->hep;
346 struct u132_ring *ring = endp->ring;
347 struct list_head *head = &endp->endp_ring;
348 ring->length -= 1;
349 if (endp == ring->curr_endp) {
350 if (list_empty(head)) {
351 ring->curr_endp = NULL;
352 list_del(head);
353 } else {
354 struct u132_endp *next_endp = list_entry(head->next,
355 struct u132_endp, endp_ring);
356 ring->curr_endp = next_endp;
357 list_del(head);
359 } else
360 list_del(head);
361 if (endp->input) {
362 udev->endp_number_in[usb_endp] = 0;
363 u132_udev_put_kref(u132, udev);
365 if (endp->output) {
366 udev->endp_number_out[usb_endp] = 0;
367 u132_udev_put_kref(u132, udev);
369 u132->endp[endp_number - 1] = NULL;
370 hep->hcpriv = NULL;
371 kfree(endp);
372 u132_u132_put_kref(u132);
375 static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
377 kref_put(&endp->kref, u132_endp_delete);
380 static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
382 kref_get(&endp->kref);
385 static inline void u132_endp_init_kref(struct u132 *u132,
386 struct u132_endp *endp)
388 kref_init(&endp->kref);
389 kref_get(&u132->kref);
392 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
393 unsigned int delta)
395 if (queue_delayed_work(workqueue, &endp->scheduler, delta))
396 kref_get(&endp->kref);
399 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
401 if (cancel_delayed_work(&endp->scheduler))
402 kref_put(&endp->kref, u132_endp_delete);
405 static inline void u132_monitor_put_kref(struct u132 *u132)
407 kref_put(&u132->kref, u132_hcd_delete);
410 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
412 if (queue_delayed_work(workqueue, &u132->monitor, delta))
413 kref_get(&u132->kref);
416 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
418 if (!queue_delayed_work(workqueue, &u132->monitor, delta))
419 kref_put(&u132->kref, u132_hcd_delete);
422 static void u132_monitor_cancel_work(struct u132 *u132)
424 if (cancel_delayed_work(&u132->monitor))
425 kref_put(&u132->kref, u132_hcd_delete);
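/*
 * read_roothub_info() refreshes the cached OHCI control, roothub status,
 * roothub.a and per-port status registers over the ftdi-elan transport,
 * and rejects controllers whose revision is neither 1.0 nor 1.1.
 */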
428 static int read_roothub_info(struct u132 *u132)
430 u32 revision;
431 int retval;
432 retval = u132_read_pcimem(u132, revision, &revision);
433 if (retval) {
434 dev_err(&u132->platform_dev->dev, "error %d accessing device co"
435 "ntrol\n", retval);
436 return retval;
437 } else if ((revision & 0xFF) == 0x10) {
438 } else if ((revision & 0xFF) == 0x11) {
439 } else {
440 dev_err(&u132->platform_dev->dev, "device revision is not valid"
441 " %08X\n", revision);
442 return -ENODEV;
444 retval = u132_read_pcimem(u132, control, &u132->hc_control);
445 if (retval) {
446 dev_err(&u132->platform_dev->dev, "error %d accessing device co"
447 "ntrol\n", retval);
448 return retval;
450 retval = u132_read_pcimem(u132, roothub.status,
451 &u132->hc_roothub_status);
452 if (retval) {
453 dev_err(&u132->platform_dev->dev, "error %d accessing device re"
454 "g roothub.status\n", retval);
455 return retval;
457 retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a);
458 if (retval) {
459 dev_err(&u132->platform_dev->dev, "error %d accessing device re"
460 "g roothub.a\n", retval);
461 return retval;
464 int I = u132->num_ports;
465 int i = 0;
466 while (I-- > 0) {
467 retval = u132_read_pcimem(u132, roothub.portstatus[i],
468 &u132->hc_roothub_portstatus[i]);
469 if (retval) {
470 dev_err(&u132->platform_dev->dev, "error %d acc"
471 "essing device roothub.portstatus[%d]\n"
472 , retval, i);
473 return retval;
474 } else
475 i += 1;
478 return 0;
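/*
 * Monitor work: periodically re-read the root hub registers and treat a
 * transport failure as the U132 adapter having gone away.
 */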
481 static void u132_hcd_monitor_work(struct work_struct *work)
483 struct u132 *u132 = container_of(work, struct u132, monitor.work);
484 if (u132->going > 1) {
485 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
486 , u132->going);
487 u132_monitor_put_kref(u132);
488 return;
489 } else if (u132->going > 0) {
490 dev_err(&u132->platform_dev->dev, "device is being removed\n");
491 u132_monitor_put_kref(u132);
492 return;
493 } else {
494 int retval;
495 mutex_lock(&u132->sw_lock);
496 retval = read_roothub_info(u132);
497 if (retval) {
498 struct usb_hcd *hcd = u132_to_hcd(u132);
499 u132_disable(u132);
500 u132->going = 1;
501 mutex_unlock(&u132->sw_lock);
502 usb_hc_died(hcd);
503 ftdi_elan_gone_away(u132->platform_dev);
504 u132_monitor_put_kref(u132);
505 return;
506 } else {
507 u132_monitor_requeue_work(u132, 500);
508 mutex_unlock(&u132->sw_lock);
509 return;
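/*
 * Complete an urb: unlink it from the endpoint queue, promote any urb
 * parked on the overflow list into the ring buffer, release the ring,
 * kick the ring scheduler and hand the urb back to usbcore.
 */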
514 static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
515 struct urb *urb, int status)
517 struct u132_ring *ring;
518 unsigned long irqs;
519 struct usb_hcd *hcd = u132_to_hcd(u132);
520 urb->error_count = 0;
521 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
522 usb_hcd_unlink_urb_from_ep(hcd, urb);
523 endp->queue_next += 1;
524 if (ENDP_QUEUE_SIZE > --endp->queue_size) {
525 endp->active = 0;
526 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
527 } else {
528 struct list_head *next = endp->urb_more.next;
529 struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
530 urb_more);
531 list_del(next);
532 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
533 urbq->urb;
534 endp->active = 0;
535 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
536 kfree(urbq);
538 mutex_lock(&u132->scheduler_lock);
539 ring = endp->ring;
540 ring->in_use = 0;
541 u132_ring_cancel_work(u132, ring);
542 u132_ring_queue_work(u132, ring, 0);
543 mutex_unlock(&u132->scheduler_lock);
544 u132_endp_put_kref(u132, endp);
545 usb_hcd_giveback_urb(hcd, urb, status);
546 return;
549 static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
550 struct urb *urb, int status)
552 u132_endp_put_kref(u132, endp);
555 static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
556 struct urb *urb, int status)
558 unsigned long irqs;
559 struct usb_hcd *hcd = u132_to_hcd(u132);
560 urb->error_count = 0;
561 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
562 usb_hcd_unlink_urb_from_ep(hcd, urb);
563 endp->queue_next += 1;
564 if (ENDP_QUEUE_SIZE > --endp->queue_size) {
565 endp->active = 0;
566 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
567 } else {
568 struct list_head *next = endp->urb_more.next;
569 struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
570 urb_more);
571 list_del(next);
572 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
573 urbq->urb;
574 endp->active = 0;
575 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
576 kfree(urbq);
	}
	usb_hcd_giveback_urb(hcd, urb, status);
578 return;
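/*
 * Thin wrappers around the usb_ftdi_elan_edset_*() transfer submission
 * calls; they exist mainly to keep the long completion-callback
 * signature in one place.
 */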
581 static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
582 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
583 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
584 int toggle_bits, int error_count, int condition_code, int repeat_number,
585 int halted, int skipped, int actual, int non_null))
587 return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
588 urb, address, endp->usb_endp, toggle_bits, callback);
591 static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
592 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
593 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
594 int toggle_bits, int error_count, int condition_code, int repeat_number,
595 int halted, int skipped, int actual, int non_null))
597 return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
598 urb, address, endp->usb_endp, toggle_bits, callback);
601 static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
602 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
603 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
604 int toggle_bits, int error_count, int condition_code, int repeat_number,
605 int halted, int skipped, int actual, int non_null))
607 return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
608 endp, urb, address, endp->usb_endp, toggle_bits, callback);
611 static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
612 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
613 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
614 int toggle_bits, int error_count, int condition_code, int repeat_number,
615 int halted, int skipped, int actual, int non_null))
617 return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
618 endp, urb, address, endp->usb_endp, toggle_bits, callback);
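/*
 * The u132_hcd_..._recv() and ..._sent() functions below are completion
 * callbacks invoked by the ftdi-elan transport. Each one re-checks for
 * device removal, dequeueing and urb unlinking under scheduler_lock
 * before copying data, submitting the next stage or giving the urb back.
 */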
/*
 * must not LOCK sw_lock
 */
626 static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
627 int len, int toggle_bits, int error_count, int condition_code,
628 int repeat_number, int halted, int skipped, int actual, int non_null)
630 struct u132_endp *endp = data;
631 struct u132 *u132 = endp->u132;
632 u8 address = u132->addr[endp->usb_addr].address;
633 struct u132_udev *udev = &u132->udev[address];
634 mutex_lock(&u132->scheduler_lock);
635 if (u132->going > 1) {
636 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
637 , u132->going);
638 mutex_unlock(&u132->scheduler_lock);
639 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
640 return;
641 } else if (endp->dequeueing) {
642 endp->dequeueing = 0;
643 mutex_unlock(&u132->scheduler_lock);
644 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
645 return;
646 } else if (u132->going > 0) {
647 dev_err(&u132->platform_dev->dev, "device is being removed "
648 "urb=%p\n", urb);
649 mutex_unlock(&u132->scheduler_lock);
650 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
651 return;
652 } else if (!urb->unlinked) {
653 struct u132_ring *ring = endp->ring;
654 u8 *u = urb->transfer_buffer + urb->actual_length;
655 u8 *b = buf;
656 int L = len;
658 while (L-- > 0)
659 *u++ = *b++;
661 urb->actual_length += len;
662 if ((condition_code == TD_CC_NOERROR) &&
663 (urb->transfer_buffer_length > urb->actual_length)) {
664 endp->toggle_bits = toggle_bits;
665 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
666 1 & toggle_bits);
667 if (urb->actual_length > 0) {
668 int retval;
669 mutex_unlock(&u132->scheduler_lock);
670 retval = edset_single(u132, ring, endp, urb,
671 address, endp->toggle_bits,
672 u132_hcd_interrupt_recv);
673 if (retval != 0)
674 u132_hcd_giveback_urb(u132, endp, urb,
675 retval);
676 } else {
677 ring->in_use = 0;
678 endp->active = 0;
679 endp->jiffies = jiffies +
680 msecs_to_jiffies(urb->interval);
681 u132_ring_cancel_work(u132, ring);
682 u132_ring_queue_work(u132, ring, 0);
683 mutex_unlock(&u132->scheduler_lock);
684 u132_endp_put_kref(u132, endp);
686 return;
687 } else if ((condition_code == TD_DATAUNDERRUN) &&
688 ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
689 endp->toggle_bits = toggle_bits;
690 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
691 1 & toggle_bits);
692 mutex_unlock(&u132->scheduler_lock);
693 u132_hcd_giveback_urb(u132, endp, urb, 0);
694 return;
695 } else {
696 if (condition_code == TD_CC_NOERROR) {
697 endp->toggle_bits = toggle_bits;
698 usb_settoggle(udev->usb_device, endp->usb_endp,
699 0, 1 & toggle_bits);
700 } else if (condition_code == TD_CC_STALL) {
701 endp->toggle_bits = 0x2;
702 usb_settoggle(udev->usb_device, endp->usb_endp,
703 0, 0);
704 } else {
705 endp->toggle_bits = 0x2;
706 usb_settoggle(udev->usb_device, endp->usb_endp,
707 0, 0);
708 dev_err(&u132->platform_dev->dev, "urb=%p givin"
709 "g back INTERRUPT %s\n", urb,
710 cc_to_text[condition_code]);
712 mutex_unlock(&u132->scheduler_lock);
713 u132_hcd_giveback_urb(u132, endp, urb,
714 cc_to_error[condition_code]);
715 return;
717 } else {
718 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
719 "unlinked=%d\n", urb, urb->unlinked);
720 mutex_unlock(&u132->scheduler_lock);
721 u132_hcd_giveback_urb(u132, endp, urb, 0);
722 return;
726 static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
727 int len, int toggle_bits, int error_count, int condition_code,
728 int repeat_number, int halted, int skipped, int actual, int non_null)
730 struct u132_endp *endp = data;
731 struct u132 *u132 = endp->u132;
732 u8 address = u132->addr[endp->usb_addr].address;
733 mutex_lock(&u132->scheduler_lock);
734 if (u132->going > 1) {
735 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
736 , u132->going);
737 mutex_unlock(&u132->scheduler_lock);
738 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
739 return;
740 } else if (endp->dequeueing) {
741 endp->dequeueing = 0;
742 mutex_unlock(&u132->scheduler_lock);
743 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
744 return;
745 } else if (u132->going > 0) {
746 dev_err(&u132->platform_dev->dev, "device is being removed "
747 "urb=%p\n", urb);
748 mutex_unlock(&u132->scheduler_lock);
749 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
750 return;
751 } else if (!urb->unlinked) {
752 struct u132_ring *ring = endp->ring;
753 urb->actual_length += len;
754 endp->toggle_bits = toggle_bits;
755 if (urb->transfer_buffer_length > urb->actual_length) {
756 int retval;
757 mutex_unlock(&u132->scheduler_lock);
758 retval = edset_output(u132, ring, endp, urb, address,
759 endp->toggle_bits, u132_hcd_bulk_output_sent);
760 if (retval != 0)
761 u132_hcd_giveback_urb(u132, endp, urb, retval);
762 return;
763 } else {
764 mutex_unlock(&u132->scheduler_lock);
765 u132_hcd_giveback_urb(u132, endp, urb, 0);
766 return;
768 } else {
769 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
770 "unlinked=%d\n", urb, urb->unlinked);
771 mutex_unlock(&u132->scheduler_lock);
772 u132_hcd_giveback_urb(u132, endp, urb, 0);
773 return;
777 static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
778 int len, int toggle_bits, int error_count, int condition_code,
779 int repeat_number, int halted, int skipped, int actual, int non_null)
781 struct u132_endp *endp = data;
782 struct u132 *u132 = endp->u132;
783 u8 address = u132->addr[endp->usb_addr].address;
784 struct u132_udev *udev = &u132->udev[address];
785 mutex_lock(&u132->scheduler_lock);
786 if (u132->going > 1) {
787 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
788 , u132->going);
789 mutex_unlock(&u132->scheduler_lock);
790 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
791 return;
792 } else if (endp->dequeueing) {
793 endp->dequeueing = 0;
794 mutex_unlock(&u132->scheduler_lock);
795 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
796 return;
797 } else if (u132->going > 0) {
798 dev_err(&u132->platform_dev->dev, "device is being removed "
799 "urb=%p\n", urb);
800 mutex_unlock(&u132->scheduler_lock);
801 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
802 return;
803 } else if (!urb->unlinked) {
804 struct u132_ring *ring = endp->ring;
805 u8 *u = urb->transfer_buffer + urb->actual_length;
806 u8 *b = buf;
807 int L = len;
809 while (L-- > 0)
810 *u++ = *b++;
812 urb->actual_length += len;
813 if ((condition_code == TD_CC_NOERROR) &&
814 (urb->transfer_buffer_length > urb->actual_length)) {
815 int retval;
816 endp->toggle_bits = toggle_bits;
817 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
818 1 & toggle_bits);
819 mutex_unlock(&u132->scheduler_lock);
820 retval = usb_ftdi_elan_edset_input(u132->platform_dev,
821 ring->number, endp, urb, address,
822 endp->usb_endp, endp->toggle_bits,
823 u132_hcd_bulk_input_recv);
824 if (retval != 0)
825 u132_hcd_giveback_urb(u132, endp, urb, retval);
826 return;
827 } else if (condition_code == TD_CC_NOERROR) {
828 endp->toggle_bits = toggle_bits;
829 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
830 1 & toggle_bits);
831 mutex_unlock(&u132->scheduler_lock);
832 u132_hcd_giveback_urb(u132, endp, urb,
833 cc_to_error[condition_code]);
834 return;
835 } else if ((condition_code == TD_DATAUNDERRUN) &&
836 ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
837 endp->toggle_bits = toggle_bits;
838 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
839 1 & toggle_bits);
840 mutex_unlock(&u132->scheduler_lock);
841 u132_hcd_giveback_urb(u132, endp, urb, 0);
842 return;
843 } else if (condition_code == TD_DATAUNDERRUN) {
844 endp->toggle_bits = toggle_bits;
845 usb_settoggle(udev->usb_device, endp->usb_endp, 0,
846 1 & toggle_bits);
847 dev_warn(&u132->platform_dev->dev, "urb=%p(SHORT NOT OK"
848 ") giving back BULK IN %s\n", urb,
849 cc_to_text[condition_code]);
850 mutex_unlock(&u132->scheduler_lock);
851 u132_hcd_giveback_urb(u132, endp, urb, 0);
852 return;
853 } else if (condition_code == TD_CC_STALL) {
854 endp->toggle_bits = 0x2;
855 usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
856 mutex_unlock(&u132->scheduler_lock);
857 u132_hcd_giveback_urb(u132, endp, urb,
858 cc_to_error[condition_code]);
859 return;
860 } else {
861 endp->toggle_bits = 0x2;
862 usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
863 dev_err(&u132->platform_dev->dev, "urb=%p giving back B"
864 "ULK IN code=%d %s\n", urb, condition_code,
865 cc_to_text[condition_code]);
866 mutex_unlock(&u132->scheduler_lock);
867 u132_hcd_giveback_urb(u132, endp, urb,
868 cc_to_error[condition_code]);
869 return;
871 } else {
872 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
873 "unlinked=%d\n", urb, urb->unlinked);
874 mutex_unlock(&u132->scheduler_lock);
875 u132_hcd_giveback_urb(u132, endp, urb, 0);
876 return;
880 static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf,
881 int len, int toggle_bits, int error_count, int condition_code,
882 int repeat_number, int halted, int skipped, int actual, int non_null)
884 struct u132_endp *endp = data;
885 struct u132 *u132 = endp->u132;
886 mutex_lock(&u132->scheduler_lock);
887 if (u132->going > 1) {
888 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
889 , u132->going);
890 mutex_unlock(&u132->scheduler_lock);
891 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
892 return;
893 } else if (endp->dequeueing) {
894 endp->dequeueing = 0;
895 mutex_unlock(&u132->scheduler_lock);
896 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
897 return;
898 } else if (u132->going > 0) {
899 dev_err(&u132->platform_dev->dev, "device is being removed "
900 "urb=%p\n", urb);
901 mutex_unlock(&u132->scheduler_lock);
902 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
903 return;
904 } else if (!urb->unlinked) {
905 mutex_unlock(&u132->scheduler_lock);
906 u132_hcd_giveback_urb(u132, endp, urb, 0);
907 return;
908 } else {
909 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
910 "unlinked=%d\n", urb, urb->unlinked);
911 mutex_unlock(&u132->scheduler_lock);
912 u132_hcd_giveback_urb(u132, endp, urb, 0);
913 return;
917 static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
918 int len, int toggle_bits, int error_count, int condition_code,
919 int repeat_number, int halted, int skipped, int actual, int non_null)
921 struct u132_endp *endp = data;
922 struct u132 *u132 = endp->u132;
923 u8 address = u132->addr[endp->usb_addr].address;
924 mutex_lock(&u132->scheduler_lock);
925 if (u132->going > 1) {
926 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
927 , u132->going);
928 mutex_unlock(&u132->scheduler_lock);
929 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
930 return;
931 } else if (endp->dequeueing) {
932 endp->dequeueing = 0;
933 mutex_unlock(&u132->scheduler_lock);
934 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
935 return;
936 } else if (u132->going > 0) {
937 dev_err(&u132->platform_dev->dev, "device is being removed "
938 "urb=%p\n", urb);
939 mutex_unlock(&u132->scheduler_lock);
940 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
941 return;
942 } else if (!urb->unlinked) {
943 struct u132_ring *ring = endp->ring;
944 u8 *u = urb->transfer_buffer;
945 u8 *b = buf;
946 int L = len;
948 while (L-- > 0)
949 *u++ = *b++;
951 urb->actual_length = len;
952 if ((condition_code == TD_CC_NOERROR) || ((condition_code ==
953 TD_DATAUNDERRUN) && ((urb->transfer_flags &
954 URB_SHORT_NOT_OK) == 0))) {
955 int retval;
956 mutex_unlock(&u132->scheduler_lock);
957 retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
958 ring->number, endp, urb, address,
959 endp->usb_endp, 0x3,
960 u132_hcd_configure_empty_sent);
961 if (retval != 0)
962 u132_hcd_giveback_urb(u132, endp, urb, retval);
963 return;
964 } else if (condition_code == TD_CC_STALL) {
965 mutex_unlock(&u132->scheduler_lock);
966 dev_warn(&u132->platform_dev->dev, "giving back SETUP I"
967 "NPUT STALL urb %p\n", urb);
968 u132_hcd_giveback_urb(u132, endp, urb,
969 cc_to_error[condition_code]);
970 return;
971 } else {
972 mutex_unlock(&u132->scheduler_lock);
973 dev_err(&u132->platform_dev->dev, "giving back SETUP IN"
974 "PUT %s urb %p\n", cc_to_text[condition_code],
975 urb);
976 u132_hcd_giveback_urb(u132, endp, urb,
977 cc_to_error[condition_code]);
978 return;
980 } else {
981 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
982 "unlinked=%d\n", urb, urb->unlinked);
983 mutex_unlock(&u132->scheduler_lock);
984 u132_hcd_giveback_urb(u132, endp, urb, 0);
985 return;
989 static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf,
990 int len, int toggle_bits, int error_count, int condition_code,
991 int repeat_number, int halted, int skipped, int actual, int non_null)
993 struct u132_endp *endp = data;
994 struct u132 *u132 = endp->u132;
995 mutex_lock(&u132->scheduler_lock);
996 if (u132->going > 1) {
997 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
998 , u132->going);
999 mutex_unlock(&u132->scheduler_lock);
1000 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1001 return;
1002 } else if (endp->dequeueing) {
1003 endp->dequeueing = 0;
1004 mutex_unlock(&u132->scheduler_lock);
1005 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1006 return;
1007 } else if (u132->going > 0) {
1008 dev_err(&u132->platform_dev->dev, "device is being removed "
1009 "urb=%p\n", urb);
1010 mutex_unlock(&u132->scheduler_lock);
1011 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1012 return;
1013 } else if (!urb->unlinked) {
1014 mutex_unlock(&u132->scheduler_lock);
1015 u132_hcd_giveback_urb(u132, endp, urb, 0);
1016 return;
1017 } else {
1018 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1019 "unlinked=%d\n", urb, urb->unlinked);
1020 mutex_unlock(&u132->scheduler_lock);
1021 u132_hcd_giveback_urb(u132, endp, urb, 0);
1022 return;
1026 static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
1027 int len, int toggle_bits, int error_count, int condition_code,
1028 int repeat_number, int halted, int skipped, int actual, int non_null)
1030 struct u132_endp *endp = data;
1031 struct u132 *u132 = endp->u132;
1032 u8 address = u132->addr[endp->usb_addr].address;
1033 mutex_lock(&u132->scheduler_lock);
1034 if (u132->going > 1) {
1035 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1036 , u132->going);
1037 mutex_unlock(&u132->scheduler_lock);
1038 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1039 return;
1040 } else if (endp->dequeueing) {
1041 endp->dequeueing = 0;
1042 mutex_unlock(&u132->scheduler_lock);
1043 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1044 return;
1045 } else if (u132->going > 0) {
1046 dev_err(&u132->platform_dev->dev, "device is being removed "
1047 "urb=%p\n", urb);
1048 mutex_unlock(&u132->scheduler_lock);
1049 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1050 return;
1051 } else if (!urb->unlinked) {
1052 if (usb_pipein(urb->pipe)) {
1053 int retval;
1054 struct u132_ring *ring = endp->ring;
1055 mutex_unlock(&u132->scheduler_lock);
1056 retval = usb_ftdi_elan_edset_input(u132->platform_dev,
1057 ring->number, endp, urb, address,
1058 endp->usb_endp, 0,
1059 u132_hcd_configure_input_recv);
1060 if (retval != 0)
1061 u132_hcd_giveback_urb(u132, endp, urb, retval);
1062 return;
1063 } else {
1064 int retval;
1065 struct u132_ring *ring = endp->ring;
1066 mutex_unlock(&u132->scheduler_lock);
1067 retval = usb_ftdi_elan_edset_input(u132->platform_dev,
1068 ring->number, endp, urb, address,
1069 endp->usb_endp, 0,
1070 u132_hcd_configure_empty_recv);
1071 if (retval != 0)
1072 u132_hcd_giveback_urb(u132, endp, urb, retval);
1073 return;
1075 } else {
1076 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1077 "unlinked=%d\n", urb, urb->unlinked);
1078 mutex_unlock(&u132->scheduler_lock);
1079 u132_hcd_giveback_urb(u132, endp, urb, 0);
1080 return;
1084 static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb,
1085 u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
1086 int repeat_number, int halted, int skipped, int actual, int non_null)
1088 struct u132_endp *endp = data;
1089 struct u132 *u132 = endp->u132;
1090 u8 address = u132->addr[endp->usb_addr].address;
1091 struct u132_udev *udev = &u132->udev[address];
1092 mutex_lock(&u132->scheduler_lock);
1093 if (u132->going > 1) {
1094 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1095 , u132->going);
1096 mutex_unlock(&u132->scheduler_lock);
1097 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1098 return;
1099 } else if (endp->dequeueing) {
1100 endp->dequeueing = 0;
1101 mutex_unlock(&u132->scheduler_lock);
1102 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1103 return;
1104 } else if (u132->going > 0) {
1105 dev_err(&u132->platform_dev->dev, "device is being removed "
1106 "urb=%p\n", urb);
1107 mutex_unlock(&u132->scheduler_lock);
1108 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1109 return;
1110 } else if (!urb->unlinked) {
1111 u132->addr[0].address = 0;
1112 endp->usb_addr = udev->usb_addr;
1113 mutex_unlock(&u132->scheduler_lock);
1114 u132_hcd_giveback_urb(u132, endp, urb, 0);
1115 return;
1116 } else {
1117 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1118 "unlinked=%d\n", urb, urb->unlinked);
1119 mutex_unlock(&u132->scheduler_lock);
1120 u132_hcd_giveback_urb(u132, endp, urb, 0);
1121 return;
1125 static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
1126 u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
1127 int repeat_number, int halted, int skipped, int actual, int non_null)
1129 struct u132_endp *endp = data;
1130 struct u132 *u132 = endp->u132;
1131 mutex_lock(&u132->scheduler_lock);
1132 if (u132->going > 1) {
1133 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1134 , u132->going);
1135 mutex_unlock(&u132->scheduler_lock);
1136 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1137 return;
1138 } else if (endp->dequeueing) {
1139 endp->dequeueing = 0;
1140 mutex_unlock(&u132->scheduler_lock);
1141 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1142 return;
1143 } else if (u132->going > 0) {
1144 dev_err(&u132->platform_dev->dev, "device is being removed "
1145 "urb=%p\n", urb);
1146 mutex_unlock(&u132->scheduler_lock);
1147 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1148 return;
1149 } else if (!urb->unlinked) {
1150 int retval;
1151 struct u132_ring *ring = endp->ring;
1152 mutex_unlock(&u132->scheduler_lock);
1153 retval = usb_ftdi_elan_edset_input(u132->platform_dev,
1154 ring->number, endp, urb, 0, endp->usb_endp, 0,
1155 u132_hcd_enumeration_empty_recv);
1156 if (retval != 0)
1157 u132_hcd_giveback_urb(u132, endp, urb, retval);
1158 return;
1159 } else {
1160 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1161 "unlinked=%d\n", urb, urb->unlinked);
1162 mutex_unlock(&u132->scheduler_lock);
1163 u132_hcd_giveback_urb(u132, endp, urb, 0);
1164 return;
1168 static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf,
1169 int len, int toggle_bits, int error_count, int condition_code,
1170 int repeat_number, int halted, int skipped, int actual, int non_null)
1172 struct u132_endp *endp = data;
1173 struct u132 *u132 = endp->u132;
1174 mutex_lock(&u132->scheduler_lock);
1175 if (u132->going > 1) {
1176 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1177 , u132->going);
1178 mutex_unlock(&u132->scheduler_lock);
1179 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1180 return;
1181 } else if (endp->dequeueing) {
1182 endp->dequeueing = 0;
1183 mutex_unlock(&u132->scheduler_lock);
1184 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1185 return;
1186 } else if (u132->going > 0) {
1187 dev_err(&u132->platform_dev->dev, "device is being removed "
1188 "urb=%p\n", urb);
1189 mutex_unlock(&u132->scheduler_lock);
1190 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1191 return;
1192 } else if (!urb->unlinked) {
1193 mutex_unlock(&u132->scheduler_lock);
1194 u132_hcd_giveback_urb(u132, endp, urb, 0);
1195 return;
1196 } else {
1197 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1198 "unlinked=%d\n", urb, urb->unlinked);
1199 mutex_unlock(&u132->scheduler_lock);
1200 u132_hcd_giveback_urb(u132, endp, urb, 0);
1201 return;
1205 static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
1206 int len, int toggle_bits, int error_count, int condition_code,
1207 int repeat_number, int halted, int skipped, int actual, int non_null)
1209 struct u132_endp *endp = data;
1210 struct u132 *u132 = endp->u132;
1211 u8 address = u132->addr[endp->usb_addr].address;
1212 mutex_lock(&u132->scheduler_lock);
1213 if (u132->going > 1) {
1214 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1215 , u132->going);
1216 mutex_unlock(&u132->scheduler_lock);
1217 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1218 return;
1219 } else if (endp->dequeueing) {
1220 endp->dequeueing = 0;
1221 mutex_unlock(&u132->scheduler_lock);
1222 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1223 return;
1224 } else if (u132->going > 0) {
1225 dev_err(&u132->platform_dev->dev, "device is being removed "
1226 "urb=%p\n", urb);
1227 mutex_unlock(&u132->scheduler_lock);
1228 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1229 return;
1230 } else if (!urb->unlinked) {
1231 int retval;
1232 struct u132_ring *ring = endp->ring;
1233 u8 *u = urb->transfer_buffer;
1234 u8 *b = buf;
1235 int L = len;
1237 while (L-- > 0)
1238 *u++ = *b++;
1240 urb->actual_length = len;
1241 mutex_unlock(&u132->scheduler_lock);
1242 retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
1243 ring->number, endp, urb, address, endp->usb_endp, 0x3,
1244 u132_hcd_initial_empty_sent);
1245 if (retval != 0)
1246 u132_hcd_giveback_urb(u132, endp, urb, retval);
1247 return;
1248 } else {
1249 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1250 "unlinked=%d\n", urb, urb->unlinked);
1251 mutex_unlock(&u132->scheduler_lock);
1252 u132_hcd_giveback_urb(u132, endp, urb, 0);
1253 return;
1257 static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
1258 int len, int toggle_bits, int error_count, int condition_code,
1259 int repeat_number, int halted, int skipped, int actual, int non_null)
1261 struct u132_endp *endp = data;
1262 struct u132 *u132 = endp->u132;
1263 u8 address = u132->addr[endp->usb_addr].address;
1264 mutex_lock(&u132->scheduler_lock);
1265 if (u132->going > 1) {
1266 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1267 , u132->going);
1268 mutex_unlock(&u132->scheduler_lock);
1269 u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
1270 return;
1271 } else if (endp->dequeueing) {
1272 endp->dequeueing = 0;
1273 mutex_unlock(&u132->scheduler_lock);
1274 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1275 return;
1276 } else if (u132->going > 0) {
1277 dev_err(&u132->platform_dev->dev, "device is being removed "
1278 "urb=%p\n", urb);
1279 mutex_unlock(&u132->scheduler_lock);
1280 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1281 return;
1282 } else if (!urb->unlinked) {
1283 int retval;
1284 struct u132_ring *ring = endp->ring;
1285 mutex_unlock(&u132->scheduler_lock);
1286 retval = usb_ftdi_elan_edset_input(u132->platform_dev,
1287 ring->number, endp, urb, address, endp->usb_endp, 0,
1288 u132_hcd_initial_input_recv);
1289 if (retval != 0)
1290 u132_hcd_giveback_urb(u132, endp, urb, retval);
1291 return;
1292 } else {
1293 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1294 "unlinked=%d\n", urb, urb->unlinked);
1295 mutex_unlock(&u132->scheduler_lock);
1296 u132_hcd_giveback_urb(u132, endp, urb, 0);
1297 return;
/*
 * this work function is only executed from the work queue
 */
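/*
 * Advance ring->curr_endp to the next endpoint that has urbs queued and
 * is no longer delayed and queue endpoint work; otherwise re-arm the
 * ring work for the earliest endpoint wakeup time.
 */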
1305 static void u132_hcd_ring_work_scheduler(struct work_struct *work)
1307 struct u132_ring *ring =
1308 container_of(work, struct u132_ring, scheduler.work);
1309 struct u132 *u132 = ring->u132;
1310 mutex_lock(&u132->scheduler_lock);
1311 if (ring->in_use) {
1312 mutex_unlock(&u132->scheduler_lock);
1313 u132_ring_put_kref(u132, ring);
1314 return;
1315 } else if (ring->curr_endp) {
1316 struct u132_endp *last_endp = ring->curr_endp;
1317 struct list_head *scan;
1318 struct list_head *head = &last_endp->endp_ring;
1319 unsigned long wakeup = 0;
1320 list_for_each(scan, head) {
1321 struct u132_endp *endp = list_entry(scan,
1322 struct u132_endp, endp_ring);
1323 if (endp->queue_next == endp->queue_last) {
1324 } else if ((endp->delayed == 0)
1325 || time_after_eq(jiffies, endp->jiffies)) {
1326 ring->curr_endp = endp;
1327 u132_endp_cancel_work(u132, last_endp);
1328 u132_endp_queue_work(u132, last_endp, 0);
1329 mutex_unlock(&u132->scheduler_lock);
1330 u132_ring_put_kref(u132, ring);
1331 return;
1332 } else {
1333 unsigned long delta = endp->jiffies - jiffies;
1334 if (delta > wakeup)
1335 wakeup = delta;
1338 if (last_endp->queue_next == last_endp->queue_last) {
1339 } else if ((last_endp->delayed == 0) || time_after_eq(jiffies,
1340 last_endp->jiffies)) {
1341 u132_endp_cancel_work(u132, last_endp);
1342 u132_endp_queue_work(u132, last_endp, 0);
1343 mutex_unlock(&u132->scheduler_lock);
1344 u132_ring_put_kref(u132, ring);
1345 return;
1346 } else {
1347 unsigned long delta = last_endp->jiffies - jiffies;
1348 if (delta > wakeup)
1349 wakeup = delta;
1351 if (wakeup > 0) {
1352 u132_ring_requeue_work(u132, ring, wakeup);
1353 mutex_unlock(&u132->scheduler_lock);
1354 return;
1355 } else {
1356 mutex_unlock(&u132->scheduler_lock);
1357 u132_ring_put_kref(u132, ring);
1358 return;
1360 } else {
1361 mutex_unlock(&u132->scheduler_lock);
1362 u132_ring_put_kref(u132, ring);
1363 return;
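/*
 * Per-endpoint scheduler work: flush the hardware if a dequeue is
 * pending, otherwise claim the ring and start the next queued urb,
 * dispatching on the pipe type (interrupt, control setup/enumeration,
 * bulk in or bulk out).
 */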
1367 static void u132_hcd_endp_work_scheduler(struct work_struct *work)
1369 struct u132_ring *ring;
1370 struct u132_endp *endp =
1371 container_of(work, struct u132_endp, scheduler.work);
1372 struct u132 *u132 = endp->u132;
1373 mutex_lock(&u132->scheduler_lock);
1374 ring = endp->ring;
1375 if (endp->edset_flush) {
1376 endp->edset_flush = 0;
1377 if (endp->dequeueing)
1378 usb_ftdi_elan_edset_flush(u132->platform_dev,
1379 ring->number, endp);
1380 mutex_unlock(&u132->scheduler_lock);
1381 u132_endp_put_kref(u132, endp);
1382 return;
1383 } else if (endp->active) {
1384 mutex_unlock(&u132->scheduler_lock);
1385 u132_endp_put_kref(u132, endp);
1386 return;
1387 } else if (ring->in_use) {
1388 mutex_unlock(&u132->scheduler_lock);
1389 u132_endp_put_kref(u132, endp);
1390 return;
1391 } else if (endp->queue_next == endp->queue_last) {
1392 mutex_unlock(&u132->scheduler_lock);
1393 u132_endp_put_kref(u132, endp);
1394 return;
1395 } else if (endp->pipetype == PIPE_INTERRUPT) {
1396 u8 address = u132->addr[endp->usb_addr].address;
1397 if (ring->in_use) {
1398 mutex_unlock(&u132->scheduler_lock);
1399 u132_endp_put_kref(u132, endp);
1400 return;
1401 } else {
1402 int retval;
1403 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1404 endp->queue_next];
1405 endp->active = 1;
1406 ring->curr_endp = endp;
1407 ring->in_use = 1;
1408 mutex_unlock(&u132->scheduler_lock);
1409 retval = edset_single(u132, ring, endp, urb, address,
1410 endp->toggle_bits, u132_hcd_interrupt_recv);
1411 if (retval != 0)
1412 u132_hcd_giveback_urb(u132, endp, urb, retval);
1413 return;
1415 } else if (endp->pipetype == PIPE_CONTROL) {
1416 u8 address = u132->addr[endp->usb_addr].address;
1417 if (ring->in_use) {
1418 mutex_unlock(&u132->scheduler_lock);
1419 u132_endp_put_kref(u132, endp);
1420 return;
1421 } else if (address == 0) {
1422 int retval;
1423 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1424 endp->queue_next];
1425 endp->active = 1;
1426 ring->curr_endp = endp;
1427 ring->in_use = 1;
1428 mutex_unlock(&u132->scheduler_lock);
1429 retval = edset_setup(u132, ring, endp, urb, address,
1430 0x2, u132_hcd_initial_setup_sent);
1431 if (retval != 0)
1432 u132_hcd_giveback_urb(u132, endp, urb, retval);
1433 return;
1434 } else if (endp->usb_addr == 0) {
1435 int retval;
1436 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1437 endp->queue_next];
1438 endp->active = 1;
1439 ring->curr_endp = endp;
1440 ring->in_use = 1;
1441 mutex_unlock(&u132->scheduler_lock);
1442 retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
1443 u132_hcd_enumeration_address_sent);
1444 if (retval != 0)
1445 u132_hcd_giveback_urb(u132, endp, urb, retval);
1446 return;
1447 } else {
1448 int retval;
1449 u8 address = u132->addr[endp->usb_addr].address;
1450 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1451 endp->queue_next];
1452 endp->active = 1;
1453 ring->curr_endp = endp;
1454 ring->in_use = 1;
1455 mutex_unlock(&u132->scheduler_lock);
1456 retval = edset_setup(u132, ring, endp, urb, address,
1457 0x2, u132_hcd_configure_setup_sent);
1458 if (retval != 0)
1459 u132_hcd_giveback_urb(u132, endp, urb, retval);
1460 return;
1462 } else {
1463 if (endp->input) {
1464 u8 address = u132->addr[endp->usb_addr].address;
1465 if (ring->in_use) {
1466 mutex_unlock(&u132->scheduler_lock);
1467 u132_endp_put_kref(u132, endp);
1468 return;
1469 } else {
1470 int retval;
1471 struct urb *urb = endp->urb_list[
1472 ENDP_QUEUE_MASK & endp->queue_next];
1473 endp->active = 1;
1474 ring->curr_endp = endp;
1475 ring->in_use = 1;
1476 mutex_unlock(&u132->scheduler_lock);
1477 retval = edset_input(u132, ring, endp, urb,
1478 address, endp->toggle_bits,
1479 u132_hcd_bulk_input_recv);
1480 if (retval == 0) {
1481 } else
1482 u132_hcd_giveback_urb(u132, endp, urb,
1483 retval);
1484 return;
1486 } else { /* output pipe */
1487 u8 address = u132->addr[endp->usb_addr].address;
1488 if (ring->in_use) {
1489 mutex_unlock(&u132->scheduler_lock);
1490 u132_endp_put_kref(u132, endp);
1491 return;
1492 } else {
1493 int retval;
1494 struct urb *urb = endp->urb_list[
1495 ENDP_QUEUE_MASK & endp->queue_next];
1496 endp->active = 1;
1497 ring->curr_endp = endp;
1498 ring->in_use = 1;
1499 mutex_unlock(&u132->scheduler_lock);
1500 retval = edset_output(u132, ring, endp, urb,
1501 address, endp->toggle_bits,
1502 u132_hcd_bulk_output_sent);
1503 if (retval == 0) {
1504 } else
1505 u132_hcd_giveback_urb(u132, endp, urb,
1506 retval);
1507 return;
#ifdef CONFIG_PM
static void port_power(struct u132 *u132, int pn, int is_on)
{
	u132->port[pn].power = is_on;
}
#endif
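/*
 * Track the root hub power state; powering off also halts the host
 * controller state machine (HC_STATE_HALT).
 */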
static void u132_power(struct u132 *u132, int is_on)
{
	/* hub is inactive unless the port is powered */
	struct usb_hcd *hcd = u132_to_hcd(u132);
	if (is_on) {
		if (u132->power)
			return;
		u132->power = 1;
	} else {
		u132->power = 0;
		hcd->state = HC_STATE_HALT;
	}
}
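/*
 * Reprogram HcFmInterval (toggling the FIT bit) and HcPeriodicStart
 * (90% of the frame interval) from the cached hc_fminterval value.
 */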
1535 static int u132_periodic_reinit(struct u132 *u132)
1537 int retval;
1538 u32 fi = u132->hc_fminterval & 0x03fff;
1539 u32 fit;
1540 u32 fminterval;
1541 retval = u132_read_pcimem(u132, fminterval, &fminterval);
1542 if (retval)
1543 return retval;
1544 fit = fminterval & FIT;
1545 retval = u132_write_pcimem(u132, fminterval,
1546 (fit ^ FIT) | u132->hc_fminterval);
1547 if (retval)
1548 return retval;
1549 retval = u132_write_pcimem(u132, periodicstart,
1550 ((9 * fi) / 10) & 0x3fff);
1551 if (retval)
1552 return retval;
1553 return 0;
1556 static char *hcfs2string(int state)
1558 switch (state) {
1559 case OHCI_USB_RESET:
1560 return "reset";
1561 case OHCI_USB_RESUME:
1562 return "resume";
1563 case OHCI_USB_OPER:
1564 return "operational";
1565 case OHCI_USB_SUSPEND:
1566 return "suspend";
1568 return "?";
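/*
 * One-time controller initialisation: halt the HC, mask interrupts and
 * discover the number of root hub ports from roothub.a.
 */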
1571 static int u132_init(struct u132 *u132)
1573 int retval;
1574 u32 control;
1575 u132_disable(u132);
1576 u132->next_statechange = jiffies;
1577 retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE);
1578 if (retval)
1579 return retval;
1580 retval = u132_read_pcimem(u132, control, &control);
1581 if (retval)
1582 return retval;
1583 if (u132->num_ports == 0) {
1584 u32 rh_a = -1;
1585 retval = u132_read_pcimem(u132, roothub.a, &rh_a);
1586 if (retval)
1587 return retval;
1588 u132->num_ports = rh_a & RH_A_NDP;
1589 retval = read_roothub_info(u132);
1590 if (retval)
1591 return retval;
1593 if (u132->num_ports > MAX_U132_PORTS)
1594 return -EINVAL;
1596 return 0;
/* Start an OHCI controller: reset the USB bus and the controller,
 * set the bus operational and enable interrupts.
 */
1604 static int u132_run(struct u132 *u132)
1606 int retval;
1607 u32 control;
1608 u32 status;
1609 u32 fminterval;
1610 u32 periodicstart;
1611 u32 cmdstatus;
1612 u32 roothub_a;
1613 int mask = OHCI_INTR_INIT;
1614 int first = u132->hc_fminterval == 0;
1615 int sleep_time = 0;
1616 int reset_timeout = 30; /* ... allow extra time */
1617 u132_disable(u132);
1618 if (first) {
1619 u32 temp;
1620 retval = u132_read_pcimem(u132, fminterval, &temp);
1621 if (retval)
1622 return retval;
1623 u132->hc_fminterval = temp & 0x3fff;
1624 u132->hc_fminterval |= FSMP(u132->hc_fminterval) << 16;
1626 retval = u132_read_pcimem(u132, control, &u132->hc_control);
1627 if (retval)
1628 return retval;
1629 dev_info(&u132->platform_dev->dev, "resetting from state '%s', control "
1630 "= %08X\n", hcfs2string(u132->hc_control & OHCI_CTRL_HCFS),
1631 u132->hc_control);
1632 switch (u132->hc_control & OHCI_CTRL_HCFS) {
1633 case OHCI_USB_OPER:
1634 sleep_time = 0;
1635 break;
1636 case OHCI_USB_SUSPEND:
1637 case OHCI_USB_RESUME:
1638 u132->hc_control &= OHCI_CTRL_RWC;
1639 u132->hc_control |= OHCI_USB_RESUME;
1640 sleep_time = 10;
1641 break;
1642 default:
1643 u132->hc_control &= OHCI_CTRL_RWC;
1644 u132->hc_control |= OHCI_USB_RESET;
1645 sleep_time = 50;
1646 break;
1648 retval = u132_write_pcimem(u132, control, u132->hc_control);
1649 if (retval)
1650 return retval;
1651 retval = u132_read_pcimem(u132, control, &control);
1652 if (retval)
1653 return retval;
1654 msleep(sleep_time);
1655 retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
1656 if (retval)
1657 return retval;
1658 if (!(roothub_a & RH_A_NPS)) {
1659 int temp; /* power down each port */
1660 for (temp = 0; temp < u132->num_ports; temp++) {
1661 retval = u132_write_pcimem(u132,
1662 roothub.portstatus[temp], RH_PS_LSDA);
1663 if (retval)
1664 return retval;
1667 retval = u132_read_pcimem(u132, control, &control);
1668 if (retval)
1669 return retval;
1670 retry:
1671 retval = u132_read_pcimem(u132, cmdstatus, &status);
1672 if (retval)
1673 return retval;
1674 retval = u132_write_pcimem(u132, cmdstatus, OHCI_HCR);
1675 if (retval)
1676 return retval;
1677 extra: {
1678 retval = u132_read_pcimem(u132, cmdstatus, &status);
1679 if (retval)
1680 return retval;
1681 if (0 != (status & OHCI_HCR)) {
1682 if (--reset_timeout == 0) {
1683 dev_err(&u132->platform_dev->dev, "USB HC reset"
1684 " timed out!\n");
1685 return -ENODEV;
1686 } else {
1687 msleep(5);
1688 goto extra;
1692 if (u132->flags & OHCI_QUIRK_INITRESET) {
1693 retval = u132_write_pcimem(u132, control, u132->hc_control);
1694 if (retval)
1695 return retval;
1696 retval = u132_read_pcimem(u132, control, &control);
1697 if (retval)
1698 return retval;
1700 retval = u132_write_pcimem(u132, ed_controlhead, 0x00000000);
1701 if (retval)
1702 return retval;
1703 retval = u132_write_pcimem(u132, ed_bulkhead, 0x11000000);
1704 if (retval)
1705 return retval;
1706 retval = u132_write_pcimem(u132, hcca, 0x00000000);
1707 if (retval)
1708 return retval;
1709 retval = u132_periodic_reinit(u132);
1710 if (retval)
1711 return retval;
1712 retval = u132_read_pcimem(u132, fminterval, &fminterval);
1713 if (retval)
1714 return retval;
1715 retval = u132_read_pcimem(u132, periodicstart, &periodicstart);
1716 if (retval)
1717 return retval;
1718 if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) {
1719 if (!(u132->flags & OHCI_QUIRK_INITRESET)) {
1720 u132->flags |= OHCI_QUIRK_INITRESET;
1721 goto retry;
1722 } else
1723 dev_err(&u132->platform_dev->dev, "init err(%08x %04x)"
1724 "\n", fminterval, periodicstart);
1725 } /* start controller operations */
1726 u132->hc_control &= OHCI_CTRL_RWC;
1727 u132->hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER;
1728 retval = u132_write_pcimem(u132, control, u132->hc_control);
1729 if (retval)
1730 return retval;
1731 retval = u132_write_pcimem(u132, cmdstatus, OHCI_BLF);
1732 if (retval)
1733 return retval;
1734 retval = u132_read_pcimem(u132, cmdstatus, &cmdstatus);
1735 if (retval)
1736 return retval;
1737 retval = u132_read_pcimem(u132, control, &control);
1738 if (retval)
1739 return retval;
1740 u132_to_hcd(u132)->state = HC_STATE_RUNNING;
1741 retval = u132_write_pcimem(u132, roothub.status, RH_HS_DRWE);
1742 if (retval)
1743 return retval;
1744 retval = u132_write_pcimem(u132, intrstatus, mask);
1745 if (retval)
1746 return retval;
1747 retval = u132_write_pcimem(u132, intrdisable,
1748 OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO |
1749 OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH |
1750 OHCI_INTR_SO);
1751 if (retval)
1752 return retval; /* handle root hub init quirks ... */
1753 retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
1754 if (retval)
1755 return retval;
1756 roothub_a &= ~(RH_A_PSM | RH_A_OCPM);
1757 if (u132->flags & OHCI_QUIRK_SUPERIO) {
1758 roothub_a |= RH_A_NOCP;
1759 roothub_a &= ~(RH_A_POTPGT | RH_A_NPS);
1760 retval = u132_write_pcimem(u132, roothub.a, roothub_a);
1761 if (retval)
1762 return retval;
1763 } else if ((u132->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
1764 roothub_a |= RH_A_NPS;
1765 retval = u132_write_pcimem(u132, roothub.a, roothub_a);
1766 if (retval)
1767 return retval;
1769 retval = u132_write_pcimem(u132, roothub.status, RH_HS_LPSC);
1770 if (retval)
1771 return retval;
1772 retval = u132_write_pcimem(u132, roothub.b,
1773 (roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM);
1774 if (retval)
1775 return retval;
1776 retval = u132_read_pcimem(u132, control, &control);
1777 if (retval)
1778 return retval;
1779 mdelay((roothub_a >> 23) & 0x1fe);
1780 u132_to_hcd(u132)->state = HC_STATE_RUNNING;
1781 return 0;
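/* usbcore "stop" callback: power the root hub down unless the adapter is
 * already going away.
 */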
1784 static void u132_hcd_stop(struct usb_hcd *hcd)
1786 struct u132 *u132 = hcd_to_u132(hcd);
1787 if (u132->going > 1) {
1788 dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p) has b"
1789 "een removed %d\n", u132, hcd, u132->going);
1790 } else if (u132->going > 0) {
1791 dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
1792 "ed\n", hcd);
1793 } else {
1794 mutex_lock(&u132->sw_lock);
1795 msleep(100);
1796 u132_power(u132, 0);
1797 mutex_unlock(&u132->sw_lock);
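/*
 * hc_driver .start callback: pick up the bridged PC card's PCI IDs from
 * the platform data and select the matching OHCI quirk (AMD756, OPTi
 * workarounds unavailable, Compaq ZFMicro), then bring the controller up
 * with u132_run() under sw_lock.
 */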
1801 static int u132_hcd_start(struct usb_hcd *hcd)
1803 struct u132 *u132 = hcd_to_u132(hcd);
1804 if (u132->going > 1) {
1805 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1806 , u132->going);
1807 return -ENODEV;
1808 } else if (u132->going > 0) {
1809 dev_err(&u132->platform_dev->dev, "device is being removed\n");
1810 return -ESHUTDOWN;
1811 } else if (hcd->self.controller) {
1812 int retval;
1813 struct platform_device *pdev =
1814 to_platform_device(hcd->self.controller);
1815 u16 vendor = ((struct u132_platform_data *)
1816 (pdev->dev.platform_data))->vendor;
1817 u16 device = ((struct u132_platform_data *)
1818 (pdev->dev.platform_data))->device;
1819 mutex_lock(&u132->sw_lock);
1820 msleep(10);
1821 if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
1822 u132->flags = OHCI_QUIRK_AMD756;
1823 } else if (vendor == PCI_VENDOR_ID_OPTI && device == 0xc861) {
1824 dev_err(&u132->platform_dev->dev, "WARNING: OPTi workar"
1825 "ounds unavailable\n");
1826 } else if (vendor == PCI_VENDOR_ID_COMPAQ && device == 0xa0f8)
1827 u132->flags |= OHCI_QUIRK_ZFMICRO;
1828 retval = u132_run(u132);
1829 if (retval) {
1830 u132_disable(u132);
1831 u132->going = 1;
1833 msleep(100);
1834 mutex_unlock(&u132->sw_lock);
1835 return retval;
1836 } else {
1837 dev_err(&u132->platform_dev->dev, "platform_device missing\n");
1838 return -ENODEV;
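/*
 * hc_driver .reset callback: re-run u132_init() under sw_lock; on failure
 * the adapter is disabled and marked as going away.
 */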
1842 static int u132_hcd_reset(struct usb_hcd *hcd)
1844 struct u132 *u132 = hcd_to_u132(hcd);
1845 if (u132->going > 1) {
1846 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
1847 , u132->going);
1848 return -ENODEV;
1849 } else if (u132->going > 0) {
1850 dev_err(&u132->platform_dev->dev, "device is being removed\n");
1851 return -ESHUTDOWN;
1852 } else {
1853 int retval;
1854 mutex_lock(&u132->sw_lock);
1855 retval = u132_init(u132);
1856 if (retval) {
1857 u132_disable(u132);
1858 u132->going = 1;
1860 mutex_unlock(&u132->sw_lock);
1861 return retval;
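/*
 * Allocate a new u132_endp for an interrupt pipe, link the urb to it,
 * place the endpoint on ring[0] and schedule its worker after the urb's
 * polling interval. Called only when urb->ep->hcpriv is still NULL.
 */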
1865 static int create_endpoint_and_queue_int(struct u132 *u132,
1866 struct u132_udev *udev, struct urb *urb,
1867 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
1868 gfp_t mem_flags)
1870 struct u132_ring *ring;
1871 unsigned long irqs;
1872 int rc;
1873 u8 endp_number;
1874 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
1876 if (!endp)
1877 return -ENOMEM;
1879 spin_lock_init(&endp->queue_lock.slock);
1880 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1881 rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
1882 if (rc) {
1883 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1884 kfree(endp);
1885 return rc;
1888 endp_number = ++u132->num_endpoints;
1889 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
1890 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1891 INIT_LIST_HEAD(&endp->urb_more);
1892 ring = endp->ring = &u132->ring[0];
1893 if (ring->curr_endp) {
1894 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
1895 } else {
1896 INIT_LIST_HEAD(&endp->endp_ring);
1897 ring->curr_endp = endp;
1899 ring->length += 1;
1900 endp->dequeueing = 0;
1901 endp->edset_flush = 0;
1902 endp->active = 0;
1903 endp->delayed = 0;
1904 endp->endp_number = endp_number;
1905 endp->u132 = u132;
1906 endp->hep = urb->ep;
1907 endp->pipetype = usb_pipetype(urb->pipe);
1908 u132_endp_init_kref(u132, endp);
1909 if (usb_pipein(urb->pipe)) {
1910 endp->toggle_bits = 0x2;
1911 usb_settoggle(udev->usb_device, usb_endp, 0, 0);
1912 endp->input = 1;
1913 endp->output = 0;
1914 udev->endp_number_in[usb_endp] = endp_number;
1915 u132_udev_get_kref(u132, udev);
1916 } else {
1917 endp->toggle_bits = 0x2;
1918 usb_settoggle(udev->usb_device, usb_endp, 1, 0);
1919 endp->input = 0;
1920 endp->output = 1;
1921 udev->endp_number_out[usb_endp] = endp_number;
1922 u132_udev_get_kref(u132, udev);
1924 urb->hcpriv = u132;
1925 endp->delayed = 1;
1926 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
1927 endp->udev_number = address;
1928 endp->usb_addr = usb_addr;
1929 endp->usb_endp = usb_endp;
1930 endp->queue_size = 1;
1931 endp->queue_last = 0;
1932 endp->queue_next = 0;
1933 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
1934 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1935 u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
1936 return 0;
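/*
 * Queue a further interrupt urb on an existing endpoint; the caller holds
 * endp->queue_lock. Once the fixed urb_list (ENDP_QUEUE_SIZE entries) is
 * full, extra urbs are chained onto the urb_more overflow list with a
 * GFP_ATOMIC allocation.
 */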
1939 static int queue_int_on_old_endpoint(struct u132 *u132,
1940 struct u132_udev *udev, struct urb *urb,
1941 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
1942 u8 usb_endp, u8 address)
1944 urb->hcpriv = u132;
1945 endp->delayed = 1;
1946 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
1947 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
1948 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
1949 } else {
1950 struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
1951 GFP_ATOMIC);
1952 if (urbq == NULL) {
1953 endp->queue_size -= 1;
1954 return -ENOMEM;
1955 } else {
1956 list_add_tail(&urbq->urb_more, &endp->urb_more);
1957 urbq->urb = urb;
1960 return 0;
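/*
 * Bulk counterpart of create_endpoint_and_queue_int(): IN endpoints go on
 * ring[2] and OUT endpoints on ring[1] (ring_number 3 and 2 respectively),
 * and the worker is scheduled immediately rather than after an interval.
 */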
1963 static int create_endpoint_and_queue_bulk(struct u132 *u132,
1964 struct u132_udev *udev, struct urb *urb,
1965 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
1966 gfp_t mem_flags)
1968 int ring_number;
1969 struct u132_ring *ring;
1970 unsigned long irqs;
1971 int rc;
1972 u8 endp_number;
1973 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
1975 if (!endp)
1976 return -ENOMEM;
1978 spin_lock_init(&endp->queue_lock.slock);
1979 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1980 rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
1981 if (rc) {
1982 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1983 kfree(endp);
1984 return rc;
1987 endp_number = ++u132->num_endpoints;
1988 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
1989 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1990 INIT_LIST_HEAD(&endp->urb_more);
1991 endp->dequeueing = 0;
1992 endp->edset_flush = 0;
1993 endp->active = 0;
1994 endp->delayed = 0;
1995 endp->endp_number = endp_number;
1996 endp->u132 = u132;
1997 endp->hep = urb->ep;
1998 endp->pipetype = usb_pipetype(urb->pipe);
1999 u132_endp_init_kref(u132, endp);
2000 if (usb_pipein(urb->pipe)) {
2001 endp->toggle_bits = 0x2;
2002 usb_settoggle(udev->usb_device, usb_endp, 0, 0);
2003 ring_number = 3;
2004 endp->input = 1;
2005 endp->output = 0;
2006 udev->endp_number_in[usb_endp] = endp_number;
2007 u132_udev_get_kref(u132, udev);
2008 } else {
2009 endp->toggle_bits = 0x2;
2010 usb_settoggle(udev->usb_device, usb_endp, 1, 0);
2011 ring_number = 2;
2012 endp->input = 0;
2013 endp->output = 1;
2014 udev->endp_number_out[usb_endp] = endp_number;
2015 u132_udev_get_kref(u132, udev);
2017 ring = endp->ring = &u132->ring[ring_number - 1];
2018 if (ring->curr_endp) {
2019 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
2020 } else {
2021 INIT_LIST_HEAD(&endp->endp_ring);
2022 ring->curr_endp = endp;
2024 ring->length += 1;
2025 urb->hcpriv = u132;
2026 endp->udev_number = address;
2027 endp->usb_addr = usb_addr;
2028 endp->usb_endp = usb_endp;
2029 endp->queue_size = 1;
2030 endp->queue_last = 0;
2031 endp->queue_next = 0;
2032 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2033 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2034 u132_endp_queue_work(u132, endp, 0);
2035 return 0;
2038 static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
2039 struct urb *urb,
2040 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
2041 u8 usb_endp, u8 address)
2043 urb->hcpriv = u132;
2044 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2045 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2046 } else {
2047 struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
2048 GFP_ATOMIC);
2049 if (urbq == NULL) {
2050 endp->queue_size -= 1;
2051 return -ENOMEM;
2052 } else {
2053 list_add_tail(&urbq->urb_more, &endp->urb_more);
2054 urbq->urb = urb;
2057 return 0;
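/*
 * Create an endpoint for a control pipe. Control endpoints live on
 * ring[0] and are marked both input and output; address 0 (the default
 * address used during enumeration) has its udev kref initialised here.
 */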
2060 static int create_endpoint_and_queue_control(struct u132 *u132,
2061 struct urb *urb,
2062 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
2063 gfp_t mem_flags)
2065 struct u132_ring *ring;
2066 unsigned long irqs;
2067 int rc;
2068 u8 endp_number;
2069 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
2071 if (!endp)
2072 return -ENOMEM;
2074 spin_lock_init(&endp->queue_lock.slock);
2075 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2076 rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
2077 if (rc) {
2078 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2079 kfree(endp);
2080 return rc;
2083 endp_number = ++u132->num_endpoints;
2084 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
2085 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2086 INIT_LIST_HEAD(&endp->urb_more);
2087 ring = endp->ring = &u132->ring[0];
2088 if (ring->curr_endp) {
2089 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
2090 } else {
2091 INIT_LIST_HEAD(&endp->endp_ring);
2092 ring->curr_endp = endp;
2094 ring->length += 1;
2095 endp->dequeueing = 0;
2096 endp->edset_flush = 0;
2097 endp->active = 0;
2098 endp->delayed = 0;
2099 endp->endp_number = endp_number;
2100 endp->u132 = u132;
2101 endp->hep = urb->ep;
2102 u132_endp_init_kref(u132, endp);
2103 u132_endp_get_kref(u132, endp);
2104 if (usb_addr == 0) {
2105 u8 address = u132->addr[usb_addr].address;
2106 struct u132_udev *udev = &u132->udev[address];
2107 endp->udev_number = address;
2108 endp->usb_addr = usb_addr;
2109 endp->usb_endp = usb_endp;
2110 endp->input = 1;
2111 endp->output = 1;
2112 endp->pipetype = usb_pipetype(urb->pipe);
2113 u132_udev_init_kref(u132, udev);
2114 u132_udev_get_kref(u132, udev);
2115 udev->endp_number_in[usb_endp] = endp_number;
2116 udev->endp_number_out[usb_endp] = endp_number;
2117 urb->hcpriv = u132;
2118 endp->queue_size = 1;
2119 endp->queue_last = 0;
2120 endp->queue_next = 0;
2121 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2122 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2123 u132_endp_queue_work(u132, endp, 0);
2124 return 0;
2125 } else { /*(usb_addr > 0) */
2126 u8 address = u132->addr[usb_addr].address;
2127 struct u132_udev *udev = &u132->udev[address];
2128 endp->udev_number = address;
2129 endp->usb_addr = usb_addr;
2130 endp->usb_endp = usb_endp;
2131 endp->input = 1;
2132 endp->output = 1;
2133 endp->pipetype = usb_pipetype(urb->pipe);
2134 u132_udev_get_kref(u132, udev);
2135 udev->enumeration = 2;
2136 udev->endp_number_in[usb_endp] = endp_number;
2137 udev->endp_number_out[usb_endp] = endp_number;
2138 urb->hcpriv = u132;
2139 endp->queue_size = 1;
2140 endp->queue_last = 0;
2141 endp->queue_next = 0;
2142 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
2143 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2144 u132_endp_queue_work(u132, endp, 0);
2145 return 0;
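/*
 * Queue a control urb on an existing endpoint. The interesting case is an
 * OUT transfer to address 0 (SET_ADDRESS during enumeration): a free
 * u132_udev slot is claimed and byte 2 of the setup packet is rewritten
 * with the internal device number before the urb is queued.
 */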
2149 static int queue_control_on_old_endpoint(struct u132 *u132,
2150 struct urb *urb,
2151 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
2152 u8 usb_endp)
2154 if (usb_addr == 0) {
2155 if (usb_pipein(urb->pipe)) {
2156 urb->hcpriv = u132;
2157 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2158 endp->urb_list[ENDP_QUEUE_MASK &
2159 endp->queue_last++] = urb;
2160 } else {
2161 struct u132_urbq *urbq =
2162 kmalloc(sizeof(struct u132_urbq),
2163 GFP_ATOMIC);
2164 if (urbq == NULL) {
2165 endp->queue_size -= 1;
2166 return -ENOMEM;
2167 } else {
2168 list_add_tail(&urbq->urb_more,
2169 &endp->urb_more);
2170 urbq->urb = urb;
2173 return 0;
2174 } else { /* usb_pipeout(urb->pipe) */
2175 struct u132_addr *addr = &u132->addr[usb_dev->devnum];
2176 int I = MAX_U132_UDEVS;
2177 int i = 0;
2178 while (--I > 0) {
2179 struct u132_udev *udev = &u132->udev[++i];
2180 if (udev->usb_device) {
2181 continue;
2182 } else {
2183 udev->enumeration = 1;
2184 u132->addr[0].address = i;
2185 endp->udev_number = i;
2186 udev->udev_number = i;
2187 udev->usb_addr = usb_dev->devnum;
2188 u132_udev_init_kref(u132, udev);
2189 udev->endp_number_in[usb_endp] =
2190 endp->endp_number;
2191 u132_udev_get_kref(u132, udev);
2192 udev->endp_number_out[usb_endp] =
2193 endp->endp_number;
2194 udev->usb_device = usb_dev;
2195 ((u8 *) (urb->setup_packet))[2] =
2196 addr->address = i;
2197 u132_udev_get_kref(u132, udev);
2198 break;
2201 if (I == 0) {
2202 dev_err(&u132->platform_dev->dev, "run out of d"
2203 "evice space\n");
2204 return -EINVAL;
2206 urb->hcpriv = u132;
2207 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2208 endp->urb_list[ENDP_QUEUE_MASK &
2209 endp->queue_last++] = urb;
2210 } else {
2211 struct u132_urbq *urbq =
2212 kmalloc(sizeof(struct u132_urbq),
2213 GFP_ATOMIC);
2214 if (urbq == NULL) {
2215 endp->queue_size -= 1;
2216 return -ENOMEM;
2217 } else {
2218 list_add_tail(&urbq->urb_more,
2219 &endp->urb_more);
2220 urbq->urb = urb;
2223 return 0;
2225 } else { /*(usb_addr > 0) */
2226 u8 address = u132->addr[usb_addr].address;
2227 struct u132_udev *udev = &u132->udev[address];
2228 urb->hcpriv = u132;
2229 if (udev->enumeration != 2)
2230 udev->enumeration = 2;
2231 if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
2232 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
2233 urb;
2234 } else {
2235 struct u132_urbq *urbq =
2236 kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
2237 if (urbq == NULL) {
2238 endp->queue_size -= 1;
2239 return -ENOMEM;
2240 } else {
2241 list_add_tail(&urbq->urb_more, &endp->urb_more);
2242 urbq->urb = urb;
2245 return 0;
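/*
 * hc_driver .urb_enqueue callback: dispatch on the pipe type. Interrupt
 * and bulk urbs either join an existing endpoint queue or create a new
 * endpoint; isochronous transfers are rejected as unsupported by the
 * hardware; everything else is treated as a control transfer.
 */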
2249 static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2250 gfp_t mem_flags)
2252 struct u132 *u132 = hcd_to_u132(hcd);
2253 if (irqs_disabled()) {
2254 if (__GFP_WAIT & mem_flags) {
2255 printk(KERN_ERR "invalid context for function that migh"
2256 "t sleep\n");
2257 return -EINVAL;
2260 if (u132->going > 1) {
2261 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2262 , u132->going);
2263 return -ENODEV;
2264 } else if (u132->going > 0) {
2265 dev_err(&u132->platform_dev->dev, "device is being removed "
2266 "urb=%p\n", urb);
2267 return -ESHUTDOWN;
2268 } else {
2269 u8 usb_addr = usb_pipedevice(urb->pipe);
2270 u8 usb_endp = usb_pipeendpoint(urb->pipe);
2271 struct usb_device *usb_dev = urb->dev;
2272 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
2273 u8 address = u132->addr[usb_addr].address;
2274 struct u132_udev *udev = &u132->udev[address];
2275 struct u132_endp *endp = urb->ep->hcpriv;
2276 urb->actual_length = 0;
2277 if (endp) {
2278 unsigned long irqs;
2279 int retval;
2280 spin_lock_irqsave(&endp->queue_lock.slock,
2281 irqs);
2282 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2283 if (retval == 0) {
2284 retval = queue_int_on_old_endpoint(
2285 u132, udev, urb,
2286 usb_dev, endp,
2287 usb_addr, usb_endp,
2288 address);
2289 if (retval)
2290 usb_hcd_unlink_urb_from_ep(
2291 hcd, urb);
2293 spin_unlock_irqrestore(&endp->queue_lock.slock,
2294 irqs);
2295 if (retval) {
2296 return retval;
2297 } else {
2298 u132_endp_queue_work(u132, endp,
2299 msecs_to_jiffies(urb->interval))
2301 return 0;
2303 } else if (u132->num_endpoints == MAX_U132_ENDPS) {
2304 return -EINVAL;
2305 } else { /*(endp == NULL) */
2306 return create_endpoint_and_queue_int(u132, udev,
2307 urb, usb_dev, usb_addr,
2308 usb_endp, address, mem_flags);
2310 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2311 dev_err(&u132->platform_dev->dev, "the hardware does no"
2312 "t support PIPE_ISOCHRONOUS\n");
2313 return -EINVAL;
2314 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
2315 u8 address = u132->addr[usb_addr].address;
2316 struct u132_udev *udev = &u132->udev[address];
2317 struct u132_endp *endp = urb->ep->hcpriv;
2318 urb->actual_length = 0;
2319 if (endp) {
2320 unsigned long irqs;
2321 int retval;
2322 spin_lock_irqsave(&endp->queue_lock.slock,
2323 irqs);
2324 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2325 if (retval == 0) {
2326 retval = queue_bulk_on_old_endpoint(
2327 u132, udev, urb,
2328 usb_dev, endp,
2329 usb_addr, usb_endp,
2330 address);
2331 if (retval)
2332 usb_hcd_unlink_urb_from_ep(
2333 hcd, urb);
2335 spin_unlock_irqrestore(&endp->queue_lock.slock,
2336 irqs);
2337 if (retval) {
2338 return retval;
2339 } else {
2340 u132_endp_queue_work(u132, endp, 0);
2341 return 0;
2343 } else if (u132->num_endpoints == MAX_U132_ENDPS) {
2344 return -EINVAL;
2345 } else
2346 return create_endpoint_and_queue_bulk(u132,
2347 udev, urb, usb_dev, usb_addr,
2348 usb_endp, address, mem_flags);
2349 } else {
2350 struct u132_endp *endp = urb->ep->hcpriv;
2351 u16 urb_size = 8;
2352 u8 *b = urb->setup_packet;
2353 int i = 0;
2354 char data[30 * 3 + 4];
2355 char *d = data;
2356 int m = (sizeof(data) - 1) / 3;
2357 int l = 0;
2358 data[0] = 0;
2359 while (urb_size-- > 0) {
2360 if (i > m) {
2361 } else if (i++ < m) {
2362 int w = sprintf(d, " %02X", *b++);
2363 d += w;
2364 l += w;
2365 } else
2366 d += sprintf(d, " ..");
2368 if (endp) {
2369 unsigned long irqs;
2370 int retval;
2371 spin_lock_irqsave(&endp->queue_lock.slock,
2372 irqs);
2373 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2374 if (retval == 0) {
2375 retval = queue_control_on_old_endpoint(
2376 u132, urb, usb_dev,
2377 endp, usb_addr,
2378 usb_endp);
2379 if (retval)
2380 usb_hcd_unlink_urb_from_ep(
2381 hcd, urb);
2383 spin_unlock_irqrestore(&endp->queue_lock.slock,
2384 irqs);
2385 if (retval) {
2386 return retval;
2387 } else {
2388 u132_endp_queue_work(u132, endp, 0);
2389 return 0;
2391 } else if (u132->num_endpoints == MAX_U132_ENDPS) {
2392 return -EINVAL;
2393 } else
2394 return create_endpoint_and_queue_control(u132,
2395 urb, usb_dev, usb_addr, usb_endp,
2396 mem_flags);
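/*
 * Search the endpoint's urb_more overflow list for the urb being
 * dequeued; if found it is removed and given back immediately, otherwise
 * the inconsistency is logged and -EINVAL returned.
 */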
2401 static int dequeue_from_overflow_chain(struct u132 *u132,
2402 struct u132_endp *endp, struct urb *urb)
2404 struct list_head *scan;
2405 struct list_head *head = &endp->urb_more;
2406 list_for_each(scan, head) {
2407 struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
2408 urb_more);
2409 if (urbq->urb == urb) {
2410 struct usb_hcd *hcd = u132_to_hcd(u132);
2411 list_del(scan);
2412 endp->queue_size -= 1;
2413 urb->error_count = 0;
2414 usb_hcd_giveback_urb(hcd, urb, 0);
2415 return 0;
2416 } else
2417 continue;
2419 dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
2420 "[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
2421 "\n", urb, endp->endp_number, endp, endp->ring->number,
2422 endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
2423 endp->usb_endp, endp->usb_addr, endp->queue_size,
2424 endp->queue_next, endp->queue_last);
2425 return -EINVAL;
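/*
 * Dequeue a urb from a specific endpoint. If the urb is at the head of
 * the queue and the endpoint is active, the scheduler is asked to flush
 * it; if it is idle the urb is abandoned directly. Otherwise the urb is
 * located further down the ring buffer (or in the overflow list) and the
 * remaining entries are shuffled up to fill the gap.
 */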
2428 static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2429 struct urb *urb, int status)
2431 unsigned long irqs;
2432 int rc;
2434 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2435 rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
2436 if (rc) {
2437 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2438 return rc;
2440 if (endp->queue_size == 0) {
2441 dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
2442 "=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
2443 endp->endp_number, endp, endp->ring->number,
2444 endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
2445 endp->usb_endp, endp->usb_addr);
2446 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2447 return -EINVAL;
2449 if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
2450 if (endp->active) {
2451 endp->dequeueing = 1;
2452 endp->edset_flush = 1;
2453 u132_endp_queue_work(u132, endp, 0);
2454 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2455 return 0;
2456 } else {
2457 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2458 u132_hcd_abandon_urb(u132, endp, urb, status);
2459 return 0;
2461 } else {
2462 u16 queue_list = 0;
2463 u16 queue_size = endp->queue_size;
2464 u16 queue_scan = endp->queue_next;
2465 struct urb **urb_slot = NULL;
2466 while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
2467 if (urb == endp->urb_list[ENDP_QUEUE_MASK &
2468 ++queue_scan]) {
2469 urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
2470 queue_scan];
2471 break;
2472 } else
2473 continue;
2475 while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
2476 *urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
2477 ++queue_scan];
2478 urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
2479 queue_scan];
2481 if (urb_slot) {
2482 struct usb_hcd *hcd = u132_to_hcd(u132);
2484 usb_hcd_unlink_urb_from_ep(hcd, urb);
2485 endp->queue_size -= 1;
2486 if (list_empty(&endp->urb_more)) {
2487 spin_unlock_irqrestore(&endp->queue_lock.slock,
2488 irqs);
2489 } else {
2490 struct list_head *next = endp->urb_more.next;
2491 struct u132_urbq *urbq = list_entry(next,
2492 struct u132_urbq, urb_more);
2493 list_del(next);
2494 *urb_slot = urbq->urb;
2495 spin_unlock_irqrestore(&endp->queue_lock.slock,
2496 irqs);
2497 kfree(urbq);
2498 } urb->error_count = 0;
2499 usb_hcd_giveback_urb(hcd, urb, status);
2500 return 0;
2501 } else if (list_empty(&endp->urb_more)) {
2502 dev_err(&u132->platform_dev->dev, "urb=%p not found in "
2503 "endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
2504 "=%d size=%d next=%04X last=%04X\n", urb,
2505 endp->endp_number, endp, endp->ring->number,
2506 endp->input ? 'I' : ' ',
2507 endp->output ? 'O' : ' ', endp->usb_endp,
2508 endp->usb_addr, endp->queue_size,
2509 endp->queue_next, endp->queue_last);
2510 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2511 return -EINVAL;
2512 } else {
2513 int retval;
2515 usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
2516 retval = dequeue_from_overflow_chain(u132, endp,
2517 urb);
2518 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2519 return retval;
2524 static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2526 struct u132 *u132 = hcd_to_u132(hcd);
2527 if (u132->going > 2) {
2528 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2529 , u132->going);
2530 return -ENODEV;
2531 } else {
2532 u8 usb_addr = usb_pipedevice(urb->pipe);
2533 u8 usb_endp = usb_pipeendpoint(urb->pipe);
2534 u8 address = u132->addr[usb_addr].address;
2535 struct u132_udev *udev = &u132->udev[address];
2536 if (usb_pipein(urb->pipe)) {
2537 u8 endp_number = udev->endp_number_in[usb_endp];
2538 struct u132_endp *endp = u132->endp[endp_number - 1];
2539 return u132_endp_urb_dequeue(u132, endp, urb, status);
2540 } else {
2541 u8 endp_number = udev->endp_number_out[usb_endp];
2542 struct u132_endp *endp = u132->endp[endp_number - 1];
2543 return u132_endp_urb_dequeue(u132, endp, urb, status);
2548 static void u132_endpoint_disable(struct usb_hcd *hcd,
2549 struct usb_host_endpoint *hep)
2551 struct u132 *u132 = hcd_to_u132(hcd);
2552 if (u132->going > 2) {
2553 dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
2554 ") has been removed %d\n", u132, hcd, hep,
2555 u132->going);
2556 } else {
2557 struct u132_endp *endp = hep->hcpriv;
2558 if (endp)
2559 u132_endp_put_kref(u132, endp);
2563 static int u132_get_frame(struct usb_hcd *hcd)
2565 struct u132 *u132 = hcd_to_u132(hcd);
2566 if (u132->going > 1) {
2567 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2568 , u132->going);
2569 return -ENODEV;
2570 } else if (u132->going > 0) {
2571 dev_err(&u132->platform_dev->dev, "device is being removed\n");
2572 return -ESHUTDOWN;
2573 } else {
2574 int frame = 0;
2575 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
2576 msleep(100);
2577 return frame;
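/*
 * Build the USB hub descriptor for the virtual root hub from the OHCI
 * roothub.a and roothub.b registers (power-on-to-power-good time, port
 * count, power switching/overcurrent characteristics and the
 * device-removable bitmap).
 */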
2581 static int u132_roothub_descriptor(struct u132 *u132,
2582 struct usb_hub_descriptor *desc)
2584 int retval;
2585 u16 temp;
2586 u32 rh_a = -1;
2587 u32 rh_b = -1;
2588 retval = u132_read_pcimem(u132, roothub.a, &rh_a);
2589 if (retval)
2590 return retval;
2591 desc->bDescriptorType = 0x29;
2592 desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
2593 desc->bHubContrCurrent = 0;
2594 desc->bNbrPorts = u132->num_ports;
2595 temp = 1 + (u132->num_ports / 8);
2596 desc->bDescLength = 7 + 2 * temp;
2597 temp = 0;
2598 if (rh_a & RH_A_NPS)
2599 temp |= 0x0002;
2600 if (rh_a & RH_A_PSM)
2601 temp |= 0x0001;
2602 if (rh_a & RH_A_NOCP)
2603 temp |= 0x0010;
2604 else if (rh_a & RH_A_OCPM)
2605 temp |= 0x0008;
2606 desc->wHubCharacteristics = cpu_to_le16(temp);
2607 retval = u132_read_pcimem(u132, roothub.b, &rh_b);
2608 if (retval)
2609 return retval;
2610 memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
2611 desc->bitmap[0] = rh_b & RH_B_DR;
2612 if (u132->num_ports > 7) {
2613 desc->bitmap[1] = (rh_b & RH_B_DR) >> 8;
2614 desc->bitmap[2] = 0xff;
2615 } else
2616 desc->bitmap[1] = 0xff;
2617 return 0;
2620 static int u132_roothub_status(struct u132 *u132, __le32 *desc)
2622 u32 rh_status = -1;
2623 int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
2624 *desc = cpu_to_le32(rh_status);
2625 return ret_status;
2628 static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
2630 if (wIndex == 0 || wIndex > u132->num_ports) {
2631 return -EINVAL;
2632 } else {
2633 int port = wIndex - 1;
2634 u32 rh_portstatus = -1;
2635 int ret_portstatus = u132_read_pcimem(u132,
2636 roothub.portstatus[port], &rh_portstatus);
2637 *desc = cpu_to_le32(rh_portstatus);
2638 if (*(u16 *) (desc + 2)) {
2639 dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
2640 "ge = %08X\n", port, *desc);
2642 return ret_portstatus;
2647 /* this timer value might be vendor-specific ... */
2648 #define PORT_RESET_HW_MSEC 10
2649 #define PORT_RESET_MSEC 10
2650 /* wrap-aware logic morphed from <linux/jiffies.h> */
2651 #define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
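/*
 * Reset a root hub port: wait for any reset in progress to clear, then,
 * while the port still reports a connection, acknowledge the previous
 * reset-change and start another reset, until roughly PORT_RESET_MSEC
 * frames have elapsed (frame numbers wrap, hence tick_before()).
 */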
2652 static int u132_roothub_portreset(struct u132 *u132, int port_index)
2654 int retval;
2655 u32 fmnumber;
2656 u16 now;
2657 u16 reset_done;
2658 retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
2659 if (retval)
2660 return retval;
2661 now = fmnumber;
2662 reset_done = now + PORT_RESET_MSEC;
2663 do {
2664 u32 portstat;
2665 do {
2666 retval = u132_read_pcimem(u132,
2667 roothub.portstatus[port_index], &portstat);
2668 if (retval)
2669 return retval;
2670 if (RH_PS_PRS & portstat)
2671 continue;
2672 else
2673 break;
2674 } while (tick_before(now, reset_done));
2675 if (RH_PS_PRS & portstat)
2676 return -ENODEV;
2677 if (RH_PS_CCS & portstat) {
2678 if (RH_PS_PRSC & portstat) {
2679 retval = u132_write_pcimem(u132,
2680 roothub.portstatus[port_index],
2681 RH_PS_PRSC);
2682 if (retval)
2683 return retval;
2685 } else
2686 break; /* start the next reset,
2687 sleep till it's probably done */
2688 retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
2689 RH_PS_PRS);
2690 if (retval)
2691 return retval;
2692 msleep(PORT_RESET_HW_MSEC);
2693 retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
2694 if (retval)
2695 return retval;
2696 now = fmnumber;
2697 } while (tick_before(now, reset_done));
2698 return 0;
2701 static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
2702 u16 wIndex)
2704 if (wIndex == 0 || wIndex > u132->num_ports) {
2705 return -EINVAL;
2706 } else {
2707 int retval;
2708 int port_index = wIndex - 1;
2709 struct u132_port *port = &u132->port[port_index];
2710 port->Status &= ~(1 << wValue);
2711 switch (wValue) {
2712 case USB_PORT_FEAT_SUSPEND:
2713 retval = u132_write_pcimem(u132,
2714 roothub.portstatus[port_index], RH_PS_PSS);
2715 if (retval)
2716 return retval;
2717 return 0;
2718 case USB_PORT_FEAT_POWER:
2719 retval = u132_write_pcimem(u132,
2720 roothub.portstatus[port_index], RH_PS_PPS);
2721 if (retval)
2722 return retval;
2723 return 0;
2724 case USB_PORT_FEAT_RESET:
2725 retval = u132_roothub_portreset(u132, port_index);
2726 if (retval)
2727 return retval;
2728 return 0;
2729 default:
2730 return -EPIPE;
2735 static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
2736 u16 wIndex)
2738 if (wIndex == 0 || wIndex > u132->num_ports) {
2739 return -EINVAL;
2740 } else {
2741 int port_index = wIndex - 1;
2742 u32 temp;
2743 int retval;
2744 struct u132_port *port = &u132->port[port_index];
2745 port->Status &= ~(1 << wValue);
2746 switch (wValue) {
2747 case USB_PORT_FEAT_ENABLE:
2748 temp = RH_PS_CCS;
2749 break;
2750 case USB_PORT_FEAT_C_ENABLE:
2751 temp = RH_PS_PESC;
2752 break;
2753 case USB_PORT_FEAT_SUSPEND:
2754 temp = RH_PS_POCI;
2755 if ((u132->hc_control & OHCI_CTRL_HCFS)
2756 != OHCI_USB_OPER) {
2757 dev_err(&u132->platform_dev->dev, "TODO resume_"
2758 "root_hub\n");
2760 break;
2761 case USB_PORT_FEAT_C_SUSPEND:
2762 temp = RH_PS_PSSC;
2763 break;
2764 case USB_PORT_FEAT_POWER:
2765 temp = RH_PS_LSDA;
2766 break;
2767 case USB_PORT_FEAT_C_CONNECTION:
2768 temp = RH_PS_CSC;
2769 break;
2770 case USB_PORT_FEAT_C_OVER_CURRENT:
2771 temp = RH_PS_OCIC;
2772 break;
2773 case USB_PORT_FEAT_C_RESET:
2774 temp = RH_PS_PRSC;
2775 break;
2776 default:
2777 return -EPIPE;
2779 retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
2780 temp);
2781 if (retval)
2782 return retval;
2783 return 0;
2788 /* the virtual root hub timer IRQ checks for hub status*/
2789 static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
2791 struct u132 *u132 = hcd_to_u132(hcd);
2792 if (u132->going > 1) {
2793 dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
2794 "ed %d\n", hcd, u132->going);
2795 return -ENODEV;
2796 } else if (u132->going > 0) {
2797 dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
2798 "ed\n", hcd);
2799 return -ESHUTDOWN;
2800 } else {
2801 int i, changed = 0, length = 1;
2802 if (u132->flags & OHCI_QUIRK_AMD756) {
2803 if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
2804 dev_err(&u132->platform_dev->dev, "bogus NDP, r"
2805 "ereads as NDP=%d\n",
2806 u132->hc_roothub_a & RH_A_NDP);
2807 goto done;
2810 if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
2811 buf[0] = changed = 1;
2812 else
2813 buf[0] = 0;
2814 if (u132->num_ports > 7) {
2815 buf[1] = 0;
2816 length++;
2818 for (i = 0; i < u132->num_ports; i++) {
2819 if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
2820 RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
2821 RH_PS_PRSC)) {
2822 changed = 1;
2823 if (i < 7)
2824 buf[0] |= 1 << (i + 1);
2825 else
2826 buf[1] |= 1 << (i - 7);
2827 continue;
2829 if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
2830 continue;
2832 if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
2833 continue;
2835 done:
2836 return changed ? length : 0;
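/*
 * hc_driver .hub_control callback: serialise hub class requests under
 * sw_lock and hand the port requests to the u132_roothub_* helpers above.
 * A failed register access disables the controller and marks it as going
 * away; unknown requests stall with -EPIPE.
 */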
2840 static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2841 u16 wIndex, char *buf, u16 wLength)
2843 struct u132 *u132 = hcd_to_u132(hcd);
2844 if (u132->going > 1) {
2845 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2846 , u132->going);
2847 return -ENODEV;
2848 } else if (u132->going > 0) {
2849 dev_err(&u132->platform_dev->dev, "device is being removed\n");
2850 return -ESHUTDOWN;
2851 } else {
2852 int retval = 0;
2853 mutex_lock(&u132->sw_lock);
2854 switch (typeReq) {
2855 case ClearHubFeature:
2856 switch (wValue) {
2857 case C_HUB_OVER_CURRENT:
2858 case C_HUB_LOCAL_POWER:
2859 break;
2860 default:
2861 goto stall;
2863 break;
2864 case SetHubFeature:
2865 switch (wValue) {
2866 case C_HUB_OVER_CURRENT:
2867 case C_HUB_LOCAL_POWER:
2868 break;
2869 default:
2870 goto stall;
2872 break;
2873 case ClearPortFeature:{
2874 retval = u132_roothub_clearportfeature(u132,
2875 wValue, wIndex);
2876 if (retval)
2877 goto error;
2878 break;
2880 case GetHubDescriptor:{
2881 retval = u132_roothub_descriptor(u132,
2882 (struct usb_hub_descriptor *)buf);
2883 if (retval)
2884 goto error;
2885 break;
2887 case GetHubStatus:{
2888 retval = u132_roothub_status(u132,
2889 (__le32 *) buf);
2890 if (retval)
2891 goto error;
2892 break;
2894 case GetPortStatus:{
2895 retval = u132_roothub_portstatus(u132,
2896 (__le32 *) buf, wIndex);
2897 if (retval)
2898 goto error;
2899 break;
2901 case SetPortFeature:{
2902 retval = u132_roothub_setportfeature(u132,
2903 wValue, wIndex);
2904 if (retval)
2905 goto error;
2906 break;
2908 default:
2909 goto stall;
2910 error:
2911 u132_disable(u132);
2912 u132->going = 1;
2913 break;
2914 stall:
2915 retval = -EPIPE;
2916 break;
2918 mutex_unlock(&u132->sw_lock);
2919 return retval;
2923 static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
2925 struct u132 *u132 = hcd_to_u132(hcd);
2926 if (u132->going > 1) {
2927 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2928 , u132->going);
2929 return -ENODEV;
2930 } else if (u132->going > 0) {
2931 dev_err(&u132->platform_dev->dev, "device is being removed\n");
2932 return -ESHUTDOWN;
2933 } else
2934 return 0;
2938 #ifdef CONFIG_PM
2939 static int u132_bus_suspend(struct usb_hcd *hcd)
2941 struct u132 *u132 = hcd_to_u132(hcd);
2942 if (u132->going > 1) {
2943 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2944 , u132->going);
2945 return -ENODEV;
2946 } else if (u132->going > 0) {
2947 dev_err(&u132->platform_dev->dev, "device is being removed\n");
2948 return -ESHUTDOWN;
2949 } else
2950 return 0;
2953 static int u132_bus_resume(struct usb_hcd *hcd)
2955 struct u132 *u132 = hcd_to_u132(hcd);
2956 if (u132->going > 1) {
2957 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
2958 , u132->going);
2959 return -ENODEV;
2960 } else if (u132->going > 0) {
2961 dev_err(&u132->platform_dev->dev, "device is being removed\n");
2962 return -ESHUTDOWN;
2963 } else
2964 return 0;
2967 #else
2968 #define u132_bus_suspend NULL
2969 #define u132_bus_resume NULL
2970 #endif
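/*
 * hc_driver ops for the U132. .irq is NULL, presumably because register
 * access and transfer completion travel over the ftdi-elan USB transport
 * and are driven from the delayed-work schedulers rather than a local
 * interrupt line.
 */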
2971 static struct hc_driver u132_hc_driver = {
2972 .description = hcd_name,
2973 .hcd_priv_size = sizeof(struct u132),
2974 .irq = NULL,
2975 .flags = HCD_USB11 | HCD_MEMORY,
2976 .reset = u132_hcd_reset,
2977 .start = u132_hcd_start,
2978 .stop = u132_hcd_stop,
2979 .urb_enqueue = u132_urb_enqueue,
2980 .urb_dequeue = u132_urb_dequeue,
2981 .endpoint_disable = u132_endpoint_disable,
2982 .get_frame_number = u132_get_frame,
2983 .hub_status_data = u132_hub_status_data,
2984 .hub_control = u132_hub_control,
2985 .bus_suspend = u132_bus_suspend,
2986 .bus_resume = u132_bus_resume,
2987 .start_port_reset = u132_start_port_reset,
2988 };
2990 /*
2991  * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
2992  * is held for writing, thus this module must not call usb_remove_hcd()
2993  * synchronously - but instead should immediately stop activity to the
2994  * device and asynchronously call usb_remove_hcd()
2995  */
2996 static int __devexit u132_remove(struct platform_device *pdev)
2998 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2999 if (hcd) {
3000 struct u132 *u132 = hcd_to_u132(hcd);
3001 if (u132->going++ > 1) {
3002 dev_err(&u132->platform_dev->dev, "already being remove"
3003 "d\n");
3004 return -ENODEV;
3005 } else {
3006 int rings = MAX_U132_RINGS;
3007 int endps = MAX_U132_ENDPS;
3008 dev_err(&u132->platform_dev->dev, "removing device u132"
3009 ".%d\n", u132->sequence_num);
3010 msleep(100);
3011 mutex_lock(&u132->sw_lock);
3012 u132_monitor_cancel_work(u132);
3013 while (rings-- > 0) {
3014 struct u132_ring *ring = &u132->ring[rings];
3015 u132_ring_cancel_work(u132, ring);
3016 } while (endps-- > 0) {
3017 struct u132_endp *endp = u132->endp[endps];
3018 if (endp)
3019 u132_endp_cancel_work(u132, endp);
3021 u132->going += 1;
3022 printk(KERN_INFO "removing device u132.%d\n",
3023 u132->sequence_num);
3024 mutex_unlock(&u132->sw_lock);
3025 usb_remove_hcd(hcd);
3026 u132_u132_put_kref(u132);
3027 return 0;
3029 } else
3030 return 0;
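/*
 * One-time initialisation of the u132 state: reset every ring, port, addr,
 * udev and endp slot to its defaults and hook up the delayed work items.
 */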
3033 static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3035 int rings = MAX_U132_RINGS;
3036 int ports = MAX_U132_PORTS;
3037 int addrs = MAX_U132_ADDRS;
3038 int udevs = MAX_U132_UDEVS;
3039 int endps = MAX_U132_ENDPS;
3040 u132->board = pdev->dev.platform_data;
3041 u132->platform_dev = pdev;
3042 u132->power = 0;
3043 u132->reset = 0;
3044 mutex_init(&u132->sw_lock);
3045 mutex_init(&u132->scheduler_lock);
3046 while (rings-- > 0) {
3047 struct u132_ring *ring = &u132->ring[rings];
3048 ring->u132 = u132;
3049 ring->number = rings + 1;
3050 ring->length = 0;
3051 ring->curr_endp = NULL;
3052 INIT_DELAYED_WORK(&ring->scheduler,
3053 u132_hcd_ring_work_scheduler);
3055 mutex_lock(&u132->sw_lock);
3056 INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
3057 while (ports-- > 0) {
3058 struct u132_port *port = &u132->port[ports];
3059 port->u132 = u132;
3060 port->reset = 0;
3061 port->enable = 0;
3062 port->power = 0;
3063 port->Status = 0;
3065 while (addrs-- > 0) {
3066 struct u132_addr *addr = &u132->addr[addrs];
3067 addr->address = 0;
3069 while (udevs-- > 0) {
3070 struct u132_udev *udev = &u132->udev[udevs];
3071 int i = ARRAY_SIZE(udev->endp_number_in);
3072 int o = ARRAY_SIZE(udev->endp_number_out);
3073 udev->usb_device = NULL;
3074 udev->udev_number = 0;
3075 udev->usb_addr = 0;
3076 udev->portnumber = 0;
3077 while (i-- > 0)
3078 udev->endp_number_in[i] = 0;
3080 while (o-- > 0)
3081 udev->endp_number_out[o] = 0;
3084 while (endps-- > 0)
3085 u132->endp[endps] = NULL;
3087 mutex_unlock(&u132->sw_lock);
3088 return;
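/*
 * Probe: read a few OHCI registers through the ftdi_* accessors to check
 * that the adapter responds, refuse any device that advertises a dma_mask
 * (apparently because data is shuttled by the ftdi-elan module rather than
 * by DMA), then create, initialise and add the usb_hcd.
 */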
3091 static int __devinit u132_probe(struct platform_device *pdev)
3093 struct usb_hcd *hcd;
3094 int retval;
3095 u32 control;
3096 u32 rh_a = -1;
3097 u32 num_ports;
3099 msleep(100);
3100 if (u132_exiting > 0)
3101 return -ENODEV;
3103 retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
3104 if (retval)
3105 return retval;
3106 retval = ftdi_read_pcimem(pdev, control, &control);
3107 if (retval)
3108 return retval;
3109 retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
3110 if (retval)
3111 return retval;
3112 num_ports = rh_a & RH_A_NDP; /* refuse to confuse usbcore */
3113 if (pdev->dev.dma_mask)
3114 return -EINVAL;
3116 hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
3117 if (!hcd) {
3118 printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
3120 ftdi_elan_gone_away(pdev);
3121 return -ENOMEM;
3122 } else {
3123 int retval = 0;
3124 struct u132 *u132 = hcd_to_u132(hcd);
3125 hcd->rsrc_start = 0;
3126 mutex_lock(&u132_module_lock);
3127 list_add_tail(&u132->u132_list, &u132_static_list);
3128 u132->sequence_num = ++u132_instances;
3129 mutex_unlock(&u132_module_lock);
3130 u132_u132_init_kref(u132);
3131 u132_initialise(u132, pdev);
3132 hcd->product_desc = "ELAN U132 Host Controller";
3133 retval = usb_add_hcd(hcd, 0, 0);
3134 if (retval != 0) {
3135 dev_err(&u132->platform_dev->dev, "init error %d\n",
3136 retval);
3137 u132_u132_put_kref(u132);
3138 return retval;
3139 } else {
3140 u132_monitor_queue_work(u132, 100);
3141 return 0;
3147 #ifdef CONFIG_PM
3148 /* for this device there's no useful distinction between the controller
3149 * and its root hub, except that the root hub only gets direct PM calls
3150  * when CONFIG_USB_SUSPEND is enabled.
3151  */
3152 static int u132_suspend(struct platform_device *pdev, pm_message_t state)
3154 struct usb_hcd *hcd = platform_get_drvdata(pdev);
3155 struct u132 *u132 = hcd_to_u132(hcd);
3156 if (u132->going > 1) {
3157 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
3158 , u132->going);
3159 return -ENODEV;
3160 } else if (u132->going > 0) {
3161 dev_err(&u132->platform_dev->dev, "device is being removed\n");
3162 return -ESHUTDOWN;
3163 } else {
3164 int retval = 0, ports;
3166 switch (state.event) {
3167 case PM_EVENT_FREEZE:
3168 retval = u132_bus_suspend(hcd);
3169 break;
3170 case PM_EVENT_SUSPEND:
3171 case PM_EVENT_HIBERNATE:
3172 ports = MAX_U132_PORTS;
3173 while (ports-- > 0) {
3174 port_power(u132, ports, 0);
3176 break;
3178 return retval;
3182 static int u132_resume(struct platform_device *pdev)
3184 struct usb_hcd *hcd = platform_get_drvdata(pdev);
3185 struct u132 *u132 = hcd_to_u132(hcd);
3186 if (u132->going > 1) {
3187 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
3188 , u132->going);
3189 return -ENODEV;
3190 } else if (u132->going > 0) {
3191 dev_err(&u132->platform_dev->dev, "device is being removed\n");
3192 return -ESHUTDOWN;
3193 } else {
3194 int retval = 0;
3195 if (!u132->port[0].power) {
3196 int ports = MAX_U132_PORTS;
3197 while (ports-- > 0) {
3198 port_power(u132, ports, 1);
3200 retval = 0;
3201 } else {
3202 retval = u132_bus_resume(hcd);
3204 return retval;
3208 #else
3209 #define u132_suspend NULL
3210 #define u132_resume NULL
3211 #endif
3212 /*
3213  * this driver is loaded explicitly by ftdi_u132
3214  *
3215  * the platform_driver struct is static because it is per type of module
3216  */
3217 static struct platform_driver u132_platform_driver = {
3218 .probe = u132_probe,
3219 .remove = __devexit_p(u132_remove),
3220 .suspend = u132_suspend,
3221 .resume = u132_resume,
3222 .driver = {
3223 .name = (char *)hcd_name,
3224 .owner = THIS_MODULE,
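/*
 * Module init: set up the global list and lock, create the single-threaded
 * "u132" workqueue and register the platform driver.
 */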
3227 static int __init u132_hcd_init(void)
3229 int retval;
3230 INIT_LIST_HEAD(&u132_static_list);
3231 u132_instances = 0;
3232 u132_exiting = 0;
3233 mutex_init(&u132_module_lock);
3234 if (usb_disabled())
3235 return -ENODEV;
3236 printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__,
3237 __DATE__);
3238 workqueue = create_singlethread_workqueue("u132");
3239 retval = platform_driver_register(&u132_platform_driver);
3240 return retval;
3244 module_init(u132_hcd_init);
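/*
 * Module exit: flag that we are exiting, unregister every outstanding
 * platform device and the platform driver, then wait for all instances to
 * drop before tearing down the workqueue.
 */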
3245 static void __exit u132_hcd_exit(void)
3247 struct u132 *u132;
3248 struct u132 *temp;
3249 mutex_lock(&u132_module_lock);
3250 u132_exiting += 1;
3251 mutex_unlock(&u132_module_lock);
3252 list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
3253 platform_device_unregister(u132->platform_dev);
3255 platform_driver_unregister(&u132_platform_driver);
3256 printk(KERN_INFO "u132-hcd driver deregistered\n");
3257 wait_event(u132_hcd_wait, u132_instances == 0);
3258 flush_workqueue(workqueue);
3259 destroy_workqueue(workqueue);
3263 module_exit(u132_hcd_exit);
3264 MODULE_LICENSE("GPL");
3265 MODULE_ALIAS("platform:u132_hcd");